Compare commits

1.1.13-bet...785c987ba6 (1173 commits)
37 .github/ISSUE_TEMPLATE/bug_report.md vendored Normal file
@@ -0,0 +1,37 @@
---
name: Bug report
about: Report a bug
title: ''
labels: bug
assignees: ''

---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error

**Expected behavior**
A clear and concise description of what you expected to happen.

**Attach logs**
`%LOCALAPPDATA%\ComicTagger\logs` on Windows
`~/Library/Logs/ComicTagger` on macOS
`~/.cache/ComicTagger/log` on Linux

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Desktop (please complete the following information):**
- OS: [e.g. Fedora]
- Version [e.g. 1.6.0b2]
- Where did you install ComicTagger from? [e.g. releases page]

**Additional context**
Add any other context about the problem here.

20 .github/ISSUE_TEMPLATE/feature_request.md vendored Normal file
@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: feature-request
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.

98 .github/workflows/build.yaml vendored Normal file
@@ -0,0 +1,98 @@
name: CI

env:
  LC_COLLATE: en_US.UTF-8
on:
  pull_request:
  push:
    branches:
      - '**'
    tags-ignore:
      - '**'
jobs:
  lint:
    permissions:
      checks: write
      contents: read
      pull-requests: write
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        python-version: [3.9]
        os: [ubuntu-latest]

    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install build dependencies
        run: |
          python -m pip install flake8

      - uses: reviewdog/action-setup@v1
        with:
          reviewdog_version: nightly
      - run: flake8 | reviewdog -f=flake8 -reporter=github-pr-review -tee -level=error -fail-on-error
        env:
          REVIEWDOG_GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }}

  build-and-test:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        python-version: [3.9, 3.13]
        os: [ubuntu-22.04, macos-13, macos-14, windows-latest]

    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install tox
        run: |
          python -m pip install --upgrade --upgrade-strategy eager tox

      - name: Install macos dependencies
        run: |
          brew upgrade icu4c pkg-config || brew install icu4c pkg-config
        if: runner.os == 'macOS'

      - name: Install linux dependencies
        run: |
          sudo apt-get update && sudo apt-get upgrade && sudo apt-get install pkg-config libicu-dev libqt6gui6 libfuse2 desktop-file-utils
        if: runner.os == 'Linux'

      - name: Build and install PyPi packages
        run: |
          export PKG_CONFIG_PATH="/usr/local/opt/icu4c/lib/pkgconfig:/opt/homebrew/opt/icu4c/lib/pkgconfig${PKG_CONFIG_PATH+:$PKG_CONFIG_PATH}";
          export PATH="/usr/local/opt/icu4c/bin:/usr/local/opt/icu4c/sbin${PATH+:$PATH}"
          python -m tox r -m build
        shell: bash

      - name: Archive production artifacts
        uses: actions/upload-artifact@v4
        with:
          name: "${{ format('ComicTagger-{0}', matrix.os) }}"
          path: |
            dist/*.whl
            dist/binary/*.zip
            dist/binary/*.tar.gz
            dist/binary/*.dmg
            dist/binary/*.AppImage
        if: matrix.python == 3.12

      - name: PyTest
        run: |
          python -m tox p -e py${{ matrix.python-version }}-none,py${{ matrix.python-version }}-gui,py${{ matrix.python-version }}-7z,py${{ matrix.python-version }}-cbr,py${{ matrix.python-version }}-all
        shell: bash

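The workflow above drives everything through tox, so its build and test steps can be approximated on a developer machine. A minimal sketch, assuming a checkout of the repository and one concrete Python version substituted for the matrix variable (the environment names come from the PyTest step above):

```bash
# mirror the CI steps locally: install tox the same way the workflow does
python -m pip install --upgrade --upgrade-strategy eager tox

# build step: run the environments grouped under the "build" label in tox.ini
python -m tox r -m build

# test step: a subset of the per-feature environments CI runs
python -m tox p -e py3.9-none,py3.9-gui
```
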
43 .github/workflows/contributions.yaml vendored Normal file
@@ -0,0 +1,43 @@
name: Contributions
on:
  push:
    branches:
      - 'develop'
    tags-ignore:
      - '**'

jobs:
  contrib-readme-job:
    permissions:
      contents: write
    runs-on: ubuntu-latest
    env:
      CI_COMMIT_AUTHOR: github-actions[bot]
      CI_COMMIT_EMAIL: <41898282+github-actions[bot]@users.noreply.github.com>
      CI_COMMIT_MESSAGE: Update AUTHORS
    name: A job to automate contrib in readme
    steps:
      - name: Contribute List
        uses: akhilmhdh/contributors-readme-action@v2.3.6
        with:
          use_username: true
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Update AUTHORS
        run: |
          git config --global log.mailmap true
          git log --reverse '--format=%aN <%aE>' | cat -n | sort -uk2 | sort -n | cut -f2- >AUTHORS

      - name: Commit and push AUTHORS
        run: |
          if ! git diff --exit-code; then
            git pull
            git config --global user.name "${{ env.CI_COMMIT_AUTHOR }}"
            git config --global user.email "${{ env.CI_COMMIT_EMAIL }}"
            git commit -a -m "${{ env.CI_COMMIT_MESSAGE }}"
            git push
          fi

76 .github/workflows/package.yaml vendored Normal file
@@ -0,0 +1,76 @@
name: Package

env:
  LC_COLLATE: en_US.UTF-8
on:
  push:
    tags:
      - "[0-9]+.[0-9]+.[0-9]+*"
jobs:
  package:
    permissions:
      # IMPORTANT: this permission is mandatory for trusted publishing
      id-token: write
      contents: write
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        python-version: [3.13]
        os: [ubuntu-22.04, ubuntu-22.04-arm, macos-13, macos-14, windows-latest]

    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install tox
        run: |
          python -m pip install --upgrade --upgrade-strategy eager tox

      - name: Install macos dependencies
        run: |
          brew upgrade icu4c pkg-config || brew install icu4c pkg-config
        if: runner.os == 'macOS'

      - name: Install linux dependencies
        run: |
          sudo apt-get update && sudo apt-get upgrade && sudo apt-get install pkg-config libicu-dev libqt6gui6 libfuse2 desktop-file-utils
        if: runner.os == 'Linux'

      - name: Build, Install and Test PyPi packages
        run: |
          export PKG_CONFIG_PATH="/usr/local/opt/icu4c/lib/pkgconfig:/opt/homebrew/opt/icu4c/lib/pkgconfig${PKG_CONFIG_PATH+:$PKG_CONFIG_PATH}";
          export PATH="/usr/local/opt/icu4c/bin:/usr/local/opt/icu4c/sbin${PATH+:$PATH}"
          python -m tox p

      - name: Release PyPi package
        run: |
          python -m tox r -e pypi-upload
        shell: bash
        if: matrix.os == 'ubuntu-22.04'

      - name: Get release name
        shell: bash
        run: |
          git fetch --depth=1 origin +refs/tags/*:refs/tags/* # github is dumb
          echo "release_name=$(git tag -l --format "%(refname:strip=2): %(contents:lines=1)" ${{ github.ref_name }})" >> $GITHUB_ENV

      - name: Release
        uses: softprops/action-gh-release@v2
        if: startsWith(github.ref, 'refs/tags/')
        with:
          name: "${{ env.release_name }}"
          prerelease: "${{ contains(github.ref, '-') }}" # alpha-releases should be 1.3.0-alpha.x full releases should be 1.3.0
          draft: false
          # upload the single application zip file for each OS and include the wheel built on linux
          files: |
            dist/binary/*.zip
            dist/binary/*.tar.gz
            dist/binary/*.dmg
            dist/binary/*.AppImage
            dist/*${{ fromJSON('["never", ""]')[matrix.os == 'ubuntu-22.04'] }}.whl

160 .gitignore vendored Normal file
@@ -0,0 +1,160 @@
# generated by setuptools_scm
ctversion.py

# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion

*.iml

## Directory-based project format:
.idea/

### Other editors
.*.swp
nbproject/
.vscode

comictaggerlib/_version.py
*.exe
*.zip

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# for testing
temp/

9 .mailmap Normal file
@@ -0,0 +1,9 @@
Andrew W. Buchanan <buchanan@difference.com>
Davide Romanini <d.romanini@cineca.it> <davide.romanini@gmail.com>
Davide Romanini <d.romanini@cineca.it> <user159033@92-63-141-211.rdns.melbourne.co.uk>
Michael Fitzurka <MichaelFitzurka@users.noreply.github.com> <MichaelFitzurka@github.com>
Timmy Welch <timmy@narnian.us>
beville <beville@users.noreply.github.com> <(no author)@6c5673fe-1810-88d6-992b-cd32ca31540c>
beville <beville@users.noreply.github.com> <beville@6c5673fe-1810-88d6-992b-cd32ca31540c>
beville <beville@users.noreply.github.com> <beville@gmail.com@6c5673fe-1810-88d6-992b-cd32ca31540c>
beville <beville@users.noreply.github.com> <beville@users.noreply.github.com>

46 .pre-commit-config.yaml Normal file
@@ -0,0 +1,46 @@
exclude: ^(scripts|comictaggerlib/graphics/resources.py)
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-yaml
      - id: debug-statements
      - id: name-tests-test
      - id: requirements-txt-fixer
  - repo: https://github.com/asottile/setup-cfg-fmt
    rev: v2.8.0
    hooks:
      - id: setup-cfg-fmt
  - repo: https://github.com/asottile/pyupgrade
    rev: v3.19.1
    hooks:
      - id: pyupgrade
        args: [--py39-plus]
  - repo: https://github.com/PyCQA/autoflake
    rev: v2.3.1
    hooks:
      - id: autoflake
        args: [-i, --remove-all-unused-imports, --ignore-init-module-imports]
  - repo: https://github.com/PyCQA/isort
    rev: 6.0.1
    hooks:
      - id: isort
        args: [--af, --add-import, 'from __future__ import annotations']
  - repo: https://github.com/psf/black
    rev: 25.1.0
    hooks:
      - id: black
  - repo: https://github.com/PyCQA/flake8
    rev: 7.2.0
    hooks:
      - id: flake8
        additional_dependencies: [flake8-encodings, flake8-builtins, flake8-print, flake8-no-nested-comprehensions]
  - repo: https://github.com/pre-commit/mirrors-mypy
    rev: v1.15.0
    hooks:
      - id: mypy
        additional_dependencies: [types-setuptools, types-requests, settngs>=0.10.4, pillow>=9.1.0]
ci:
  skip: [mypy]

23 AUTHORS Normal file
@@ -0,0 +1,23 @@
beville <beville@users.noreply.github.com>
Davide Romanini <d.romanini@cineca.it>
fcanc <f.canc@icloud.com>
Alban Seurat <alkpone@alkpone.com>
tlc <tlc@users.noreply.github.com>
Marek Pawlak <francuz14@gmail.com>
Timmy Welch <timmy@narnian.us>
J.P. Cranford <philipcranford4@gmail.com>
thFrgttn <39759781+thFrgttn@users.noreply.github.com>
Andrew W. Buchanan <buchanan@difference.com>
Michael Fitzurka <MichaelFitzurka@users.noreply.github.com>
Richard Haussmann <richard.haussmann@gmail.com>
Mizaki <jinxybob@hotmail.com>
Xavier Jouvenot <x.jouvenot@gmail.com>
github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Ben Longman <deck@steamdeck.lan>
Sven Hesse <drmccoy@drmccoy.de>
pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
kcgthb <kcgthb@users.noreply.github.com>
Kilian Cavalotti <kcgthb@users.noreply.github.com>
David Bugl <david.bugl@gmx.at>
HSN <64664577+N-Hertstein@users.noreply.github.com>
Emmanuel Ferdman <emmanuelferdman@gmail.com>

98 CONTRIBUTING.md Normal file
@@ -0,0 +1,98 @@
# How to contribute

If you're not sure what you can do, need to ask a question, or just want to talk about ComicTagger, head over to the [discussions tab](https://github.com/comictagger/comictagger/discussions/categories/general) and start a discussion.

## Tests

We have tests written using pytest! Some of them even pass! If you are contributing code, any tests you can write are appreciated.

A great place to start is extending the tests that are already made.

For example, the file tests/filenames.py has lists of filenames to be parsed, in the format:
```py
pytest.param(
    "Star Wars - War of the Bounty Hunters - IG-88 (2021) (Digital) (Kileko-Empire).cbz",
    "number ends series, no-issue",
    {
        "issue": "",
        "series": "Star Wars - War of the Bounty Hunters - IG-88",
        "volume": "",
        "year": "2021",
        "remainder": "(Digital) (Kileko-Empire)",
        "issue_count": "",
    },
    marks=pytest.mark.xfail,
)
```

A test consists of 3-4 parts:
1. The filename to be parsed
2. The reason it might fail
3. What the result of parsing the filename should be
4. `marks=pytest.mark.xfail`: this marks the test as expected to fail

If you are not comfortable creating a pull request, you can [open an issue](https://github.com/comictagger/comictagger/issues/new/choose) or [start a discussion](https://github.com/comictagger/comictagger/discussions/new)

## Submitting changes

Please open a [GitHub Pull Request](https://github.com/comictagger/comictagger/pull/new/develop) with a clear list of what you've done (read more about [pull requests](http://help.github.com/pull-requests/)). When you send a pull request, we will love you forever if you include tests. We can always use more test coverage. Please run the code tools below and make sure all of your commits are atomic (one feature per commit).

## Contributing Code

Currently only Python 3.9 is supported; however, 3.10 will probably work if you try it.

Those on Linux should install `Pillow` from the system package manager if possible, and if using the GUI, `PyQt6` should also be installed from the system package manager.

Those on macOS will need to ensure that they are using python3 in x86 mode, either by installing an x86-only version of Python or by using the universal installer and using `python3-intel64` instead of `python3`.

1. Clone the repository
```
git clone https://github.com/comictagger/comictagger.git
```

2. It is preferred to use a virtual env for running from source:

```
python3 -m venv venv
```

3. Activate the virtual env:
```
. venv/bin/activate
```
or if on Windows PowerShell:
```
. venv/Scripts/Activate.ps1
```

4. Install tox:
```bash
pip install tox
```

5. If you are on an M1 Mac, you will need to export two environment variables for tests to pass:
```
export tox_python=python3.9-intel64
export tox_env=m1env
```

6. Install ComicTagger:
```
tox run -e venv
```

7. Make your changes
8. Build to ensure that your changes work; this will produce a binary build in the dist folder:
```bash
tox run -m build
```

The build runs these formatters and linters automatically:

- setup-cfg-fmt: formats the setup.cfg file
- autoflake: removes unused imports
- isort: sorts imports so that you can always find where an import is located
- black: formats all of the code consistently so there are no surprises
- flake8: checks for code quality and style (warns for unused imports and similar issues)
- mypy: checks the types of variables and functions to catch errors
- pytest: runs tests for ComicTagger functionality

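These same checks can also be run locally before committing. A minimal sketch using the standard pre-commit workflow, assuming the `.pre-commit-config.yaml` shown earlier in this diff is present in the working tree:

```bash
# one-time setup: install the pre-commit tool and register it as a git hook
pip install pre-commit
pre-commit install

# run every configured hook against the whole tree, as CI would
pre-commit run --all-files
```
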
202 LICENSE Normal file
@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -1,4 +0,0 @@
include README.txt
include release_notes.txt
include requirements.txt
recursive-include scripts *.py *.txt

61 Makefile
@@ -1,61 +0,0 @@
TAGGER_BASE ?= $(HOME)/Dropbox/tagger/comictagger
TAGGER_SRC := $(TAGGER_BASE)/comictaggerlib
VERSION_STR := $(shell grep version $(TAGGER_SRC)/ctversion.py| cut -d= -f2 | sed 's/\"//g')
PASSWORD := $(shell cat $(TAGGER_BASE)/project_password.txt)
UPLOAD_TOOL := $(TAGGER_BASE)/google/googlecode_upload.py
all: clean

clean:
	rm -rf *~ *.pyc *.pyo
	rm -rf scripts/*.pyc
	cd comictaggerlib; rm -f *~ *.pyc *.pyo
	rm -rf dist MANIFEST
	rm -rf *.deb
	rm -rf logdict*.log
	make -C mac clean
	make -C windows clean
	rm -rf build

pydist:
	mkdir -p release
	rm -f release/*.zip
	python setup.py sdist --formats=zip #,gztar
	mv dist/comictagger-$(VERSION_STR).zip release
	@echo When satisfied with release, do this:
	@echo make svn_tag

remove_test_install:
	sudo rm -rf /usr/local/bin/comictagger.py
	sudo rm -rf /usr/local/lib/python2.7/dist-packages/comictagger*

#deb:
#	fpm -s python -t deb \
#	-n 'comictagger' \
#	--category 'utilities' \
#	--maintainer 'comictagger@gmail.com' \
#	--after-install debian_scripts/after_install.sh \
#	--before-remove debian_scripts/before_remove.sh \
#	-d 'python >= 2.6' \
#	-d 'python < 2.8' \
#	-d 'python-imaging' \
#	-d 'python-bs4' \
#	--deb-suggests 'rar' \
#	--deb-suggests 'unrar-free' \
#	--python-install-bin /usr/share/comictagger \
#	--python-install-lib /usr/share/comictagger \
#	setup.py
#
#	# For now, don't require PyQt, since command-line is available without it
#	#-d 'python-qt4 >= 4.8'

upload:
	#$(UPLOAD_TOOL) -p comictagger -s "ComicTagger $(VERSION_STR) Source" -l Featured,Type-Source -u beville -w $(PASSWORD) "release/comictagger-$(VERSION_STR).zip"
	#$(UPLOAD_TOOL) -p comictagger -s "ComicTagger $(VERSION_STR) Mac OS X" -l Featured,Type-Archive -u beville -w $(PASSWORD) "release/ComicTagger-$(VERSION_STR).dmg"
	#$(UPLOAD_TOOL) -p comictagger -s "ComicTagger $(VERSION_STR) Windows" -l Featured,Type-Installer -u beville -w $(PASSWORD) "release/ComicTagger v$(VERSION_STR).exe"
	#python setup.py register
	python setup.py sdist --formats=zip upload

svn_tag:
	svn copy https://comictagger.googlecode.com/svn/trunk \
		https://comictagger.googlecode.com/svn/tags/$(VERSION_STR) -m "Release $(VERSION_STR)"

221
README.md
Normal file
@@ -0,0 +1,221 @@
[](https://github.com/comictagger/comictagger/actions/workflows/build.yaml)
[](https://github.com/comictagger/comictagger/releases/latest)
[](https://pypi.org/project/comictagger/)
[](https://pypistats.org/packages/comictagger)
[](https://community.chocolatey.org/packages/comictagger)
[](https://github.com/microsoft/winget-pkgs/tree/master/manifests/c/ComicTagger/ComicTagger)
[](https://opensource.org/licenses/Apache-2.0)

[](https://github.com/comictagger/comictagger/discussions)
[](https://gitter.im/comictagger/community)
[](https://groups.google.com/forum/#!forum/comictagger)
[](https://twitter.com/comictagger)
[](https://www.facebook.com/ComicTagger-139615369550787/)

# ComicTagger

ComicTagger is a **multi-platform** app for **writing metadata to digital comics**, written in Python and PyQt.



## Features

* Runs on macOS, Microsoft Windows, and Linux systems
* Get comic information from [Comic Vine](https://comicvine.gamespot.com/)
* **Automatic issue matching** using advanced image processing techniques
* **Batch processing** in the GUI for tagging hundreds or more comics at a time
* Support for **ComicRack** and **ComicBookLover** tagging formats
* Native full support for **CBZ** digital comics
* Native read-only support for **CBR** digital comics; full support can be enabled by installing additional [rar tools](https://www.rarlab.com/download.htm)
* Command line interface (CLI) enabling **custom scripting** and **batch operations on large collections**

For details, screenshots, and more, visit [the Wiki](https://github.com/comictagger/comictagger/wiki)


## Installation

### Binaries

Windows, Linux and macOS binaries are provided on the [Releases Page](https://github.com/comictagger/comictagger/releases).

Just unzip the archive in any folder and run; no additional installation steps are required.

### PIP installation

A pip package is provided; you can install it with:

```
$ pip3 install comictagger[GUI]
```

There are optional dependencies. You can install them by specifying one or more of them in brackets, e.g. `comictagger[CBR,GUI]` (a full command example follows the list below).

Optional dependencies:
1. `ICU`: Ensures that comic pages are sorted correctly. This should always be installed. *Currently only exists in the latest alpha release.*
1. `CBR`: Provides support for CBR/RAR files.
1. `GUI`: Installs the GUI.
1. `7Z`: Provides support for CB7/7Z files.
1. `all`: Installs all of the above optional dependencies.
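
For example, to install the GUI together with CBR and ICU support in one step (any combination of the extras listed above can be joined the same way):

```
$ pip3 install comictagger[GUI,CBR,ICU]
```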

### Chocolatey installation (Windows only)

A [Chocolatey package](https://community.chocolatey.org/packages/comictagger), maintained by @Xav83, is provided; you can install it with:
```powershell
choco install comictagger
```
### WinGet installation (Windows only)

A [WinGet package](https://github.com/microsoft/winget-pkgs/tree/master/manifests/c/ComicTagger/ComicTagger), maintained by @Sn1cket, is provided; you can install it with:
```powershell
winget install ComicTagger.ComicTagger
```
### From source

1. Ensure you have Python 3.9 installed
2. Clone this repository `git clone https://github.com/comictagger/comictagger.git`
3. `pip3 install .[ICU]` or `pip3 install .[GUI,ICU]`


## Contributors

<!-- readme: beville,davide-romanini,collaborators,contributors -start -->
<table>
<tr>
    <td align="center">
        <a href="https://github.com/beville">
            <img src="https://avatars.githubusercontent.com/u/7294848?v=4" width="100;" alt="beville"/>
            <br />
            <sub><b>beville</b></sub>
        </a>
    </td>
    <td align="center">
        <a href="https://github.com/davide-romanini">
            <img src="https://avatars.githubusercontent.com/u/731199?v=4" width="100;" alt="davide-romanini"/>
            <br />
            <sub><b>davide-romanini</b></sub>
        </a>
    </td>
    <td align="center">
        <a href="https://github.com/fcanc">
            <img src="https://avatars.githubusercontent.com/u/4999486?v=4" width="100;" alt="fcanc"/>
            <br />
            <sub><b>fcanc</b></sub>
        </a>
    </td>
    <td align="center">
        <a href="https://github.com/lordwelch">
            <img src="https://avatars.githubusercontent.com/u/7547075?v=4" width="100;" alt="lordwelch"/>
            <br />
            <sub><b>lordwelch</b></sub>
        </a>
    </td>
    <td align="center">
        <a href="https://github.com/mizaki">
            <img src="https://avatars.githubusercontent.com/u/1141189?v=4" width="100;" alt="mizaki"/>
            <br />
            <sub><b>mizaki</b></sub>
        </a>
    </td>
    <td align="center">
        <a href="https://github.com/MichaelFitzurka">
            <img src="https://avatars.githubusercontent.com/u/27830765?v=4" width="100;" alt="MichaelFitzurka"/>
            <br />
            <sub><b>MichaelFitzurka</b></sub>
        </a>
    </td></tr>
<tr>
    <td align="center">
        <a href="https://github.com/abuchanan920">
            <img src="https://avatars.githubusercontent.com/u/368793?v=4" width="100;" alt="abuchanan920"/>
            <br />
            <sub><b>abuchanan920</b></sub>
        </a>
    </td>
    <td align="center">
        <a href="https://github.com/N-Hertstein">
            <img src="https://avatars.githubusercontent.com/u/64664577?v=4" width="100;" alt="N-Hertstein"/>
            <br />
            <sub><b>N-Hertstein</b></sub>
        </a>
    </td>
    <td align="center">
        <a href="https://github.com/kcgthb">
            <img src="https://avatars.githubusercontent.com/u/186807?v=4" width="100;" alt="kcgthb"/>
            <br />
            <sub><b>kcgthb</b></sub>
        </a>
    </td>
    <td align="center">
        <a href="https://github.com/rhaussmann">
            <img src="https://avatars.githubusercontent.com/u/7084007?v=4" width="100;" alt="rhaussmann"/>
            <br />
            <sub><b>rhaussmann</b></sub>
        </a>
    </td>
    <td align="center">
        <a href="https://github.com/AlbanSeurat">
            <img src="https://avatars.githubusercontent.com/u/500180?v=4" width="100;" alt="AlbanSeurat"/>
            <br />
            <sub><b>AlbanSeurat</b></sub>
        </a>
    </td>
    <td align="center">
        <a href="https://github.com/Sn1cket">
            <img src="https://avatars.githubusercontent.com/u/32904645?v=4" width="100;" alt="Sn1cket"/>
            <br />
            <sub><b>Sn1cket</b></sub>
        </a>
    </td></tr>
<tr>
    <td align="center">
        <a href="https://github.com/emmanuel-ferdman">
            <img src="https://avatars.githubusercontent.com/u/35470921?v=4" width="100;" alt="emmanuel-ferdman"/>
            <br />
            <sub><b>emmanuel-ferdman</b></sub>
        </a>
    </td>
    <td align="center">
        <a href="https://github.com/jpcranford">
            <img src="https://avatars.githubusercontent.com/u/21347202?v=4" width="100;" alt="jpcranford"/>
            <br />
            <sub><b>jpcranford</b></sub>
        </a>
    </td>
    <td align="center">
        <a href="https://github.com/PawlakMarek">
            <img src="https://avatars.githubusercontent.com/u/26022173?v=4" width="100;" alt="PawlakMarek"/>
            <br />
            <sub><b>PawlakMarek</b></sub>
        </a>
    </td>
    <td align="center">
        <a href="https://github.com/DrMcCoy">
            <img src="https://avatars.githubusercontent.com/u/156130?v=4" width="100;" alt="DrMcCoy"/>
            <br />
            <sub><b>DrMcCoy</b></sub>
        </a>
    </td>
    <td align="center">
        <a href="https://github.com/Xav83">
            <img src="https://avatars.githubusercontent.com/u/6787157?v=4" width="100;" alt="Xav83"/>
            <br />
            <sub><b>Xav83</b></sub>
        </a>
    </td>
    <td align="center">
        <a href="https://github.com/thFrgttn">
            <img src="https://avatars.githubusercontent.com/u/39759781?v=4" width="100;" alt="thFrgttn"/>
            <br />
            <sub><b>thFrgttn</b></sub>
        </a>
    </td></tr>
<tr>
    <td align="center">
        <a href="https://github.com/tlc">
            <img src="https://avatars.githubusercontent.com/u/19436?v=4" width="100;" alt="tlc"/>
            <br />
            <sub><b>tlc</b></sub>
        </a>
    </td></tr>
</table>
<!-- readme: beville,davide-romanini,collaborators,contributors -end -->
31
README.txt
@@ -1,31 +0,0 @@
ComicTagger is a multi-platform app for writing metadata to digital comics, written in Python and PyQt.

Features:

* Runs on Mac OS X, Microsoft Windows, and Linux systems
* Communicates with an online database (Comic Vine) for acquiring metadata
* Uses image processing to automatically match a given archive with the correct issue data
* Batch processing in the GUI for tagging hundreds or more comics at a time
* Reads and writes multiple tagging schemes (ComicBookLover and ComicRack, with more planned).
* Reads and writes RAR and Zip archives (external tools needed for writing RAR)
* Command line interface (CLI) on all platforms (including Windows), which supports batch operations, and which can be used in native scripts for complex operations. For example, to recursively scrape and tag all archives in a folder:
    comictagger.py -R -s -o -f -t cr -v -i --nooverwrite /path/to/comics/

For details, screenshots, release notes, and more, visit http://code.google.com/p/comictagger/

Requires:

* python 2.6 or 2.7
* configparser
* python imaging (PIL) >= 1.1.6
* beautifulsoup > 4.1

Optional requirement (for GUI):

* pyqt4

Install and run:

* ComicTagger can be run directly from this directory, using the launcher script "comictagger.py"

* To install on your system use: "python setup.py install". Take note in the output where comictagger.py goes!
11
build-tools/ComicTagger.desktop
Normal file
@@ -0,0 +1,11 @@
[Desktop Entry]
Encoding=UTF-8
Name=ComicTagger
GenericName=Comic Metadata Editor
Comment=A cross-platform GUI/CLI app for writing metadata to comic archives
Exec=comictagger %F
Icon=/usr/local/share/comictagger/app.png
Terminal=false
Type=Application
MimeType=text/plain;
Categories=Application;
241
build-tools/comictagger.spec
Normal file
@@ -0,0 +1,241 @@
# -*- mode: python ; coding: utf-8 -*-

import platform

from comictaggerlib import ctversion

enable_console = False
block_cipher = None


a = Analysis(
    ["../comictaggerlib/__main__.py"],
    pathex=[],
    binaries=[],
    datas=[],
    hiddenimports=[],
    hookspath=[],
    hooksconfig={},
    runtime_hooks=[],
    excludes=[],
    win_no_prefer_redirects=False,
    win_private_assemblies=False,
    cipher=block_cipher,
    noarchive=False,
)

exe_binaries = []
exe_zipfiles = []
exe_datas = []
exe_exclude_binaries = True

coll_binaries = a.binaries
coll_zipfiles = a.zipfiles
coll_datas = a.datas

if platform.system() in ["Windows"]:
    enable_console = True
    exe_binaries = a.binaries
    exe_zipfiles = a.zipfiles
    exe_datas = a.datas
    exe_exclude_binaries = False

    coll_binaries = []
    coll_zipfiles = []
    coll_datas = []


pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)

exe = EXE(
    pyz,
    a.scripts,
    exe_binaries,
    exe_zipfiles,
    exe_datas,
    [],
    exclude_binaries=exe_exclude_binaries,
    name="comictagger",
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    upx_exclude=[],
    runtime_tmpdir=None,
    console=enable_console,
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
    icon="windows/app.ico",
)
if platform.system() not in ["Windows"]:
    coll = COLLECT(
        exe,
        coll_binaries,
        coll_zipfiles,
        coll_datas,
        strip=False,
        upx=True,
        upx_exclude=[],
        name="comictagger",
    )
    app = BUNDLE(
        coll,
        name="ComicTagger.app",
        icon="mac/app.icns",
        info_plist={
            "NSHighResolutionCapable": "True",
            "NSPrincipalClass": "NSApplication",
            "NSRequiresAquaSystemAppearance": "False",
            "CFBundleDisplayName": "ComicTagger",
            "CFBundleShortVersionString": ctversion.version,
            "CFBundleVersion": ctversion.version,
            "CFBundleDocumentTypes": [
                {
                    "CFBundleTypeRole": "Editor",
                    "LSHandlerRank": "Default",
                    "LSItemContentTypes": [
                        "public.folder",
                    ],
                    "CFBundleTypeName": "Folder",
                },
                {
                    "CFBundleTypeExtensions": [
                        "cbz",
                    ],
                    "LSTypeIsPackage": False,
                    "NSPersistentStoreTypeKey": "Binary",
                    "CFBundleTypeIconSystemGenerated": True,
                    "CFBundleTypeName": "ZIP Comic Archive",
                    "LSItemContentTypes": [
                        "public.zip-comic-archive",
                        "com.simplecomic.cbz-archive",
                        "com.macitbetter.cbz-archive",
                        "public.cbz-archive",
                        "cx.c3.cbz-archive",
                        "com.yacreader.yacreader.cbz",
                        "com.milke.cbz-archive",
                        "com.bitcartel.comicbooklover.cbz",
                        "public.archive.cbz",
                        "public.zip-archive",
                    ],
                    "CFBundleTypeRole": "Editor",
                    "LSHandlerRank": "Default",
                },
                {
                    "CFBundleTypeExtensions": [
                        "cb7",
                    ],
                    "LSTypeIsPackage": False,
                    "NSPersistentStoreTypeKey": "Binary",
                    "CFBundleTypeIconSystemGenerated": True,
                    "CFBundleTypeName": "7-Zip Comic Archive",
                    "LSItemContentTypes": [
                        "org.7-zip.7-zip-archive",
                        "com.simplecomic.cb7-archive",
                        "public.cb7-archive",
                        "com.macitbetter.cb7-archive",
                        "cx.c3.cb7-archive",
                        "org.7-zip.7-zip-comic-archive",
                    ],
                    "CFBundleTypeRole": "Editor",
                    "LSHandlerRank": "Default",
                },
                {
                    "CFBundleTypeExtensions": [
                        "cbr",
                    ],
                    "LSTypeIsPackage": False,
                    "NSPersistentStoreTypeKey": "Binary",
                    "CFBundleTypeIconSystemGenerated": True,
                    "CFBundleTypeName": "RAR Comic Archive",
                    "LSItemContentTypes": [
                        "com.rarlab.rar-archive",
                        "com.rarlab.rar-comic-archive",
                        "com.simplecomic.cbr-archive",
                        "com.macitbetter.cbr-archive",
                        "public.cbr-archive",
                        "cx.c3.cbr-archive",
                        "com.bitcartel.comicbooklover.cbr",
                        "com.milke.cbr-archive",
                        "public.archive.cbr",
                        "com.yacreader.yacreader.cbr",
                    ],
                    "CFBundleTypeRole": "Editor",
                    "LSHandlerRank": "Default",
                },
            ],
            "UTImportedTypeDeclarations": [
                {
                    "UTTypeIdentifier": "com.rarlab.rar-archive",
                    "UTTypeDescription": "RAR Archive",
                    "UTTypeConformsTo": [
                        "public.data",
                        "public.archive",
                    ],
                    "UTTypeTagSpecification": {
                        "public.mime-type": [
                            "application/x-rar",
                            "application/x-rar-compressed",
                        ],
                        "public.filename-extension": [
                            "rar",
                        ],
                    },
                },
                {
                    "UTTypeConformsTo": [
                        "public.data",
                        "public.archive",
                        "com.rarlab.rar-archive",
                    ],
                    "UTTypeIdentifier": "com.rarlab.rar-comic-archive",
                    "UTTypeDescription": "RAR Comic Archive",
                    "UTTypeTagSpecification": {
                        "public.mime-type": [
                            "application/vnd.comicbook-rar",
                            "application/x-cbr",
                        ],
                        "public.filename-extension": [
                            "cbr",
                        ],
                    },
                },
                {
                    "UTTypeConformsTo": [
                        "public.data",
                        "public.archive",
                        "public.zip-archive",
                    ],
                    "UTTypeIdentifier": "public.zip-comic-archive",
                    "UTTypeDescription": "ZIP Comic Archive",
                    "UTTypeTagSpecification": {
                        "public.filename-extension": [
                            "cbz",
                        ],
                    },
                },
                {
                    "UTTypeConformsTo": [
                        "public.data",
                        "public.archive",
                        "org.7-zip.7-zip-archive",
                    ],
                    "UTTypeIdentifier": "org.7-zip.7-zip-comic-archive",
                    "UTTypeDescription": "7-Zip Comic Archive",
                    "UTTypeTagSpecification": {
                        "public.mime-type": [
                            "application/vnd.comicbook+7-zip",
                            "application/x-cb7-compressed",
                        ],
                        "public.filename-extension": [
                            "cb7",
                        ],
                    },
                },
            ],
        },
        bundle_identifier="com.comictagger",
    )
19
build-tools/dmgbuild.conf
Normal file
@@ -0,0 +1,19 @@
import pathlib

app = "ComicTagger"
app_name = f"{app}.app"
path = f"dist/{app_name}"


# dmgbuild settings
format = 'ULMO'
files = (str(path),)

symlinks = {'Applications': '/Applications'}

icon = pathlib.Path().cwd() / 'build-tools' / 'mac' / 'volume.icns'

icon_locations = {
    app_name: (100, 100),
    'Applications': (300, 100)
}
26
build-tools/generate_settngs.py
Normal file
@@ -0,0 +1,26 @@
from __future__ import annotations

import os
import pathlib

import settngs

import comictaggerlib.main


def generate() -> str:
    app = comictaggerlib.main.App()
    app.load_plugins(app.initial_arg_parser.parse_known_args()[0])
    app.register_settings(True)
    imports, types = settngs.generate_dict(app.manager.definitions)
    imports2, types2 = settngs.generate_ns(app.manager.definitions)
    # Merge the two import blocks, keeping order and dropping duplicates.
    i = imports.splitlines()
    i.extend(set(imports2.splitlines()) - set(i))
    return (os.linesep * 2).join((os.linesep.join(i), types2, types))


if __name__ == "__main__":
    src = generate()
    pathlib.Path("./comictaggerlib/ctsettings/settngs_namespace.py").write_text(src)
    print(src, end="")
38
build-tools/get_appimage.py
Normal file
@@ -0,0 +1,38 @@
from __future__ import annotations

import argparse
import os
import pathlib
import platform

try:
    import niquests as requests
except ImportError:
    import requests

arch = platform.machine()
parser = argparse.ArgumentParser()
parser.add_argument("APPIMAGETOOL", default=f"build/appimagetool-{arch}.AppImage", type=pathlib.Path, nargs="?")

opts = parser.parse_args()
opts.APPIMAGETOOL = opts.APPIMAGETOOL.absolute()


def urlretrieve(url: str, dest: pathlib.Path) -> None:
    resp = requests.get(url)
    if resp.status_code == 200:
        dest.parent.mkdir(parents=True, exist_ok=True)
        dest.write_bytes(resp.content)


if opts.APPIMAGETOOL.exists():
    raise SystemExit(0)

urlretrieve(
    f"https://github.com/AppImage/appimagetool/releases/latest/download/appimagetool-{arch}.AppImage",
    opts.APPIMAGETOOL,
)
os.chmod(opts.APPIMAGETOOL, 0o0700)

if not opts.APPIMAGETOOL.exists():
    raise SystemExit(1)
@@ -1,35 +1,26 @@
#PYINSTALLER_CMD := VERSIONER_PYTHON_PREFER_32_BIT=yes arch -i386 python $(HOME)/pyinstaller-2.0/pyinstaller.py
PYINSTALLER_CMD := python $(HOME)/pyinstaller-2.0/pyinstaller.py
TAGGER_BASE ?= $(HOME)/Dropbox/tagger/comictagger
PYINSTALLER_CMD := pyinstaller
TAGGER_BASE ?= ../
TAGGER_SRC := $(TAGGER_BASE)/comictaggerlib

APP_NAME := ComicTagger
VERSION_STR := $(shell grep version $(TAGGER_SRC)/ctversion.py| cut -d= -f2 | sed 's/\"//g')
VERSION_STR := $(shell cd .. && python setup.py --version)

MAC_BASE := $(TAGGER_BASE)/mac
DIST_DIR := $(MAC_BASE)/dist
STAGING := $(MAC_BASE)/$(APP_NAME)
APP_BUNDLE := $(DIST_DIR)/$(APP_NAME).app
VOLUME_NAME := $(APP_NAME)-$(VERSION_STR)
VOLUME_NAME := "$(APP_NAME)-$(VERSION_STR)"
DMG_FILE := $(VOLUME_NAME).dmg

all: clean dist diskimage

dist:
	$(PYINSTALLER_CMD) $(TAGGER_BASE)/comictagger.py -o $(MAC_BASE) -w -n $(APP_NAME) -s
	$(PYINSTALLER_CMD) $(TAGGER_BASE)/comictagger.py -w -n $(APP_NAME) -s
	cp -a $(TAGGER_SRC)/ui $(APP_BUNDLE)/Contents/MacOS
	cp -a $(TAGGER_SRC)/graphics $(APP_BUNDLE)/Contents/MacOS
	cp $(MAC_BASE)/app.icns $(APP_BUNDLE)/Contents/Resources/icon-windowed.icns
	# fix the version string in the Info.plist
	sed -i -e 's/0\.0\.0/$(VERSION_STR)/' $(MAC_BASE)/dist/ComicTagger.app/Contents/Info.plist
	# strip out PPC/x64
	#./make_thin.sh dist/ComicTagger.app/Contents/MacOS
	#./make_thin.sh dist/ComicTagger.app/Contents/MacOS/qt4_plugins/accessible
	#./make_thin.sh dist/ComicTagger.app/Contents/MacOS/qt4_plugins/bearer
	#./make_thin.sh dist/ComicTagger.app/Contents/MacOS/qt4_plugins/codecs
	#./make_thin.sh dist/ComicTagger.app/Contents/MacOS/qt4_plugins/graphicssystems
	#./make_thin.sh dist/ComicTagger.app/Contents/MacOS/qt4_plugins/iconengines
	#./make_thin.sh dist/ComicTagger.app/Contents/MacOS/qt4_plugins/imageformats

clean:
	rm -rf $(DIST_DIR) $(MAC_BASE)/build
@@ -39,7 +30,7 @@ clean:
	rm -f raw*.dmg
	echo $(VERSION_STR)
diskimage:
	#Set up disk image staging folder
	# Set up disk image staging folder
	rm -rf $(STAGING)
	mkdir $(STAGING)
	cp $(TAGGER_BASE)/release_notes.txt $(STAGING)
@@ -48,28 +39,27 @@ diskimage:
	cp $(MAC_BASE)/volume.icns $(STAGING)/.VolumeIcon.icns
	SetFile -c icnC $(STAGING)/.VolumeIcon.icns

	##generate raw disk image
	# generate raw disk image
	rm -f $(DMG_FILE)
	hdiutil create -srcfolder $(STAGING) -volname $(VOLUME_NAME) -format UDRW -ov raw-$(DMG_FILE)

	#remove working files and folders
	# remove working files and folders
	rm -rf $(STAGING)

	# we now have a raw DMG file.

	# remount it so we can set the volume icon properly
	mkdir -p $(STAGING)
	hdiutil attach raw-$(DMG_FILE) -mountpoint $(STAGING)
	SetFile -a C $(STAGING)
	hdiutil detach $(STAGING)
	rm -rf $(STAGING)

	# convert the raw image
	rm -f $(DMG_FILE)
	hdiutil convert raw-$(DMG_FILE) -format UDZO -o $(DMG_FILE)
	rm -f raw-$(DMG_FILE)

	#move finished product to release folder
	# move finished product to release folder
	mkdir -p $(TAGGER_BASE)/release
	mv $(DMG_FILE) $(TAGGER_BASE)/release

@@ -8,12 +8,12 @@ do
	then
		echo "Fat Binary: $FILE"
		mkdir -p thin
		lipo -thin i386 -output thin/$FILE $BINFOLDER/$FILE
	fi
done

if [ -d thin ]
then
	mv thin/* $BINFOLDER
else
	echo No files to lipo
267
build-tools/oidc-exchange.py
Normal file
@@ -0,0 +1,267 @@
from __future__ import annotations

import argparse
import base64
import json
import os
import sys
from http import HTTPStatus
from pathlib import Path
from typing import NoReturn
from urllib.parse import urlparse

import keyring
import requests
from id import IdentityError, detect_credential

_GITHUB_STEP_SUMMARY = Path(os.getenv("GITHUB_STEP_SUMMARY", "fail.txt"))

# The top-level error message that gets rendered.
# This message wraps one of the other templates/messages defined below.
_ERROR_SUMMARY_MESSAGE = """
Trusted publishing exchange failure:

{message}

You're seeing this because the action wasn't given the inputs needed to
perform password-based or token-based authentication. If you intended to
perform one of those authentication methods instead of trusted
publishing, then you should double-check your secret configuration and variable
names.

Read more about trusted publishers at https://docs.pypi.org/trusted-publishers/

Read more about how this action uses trusted publishers at
https://github.com/marketplace/actions/pypi-publish#trusted-publishing
"""

# Rendered if OIDC identity token retrieval fails for any reason.
_TOKEN_RETRIEVAL_FAILED_MESSAGE = """
OpenID Connect token retrieval failed: {identity_error}

This generally indicates a workflow configuration error, such as insufficient
permissions. Make sure that your workflow has `id-token: write` configured
at the job level, e.g.:

```yaml
permissions:
  id-token: write
```

Learn more at https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#adding-permissions-settings.
"""  # noqa: S105; not a password

# Specialization of the token retrieval failure case, when we know that
# the failure cause is use within a third-party PR.
_TOKEN_RETRIEVAL_FAILED_FORK_PR_MESSAGE = """
OpenID Connect token retrieval failed: {identity_error}

The workflow context indicates that this action was called from a
pull request on a fork. GitHub doesn't give these workflows OIDC permissions,
even if `id-token: write` is explicitly configured.

To fix this, change your publishing workflow to use an event that
forks of your repository cannot trigger (such as tag or release
creation, or a manually triggered workflow dispatch).
"""  # noqa: S105; not a password

# Rendered if the package index refuses the given OIDC token.
_SERVER_REFUSED_TOKEN_EXCHANGE_MESSAGE = """
Token request failed: the server refused the request for the following reasons:

{reasons}

This generally indicates a trusted publisher configuration error, but could
also indicate an internal error on GitHub or PyPI's part.

{rendered_claims}
"""  # noqa: S105; not a password

_RENDERED_CLAIMS = """
The claims rendered below are **for debugging purposes only**. You should **not**
use them to configure a trusted publisher unless they already match your expectations.

If a claim is not present in the claim set, then it is rendered as `MISSING`.

* `sub`: `{sub}`
* `repository`: `{repository}`
* `repository_owner`: `{repository_owner}`
* `repository_owner_id`: `{repository_owner_id}`
* `job_workflow_ref`: `{job_workflow_ref}`
* `ref`: `{ref}`

See https://docs.pypi.org/trusted-publishers/troubleshooting/ for more help.
"""

# Rendered if the package index's token response isn't valid JSON.
_SERVER_TOKEN_RESPONSE_MALFORMED_JSON = """
Token request failed: the index produced an unexpected
{status_code} response.

This strongly suggests a server configuration or downtime issue; wait
a few minutes and try again.

You can monitor PyPI's status here: https://status.python.org/
"""  # noqa: S105; not a password

# Rendered if the package index's token response isn't a valid API token payload.
_SERVER_TOKEN_RESPONSE_MALFORMED_MESSAGE = """
Token response error: the index gave us an invalid response.

This strongly suggests a server configuration or downtime issue; wait
a few minutes and try again.
"""  # noqa: S105; not a password


def die(msg: str) -> NoReturn:
    with _GITHUB_STEP_SUMMARY.open("a", encoding="utf-8") as io:
        print(_ERROR_SUMMARY_MESSAGE.format(message=msg), file=io)

    # HACK: GitHub Actions' annotations don't work across multiple lines naively;
    # translating `\n` into `%0A` (i.e., HTML percent-encoding) is known to work.
    # See: https://github.com/actions/toolkit/issues/193
    msg = msg.replace("\n", "%0A")
    print(f"::error::Trusted publishing exchange failure: {msg}", file=sys.stderr)
    sys.exit(1)


def debug(msg: str) -> None:
    print(f"::debug::{msg.title()}", file=sys.stderr)


def assert_successful_audience_call(resp: requests.Response, domain: str) -> None:
    if resp.ok:
        return

    if resp.status_code == HTTPStatus.FORBIDDEN:
        # This index supports OIDC, but forbids the client from using
        # it (either because it's disabled, ratelimited, etc.)
        die(
            f"audience retrieval failed: repository at {domain} has trusted publishing disabled",
        )
    elif resp.status_code == HTTPStatus.NOT_FOUND:
        # This index does not support OIDC.
        die(
            f"audience retrieval failed: repository at {domain} does not indicate trusted publishing support",
        )
    else:
        status = HTTPStatus(resp.status_code)
        # Unknown: the index may or may not support OIDC, but didn't respond with
        # something we expect. This can happen if the index is broken, in maintenance mode,
        # misconfigured, etc.
        die(
            f"audience retrieval failed: repository at {domain} responded with unexpected {resp.status_code}: {status.phrase}",
        )


def render_claims(token: str) -> str:
    _, payload, _ = token.split(".", 2)

    # urlsafe_b64decode needs padding; JWT payloads don't contain any.
    payload += "=" * (4 - (len(payload) % 4))
    claims = json.loads(base64.urlsafe_b64decode(payload))

    def _get(name: str) -> str:
        return claims.get(name, "MISSING")

    return _RENDERED_CLAIMS.format(
        sub=_get("sub"),
        repository=_get("repository"),
        repository_owner=_get("repository_owner"),
        repository_owner_id=_get("repository_owner_id"),
        job_workflow_ref=_get("job_workflow_ref"),
        ref=_get("ref"),
    )


def event_is_third_party_pr() -> bool:
    # Non-`pull_request` events cannot be from third-party PRs.
    if os.getenv("GITHUB_EVENT_NAME") != "pull_request":
        return False

    event_path = os.getenv("GITHUB_EVENT_PATH")
    if not event_path:
        # No GITHUB_EVENT_PATH indicates a weird GitHub or runner bug.
        debug("unexpected: no GITHUB_EVENT_PATH to check")
        return False

    try:
        event = json.loads(Path(event_path).read_bytes())
    except json.JSONDecodeError:
        debug("unexpected: GITHUB_EVENT_PATH does not contain valid JSON")
        return False

    try:
        return event["pull_request"]["head"]["repo"]["fork"]
    except KeyError:
        return False


parser = argparse.ArgumentParser()
parser.add_argument("repository_url", default="https://upload.pypi.org/legacy/", type=urlparse, nargs="?")

opts = parser.parse_args()
repository_domain = opts.repository_url.netloc
token_exchange_url = f"https://{repository_domain}/_/oidc/mint-token"

# Indices are expected to support `https://{domain}/_/oidc/audience`,
# which tells OIDC exchange clients which audience to use.
audience_url = f"https://{repository_domain}/_/oidc/audience"
audience_resp = requests.get(audience_url, timeout=5)
assert_successful_audience_call(audience_resp, repository_domain)

oidc_audience = audience_resp.json()["audience"]

debug(f"selected trusted publishing exchange endpoint: {token_exchange_url}")

try:
    oidc_token = detect_credential(audience=oidc_audience)
except IdentityError as identity_error:
    cause_msg_tmpl = (
        _TOKEN_RETRIEVAL_FAILED_FORK_PR_MESSAGE if event_is_third_party_pr() else _TOKEN_RETRIEVAL_FAILED_MESSAGE
    )
    for_cause_msg = cause_msg_tmpl.format(identity_error=identity_error)
    die(for_cause_msg)
if not oidc_token:
    die("Unable to detect credentials. Is this running in CI?")

# Now we can do the actual token exchange.
mint_token_resp = requests.post(
    token_exchange_url,
    json={"token": oidc_token},
    timeout=5,
)

try:
    mint_token_payload = mint_token_resp.json()
except requests.JSONDecodeError:
    # Token exchange failure normally produces a JSON error response, but
    # we might have hit a server error instead.
    die(
        _SERVER_TOKEN_RESPONSE_MALFORMED_JSON.format(
            status_code=mint_token_resp.status_code,
        ),
    )

# On failure, the JSON response includes the list of errors that
# occurred during minting.
if not mint_token_resp.ok:
    reasons = "\n".join(f'* `{error["code"]}`: {error["description"]}' for error in mint_token_payload["errors"])

    rendered_claims = render_claims(oidc_token)

    die(
        _SERVER_REFUSED_TOKEN_EXCHANGE_MESSAGE.format(
            reasons=reasons,
            rendered_claims=rendered_claims,
        ),
    )

pypi_token = mint_token_payload.get("token")
if pypi_token is None:
    die(_SERVER_TOKEN_RESPONSE_MALFORMED_MESSAGE)

# Mask the newly minted PyPI token, so that we don't accidentally leak it in logs.
print(f"::add-mask::{pypi_token}", file=sys.stderr)

keyring.set_password(opts.repository_url.geturl(), "__token__", pypi_token)
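
The re-padding line in `render_claims` is easy to verify in isolation. A standalone sketch (the claim value below is made up; it is not a real token):

```python
import base64
import json

# A made-up claims payload, encoded the way JWT segments are: base64url, no padding.
payload = base64.urlsafe_b64encode(json.dumps({"sub": "repo:example/example"}).encode()).rstrip(b"=").decode()

# The same re-padding idiom render_claims() uses above: pad back to a multiple of four.
payload += "=" * (4 - (len(payload) % 4))
print(json.loads(base64.urlsafe_b64decode(payload))["sub"])  # repo:example/example
```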
(binary image changed — Before: 62 KiB, After: 62 KiB)
85
build-tools/zip_artifacts.py
Normal file
@@ -0,0 +1,85 @@
from __future__ import annotations

import os
import pathlib
import platform
import sys
import tarfile
import zipfile

from comictaggerlib.ctversion import __version__


def addToZip(zf: zipfile.ZipFile, path: str, zippath: str) -> None:
    if os.path.isfile(path):
        zf.write(path, zippath)
    elif os.path.isdir(path):
        if zippath:
            zf.write(path, zippath)
        for nm in sorted(os.listdir(path)):
            addToZip(zf, os.path.join(path, nm), os.path.join(zippath, nm))


def Zip(zip_file: pathlib.Path, path: pathlib.Path) -> None:
    # Remove any stale archive; the ".zip" suffix is appended below, so unlink that path.
    pathlib.Path(f"{zip_file}.zip").unlink(missing_ok=True)
    with zipfile.ZipFile(f"{zip_file}.zip", "w", compression=zipfile.ZIP_DEFLATED, compresslevel=8) as zf:
        zippath = os.path.basename(path)
        if not zippath:
            zippath = os.path.basename(os.path.dirname(path))
        if zippath in ("", os.curdir, os.pardir):
            zippath = ""
        addToZip(zf, str(path), zippath)


def addToTar(tf: tarfile.TarFile, path: str, zippath: str) -> None:
    if os.path.isfile(path):
        tf.add(path, zippath)
    elif os.path.isdir(path):
        if zippath:
            tf.add(path, zippath, recursive=False)
        for nm in sorted(os.listdir(path)):
            addToTar(tf, os.path.join(path, nm), os.path.join(zippath, nm))


def Tar(tar_file: pathlib.Path, path: pathlib.Path) -> None:
    # Remove any stale archive; the ".tar.gz" suffix is appended below, so unlink that path.
    pathlib.Path(f"{tar_file}.tar.gz").unlink(missing_ok=True)
    with tarfile.open(f"{tar_file}.tar.gz", "w:gz") as tf:
        zippath = os.path.basename(path)
        if not zippath:
            zippath = os.path.basename(os.path.dirname(path))
        if zippath in ("", os.curdir, os.pardir):
            zippath = ""
        addToTar(tf, str(path), zippath)


if __name__ == "__main__":
    app = "ComicTagger"
    exe = app.casefold()
    final_name = f"{app}-{__version__}-{platform.system()}-{platform.machine()}"
    if platform.system() == "Windows":
        exe = f"{exe}.exe"
    elif platform.system() == "Darwin":
        exe = f"{app}.app"
        ver = platform.mac_ver()
        final_name = f"{app}-{__version__}-macOS-{ver[0]}-{ver[2]}"

    path = pathlib.Path(f"dist/{exe}")
    binary_path = pathlib.Path("dist/binary")
    binary_path.mkdir(parents=True, exist_ok=True)
    archive_destination = binary_path / final_name

    if platform.system() == "Darwin":
        from dmgbuild.__main__ import main as dmg_main

        sys.argv = [
            "zip_artifacts",
            "-s",
            str(pathlib.Path(__file__).parent / "dmgbuild.conf"),
            f"{app} {__version__}",
            f"{archive_destination}.dmg",
        ]
        dmg_main()
    elif platform.system() == "Windows":
        Zip(archive_destination, path)
    else:
        Tar(archive_destination, path)
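
A quick usage sketch of the `Zip` helper above. The paths are hypothetical, and the import assumes the script is importable as `zip_artifacts` (e.g. run from `build-tools/`) with `comictaggerlib` on `sys.path`, since the module reads its version at import time:

```python
import pathlib

from zip_artifacts import Zip  # module name assumption: build-tools/zip_artifacts.py

# Packs dist/comictagger into dist/binary/ComicTagger-demo.zip,
# mirroring what the Windows branch of __main__ does.
Zip(pathlib.Path("dist/binary/ComicTagger-demo"), pathlib.Path("dist/comictagger"))
```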
3
comicapi/__init__.py
Normal file
@@ -0,0 +1,3 @@
from __future__ import annotations

__author__ = "dromanin"
7
comicapi/__pyinstaller/__init__.py
Normal file
@@ -0,0 +1,7 @@
from __future__ import annotations

import os


def get_hook_dirs() -> list[str]:
    return [os.path.dirname(__file__)]
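
PyInstaller discovers hook directories like this one through the `pyinstaller40` entry-point group. A sketch of how a package can advertise it (an illustrative `setup()` call, not necessarily how this project's packaging is actually configured):

```python
from setuptools import setup

# PyInstaller >= 4.0 looks up the "pyinstaller40" entry-point group and calls
# each registered hook-dirs function to learn where hook-*.py files live.
setup(
    name="comicapi",
    entry_points={
        "pyinstaller40": ["hook-dirs = comicapi.__pyinstaller:get_hook_dirs"],
    },
)
```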
10
comicapi/__pyinstaller/hook-comicapi.py
Normal file
@@ -0,0 +1,10 @@
from __future__ import annotations

from PyInstaller.utils.hooks import collect_data_files, collect_entry_point

datas, hiddenimports = collect_entry_point("comicapi.archiver")
mdatas, mhiddenimports = collect_entry_point("comicapi.tags")

hiddenimports += mhiddenimports
datas += mdatas
datas += collect_data_files("comicapi.data")
468
comicapi/_url.py
Normal file
468
comicapi/_url.py
Normal file
@@ -0,0 +1,468 @@
|
||||
# mypy: disable-error-code="no-redef"
|
||||
from __future__ import annotations
|
||||
|
||||
try:
|
||||
from urllib3.exceptions import HTTPError, LocationParseError, LocationValueError
|
||||
from urllib3.util import Url, parse_url
|
||||
except ImportError:
|
||||
|
||||
import re
|
||||
import typing
|
||||
|
||||
class HTTPError(Exception):
|
||||
"""Base exception used by this module."""
|
||||
|
||||
class LocationValueError(ValueError, HTTPError):
|
||||
"""Raised when there is something wrong with a given URL input."""
|
||||
|
||||
class LocationParseError(LocationValueError):
|
||||
"""Raised when get_host or similar fails to parse the URL input."""
|
||||
|
||||
def __init__(self, location: str) -> None:
|
||||
message = f"Failed to parse: {location}"
|
||||
super().__init__(message)
|
||||
|
||||
self.location = location
|
||||
|
||||
def to_str(x: str | bytes, encoding: str | None = None, errors: str | None = None) -> str:
|
||||
if isinstance(x, str):
|
||||
return x
|
||||
elif not isinstance(x, bytes):
|
||||
raise TypeError(f"not expecting type {type(x).__name__}")
|
||||
if encoding or errors:
|
||||
return x.decode(encoding or "utf-8", errors=errors or "strict")
|
||||
return x.decode()
|
||||
|
||||
# We only want to normalize urls with an HTTP(S) scheme.
|
||||
# urllib3 infers URLs without a scheme (None) to be http.
|
||||
_NORMALIZABLE_SCHEMES = ("http", "https", None)
|
||||
|
||||
# Almost all of these patterns were derived from the
|
||||
# 'rfc3986' module: https://github.com/python-hyper/rfc3986
|
||||
_PERCENT_RE = re.compile(r"%[a-fA-F0-9]{2}")
|
||||
_SCHEME_RE = re.compile(r"^(?:[a-zA-Z][a-zA-Z0-9+-]*:|/)")
|
||||
_URI_RE = re.compile(
|
||||
r"^(?:([a-zA-Z][a-zA-Z0-9+.-]*):)?" r"(?://([^\\/?#]*))?" r"([^?#]*)" r"(?:\?([^#]*))?" r"(?:#(.*))?$",
|
||||
re.UNICODE | re.DOTALL,
|
||||
)
|
||||
|
||||
_IPV4_PAT = r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}"
|
||||
_HEX_PAT = "[0-9A-Fa-f]{1,4}"
|
||||
_LS32_PAT = "(?:{hex}:{hex}|{ipv4})".format(hex=_HEX_PAT, ipv4=_IPV4_PAT)
|
||||
_subs = {"hex": _HEX_PAT, "ls32": _LS32_PAT}
|
||||
_variations = [
|
||||
# 6( h16 ":" ) ls32
|
||||
"(?:%(hex)s:){6}%(ls32)s",
|
||||
# "::" 5( h16 ":" ) ls32
|
||||
"::(?:%(hex)s:){5}%(ls32)s",
|
||||
# [ h16 ] "::" 4( h16 ":" ) ls32
|
||||
"(?:%(hex)s)?::(?:%(hex)s:){4}%(ls32)s",
|
||||
# [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32
|
||||
"(?:(?:%(hex)s:)?%(hex)s)?::(?:%(hex)s:){3}%(ls32)s",
|
||||
# [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32
|
||||
"(?:(?:%(hex)s:){0,2}%(hex)s)?::(?:%(hex)s:){2}%(ls32)s",
|
||||
# [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32
|
||||
"(?:(?:%(hex)s:){0,3}%(hex)s)?::%(hex)s:%(ls32)s",
|
||||
# [ *4( h16 ":" ) h16 ] "::" ls32
|
||||
"(?:(?:%(hex)s:){0,4}%(hex)s)?::%(ls32)s",
|
||||
# [ *5( h16 ":" ) h16 ] "::" h16
|
||||
"(?:(?:%(hex)s:){0,5}%(hex)s)?::%(hex)s",
|
||||
# [ *6( h16 ":" ) h16 ] "::"
|
||||
"(?:(?:%(hex)s:){0,6}%(hex)s)?::",
|
||||
]
|
||||
|
||||
_UNRESERVED_PAT = r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._\-~"
|
||||
_IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")"
|
||||
_ZONE_ID_PAT = "(?:%25|%)(?:[" + _UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+"
|
||||
_IPV6_ADDRZ_PAT = r"\[" + _IPV6_PAT + r"(?:" + _ZONE_ID_PAT + r")?\]"
|
||||
_REG_NAME_PAT = r"(?:[^\[\]%:/?#]|%[a-fA-F0-9]{2})*"
|
||||
_TARGET_RE = re.compile(r"^(/[^?#]*)(?:\?([^#]*))?(?:#.*)?$")
|
||||
|
||||
_IPV4_RE = re.compile("^" + _IPV4_PAT + "$")
|
||||
_IPV6_RE = re.compile("^" + _IPV6_PAT + "$")
|
||||
_IPV6_ADDRZ_RE = re.compile("^" + _IPV6_ADDRZ_PAT + "$")
|
||||
_BRACELESS_IPV6_ADDRZ_RE = re.compile("^" + _IPV6_ADDRZ_PAT[2:-2] + "$")
|
||||
_ZONE_ID_RE = re.compile("(" + _ZONE_ID_PAT + r")\]$")
|
||||
|
||||
_HOST_PORT_PAT = ("^(%s|%s|%s)(?::0*?(|0|[1-9][0-9]{0,4}))?$") % (
|
||||
_REG_NAME_PAT,
|
||||
_IPV4_PAT,
|
||||
_IPV6_ADDRZ_PAT,
|
||||
)
|
||||
_HOST_PORT_RE = re.compile(_HOST_PORT_PAT, re.UNICODE | re.DOTALL)
|
||||
|
||||
_UNRESERVED_CHARS = set("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._-~")
|
||||
_SUB_DELIM_CHARS = set("!$&'()*+,;=")
|
||||
_USERINFO_CHARS = _UNRESERVED_CHARS | _SUB_DELIM_CHARS | {":"}
|
||||
_PATH_CHARS = _USERINFO_CHARS | {"@", "/"}
|
||||
_QUERY_CHARS = _FRAGMENT_CHARS = _PATH_CHARS | {"?"}
|
||||
|
||||
class Url(
|
||||
typing.NamedTuple(
|
||||
"Url",
|
||||
[
|
||||
("scheme", typing.Optional[str]),
|
||||
("auth", typing.Optional[str]),
|
||||
("host", typing.Optional[str]),
|
||||
("port", typing.Optional[int]),
|
||||
("path", typing.Optional[str]),
|
||||
("query", typing.Optional[str]),
|
||||
("fragment", typing.Optional[str]),
|
||||
],
|
||||
)
|
||||
):
|
||||
"""
|
||||
Data structure for representing an HTTP URL. Used as a return value for
|
||||
:func:`parse_url`. Both the scheme and host are normalized as they are
|
||||
both case-insensitive according to RFC 3986.
|
||||
"""
|
||||
|
||||
def __new__( # type: ignore[no-untyped-def]
|
||||
cls,
|
||||
scheme: str | None = None,
|
||||
auth: str | None = None,
|
||||
host: str | None = None,
|
||||
port: int | None = None,
|
||||
path: str | None = None,
|
||||
query: str | None = None,
|
||||
fragment: str | None = None,
|
||||
):
|
||||
if path and not path.startswith("/"):
|
||||
path = "/" + path
|
||||
if scheme is not None:
|
||||
scheme = scheme.lower()
|
||||
return super().__new__(cls, scheme, auth, host, port, path, query, fragment)
|
||||
|
||||
@property
|
||||
def hostname(self) -> str | None:
|
||||
"""For backwards-compatibility with urlparse. We're nice like that."""
|
||||
return self.host
|
||||
|
||||
@property
|
||||
def request_uri(self) -> str:
|
||||
"""Absolute path including the query string."""
|
||||
uri = self.path or "/"
|
||||
|
||||
if self.query is not None:
|
||||
uri += "?" + self.query
|
||||
|
||||
return uri
|
||||
|
||||
@property
|
||||
def authority(self) -> str | None:
|
||||
"""
|
||||
Authority component as defined in RFC 3986 3.2.
|
||||
This includes userinfo (auth), host and port.
|
||||
|
||||
i.e.
|
||||
userinfo@host:port
|
||||
"""
|
||||
userinfo = self.auth
|
||||
netloc = self.netloc
|
||||
if netloc is None or userinfo is None:
|
||||
return netloc
|
||||
else:
|
||||
return f"{userinfo}@{netloc}"
|
||||
|
||||
@property
|
||||
def netloc(self) -> str | None:
|
||||
"""
|
||||
Network location including host and port.
|
||||
|
||||
If you need the equivalent of urllib.parse's ``netloc``,
|
||||
use the ``authority`` property instead.
|
||||
"""
|
||||
if self.host is None:
|
||||
return None
|
||||
if self.port:
|
||||
return f"{self.host}:{self.port}"
|
||||
return self.host
|
||||
|
||||
@property
|
||||
def url(self) -> str:
|
||||
"""
|
||||
Convert self into a url
|
||||
|
||||
This function should more or less round-trip with :func:`.parse_url`. The
|
||||
returned url may not be exactly the same as the url inputted to
|
||||
:func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
|
||||
with a blank port will have : removed).
|
||||
|
||||
Example:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import urllib3
|
||||
|
||||
U = urllib3.util.parse_url("https://google.com/mail/")
|
||||
|
||||
print(U.url)
|
||||
# "https://google.com/mail/"
|
||||
|
||||
print( urllib3.util.Url("https", "username:password",
|
||||
"host.com", 80, "/path", "query", "fragment"
|
||||
).url
|
||||
)
|
||||
# "https://username:password@host.com:80/path?query#fragment"
|
||||
"""
|
||||
scheme, auth, host, port, path, query, fragment = self
|
||||
url = ""
|
||||
|
||||
# We use "is not None" we want things to happen with empty strings (or 0 port)
|
||||
if scheme is not None:
|
||||
url += scheme + "://"
|
||||
if auth is not None:
|
||||
url += auth + "@"
|
||||
if host is not None:
|
||||
url += host
|
||||
if port is not None:
|
||||
url += ":" + str(port)
|
||||
if path is not None:
|
||||
url += path
|
||||
if query is not None:
|
||||
url += "?" + query
|
||||
if fragment is not None:
|
||||
url += "#" + fragment
|
||||
|
||||
return url
|
||||
|
||||
def __str__(self) -> str:
|
||||
return self.url
|
||||
|
||||
@typing.overload
|
||||
def _encode_invalid_chars(component: str, allowed_chars: typing.Container[str]) -> str: # Abstract
|
||||
...
|
||||
|
||||
@typing.overload
|
||||
def _encode_invalid_chars(component: None, allowed_chars: typing.Container[str]) -> None: # Abstract
|
||||
...
|
||||
|
||||
def _encode_invalid_chars(component: str | None, allowed_chars: typing.Container[str]) -> str | None:
|
||||
"""Percent-encodes a URI component without reapplying
|
||||
onto an already percent-encoded component.
|
||||
"""
|
||||
if component is None:
|
||||
return component
|
||||
|
||||
component = to_str(component)
|
||||
|
||||
# Normalize existing percent-encoded bytes.
|
||||
# Try to see if the component we're encoding is already percent-encoded
|
||||
# so we can skip all '%' characters but still encode all others.
|
||||
component, percent_encodings = _PERCENT_RE.subn(lambda match: match.group(0).upper(), component)
|
||||
|
||||
uri_bytes = component.encode("utf-8", "surrogatepass")
|
||||
is_percent_encoded = percent_encodings == uri_bytes.count(b"%")
|
||||
encoded_component = bytearray()
|
||||
|
||||
for i in range(0, len(uri_bytes)):
|
||||
# Will return a single character bytestring
|
||||
byte = uri_bytes[i : i + 1]
|
||||
byte_ord = ord(byte)
|
||||
if (is_percent_encoded and byte == b"%") or (byte_ord < 128 and byte.decode() in allowed_chars):
|
||||
encoded_component += byte
|
||||
continue
|
||||
encoded_component.extend(b"%" + (hex(byte_ord)[2:].encode().zfill(2).upper()))
|
||||
|
||||
return encoded_component.decode()
|
||||
|
||||
def _remove_path_dot_segments(path: str) -> str:
|
||||
# See http://tools.ietf.org/html/rfc3986#section-5.2.4 for pseudo-code
|
||||
segments = path.split("/") # Turn the path into a list of segments
|
||||
output = [] # Initialize the variable to use to store output
|
||||
|
||||
for segment in segments:
|
||||
# '.' is the current directory, so ignore it, it is superfluous
|
||||
if segment == ".":
|
||||
continue
|
||||
# Anything other than '..', should be appended to the output
|
||||
if segment != "..":
|
||||
output.append(segment)
|
||||
# In this case segment == '..', if we can, we should pop the last
|
||||
# element
|
||||
elif output:
|
||||
output.pop()
|
||||
|
||||
# If the path starts with '/' and the output is empty or the first string
|
||||
# is non-empty
|
||||
if path.startswith("/") and (not output or output[0]):
|
||||
output.insert(0, "")
|
||||
|
||||
# If the path starts with '/.' or '/..' ensure we add one more empty
|
||||
# string to add a trailing '/'
|
||||
if path.endswith(("/.", "/..")):
|
||||
output.append("")
|
||||
|
||||
return "/".join(output)
|
||||
|
||||
@typing.overload
|
||||
def _normalize_host(host: None, scheme: str | None) -> None: ...
|
||||
|
||||
@typing.overload
|
||||
def _normalize_host(host: str, scheme: str | None) -> str: ...
|
||||
|
||||
def _normalize_host(host: str | None, scheme: str | None) -> str | None:
|
||||
if host:
|
||||
if scheme in _NORMALIZABLE_SCHEMES:
|
||||
is_ipv6 = _IPV6_ADDRZ_RE.match(host)
|
||||
if is_ipv6:
|
||||
# IPv6 hosts of the form 'a::b%zone' are encoded in a URL as
|
||||
# such per RFC 6874: 'a::b%25zone'. Unquote the ZoneID
|
||||
# separator as necessary to return a valid RFC 4007 scoped IP.
|
||||
match = _ZONE_ID_RE.search(host)
|
||||
if match:
|
||||
start, end = match.span(1)
|
||||
zone_id = host[start:end]
|
||||
|
||||
if zone_id.startswith("%25") and zone_id != "%25":
|
||||
zone_id = zone_id[3:]
|
||||
else:
|
||||
zone_id = zone_id[1:]
|
||||
zone_id = _encode_invalid_chars(zone_id, _UNRESERVED_CHARS)
|
||||
return f"{host[:start].lower()}%{zone_id}{host[end:]}"
|
||||
else:
|
||||
return host.lower()
|
||||
elif not _IPV4_RE.match(host):
|
||||
return to_str(
|
||||
b".".join([_idna_encode(label) for label in host.split(".")]),
|
||||
"ascii",
|
||||
)
|
||||
return host
|
||||
|
||||
def _idna_encode(name: str) -> bytes:
|
||||
if not name.isascii():
|
||||
try:
|
||||
import idna
|
||||
except ImportError:
|
||||
raise LocationParseError("Unable to parse URL without the 'idna' module") from None
|
||||
|
||||
try:
|
||||
return idna.encode(name.lower(), strict=True, std3_rules=True)
|
||||
except idna.IDNAError:
|
||||
raise LocationParseError(f"Name '{name}' is not a valid IDNA label") from None
|
||||
|
||||
return name.lower().encode("ascii")
|
||||
|
||||
def _encode_target(target: str) -> str:
|
||||
"""Percent-encodes a request target so that there are no invalid characters
|
||||
|
||||
Pre-condition for this function is that 'target' must start with '/'.
|
||||
If that is the case then _TARGET_RE will always produce a match.
|
||||
"""
|
||||
match = _TARGET_RE.match(target)
|
||||
if not match: # Defensive:
|
||||
raise LocationParseError(f"{target!r} is not a valid request URI")
|
||||
|
||||
path, query = match.groups()
|
||||
encoded_target = _encode_invalid_chars(path, _PATH_CHARS)
|
||||
if query is not None:
|
||||
query = _encode_invalid_chars(query, _QUERY_CHARS)
|
||||
encoded_target += "?" + query
|
||||
return encoded_target
|
||||
|
||||
def parse_url(url: str) -> Url:
    """
    Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
    performed to parse incomplete urls. Fields not provided will be None.
    This parser is RFC 3986 and RFC 6874 compliant.

    The parser logic and helper functions are based heavily on
    work done in the ``rfc3986`` module.

    :param str url: URL to parse into a :class:`.Url` namedtuple.

    Partly backwards-compatible with :mod:`urllib.parse`.

    Example:

    .. code-block:: python

        import urllib3

        print(urllib3.util.parse_url('http://google.com/mail/'))
        # Url(scheme='http', host='google.com', port=None, path='/mail/', ...)

        print(urllib3.util.parse_url('google.com:80'))
        # Url(scheme=None, host='google.com', port=80, path=None, ...)

        print(urllib3.util.parse_url('/foo?bar'))
        # Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
    """
    if not url:
        # Empty
        return Url()

    source_url = url
    if not _SCHEME_RE.search(url):
        url = "//" + url

    scheme: str | None
    authority: str | None
    auth: str | None
    host: str | None
    port: str | None
    port_int: int | None
    path: str | None
    query: str | None
    fragment: str | None

    try:
        scheme, authority, path, query, fragment = _URI_RE.match(url).groups()  # type: ignore[union-attr]
        normalize_uri = scheme is None or scheme.lower() in _NORMALIZABLE_SCHEMES

        if scheme:
            scheme = scheme.lower()

        if authority:
            auth, _, host_port = authority.rpartition("@")
            auth = auth or None
            host, port = _HOST_PORT_RE.match(host_port).groups()  # type: ignore[union-attr]
            if auth and normalize_uri:
                auth = _encode_invalid_chars(auth, _USERINFO_CHARS)
            if port == "":
                port = None
        else:
            auth, host, port = None, None, None

        if port is not None:
            port_int = int(port)
            if not (0 <= port_int <= 65535):
                raise LocationParseError(url)
        else:
            port_int = None

        host = _normalize_host(host, scheme)

        if normalize_uri and path:
            path = _remove_path_dot_segments(path)
            path = _encode_invalid_chars(path, _PATH_CHARS)
        if normalize_uri and query:
            query = _encode_invalid_chars(query, _QUERY_CHARS)
        if normalize_uri and fragment:
            fragment = _encode_invalid_chars(fragment, _FRAGMENT_CHARS)

    except (ValueError, AttributeError) as e:
        raise LocationParseError(source_url) from e

    # For the sake of backwards compatibility we put empty
    # string values for path if there are any defined values
    # beyond the path in the URL.
    # TODO: Remove this when we break backwards compatibility.
    if not path:
        if query is not None or fragment is not None:
            path = ""
        else:
            path = None

    return Url(
        scheme=scheme,
        auth=auth,
        host=host,
        port=port_int,
        path=path,
        query=query,
        fragment=fragment,
    )


__all__ = ("Url", "parse_url", "HTTPError", "LocationParseError", "LocationValueError")
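A quick sketch of the normalization promised in the docstring: scheme and host case-folding, dot-segment removal, and integer port conversion. The commented result is what the code path above implies for this input, not a captured session:

from urllib3.util import parse_url

u = parse_url("HTTPS://User@Example.COM:8080/a/./b/../c?x=1#frag")
# scheme and host lowercased, "/a/./b/../c" collapsed to "/a/c", port as int:
# Url(scheme='https', auth='User', host='example.com', port=8080,
#     path='/a/c', query='x=1', fragment='frag')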
13  comicapi/archivers/__init__.py  Normal file
@@ -0,0 +1,13 @@
from __future__ import annotations

from comicapi.archivers.archiver import Archiver
from comicapi.archivers.folder import FolderArchiver
from comicapi.archivers.zip import ZipArchiver


class UnknownArchiver(Archiver):
    def name(self) -> str:
        return "Unknown"


__all__ = ["Archiver", "UnknownArchiver", "FolderArchiver", "ZipArchiver"]
146  comicapi/archivers/archiver.py  Normal file
@@ -0,0 +1,146 @@
from __future__ import annotations

import pathlib
from collections.abc import Collection
from typing import Protocol, runtime_checkable


@runtime_checkable
class Archiver(Protocol):
    """Archiver Protocol"""

    """The path to the archive"""
    path: pathlib.Path

    """
    The name of the executable used for this archiver. This should be the base name of the executable.
    For example if 'rar.exe' is needed this should be "rar".
    If an executable is not used this should be the empty string.
    """
    exe: str = ""

    """
    Whether or not this archiver is enabled.
    If external imports are required and are not available this should be False. See rar.py and sevenzip.py.
    """
    enabled: bool = True

    """
    Whether self.path is a single file that can be hashed.
    For example directories cannot be hashed.
    """
    hashable: bool = True

    supported_extensions: Collection[str] = set()

    def __init__(self) -> None:
        self.path = pathlib.Path()

    def get_comment(self) -> str:
        """
        Returns the comment from the current archive as a string.
        Should always return a string. If comments are not supported in the archive the empty string should be returned.
        """
        return ""

    def set_comment(self, comment: str) -> bool:
        """
        Returns True if the comment was successfully set on the current archive.
        Should always return a boolean. If comments are not supported in the archive False should be returned.
        """
        return False

    def supports_comment(self) -> bool:
        """
        Returns True if the current archive supports comments.
        Should always return a boolean. If comments are not supported in the archive False should be returned.
        """
        return False

    def read_file(self, archive_file: str) -> bytes:
        """
        Reads the named file from the current archive.
        archive_file should always come from the output of get_filename_list.
        Should always return a bytes object. Exceptions should be of the type OSError.
        """
        raise NotImplementedError

    def remove_file(self, archive_file: str) -> bool:
        """
        Removes the named file from the current archive.
        archive_file should always come from the output of get_filename_list.
        Should always return a boolean. Failures should return False.

        Rebuilding the archive without the named file is a standard way to remove a file.
        """
        return False

    def write_file(self, archive_file: str, data: bytes) -> bool:
        """
        Writes the named file to the current archive.
        Should always return a boolean. Failures should return False.
        """
        return False

    def get_filename_list(self) -> list[str]:
        """
        Returns a list of filenames in the current archive.
        Should always return a list of strings. Failures should return an empty list.
        """
        return []

    def supports_files(self) -> bool:
        """
        Returns True if the current archive supports arbitrary non-picture files.
        Should always return a boolean.
        If arbitrary non-picture files are not supported in the archive False should be returned.
        """
        return False

    def copy_from_archive(self, other_archive: Archiver) -> bool:
        """
        Copies the contents of another archive to the current archive.
        Should always return a boolean. Failures should return False.
        """
        return False

    def is_writable(self) -> bool:
        """
        Returns True if the current archive is writable.
        Should always return a boolean. Failures should return False.
        """
        return False

    def extension(self) -> str:
        """
        Returns the extension that this archiver should use, e.g. ".cbz".
        Should always return a string. Failures should return the empty string.
        """
        return ""

    def name(self) -> str:
        """
        Returns the name of this archiver for display purposes, e.g. "CBZ".
        Should always return a string. Failures should return the empty string.
        """
        return ""

    @classmethod
    def is_valid(cls, path: pathlib.Path) -> bool:
        """
        Returns True if the given path can be opened by this archiver.
        Should always return a boolean. Failures should return False.
        """
        return False

    @classmethod
    def open(cls, path: pathlib.Path) -> Archiver:
        """
        Opens the given archive.
        Should always return an Archiver.
        Should never raise an exception; no file operations should take place in this method,
        as is_valid will always be called before open.
        """
        archiver = cls()
        archiver.path = path
        return archiver
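Because Archiver is a runtime_checkable Protocol with safe no-op defaults, a plugin only needs to override what it actually supports. A minimal hypothetical read-only implementation; TarArchiver and its tarfile-based bodies are illustrative, not part of this changeset:

from __future__ import annotations

import tarfile

from comicapi.archivers import Archiver


class TarArchiver(Archiver):  # hypothetical example, not in this PR
    """Read-only .cbt/.tar support sketch; writes keep the False defaults."""

    supported_extensions = frozenset({".cbt", ".tar"})

    def read_file(self, archive_file: str) -> bytes:
        with tarfile.open(self.path, "r") as tf:
            reader = tf.extractfile(archive_file)
            if reader is None:
                raise OSError(f"not a regular file: {archive_file}")
            return reader.read()

    def get_filename_list(self) -> list[str]:
        with tarfile.open(self.path, "r") as tf:
            return [m.name for m in tf.getmembers() if m.isfile()]

    def name(self) -> str:
        return "TAR"


# Trivially true here since TarArchiver subclasses Archiver, but the
# runtime_checkable decorator also lets duck-typed classes pass this check.
assert isinstance(TarArchiver(), Archiver)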
115  comicapi/archivers/folder.py  Normal file
@@ -0,0 +1,115 @@
from __future__ import annotations

import logging
import os
import pathlib

from comicapi.archivers import Archiver

logger = logging.getLogger(__name__)


class FolderArchiver(Archiver):
    """Folder implementation"""

    hashable = False

    def __init__(self) -> None:
        super().__init__()
        self.comment_file_name = "ComicTaggerFolderComment.txt"
        self._filename_list: list[str] = []

    def get_comment(self) -> str:
        try:
            return (self.path / self.comment_file_name).read_text()
        except OSError:
            return ""

    def set_comment(self, comment: str) -> bool:
        self._filename_list = []
        if comment:
            return self.write_file(self.comment_file_name, comment.encode("utf-8"))
        (self.path / self.comment_file_name).unlink(missing_ok=True)
        return True

    def supports_comment(self) -> bool:
        return True

    def read_file(self, archive_file: str) -> bytes:
        try:
            data = (self.path / archive_file).read_bytes()
        except OSError as e:
            logger.error("Error reading folder archive [%s]: %s :: %s", e, self.path, archive_file)
            raise

        return data

    def remove_file(self, archive_file: str) -> bool:
        self._filename_list = []
        try:
            (self.path / archive_file).unlink(missing_ok=True)
        except OSError as e:
            logger.error("Error removing file for folder archive [%s]: %s :: %s", e, self.path, archive_file)
            return False
        else:
            return True

    def write_file(self, archive_file: str, data: bytes) -> bool:
        self._filename_list = []
        try:
            file_path = self.path / archive_file
            file_path.parent.mkdir(exist_ok=True, parents=True)
            with open(file_path, mode="wb") as f:
                f.write(data)
        except OSError as e:
            logger.error("Error writing folder archive [%s]: %s :: %s", e, self.path, archive_file)
            return False
        else:
            return True

    def get_filename_list(self) -> list[str]:
        if self._filename_list:
            return self._filename_list
        filenames = []
        try:
            for root, _dirs, files in os.walk(self.path):
                for f in files:
                    filenames.append(os.path.relpath(os.path.join(root, f), self.path).replace(os.path.sep, "/"))
            self._filename_list = filenames
            return filenames
        except OSError as e:
            logger.error("Error listing files in folder archive [%s]: %s", e, self.path)
            return []

    def supports_files(self) -> bool:
        return True

    def copy_from_archive(self, other_archive: Archiver) -> bool:
        """Replace the current folder with one copied from another archive"""
        self._filename_list = []
        try:
            for filename in other_archive.get_filename_list():
                data = other_archive.read_file(filename)
                if data is not None:
                    self.write_file(filename, data)

            # preserve the old comment
            comment = other_archive.get_comment()
            if comment is not None:
                if not self.set_comment(comment):
                    return False
        except Exception:
            logger.exception("Error while copying archive from %s to %s", other_archive.path, self.path)
            return False
        else:
            return True

    def is_writable(self) -> bool:
        return True

    def name(self) -> str:
        return "Folder"

    @classmethod
    def is_valid(cls, path: pathlib.Path) -> bool:
        return path.is_dir()
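A short usage sketch of the folder-backed archiver above; the path is illustrative:

import pathlib

from comicapi.archivers.folder import FolderArchiver

folder = FolderArchiver.open(pathlib.Path("/tmp/my-comic"))  # illustrative path
folder.write_file("pages/page001.jpg", b"...image bytes...")  # parents created on demand
folder.set_comment("scanned 2023")                            # stored in ComicTaggerFolderComment.txt
print(folder.get_filename_list())  # nested paths come back with "/" separators
print(folder.get_comment())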
347  comicapi/archivers/rar.py  Normal file
@@ -0,0 +1,347 @@
from __future__ import annotations

import functools
import logging
import os
import pathlib
import platform
import shutil
import subprocess
import tempfile

from comicapi.archivers import Archiver

try:
    import rarfile

    rar_support = True
except ImportError:
    rar_support = False


logger = logging.getLogger(__name__)

if not rar_support:
    logger.error("rar unavailable")

# Windows only, keeps the cmd.exe window from popping up
STARTUPINFO = None
if platform.system() == "Windows":
    STARTUPINFO = subprocess.STARTUPINFO()  # type: ignore
    STARTUPINFO.dwFlags |= subprocess.STARTF_USESHOWWINDOW  # type: ignore


class RarArchiver(Archiver):
    """RAR implementation"""

    enabled = rar_support
    exe = "rar"
    supported_extensions = frozenset({".cbr", ".rar"})

    _rar: rarfile.RarFile | None = None
    _rar_setup: rarfile.ToolSetup | None = None
    _writeable: bool | None = None

    def __init__(self) -> None:
        super().__init__()
        self._filename_list: list[str] = []

    def get_comment(self) -> str:
        rarc = self.get_rar_obj()
        return (rarc.comment if rarc else "") or ""

    def set_comment(self, comment: str) -> bool:
        self._reset()
        if rar_support and self.exe:
            try:
                # write comment to temp file
                with tempfile.TemporaryDirectory() as tmp_dir:
                    tmp_file = pathlib.Path(tmp_dir) / "rar_comment.txt"
                    tmp_file.write_text(comment, encoding="utf-8")

                    working_dir = os.path.dirname(os.path.abspath(self.path))

                    # use external program to write comment to Rar archive
                    proc_args = [
                        self.exe,
                        "c",
                        f"-w{working_dir}",
                        "-c-",
                        f"-z{tmp_file}",
                        str(self.path),
                    ]
                    result = subprocess.run(
                        proc_args,
                        startupinfo=STARTUPINFO,
                        stdin=subprocess.DEVNULL,
                        capture_output=True,
                        encoding="utf-8",
                        cwd=tmp_dir,
                    )
                    if result.returncode != 0:
                        logger.error(
                            "Error writing comment to rar archive [exitcode: %d]: %s :: %s",
                            result.returncode,
                            self.path,
                            result.stderr,
                        )
                        return False
            except OSError as e:
                logger.exception("Error writing comment to rar archive [%s]: %s", e, self.path)
                return False
            return True
        return False

    def supports_comment(self) -> bool:
        return True

    def read_file(self, archive_file: str) -> bytes:
        rarc = self.get_rar_obj()
        if rarc is None:
            return b""

        tries = 0
        while tries < 7:
            try:
                tries = tries + 1
                data: bytes = rarc.open(archive_file).read()
                entries = [(rarc.getinfo(archive_file), data)]

                if entries[0][0].file_size != len(entries[0][1]):
                    logger.info(
                        "Error reading rar archive [file is not expected size: %d vs %d] %s :: %s :: tries #%d",
                        entries[0][0].file_size,
                        len(entries[0][1]),
                        self.path,
                        archive_file,
                        tries,
                    )
                    continue

            except OSError as e:
                logger.error("Error reading rar archive [%s]: %s :: %s :: tries #%d", e, self.path, archive_file, tries)
            except Exception as e:
                logger.error(
                    "Unexpected exception reading rar archive [%s]: %s :: %s :: tries #%d",
                    e,
                    self.path,
                    archive_file,
                    tries,
                )
                break

            else:
                # Success. Entries is a list of tuples: (rarinfo, filedata)
                if len(entries) == 1:
                    return entries[0][1]

                raise OSError

        raise OSError

    def remove_file(self, archive_file: str) -> bool:
        self._reset()
        if self.exe:
            working_dir = os.path.dirname(os.path.abspath(self.path))
            # use external program to remove file from Rar archive
            result = subprocess.run(
                [self.exe, "d", f"-w{working_dir}", "-c-", self.path, archive_file],
                startupinfo=STARTUPINFO,
                stdin=subprocess.DEVNULL,
                capture_output=True,
                encoding="utf-8",
                cwd=self.path.absolute().parent,
            )

            if result.returncode != 0:
                logger.error(
                    "Error removing file from rar archive [exitcode: %d]: %s :: %s",
                    result.returncode,
                    self.path,
                    archive_file,
                )
                return False
            return True
        return False

    def write_file(self, archive_file: str, data: bytes) -> bool:
        self._reset()
        if self.exe:
            archive_path = pathlib.PurePosixPath(archive_file)
            archive_name = archive_path.name
            archive_parent = str(archive_path.parent).lstrip("./")
            working_dir = os.path.dirname(os.path.abspath(self.path))

            # use external program to write file to Rar archive
            result = subprocess.run(
                [
                    self.exe,
                    "a",
                    f"-w{working_dir}",
                    f"-si{archive_name}",
                    f"-ap{archive_parent}",
                    "-c-",
                    "-ep",
                    self.path,
                ],
                input=data,
                startupinfo=STARTUPINFO,
                capture_output=True,
                cwd=self.path.absolute().parent,
            )

            if result.returncode != 0:
                logger.error(
                    "Error writing rar archive [exitcode: %d]: %s :: %s :: %s",
                    result.returncode,
                    self.path,
                    archive_file,
                    result.stderr,
                )
                return False
            return True
        return False

    def get_filename_list(self) -> list[str]:
        if self._filename_list:
            return self._filename_list
        rarc = self.get_rar_obj()
        tries = 0
        if rar_support and rarc:
            while tries < 7:
                try:
                    tries = tries + 1
                    namelist = []
                    for item in rarc.infolist():
                        if item.file_size != 0:
                            namelist.append(item.filename)

                except OSError as e:
                    logger.error("Error listing files in rar archive [%s]: %s :: attempt #%d", e, self.path, tries)

                else:
                    self._filename_list = namelist
                    return namelist
        return []

    def supports_files(self) -> bool:
        return True

    def copy_from_archive(self, other_archive: Archiver) -> bool:
        """Replace the current archive with one copied from another archive"""
        self._reset()
        try:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tmp_path = pathlib.Path(tmp_dir)
                rar_cwd = tmp_path / "rar"
                rar_cwd.mkdir(exist_ok=True)
                rar_path = (tmp_path / self.path.name).with_suffix(".rar")
                working_dir = os.path.dirname(os.path.abspath(self.path))

                for filename in other_archive.get_filename_list():
                    (rar_cwd / filename).parent.mkdir(exist_ok=True, parents=True)
                    data = other_archive.read_file(filename)
                    if data is not None:
                        with open(rar_cwd / filename, mode="w+b") as tmp_file:
                            tmp_file.write(data)
                result = subprocess.run(
                    [self.exe, "a", f"-w{working_dir}", "-r", "-c-", str(rar_path.absolute()), "."],
                    cwd=rar_cwd.absolute(),
                    startupinfo=STARTUPINFO,
                    stdin=subprocess.DEVNULL,
                    capture_output=True,
                    encoding="utf-8",
                )
                if result.returncode != 0:
                    logger.error(
                        "Error while copying to rar archive [exitcode: %d]: %s: %s",
                        result.returncode,
                        self.path,
                        result.stderr,
                    )
                    return False

                self.path.unlink(missing_ok=True)
                shutil.move(rar_path, self.path)
        except Exception as e:
            logger.exception("Error while copying to rar archive [%s]: from %s to %s", e, other_archive.path, self.path)
            return False
        else:
            return True

    @classmethod
    @functools.cache
    def _log_not_writeable(cls, exe: str) -> None:
        logger.warning("Unable to find a usable copy of %r, will not be able to write rar files", exe)

    def is_writable(self) -> bool:
        return bool(self._writeable and bool(self.exe and (os.path.exists(self.exe) or shutil.which(self.exe))))

    def extension(self) -> str:
        return ".cbr"

    def name(self) -> str:
        return "RAR"

    @classmethod
    def _setup_rar(cls) -> None:
        if cls._rar_setup is None:
            assert rarfile
            orig = rarfile.UNRAR_TOOL
            rarfile.UNRAR_TOOL = cls.exe
            try:
                cls._rar_setup = rarfile.tool_setup(sevenzip=False, sevenzip2=False, force=True)
            except rarfile.RarCannotExec:
                rarfile.UNRAR_TOOL = orig

                try:
                    cls._rar_setup = rarfile.tool_setup(force=True)
                except rarfile.RarCannotExec as e:
                    logger.info(e)
        if cls._writeable is None:
            try:
                cls._writeable = (
                    subprocess.run(
                        (cls.exe,),
                        startupinfo=STARTUPINFO,
                        capture_output=True,
                        # cwd=cls.path.absolute().parent,
                    )
                    .stdout.strip()
                    .startswith(b"RAR")
                )
            except OSError:
                cls._writeable = False

        if not cls._writeable:
            cls._log_not_writeable(cls.exe or "rar")

    @classmethod
    def is_valid(cls, path: pathlib.Path) -> bool:
        if rar_support:
            assert rarfile
            cls._setup_rar()

        # Fallback to standard
        try:
            return rarfile.is_rarfile(str(path))
        except rarfile.RarCannotExec as e:
            logger.info(e)
        return False

    def _reset(self) -> None:
        self._rar = None
        self._filename_list = []

    def get_rar_obj(self) -> rarfile.RarFile | None:
        if self._rar is not None:
            return self._rar
        if rar_support:
            try:
                rarc = rarfile.RarFile(str(self.path))
                self._rar = rarc
            except (OSError, rarfile.RarFileError) as e:
                logger.error("Unable to get rar object [%s]: %s", e, self.path)
            else:
                return rarc

        return None
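Since RAR writing shells out to an external rar binary, callers should gate on is_valid() and is_writable() rather than assume writes succeed. A hedged sketch with an illustrative path:

import pathlib

from comicapi.archivers.rar import RarArchiver

path = pathlib.Path("example.cbr")  # illustrative path
if RarArchiver.is_valid(path):      # also primes the rarfile tool setup
    rar = RarArchiver.open(path)
    print(rar.get_filename_list())
    if rar.is_writable():
        rar.set_comment("retagged")  # shells out to the rar executable
    else:
        print("read-only: no usable rar executable found")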
143  comicapi/archivers/sevenzip.py  Normal file
@@ -0,0 +1,143 @@
from __future__ import annotations

import logging
import os
import pathlib
import shutil
import tempfile

from comicapi.archivers import Archiver

try:
    import py7zr

    z7_support = True
except ImportError:
    z7_support = False

logger = logging.getLogger(__name__)


class SevenZipArchiver(Archiver):
    """7Z implementation"""

    enabled = z7_support
    supported_extensions = frozenset({".7z", ".cb7"})

    def __init__(self) -> None:
        super().__init__()
        self._filename_list: list[str] = []

    # @todo: Implement Comment?
    def get_comment(self) -> str:
        return ""

    def set_comment(self, comment: str) -> bool:
        return False

    def read_file(self, archive_file: str) -> bytes:
        data = b""
        try:
            with py7zr.SevenZipFile(self.path, "r") as zf:
                data = zf.read([archive_file])[archive_file].read()
        except (py7zr.Bad7zFile, OSError) as e:
            logger.error("Error reading 7zip archive [%s]: %s :: %s", e, self.path, archive_file)
            raise

        return data

    def remove_file(self, archive_file: str) -> bool:
        self._filename_list = []
        return self.rebuild([archive_file])

    def write_file(self, archive_file: str, data: bytes) -> bool:
        # At the moment there is no other option but to rebuild the whole
        # archive without the indicated file if it already exists, then append
        # the new copy. Very sucky, but maybe another solution can be found
        files = self.get_filename_list()
        self._filename_list = []
        if archive_file in files:
            if not self.rebuild([archive_file]):
                return False

        try:
            # now just add the archive file as a new one
            with py7zr.SevenZipFile(self.path, "a") as zf:
                zf.writestr(data, archive_file)
            return True
        except (py7zr.Bad7zFile, OSError) as e:
            logger.error("Error writing 7zip archive [%s]: %s :: %s", e, self.path, archive_file)
            return False

    def get_filename_list(self) -> list[str]:
        if self._filename_list:
            return self._filename_list
        try:
            with py7zr.SevenZipFile(self.path, "r") as zf:
                namelist: list[str] = [file.filename for file in zf.list() if not file.is_directory]

            self._filename_list = namelist
            return namelist
        except (py7zr.Bad7zFile, OSError) as e:
            logger.error("Error listing files in 7zip archive [%s]: %s", e, self.path)
            return []

    def supports_files(self) -> bool:
        return True

    def rebuild(self, exclude_list: list[str]) -> bool:
        """7z helper func

        This recompresses the 7zip archive, without the files in the exclude_list
        """

        self._filename_list = []
        try:
            # py7zr treats all archives as if they used solid compression
            # so we need to get the filename list first to read all the files at once
            with py7zr.SevenZipFile(self.path, mode="r") as zin:
                targets = [f for f in zin.getnames() if f not in exclude_list]
            with tempfile.NamedTemporaryFile(dir=os.path.dirname(self.path), delete=False) as tmp_file:
                with py7zr.SevenZipFile(tmp_file.file, mode="w") as zout:
                    with py7zr.SevenZipFile(self.path, mode="r") as zin:
                        for filename, buffer in zin.read(targets).items():
                            zout.writef(buffer, filename)

                self.path.unlink(missing_ok=True)
                tmp_file.close()  # Required on windows

                shutil.move(tmp_file.name, self.path)
        except (py7zr.Bad7zFile, OSError) as e:
            logger.error("Error rebuilding 7zip file [%s]: %s", e, self.path)
            return False
        return True

    def copy_from_archive(self, other_archive: Archiver) -> bool:
        """Replace the current 7zip archive with one copied from another archive"""
        self._filename_list = []
        try:
            with py7zr.SevenZipFile(self.path, "w") as zout:
                for filename in other_archive.get_filename_list():
                    data = other_archive.read_file(
                        filename
                    )  # This will be very inefficient if other_archive is a 7z file
                    if data is not None:
                        zout.writestr(data, filename)
        except Exception as e:
            logger.error("Error while copying to 7zip archive [%s]: from %s to %s", e, other_archive.path, self.path)
            return False
        else:
            return True

    def is_writable(self) -> bool:
        return True

    def extension(self) -> str:
        return ".cb7"

    def name(self) -> str:
        return "Seven Zip"

    @classmethod
    def is_valid(cls, path: pathlib.Path) -> bool:
        return py7zr.is_7zfile(path)
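The solid-compression caveat in rebuild() is also why per-file reads can be slow: py7zr may decode a whole stream to serve one member. An illustrative usage sketch:

import pathlib

from comicapi.archivers.sevenzip import SevenZipArchiver

cb7 = SevenZipArchiver.open(pathlib.Path("example.cb7"))  # illustrative path
names = cb7.get_filename_list()           # cached after the first call
if names:
    first_page = cb7.read_file(names[0])  # may decode a full solid stream
    print(names[0], len(first_page), "bytes")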
160  comicapi/archivers/zip.py  Normal file
@@ -0,0 +1,160 @@
from __future__ import annotations

import logging
import os
import pathlib
import shutil
import tempfile
import zipfile
from typing import cast

import chardet
from zipremove import ZipFile

from comicapi.archivers import Archiver

logger = logging.getLogger(__name__)


class ZipArchiver(Archiver):
    """ZIP implementation"""

    supported_extensions = frozenset((".cbz", ".zip"))

    def __init__(self) -> None:
        super().__init__()
        self._filename_list: list[str] = []

    def supports_comment(self) -> bool:
        return True

    def get_comment(self) -> str:
        with ZipFile(self.path, "r") as zf:
            encoding = chardet.detect(zf.comment, True)
            if encoding["confidence"] > 0.6:  # chardet confidence is a 0-1 float
                try:
                    comment = zf.comment.decode(encoding["encoding"])
                except UnicodeDecodeError:
                    comment = zf.comment.decode("utf-8", errors="replace")
            else:
                comment = zf.comment.decode("utf-8", errors="replace")
        return comment

    def set_comment(self, comment: str) -> bool:
        with ZipFile(self.path, mode="a") as zf:
            zf.comment = bytes(comment, "utf-8")
        return True

    def read_file(self, archive_file: str) -> bytes:
        with ZipFile(self.path, mode="r") as zf:
            try:
                data = zf.read(archive_file)
            except (zipfile.BadZipfile, OSError) as e:
                logger.exception("Error reading zip archive [%s]: %s :: %s", e, self.path, archive_file)
                raise
        return data

    def remove_file(self, archive_file: str) -> bool:
        files = self.get_filename_list()
        self._filename_list = []
        try:
            with ZipFile(self.path, mode="a", allowZip64=True, compression=zipfile.ZIP_DEFLATED) as zf:
                if archive_file in files:
                    zf.repack([zf.remove(archive_file)])
            return True
        except (zipfile.BadZipfile, OSError) as e:
            logger.error("Error removing file from zip archive [%s]: %s :: %s", e, self.path, archive_file)
            return False

    def write_file(self, archive_file: str, data: bytes) -> bool:
        files = self.get_filename_list()
        self._filename_list = []

        try:
            # remove any existing copy, then add the archive file as a new one
            with ZipFile(self.path, mode="a", allowZip64=True, compression=zipfile.ZIP_DEFLATED) as zf:
                if archive_file in files:
                    zf.repack([zf.remove(archive_file)])
                zf.writestr(archive_file, data)
            return True
        except (zipfile.BadZipfile, OSError) as e:
            logger.error("Error writing zip archive [%s]: %s :: %s", e, self.path, archive_file)
            return False

    def get_filename_list(self) -> list[str]:
        if self._filename_list:
            return self._filename_list
        try:
            with ZipFile(self.path, mode="r") as zf:
                self._filename_list = [file.filename for file in zf.infolist() if not file.is_dir()]
            return self._filename_list
        except (zipfile.BadZipfile, OSError) as e:
            logger.error("Error listing files in zip archive [%s]: %s", e, self.path)
            return []

    def supports_files(self) -> bool:
        return True

    def rebuild(self, exclude_list: list[str]) -> bool:
        """Zip helper func

        This recompresses the zip archive, without the files in the exclude_list
        """
        self._filename_list = []
        try:
            with ZipFile(
                tempfile.NamedTemporaryFile(dir=os.path.dirname(self.path), delete=False), "w", allowZip64=True
            ) as zout:
                with ZipFile(self.path, mode="r") as zin:
                    for item in zin.infolist():
                        buffer = zin.read(item.filename)
                        if item.filename not in exclude_list:
                            zout.writestr(item, buffer)

                    # preserve the old comment
                    zout.comment = zin.comment

                # replace with the new file
                self.path.unlink(missing_ok=True)
                zout.close()  # Required on windows

                shutil.move(cast(str, zout.filename), self.path)

        except (zipfile.BadZipfile, OSError) as e:
            logger.error("Error rebuilding zip file [%s]: %s", e, self.path)
            return False
        return True

    def copy_from_archive(self, other_archive: Archiver) -> bool:
        """Replace the current zip with one copied from another archive"""
        self._filename_list = []
        try:
            with ZipFile(self.path, mode="w", allowZip64=True) as zout:
                for filename in other_archive.get_filename_list():
                    data = other_archive.read_file(filename)
                    if data is not None:
                        zout.writestr(filename, data)

            # preserve the old comment
            comment = other_archive.get_comment()
            if comment is not None:
                if not self.set_comment(comment):
                    return False
        except Exception as e:
            logger.error("Error while copying to zip archive [%s]: from %s to %s", e, other_archive.path, self.path)
            return False
        else:
            return True

    def is_writable(self) -> bool:
        return True

    def extension(self) -> str:
        return ".cbz"

    def name(self) -> str:
        return "ZIP"

    @classmethod
    def is_valid(cls, path: pathlib.Path) -> bool:
        return zipfile.is_zipfile(path)  # only checks the central directory at the end of the archive
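A round-trip sketch of the ZIP comment handling and in-place write behavior above, with an illustrative path:

import pathlib

from comicapi.archivers.zip import ZipArchiver

cbz = ZipArchiver.open(pathlib.Path("example.cbz"))  # illustrative path
cbz.write_file("ComicInfo.xml", b"<ComicInfo/>")     # replaces any existing entry
cbz.set_comment("tagged with ComicTagger")
print(cbz.get_comment())        # decoded via chardet, with a utf-8 fallback
print(cbz.get_filename_list())  # directories are filtered out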
473  comicapi/comicarchive.py  Normal file
@@ -0,0 +1,473 @@
"""A class to represent a single comic, be it file or folder of images"""

# Copyright 2012-2014 ComicTagger Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

import hashlib
import importlib.util
import inspect
import io
import itertools
import logging
import os
import pathlib
import shutil
import sys
from collections.abc import Iterable

from comicapi import utils
from comicapi.archivers import Archiver, UnknownArchiver, ZipArchiver
from comicapi.genericmetadata import FileHash, GenericMetadata
from comicapi.tags import Tag
from comictaggerlib.ctversion import version

logger = logging.getLogger(__name__)

archivers: list[type[Archiver]] = []
tags: dict[str, Tag] = {}


def load_archive_plugins(local_plugins: Iterable[type[Archiver]] = tuple()) -> None:
    if archivers:
        return
    if sys.version_info < (3, 10):
        from importlib_metadata import entry_points
    else:
        from importlib.metadata import entry_points
    builtin: list[type[Archiver]] = []
    archive_plugins: list[type[Archiver]] = []
    # A list is used; the first matching plugin wins

    for ep in itertools.chain(entry_points(group="comicapi.archiver")):
        try:
            spec = importlib.util.find_spec(ep.module)
        except ValueError:
            spec = None
        try:
            archiver: type[Archiver] = ep.load()

            if ep.module.startswith("comicapi"):
                builtin.append(archiver)
            else:
                archive_plugins.append(archiver)
        except Exception:
            if spec and spec.has_location:
                logger.exception("Failed to load archive plugin: %s from %s", ep.name, spec.origin)
            else:
                logger.exception("Failed to load archive plugin: %s", ep.name)
    archivers.clear()
    archivers.extend(local_plugins)
    archivers.extend(archive_plugins)
    archivers.extend(builtin)


def load_tag_plugins(version: str = f"ComicAPI/{version}", local_plugins: Iterable[type[Tag]] = tuple()) -> None:
    if tags:
        return
    if sys.version_info < (3, 10):
        from importlib_metadata import entry_points
    else:
        from importlib.metadata import entry_points
    builtin: dict[str, Tag] = {}
    tag_plugins: dict[str, tuple[Tag, str]] = {}
    # A dict is used, last plugin wins
    for ep in entry_points(group="comicapi.tags"):
        location = "Unknown"
        try:
            _spec = importlib.util.find_spec(ep.module)
            if _spec and _spec.has_location and _spec.origin:
                location = _spec.origin
        except ValueError:
            location = "Unknown"

        try:
            tag: type[Tag] = ep.load()

            if ep.module.startswith("comicapi"):
                builtin[tag.id] = tag(version)
            else:
                if tag.id in tag_plugins:
                    logger.warning(
                        "Plugin %s from %s is overriding the existing plugin for %s tags",
                        ep.module,
                        location,
                        tag.id,
                    )
                tag_plugins[tag.id] = (tag(version), location)
        except Exception:
            logger.exception("Failed to load tag plugin: %s from %s", ep.name, location)
    # A dict is used, last plugin wins
    for tag in local_plugins:
        tag_plugins[tag.id] = (tag(version), "Local")

    for tag_id in set(builtin.keys()).intersection(tag_plugins):
        location = tag_plugins[tag_id][1]
        logger.warning("The builtin plugin for %s tags is being overridden by a plugin from %s", tag_id, location)

    tags.clear()
    tags.update(builtin)
    tags.update({s[0]: s[1][0] for s in tag_plugins.items()})
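Local, non-entry-point plugins go in through the local_plugins parameter shown above; for archivers they are extended into the registry first, so they win ties against entry-point plugins. A hedged sketch with a hypothetical archiver class:

from comicapi import comicarchive
from comicapi.archivers import Archiver


class NullArchiver(Archiver):  # hypothetical stand-in for a real plugin
    def name(self) -> str:
        return "Null"


# A second call is a no-op once comicarchive.archivers is non-empty.
comicarchive.load_archive_plugins(local_plugins=[NullArchiver])
print(comicarchive.archivers[0].__name__)  # 'NullArchiver'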
class ComicArchive:
    logo_data = b""
    pil_available: bool | None = None

    def __init__(
        self,
        path: pathlib.Path | str | Archiver,
        default_image_path: pathlib.Path | str | None = None,
        hash_archive: str = "",
    ) -> None:
        self.md: dict[str, GenericMetadata] = {}
        self.page_count: int | None = None
        self.page_list: list[str] = []
        self.hash_archive = hash_archive

        self.reset_cache()
        self.default_image_path = default_image_path

        if isinstance(path, Archiver):
            self.path = path.path
            self.archiver: Archiver = path
        else:
            self.path = pathlib.Path(path).absolute()
            self.archiver = UnknownArchiver.open(self.path)

        load_archive_plugins()
        load_tag_plugins()
        archiver_missing = True
        for archiver in archivers:
            if self.path.suffix in archiver.supported_extensions and archiver.is_valid(self.path):
                self.archiver = archiver.open(self.path)
                archiver_missing = False
                break

        if archiver_missing:
            for archiver in archivers:
                if archiver.enabled and archiver.is_valid(self.path):
                    self.archiver = archiver.open(self.path)
                    break

        if not ComicArchive.logo_data and self.default_image_path:
            with open(self.default_image_path, mode="rb") as fd:
                ComicArchive.logo_data = fd.read()

    def reset_cache(self) -> None:
        """Clears the cached data"""

        self.page_count = None
        self.page_list.clear()
        self.md.clear()

    def load_cache(self, tag_ids: Iterable[str]) -> None:
        for tag_id in tag_ids:
            if tag_id not in tags:
                continue
            tag = tags[tag_id]
            if not tag.enabled:
                continue
            md = tag.read_tags(self.archiver)
            if not md.is_empty:
                self.md[tag_id] = md

    def get_supported_tags(self) -> list[str]:
        return [tag_id for tag_id, tag in tags.items() if tag.enabled and tag.supports_tags(self.archiver)]

    def rename(self, path: pathlib.Path | str) -> None:
        new_path = pathlib.Path(path).absolute()
        if new_path == self.path:
            return
        os.makedirs(new_path.parent, 0o777, True)
        shutil.move(self.path, new_path)
        self.path = new_path
        self.archiver.path = pathlib.Path(path)

    def is_writable(self, check_archive_status: bool = True) -> bool:
        if isinstance(self.archiver, UnknownArchiver):
            return False

        if check_archive_status and not self.archiver.is_writable():
            return False

        if not (os.access(self.path, os.W_OK) or os.access(self.path.parent, os.W_OK)):
            return False

        return True

    def is_zip(self) -> bool:
        return self.archiver.name() == "ZIP"

    def seems_to_be_a_comic_archive(self) -> bool:
        if (
            not (isinstance(self.archiver, UnknownArchiver))
            and self.get_number_of_pages() > 0
            and self.archiver.is_valid(self.path)
        ):
            return True

        return False

    def extension(self) -> str:
        return self.archiver.extension()

    def read_tags(self, tag_id: str) -> GenericMetadata:
        if tag_id in self.md:
            return self.md[tag_id]
        md = GenericMetadata()
        tag = tags[tag_id]
        if tag.enabled and tag.has_tags(self.archiver):
            md = tag.read_tags(self.archiver)
            md.apply_default_page_list(self.get_page_name_list())
        return md

    def read_raw_tags(self, tag_id: str) -> str:
        if not tags[tag_id].enabled:
            return ""
        return tags[tag_id].read_raw_tags(self.archiver)

    def write_tags(self, metadata: GenericMetadata, tag_id: str) -> bool:
        if tag_id in self.md:
            del self.md[tag_id]
        if not tags[tag_id].enabled:
            logger.warning("%s tags not enabled", tags[tag_id].name())
            return False

        self.apply_archive_info_to_metadata(metadata, True, True, hash_archive=self.hash_archive)
        return tags[tag_id].write_tags(metadata, self.archiver)

    def has_tags(self, tag_id: str) -> bool:
        if tag_id in self.md:
            return True
        if not tags[tag_id].enabled:
            return False
        return tags[tag_id].has_tags(self.archiver)

    def remove_tags(self, tag_id: str) -> bool:
        if tag_id in self.md:
            del self.md[tag_id]
        if not tags[tag_id].enabled:
            return False
        return tags[tag_id].remove_tags(self.archiver)

    def get_page(self, index: int) -> bytes:
        image_data = b""

        filename = self.get_page_name(index)

        if filename:
            try:
                image_data = self.archiver.read_file(filename) or b""
            except Exception:
                logger.exception("Error reading in page %d. Substituting logo page.", index)
                image_data = ComicArchive.logo_data

        return image_data

    def get_page_name(self, index: int) -> str:
        if index is None:
            return ""

        page_list = self.get_page_name_list()

        num_pages = len(page_list)
        if num_pages == 0 or index >= num_pages:
            return ""

        return page_list[index]

    def get_scanner_page_index(self) -> int | None:
        scanner_page_index = None

        # make a guess at the scanner page
        name_list = self.get_page_name_list()
        count = self.get_number_of_pages()

        # too few pages to really know
        if count < 5:
            return None

        # count the length of every filename, and count occurrences
        length_buckets: dict[int, int] = {}
        for name in name_list:
            fname = os.path.split(name)[1]
            length = len(fname)
            if length in length_buckets:
                length_buckets[length] += 1
            else:
                length_buckets[length] = 1

        # sort by most common
        sorted_buckets = sorted(length_buckets.items(), key=lambda tup: (tup[1], tup[0]), reverse=True)

        # statistical mode occurrence is first
        mode_length = sorted_buckets[0][0]

        # we are only going to consider the final image file:
        final_name = os.path.split(name_list[count - 1])[1]

        common_length_list = []
        for name in name_list:
            if len(os.path.split(name)[1]) == mode_length:
                common_length_list.append(os.path.split(name)[1])

        prefix = os.path.commonprefix(common_length_list)

        if mode_length <= 7 and prefix == "":
            # probably all numbers
            if len(final_name) > mode_length:
                scanner_page_index = count - 1

        # see if the last page doesn't start with the same prefix as most others
        elif not final_name.startswith(prefix):
            scanner_page_index = count - 1

        return scanner_page_index

    def get_page_name_list(self) -> list[str]:
        if not self.page_list:
            self.__import_pil__()  # Import Pillow for the list of supported extensions
            self.page_list = utils.get_page_name_list(self.archiver.get_filename_list())

        return self.page_list

    def get_number_of_pages(self) -> int:
        if self.page_count is None:
            self.page_count = len(self.get_page_name_list())
        return self.page_count

    def __import_pil__(self) -> bool:
        if self.pil_available is not None:
            return self.pil_available

        try:
            from PIL import Image

            Image.init()
            utils.KNOWN_IMAGE_EXTENSIONS.update([ext for ext, typ in Image.EXTENSION.items() if typ in Image.OPEN])
            self.pil_available = True
        except Exception:
            self.pil_available = False
            logger.exception("Failed to load Pillow")
            return False
        return True

    def apply_archive_info_to_metadata(
        self,
        md: GenericMetadata,
        calc_page_sizes: bool = False,
        detect_double_page: bool = False,
        *,
        hash_archive: str = "",
    ) -> None:
        md.page_count = self.get_number_of_pages()
        md.apply_default_page_list(self.get_page_name_list())
        if not self.seems_to_be_a_comic_archive():
            return

        if hash_archive in hashlib.algorithms_available and not md.original_hash:
            hasher = getattr(hashlib, hash_archive, hash_archive)
            try:
                with self.archiver.path.open("b+r") as archive:
                    digest = utils.file_digest(archive, hasher)
                    if len(inspect.signature(digest.hexdigest).parameters) > 0:
                        # variable-length digests (e.g. shake_128) need an explicit byte length
                        length = digest.name.rpartition("_")[2]
                        if not length.isdigit():
                            length = "128"
                        md.original_hash = FileHash(digest.name, digest.hexdigest(int(length) // 8))  # type: ignore[call-arg]
                    else:
                        md.original_hash = FileHash(digest.name, digest.hexdigest())
            except Exception:
                logger.exception("Failed to calculate original hash for '%s'", self.archiver.path)
        if not calc_page_sizes:
            return
        for p in md.pages:
            if p.byte_size is None or p.height is None or p.width is None or p.double_page is None:
                try:
                    data = self.get_page(p.archive_index)
                    p.byte_size = len(data)
                    if not data or not self.__import_pil__():
                        continue

                    from PIL import Image

                    im = Image.open(io.BytesIO(data))
                    w, h = im.size

                    p.height = h
                    p.width = w
                    if detect_double_page:
                        p.double_page = p.is_double_page()
                except Exception as e:
                    logger.exception("Error decoding image [%s] %s :: image %s", e, self.path, p.archive_index)

    def metadata_from_filename(
        self,
        parser: utils.Parser = utils.Parser.ORIGINAL,
        remove_c2c: bool = False,
        remove_fcbd: bool = False,
        remove_publisher: bool = False,
        split_words: bool = False,
        allow_issue_start_with_letter: bool = False,
        protofolius_issue_number_scheme: bool = False,
    ) -> GenericMetadata:
        metadata = GenericMetadata()

        filename_info = utils.parse_filename(
            self.path.name,
            parser=parser,
            remove_c2c=remove_c2c,
            remove_fcbd=remove_fcbd,
            remove_publisher=remove_publisher,
            split_words=split_words,
            allow_issue_start_with_letter=allow_issue_start_with_letter,
            protofolius_issue_number_scheme=protofolius_issue_number_scheme,
        )
        metadata.alternate_number = utils.xlate(filename_info.get("alternate", None))
        metadata.issue = utils.xlate(filename_info.get("issue", None))
        metadata.issue_count = utils.xlate_int(filename_info.get("issue_count", None))
        metadata.publisher = utils.xlate(filename_info.get("publisher", None))
        metadata.series = utils.xlate(filename_info.get("series", None))
        metadata.title = utils.xlate(filename_info.get("title", None))
        metadata.volume = utils.xlate_int(filename_info.get("volume", None))
        metadata.volume_count = utils.xlate_int(filename_info.get("volume_count", None))
        metadata.year = utils.xlate_int(filename_info.get("year", None))

        metadata.scan_info = utils.xlate(filename_info.get("remainder", None))

        if filename_info.get("fcbd", None):
            metadata.format = "FCBD"
            metadata.tags.add("FCBD")

        if filename_info.get("c2c", None):
            metadata.tags.add("c2c")

        if filename_info.get("annual", None):
            metadata.format = "Annual"

        if filename_info.get("format", None):
            metadata.format = filename_info["format"]

        metadata.is_empty = False
        return metadata

    def export_as_zip(self, zip_filename: pathlib.Path) -> bool:
        if self.archiver.name() == "ZIP":
            # nothing to do, we're already a zip
            return True

        zip_archiver = ZipArchiver.open(zip_filename)
        return zip_archiver.copy_from_archive(self.archiver)
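Putting the pieces together, a hedged end-to-end sketch; the path is illustrative, and which tag ids appear depends on the tag plugins loaded:

import pathlib

from comicapi.comicarchive import ComicArchive

ca = ComicArchive(pathlib.Path("example.cbz"))  # illustrative path
if ca.seems_to_be_a_comic_archive():
    print(ca.archiver.name(), ca.get_number_of_pages())
    md = ca.metadata_from_filename()  # best-effort parse of the file name itself
    for tag_id in ca.get_supported_tags():
        if ca.has_tags(tag_id):
            md = ca.read_tags(tag_id)  # served from self.md if cached
    print(md.is_empty)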
5  comicapi/data/__init__.py  Normal file
@@ -0,0 +1,5 @@
from __future__ import annotations

import importlib.resources

data_path = importlib.resources.files(__package__)
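data_path is an importlib.resources Traversable, so bundled data files such as publishers.json (below) can be read without assuming an on-disk layout. A minimal sketch:

import json

from comicapi.data import data_path

publishers = json.loads((data_path / "publishers.json").read_text(encoding="utf-8"))
print(sorted(publishers))  # top-level publisher names, e.g. "DC Comics", "Marvel"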
143  comicapi/data/publishers.json  Normal file
@@ -0,0 +1,143 @@
{
    "Marvel": {
        "marvel comics": "",
        "aircel comics": "Aircel Comics",
        "aircel": "Aircel Comics",
        "atlas comics": "Atlas Comics",
        "atlas": "Atlas Comics",
        "crossgen comics": "CrossGen comics",
        "crossgen": "CrossGen comics",
        "curtis magazines": "Curtis Magazines",
        "disney books group": "Disney Books Group",
        "disney books": "Disney Books Group",
        "disney kingdoms": "Disney Kingdoms",
        "epic comics group": "Epic Comics",
        "epic comics": "Epic Comics",
        "epic": "Epic Comics",
        "eternity comics": "Eternity Comics",
        "humorama": "Humorama",
        "icon comics": "Icon Comics",
        "infinite comics": "Infinite Comics",
        "malibu comics": "Malibu Comics",
        "malibu": "Malibu Comics",
        "marvel 2099": "Marvel 2099",
        "marvel absurd": "Marvel Absurd",
        "marvel adventures": "Marvel Adventures",
        "marvel age": "Marvel Age",
        "marvel books": "Marvel Books",
        "marvel comics 2": "Marvel Comics 2",
        "marvel digital comics unlimited": "Marvel Unlimited",
        "marvel edge": "Marvel Edge",
        "marvel frontier": "Marvel Frontier",
        "marvel illustrated": "Marvel Illustrated",
        "marvel knights": "Marvel Knights",
        "marvel magazine group": "Marvel Magazine Group",
        "marvel mangaverse": "Marvel Mangaverse",
        "marvel monsters group": "Marvel Monsters Group",
        "marvel music": "Marvel Music",
        "marvel next": "Marvel Next",
        "marvel noir": "Marvel Noir",
        "marvel press": "Marvel Press",
        "marvel uk": "Marvel UK",
        "marvel unlimited": "Marvel Unlimited",
        "max": "MAX",
        "mc2": "Marvel Comics 2",
        "new universe": "New Universe",
        "non-pareil publishing corp.": "Non-Pareil Publishing Corp.",
        "paramount comics": "Paramount Comics",
        "power comics": "Power Comics",
        "razorline": "Razorline",
        "star comics": "Star Comics",
        "timely comics": "Timely Comics",
        "timely": "Timely Comics",
        "tsunami": "Tsunami",
        "ultimate comics": "Ultimate Comics",
        "ultimate marvel": "Ultimate Marvel",
        "vital publications, inc.": "Vital Publications, Inc."
    },

    "DC Comics": {
        "dc_comics": "",
        "dc": "",
        "dccomics": "",
        "!mpact comics": "Impact Comics",
        "all star dc": "All-Star",
        "all star": "All-Star",
        "all-star dc": "All-Star",
        "all-star": "All-Star",
        "america's best comics": "America's Best Comics",
        "black label": "DC Black Label",
        "cliffhanger": "Cliffhanger",
        "cmx manga": "CMX Manga",
        "dc black label": "DC Black Label",
        "dc focus": "DC Focus",
        "dc ink": "DC Ink",
        "dc zoom": "DC Zoom",
        "earth m": "Earth M",
        "earth one": "Earth One",
        "earth-m": "Earth M",
        "elseworlds": "Elseworlds",
        "eo": "Earth One",
        "first wave": "First Wave",
        "focus": "DC Focus",
        "helix": "Helix",
        "homage comics": "Homage Comics",
        "impact comics": "Impact Comics",
        "impact! comics": "Impact Comics",
        "johnny dc": "Johnny DC",
        "mad": "Mad",
        "minx": "Minx",
        "paradox press": "Paradox Press",
        "piranha press": "Piranha Press",
        "sandman universe": "Sandman Universe",
        "tangent comics": "Tangent Comics",
        "tsr": "TSR",
        "vertigo": "Vertigo",
        "wildstorm productions": "WildStorm Productions",
        "wildstorm signature": "WildStorm Productions",
        "wildstorm": "WildStorm Productions",
        "wonder comics": "Wonder Comics",
        "young animal": "Young Animal",
        "zuda comics": "Zuda Comics",
        "zuda": "Zuda Comics"
    },

    "Dark Horse Comics": {
        "berger books": "Berger Books",
        "comics' greatest world": "Dark Horse Heroes",
        "dark horse digital": "Dark Horse Digital",
        "dark horse heroes": "Dark Horse Heroes",
        "dark horse manga": "Dark Horse Manga",
        "dh deluxe": "DH Deluxe",
        "dh press": "DH Press",
        "kitchen sink books": "Kitchen Sink Books",
        "legend": "Legend",
        "m press": "M Press",
        "maverick": "Maverick"
    },

    "Archie Comics": {
        "archie action": "Archie Action",
        "archie adventure Series": "Archie Adventure Series",
        "archie horror": "Archie Horror",
        "dark circle Comics": "Dark Circle Comics",
        "dark circle": "Dark Circle Comics",
        "mighty comics Group": "Mighty Comics Group",
        "radio comics": "Mighty Comics Group",
        "red circle Comics": "Dark Circle Comics",
        "red circle": "Dark Circle Comics"
    },

    "Image Comics": {
        "Image": "",
        "avalon studios": "Avalon Studios",
        "desperado publishing": "Desperado Publishing",
        "extreme studios": "Extreme Studios",
        "gorilla comics": "Gorilla Comics",
        "highbrow entertainment": "Highbrow Entertainment",
        "shadowline": "Shadowline",
        "skybound entertainment": "Skybound Entertainment",
        "todd mcfarlane productions": "Todd McFarlane Productions",
        "top cow productions": "Top Cow Productions"
    }
}
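The structure reads as lowercased alias -> canonical imprint name, where an empty string appears to mean the publisher itself rather than an imprint. A hedged lookup sketch; guess_imprint is an illustrative helper, not comicapi's own API:

import json

from comicapi.data import data_path

_publishers = json.loads((data_path / "publishers.json").read_text(encoding="utf-8"))


def guess_imprint(name: str) -> tuple[str, str]:
    """Illustrative only: return (publisher, imprint) for an alias like 'vertigo'."""
    alias = name.casefold().strip()
    for publisher, imprints in _publishers.items():
        if alias in imprints:
            # an empty value means the alias is the publisher itself
            return publisher, imprints[alias] or publisher
    return name, ""


print(guess_imprint("vertigo"))  # ('DC Comics', 'Vertigo')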
419
comicapi/filenamelexer.py
Normal file
419
comicapi/filenamelexer.py
Normal file
@@ -0,0 +1,419 @@
# Extracted and mutilated from https://github.com/lordwelch/wsfmt
# Which was extracted and mutilated from https://github.com/golang/go/tree/master/src/text/template/parse
from __future__ import annotations

import calendar
import os
import unicodedata
from enum import Enum, auto
from itertools import chain
from typing import Any, Callable, Protocol


class ItemType(Enum):
    Error = auto()  # Error occurred; value is text of error
    EOF = auto()
    Text = auto()  # Text
    LeftParen = auto()
    Number = auto()  # Simple number
    IssueNumber = auto()  # Preceded by a # Symbol
    RightParen = auto()
    Space = auto()  # Run of spaces separating arguments
    Dot = auto()
    LeftBrace = auto()
    RightBrace = auto()
    LeftSBrace = auto()
    RightSBrace = auto()
    Symbol = auto()
    Skip = auto()  # __ or -- no title, issue or series information beyond
    Operator = auto()
    Calendar = auto()
    InfoSpecifier = auto()  # Specifies type of info e.g. v1 for 'volume': 1
    ArchiveType = auto()
    Honorific = auto()
    Publisher = auto()
    Keywords = auto()
    FCBD = auto()
    ComicType = auto()
    C2C = auto()


braces = [
    ItemType.LeftBrace,
    ItemType.LeftParen,
    ItemType.LeftSBrace,
    ItemType.RightBrace,
    ItemType.RightParen,
    ItemType.RightSBrace,
]

eof = chr(0)

key = {
    "fcbd": ItemType.FCBD,
    "freecomicbookday": ItemType.FCBD,
    "cbr": ItemType.ArchiveType,
    "cbz": ItemType.ArchiveType,
    "cbt": ItemType.ArchiveType,
    "cb7": ItemType.ArchiveType,
    "rar": ItemType.ArchiveType,
    "zip": ItemType.ArchiveType,
    "tar": ItemType.ArchiveType,
    "7z": ItemType.ArchiveType,
    "annual": ItemType.ComicType,
    "volume": ItemType.InfoSpecifier,
    "vol.": ItemType.InfoSpecifier,
    "vol": ItemType.InfoSpecifier,
    "v": ItemType.InfoSpecifier,
    "of": ItemType.InfoSpecifier,
    "dc": ItemType.Publisher,
    "marvel": ItemType.Publisher,
    "covers": ItemType.InfoSpecifier,
    "c2c": ItemType.C2C,
    "mr": ItemType.Honorific,
    "ms": ItemType.Honorific,
    "mrs": ItemType.Honorific,
    "dr": ItemType.Honorific,
}


class Item:
    def __init__(self, typ: ItemType, pos: int, val: str) -> None:
        self.typ: ItemType = typ
        self.pos: int = pos
        self.val: str = val
        self.no_space = False

    def __repr__(self) -> str:
        return f"{self.val}: index: {self.pos}: {self.typ}"


class LexerFunc(Protocol):
    def __call__(self, __origin: Lexer) -> LexerFunc | None: ...


class Lexer:
    def __init__(self, string: str, allow_issue_start_with_letter: bool = False) -> None:
        self.input: str = string  # The string being scanned
        # The next lexing function to enter
        self.state: LexerFunc | None = None
        self.pos: int = -1  # Current position in the input
        self.start: int = 0  # Start position of this item
        self.lastPos: int = 0  # Position of most recent item returned by nextItem
        self.paren_depth: int = 0  # Nesting depth of ( ) exprs
        self.brace_depth: int = 0  # Nesting depth of { }
        self.sbrace_depth: int = 0  # Nesting depth of [ ]
        self.items: list[Item] = []
        self.allow_issue_start_with_letter = allow_issue_start_with_letter

    # Next returns the next rune in the input.
    def get(self) -> str:
        if int(self.pos) >= len(self.input) - 1:
            self.pos += 1
            return eof

        self.pos += 1
        return self.input[self.pos]

    # Peek returns but does not consume the next rune in the input.
    def peek(self) -> str:
        if int(self.pos) >= len(self.input) - 1:
            return eof

        return self.input[self.pos + 1]

    def backup(self) -> None:
        self.pos -= 1

    # Emit passes an item back to the client.
    def emit(self, t: ItemType) -> None:
        self.items.append(Item(t, self.start, self.input[self.start : self.pos + 1]))
        self.start = self.pos + 1

    # Ignore skips over the pending input before this point.
    def ignore(self) -> None:
        self.start = self.pos

    # Accept consumes the next rune if it's from the valid set.
    def accept(self, valid: str | Callable[[str], bool]) -> bool:
        if isinstance(valid, str):
            if self.get() in valid:
                return True
        else:
            if valid(self.get()):
                return True

        self.backup()
        return False

    # AcceptRun consumes a run of runes from the valid set.
    def accept_run(self, valid: str | Callable[[str], bool]) -> bool:
        initial = self.pos
        if isinstance(valid, str):
            while self.get() in valid:
                continue
        else:
            while valid(self.get()):
                continue

        self.backup()
        return initial != self.pos

    def scan_number(self) -> bool:
        digits = "0123456789.,"

        if not self.accept_run(lambda x: x.isnumeric() or x in digits):
            return False
        if self.input[self.pos] == ".":
            self.backup()
        self.accept_run(str.isalpha)

        return True

    # Runs the state machine for the lexer.
    def run(self) -> None:
        self.state = lex_filename
        while self.state is not None:
            self.state = self.state(self)


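# Illustrative note (not part of the original diff): each lex_* function below
# consumes input and returns the next state function, or None to stop; this is
# the functional state-machine pattern of Go's text/template lexer that this
# file was adapted from. A sketch of the loop:
#
#     lexer = Lexer("v2 #001.cbz")
#     lexer.run()                      # drives state = state(lexer) until None
#     [item.typ for item in lexer.items]
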
# Errorf returns an error token and terminates the scan by passing
# back None as the next state, which stops the state machine in Lexer.run.
def errorf(lex: Lexer, message: str) -> Any:
    lex.items.append(Item(ItemType.Error, lex.start, message))
    return None


# Scans the elements inside action delimiters.
def lex_filename(lex: Lexer) -> LexerFunc | None:
    r = lex.get()
    if r == eof:
        if lex.paren_depth != 0:
            errorf(lex, "unclosed left paren")
            return None

        if lex.brace_depth != 0:
            errorf(lex, "unclosed left brace")
            return None
        lex.emit(ItemType.EOF)
        return None
    elif is_space(r):
        if r == "_" and lex.peek() == "_":
            lex.get()
            lex.emit(ItemType.Skip)
        else:
            return lex_space
    elif r == ".":
        r = lex.peek()
        if r.isnumeric() and lex.pos > 0 and is_space(lex.input[lex.pos - 1]):
            return lex_number
        lex.emit(ItemType.Dot)
        return lex_filename
    elif r == "'":
        r = lex.peek()
        if r.isdigit():
            return lex_number
        if is_symbol(r):
            lex.accept_run(is_symbol)
            lex.emit(ItemType.Symbol)
        else:
            return lex_text
    elif r.isnumeric():
        lex.backup()
        return lex_number
    elif r == "#":
        if lex.allow_issue_start_with_letter and is_alpha_numeric(lex.peek()):
            return lex_issue_number
        elif lex.peek().isnumeric() or lex.peek() in "-+.":
            return lex_issue_number
        lex.emit(ItemType.Symbol)
    elif is_operator(r):
        if r == "-" and lex.peek() == "-":
            lex.get()
            lex.emit(ItemType.Skip)
        else:
            return lex_operator
    elif is_alpha_numeric(r):
        lex.backup()
        return lex_text
    elif r == "(":
        lex.emit(ItemType.LeftParen)
        lex.paren_depth += 1
    elif r == ")":
        lex.emit(ItemType.RightParen)
        lex.paren_depth -= 1
        if lex.paren_depth < 0:
            errorf(lex, "unexpected right paren " + r)
            return None

    elif r == "{":
        lex.emit(ItemType.LeftBrace)
        lex.brace_depth += 1
    elif r == "}":
        lex.emit(ItemType.RightBrace)
        lex.brace_depth -= 1
        if lex.brace_depth < 0:
            errorf(lex, "unexpected right brace " + r)
            return None

    elif r == "[":
        lex.emit(ItemType.LeftSBrace)
        lex.sbrace_depth += 1
    elif r == "]":
        lex.emit(ItemType.RightSBrace)
        lex.sbrace_depth -= 1
        if lex.sbrace_depth < 0:
            errorf(lex, "unexpected right bracket " + r)
            return None
    elif is_symbol(r):
        if unicodedata.category(r) == "Sc":
            return lex_currency
        lex.accept_run(is_symbol)
        lex.emit(ItemType.Symbol)
    else:
        errorf(lex, "unrecognized character in action: " + repr(r))
        return None

    return lex_filename


def lex_currency(lex: Lexer) -> LexerFunc:
    orig = lex.pos
    lex.accept_run(is_space)
    if lex.peek().isnumeric():
        return lex_number
    else:
        lex.pos = orig
        # We don't have a number with this currency symbol. Don't treat it specially
        lex.emit(ItemType.Symbol)
    return lex_filename


def lex_operator(lex: Lexer) -> LexerFunc:
    lex.accept_run("-|:;")
    lex.emit(ItemType.Operator)
    return lex_filename


# LexSpace scans a run of space characters.
# One space has already been seen.
def lex_space(lex: Lexer) -> LexerFunc:
    lex.accept_run(is_space)

    lex.emit(ItemType.Space)
    return lex_filename


# Lex_text scans an alphanumeric run.
def lex_text(lex: Lexer) -> LexerFunc:
    while True:
        r = lex.get()
        if is_alpha_numeric(r) or r in "'":
            if r.isnumeric():  # E.g. v1
                word = lex.input[lex.start : lex.pos]
                if key.get(word.casefold(), None) == ItemType.InfoSpecifier:
                    lex.backup()
                    lex.emit(key[word.casefold()])
                    return lex_filename
        else:
            lex.backup()
            word = lex.input[lex.start : lex.pos + 1]

            if word.casefold() in key:
                if key[word.casefold()] in (ItemType.Honorific, ItemType.InfoSpecifier):
                    lex.accept(".")
                lex.emit(key[word.casefold()])
            elif cal(word):
                lex.emit(ItemType.Calendar)
            else:
                lex.emit(ItemType.Text)
            break

    return lex_filename


def cal(value: str) -> bool:
    return value.title() in set(chain(calendar.month_abbr, calendar.month_name, calendar.day_abbr, calendar.day_name))


def lex_number(lex: Lexer) -> LexerFunc | None:
    if not lex.scan_number():
        return errorf(lex, "bad number syntax: " + lex.input[lex.start : lex.pos])
    # Complex number logic removed. Messes with math operations without space

    if lex.input[lex.start] == "#":
        lex.emit(ItemType.IssueNumber)
    elif not lex.input[lex.pos].isnumeric():
        # Assume that 80th is just text and not a number
        lex.emit(ItemType.Text)
    else:
        # Used to check for a '$'
        endNumber = lex.pos

        # Consume any spaces
        lex.accept_run(is_space)

        # This number starts with a '$'; emit it as Text instead of a Number
        if "Sc" == unicodedata.category(lex.input[lex.start]):
            lex.pos = endNumber
            lex.emit(ItemType.Text)

        # This number ends in a '$'; if there is a number on the other side we assume the '$' belongs to the following number
        elif "Sc" == unicodedata.category(lex.get()):
            # Store the end of the number '$'. We still need to check to see if there is a number coming up
            endCurrency = lex.pos
            # Consume any spaces
            lex.accept_run(is_space)

            # This is a number
            if lex.peek().isnumeric():
                # We go back to the original number before the '$' and emit a number
                lex.pos = endNumber
                lex.emit(ItemType.Number)
            else:
                # There was no following number; reset to the '$' and emit as text
                lex.pos = endCurrency
                lex.emit(ItemType.Text)
        else:
            # We go back to the original number; there is no '$'
            lex.pos = endNumber
            lex.emit(ItemType.Number)

    return lex_filename


def lex_issue_number(lex: Lexer) -> LexerFunc:
    # Only called when lex.input[lex.start] == "#"
    original_start = lex.pos
    lex.accept_run(str.isalpha)

    if lex.peek().isnumeric():
        return lex_number
    else:
        lex.pos = original_start
        lex.emit(ItemType.Symbol)

    return lex_filename


def is_space(character: str) -> bool:
    return character in "_ \t"


# IsAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
def is_alpha_numeric(character: str) -> bool:
    return character.isalpha() or character.isnumeric()


def is_operator(character: str) -> bool:
    return character in "-|:;/\\"


def is_symbol(character: str) -> bool:
    return unicodedata.category(character)[0] in "PS" and character != "."


def Lex(filename: str, allow_issue_start_with_letter: bool = False) -> Lexer:
    lex = Lexer(os.path.basename(filename), allow_issue_start_with_letter)
    lex.run()
    return lex
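
# Illustrative usage (not part of the original diff); assumes this module is
# importable as comicapi.filenamelexer, consistent with the other new files:
#
#     from comicapi.filenamelexer import Lex, ItemType
#     lexer = Lex("Anda's Game #001 (2007).cbz")
#     for item in lexer.items:
#         print(item)                  # "<val>: index: <pos>: <ItemType>"
#     assert lexer.items[-1].typ in (ItemType.EOF, ItemType.Error)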
1280
comicapi/filenameparser.py
Normal file
File diff suppressed because it is too large
884
comicapi/genericmetadata.py
Normal file
@@ -0,0 +1,884 @@
"""A class for internal metadata storage

The goal of this class is to handle ALL the data that might come from various
tagging schemes and databases, such as ComicVine or GCD. This makes conversion
possible, however lossy it might be.

"""

# Copyright 2012-2014 ComicTagger Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

import copy
import dataclasses
import hashlib
import logging
from collections.abc import Sequence
from typing import TYPE_CHECKING, Any, Union, overload

from typing_extensions import NamedTuple

from comicapi import merge, utils
from comicapi._url import Url, parse_url
from comicapi.utils import norm_fold

# needed for runtime type guessing
if TYPE_CHECKING:
    Union

logger = logging.getLogger(__name__)


REMOVE = object()


Credit = merge.Credit


class PageType(merge.StrEnum):
    """
    These page info classes are exactly the same as the CIX scheme, since
    it's unique
    """

    FrontCover = "FrontCover"
    InnerCover = "InnerCover"
    Roundup = "Roundup"
    Story = "Story"
    Advertisement = "Advertisement"
    Editorial = "Editorial"
    Letters = "Letters"
    Preview = "Preview"
    BackCover = "BackCover"
    Other = "Other"
    Deleted = "Deleted"


@dataclasses.dataclass
class PageMetadata:
    filename: str
    type: str
    bookmark: str
    display_index: int
    archive_index: int
    # These are optional because getting this info requires reading in each page
    double_page: bool | None = None
    byte_size: int | None = None
    height: int | None = None
    width: int | None = None

    def set_type(self, value: str) -> None:
        values = {x.casefold(): x for x in PageType}
        self.type = values.get(value.casefold(), value)

    def is_double_page(self) -> bool:
        w = self.width or 0
        h = self.height or 0
        return self.double_page or (w >= h and w > 0 and h > 0)

    def __lt__(self, other: Any) -> bool:
        if not isinstance(other, PageMetadata):
            return False
        return self.archive_index < other.archive_index

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, PageMetadata):
            return False
        return self.archive_index == other.archive_index

    def _get_clean_metadata(self, *attributes: str) -> PageMetadata:
        return PageMetadata(
            filename=self.filename if "filename" in attributes else "",
            type=self.type if "type" in attributes else "",
            bookmark=self.bookmark if "bookmark" in attributes else "",
            display_index=self.display_index if "display_index" in attributes else 0,
            archive_index=self.archive_index if "archive_index" in attributes else 0,
            double_page=self.double_page if "double_page" in attributes else None,
            byte_size=self.byte_size if "byte_size" in attributes else None,
            height=self.height if "height" in attributes else None,
            width=self.width if "width" in attributes else None,
        )


@dataclasses.dataclass
class ComicSeries:
    id: str
    name: str
    aliases: set[str]
    count_of_issues: int | None
    count_of_volumes: int | None
    description: str
    image_url: str
    publisher: str
    start_year: int | None
    format: str | None

    def copy(self) -> ComicSeries:
        return copy.deepcopy(self)


class MetadataOrigin(NamedTuple):
    id: str
    name: str

    def __str__(self) -> str:
        return self.name


class ImageHash(NamedTuple):
    """
    A valid ImageHash requires at a minimum a Hash and Kind, or a URL.
    If only a URL is given, it will be used for cover matching; otherwise Hash is used.
    The URL is also required for the GUI to display covers.
    Available Kinds are "ahash" and "phash".
    """

    Hash: int
    Kind: str
    URL: str


class FileHash(NamedTuple):
    name: str
    hash: str

    def __str__(self) -> str:
        return self.name + ":" + self.hash

    @classmethod
    def parse(cls, string: str) -> FileHash:
        name, _, parsed_hash = string.partition(":")
        if name in hashlib.algorithms_available:
            return FileHash(name, parsed_hash)
        return FileHash("", "")

    def __bool__(self) -> bool:
        return all(self)
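
# Illustrative (not part of the original diff): FileHash round-trips through
# its string form, and parse() rejects algorithm names unknown to hashlib:
#
#     fh = FileHash("sha256", "deadbeef")
#     assert str(fh) == "sha256:deadbeef"
#     assert FileHash.parse(str(fh)) == fh
#     assert not FileHash.parse("nope:deadbeef")   # falsy FileHash("", "")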


@dataclasses.dataclass
class GenericMetadata:
    writer_synonyms = ("writer", "plotter", "scripter", "script")
    penciller_synonyms = ("artist", "penciller", "penciler", "breakdowns", "pencils", "painting")
    inker_synonyms = ("inker", "artist", "finishes", "inks", "painting")
    colorist_synonyms = ("colorist", "colourist", "colorer", "colourer", "colors", "painting")
    letterer_synonyms = ("letterer", "letters")
    cover_synonyms = ("cover", "covers", "coverartist", "cover artist")
    editor_synonyms = ("editor", "edits", "editing")
    translator_synonyms = ("translator", "translation")

    is_empty: bool = True
    data_origin: MetadataOrigin | None = None
    issue_id: str | None = None
    series_id: str | None = None
    original_hash: FileHash | None = None

    series: str | None = None
    series_aliases: set[str] = dataclasses.field(default_factory=set)
    issue: str | None = None
    issue_count: int | None = None
    title: str | None = None
    title_aliases: set[str] = dataclasses.field(default_factory=set)
    volume: int | None = None
    volume_count: int | None = None
    genres: set[str] = dataclasses.field(default_factory=set)
    description: str | None = None  # use same way as Summary in CIX
    notes: str | None = None

    alternate_series: str | None = None
    alternate_number: str | None = None
    alternate_count: int | None = None
    story_arcs: list[str] = dataclasses.field(default_factory=list)
    series_groups: list[str] = dataclasses.field(default_factory=list)

    publisher: str | None = None
    imprint: str | None = None
    day: int | None = None
    month: int | None = None
    year: int | None = None
    language: str | None = None  # 2 letter iso code
    country: str | None = None
    web_links: list[Url] = dataclasses.field(default_factory=list)
    format: str | None = None
    manga: str | None = None
    black_and_white: bool | None = None
    maturity_rating: str | None = None
    critical_rating: float | None = None  # rating in CBL; CommunityRating in CIX
    scan_info: str | None = None

    tags: set[str] = dataclasses.field(default_factory=set)
    pages: list[PageMetadata] = dataclasses.field(default_factory=list)
    page_count: int | None = None

    characters: set[str] = dataclasses.field(default_factory=set)
    teams: set[str] = dataclasses.field(default_factory=set)
    locations: set[str] = dataclasses.field(default_factory=set)
    credits: list[Credit] = dataclasses.field(default_factory=list)

    # Some CoMet-only items
    price: float | None = None
    is_version_of: str | None = None
    rights: str | None = None
    identifier: str | None = None
    last_mark: str | None = None

    # urls to cover image, not generally part of the metadata
    _cover_image: ImageHash | None = None
    _alternate_images: list[ImageHash] = dataclasses.field(default_factory=list)

    def __post_init__(self) -> None:
        for key, value in self.__dict__.items():
            if value and key != "is_empty":
                self.is_empty = False
                break

    def copy(self) -> GenericMetadata:
        return copy.deepcopy(self)

    def replace(self, /, **kwargs: Any) -> GenericMetadata:
        tmp = self.copy()
        tmp.__dict__.update(kwargs)
        return tmp

    def _get_clean_metadata(self, *attributes: str) -> GenericMetadata:
        new_md = GenericMetadata()
        list_handled = []
        for attr in sorted(attributes):
            if "." in attr:
                lst, _, name = attr.partition(".")
                if lst in list_handled:
                    continue
                old_value = getattr(self, lst)
                new_value = getattr(new_md, lst)
                if old_value:
                    if hasattr(old_value[0], "_get_clean_metadata"):
                        list_attributes = [x.removeprefix(lst + ".") for x in attributes if x.startswith(lst)]
                        for x in old_value:
                            new_value.append(x._get_clean_metadata(*list_attributes))
                        list_handled.append(lst)
                        continue
                    if not new_value:
                        for x in old_value:
                            new_value.append(x.__class__())
                    for i, x in enumerate(old_value):
                        if isinstance(x, dict):
                            if name in x:
                                new_value[i][name] = x[name]
                        else:
                            setattr(new_value[i], name, getattr(x, name))

            else:
                old_value = getattr(self, attr)
                if isinstance(old_value, list):
                    continue
                setattr(new_md, attr, old_value)

        new_md.__post_init__()
        return new_md

    def overlay(
        self, new_md: GenericMetadata, mode: merge.Mode = merge.Mode.OVERLAY, merge_lists: bool = False
    ) -> None:
        """Overlay a new metadata object on this one"""

        attribute_merge = merge.attribute[mode]
        list_merge = merge.lists[mode]

        def assign(old: Any, new: Any, attribute_merge: Any = attribute_merge) -> Any:
            if new is REMOVE:
                return None

            return attribute_merge(old, new)

        def assign_list(old: list[Any] | set[Any], new: list[Any] | set[Any], list_merge: Any = list_merge) -> Any:
            if new is REMOVE:
                old.clear()
                return old
            if merge_lists:
                return list_merge(old, new)
            else:
                return assign(old, new)

        if not new_md.is_empty:
            self.is_empty = False

        self.data_origin = assign(self.data_origin, new_md.data_origin)  # TODO use and purpose now?
        self.issue_id = assign(self.issue_id, new_md.issue_id)
        self.series_id = assign(self.series_id, new_md.series_id)

        # This should not usually be set by a talker or other online datasource
        self.original_hash = assign(self.original_hash, new_md.original_hash)

        self.series = assign(self.series, new_md.series)

        self.series_aliases = assign_list(self.series_aliases, new_md.series_aliases)
        self.issue = assign(self.issue, new_md.issue)
        self.issue_count = assign(self.issue_count, new_md.issue_count)
        self.title = assign(self.title, new_md.title)
        self.title_aliases = assign_list(self.title_aliases, new_md.title_aliases)
        self.volume = assign(self.volume, new_md.volume)
        self.volume_count = assign(self.volume_count, new_md.volume_count)
        self.genres = assign_list(self.genres, new_md.genres)
        self.description = assign(self.description, new_md.description)
        self.notes = assign(self.notes, new_md.notes)

        self.alternate_series = assign(self.alternate_series, new_md.alternate_series)
        self.alternate_number = assign(self.alternate_number, new_md.alternate_number)
        self.alternate_count = assign(self.alternate_count, new_md.alternate_count)
        self.story_arcs = assign_list(self.story_arcs, new_md.story_arcs)
        self.series_groups = assign_list(self.series_groups, new_md.series_groups)

        self.publisher = assign(self.publisher, new_md.publisher)
        self.imprint = assign(self.imprint, new_md.imprint)
        self.day = assign(self.day, new_md.day)
        self.month = assign(self.month, new_md.month)
        self.year = assign(self.year, new_md.year)
        self.language = assign(self.language, new_md.language)
        self.country = assign(self.country, new_md.country)
        self.web_links = assign_list(self.web_links, new_md.web_links)
        self.format = assign(self.format, new_md.format)
        self.manga = assign(self.manga, new_md.manga)
        self.black_and_white = assign(self.black_and_white, new_md.black_and_white)
        self.maturity_rating = assign(self.maturity_rating, new_md.maturity_rating)
        self.critical_rating = assign(self.critical_rating, new_md.critical_rating)
        self.scan_info = assign(self.scan_info, new_md.scan_info)

        self.tags = assign_list(self.tags, new_md.tags)

        self.characters = assign_list(self.characters, new_md.characters)
        self.teams = assign_list(self.teams, new_md.teams)
        self.locations = assign_list(self.locations, new_md.locations)

        # credits are added through add_credit so that some standard checks are observed
        # which means that we need self.credits to be empty
        tmp_credits = self.credits
        self.credits = []
        for c in assign_list(tmp_credits, new_md.credits):
            self.add_credit(c)

        self.price = assign(self.price, new_md.price)
        self.is_version_of = assign(self.is_version_of, new_md.is_version_of)
        self.rights = assign(self.rights, new_md.rights)
        self.identifier = assign(self.identifier, new_md.identifier)
        self.last_mark = assign(self.last_mark, new_md.last_mark)
        self._cover_image = assign(self._cover_image, new_md._cover_image)
        self._alternate_images = assign_list(self._alternate_images, new_md._alternate_images)

        # pages doesn't get merged, if we did merge we would end up with duplicate pages
        self.pages = assign(self.pages, new_md.pages)
        self.page_count = assign(self.page_count, new_md.page_count)
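
    # Illustrative (not part of the original diff): a typical overlay fills a
    # locally tagged copy from an online source without losing local edits:
    #
    #     md = local_md.copy()
    #     md.overlay(online_md, mode=merge.Mode.ADD_MISSING, merge_lists=True)
    #
    # ADD_MISSING keeps attributes that are already set and only fills empty
    # ones; merge_lists=True dedupes list/set fields instead of replacing them.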

    def apply_default_page_list(self, page_list: Sequence[str]) -> None:
        """apply a default page list, with the first page marked as the cover"""

        # Create a dictionary in the weird case that the metadata doesn't match the archive
        pages = {p.archive_index: p for p in self.pages}
        cover_set = False

        # It might be a good idea to validate that each page in `pages` is found in page_list
        for i, filename in enumerate(page_list):
            page = pages.get(i, PageMetadata(archive_index=i, display_index=i, filename="", type="", bookmark=""))
            page.filename = filename
            pages[i] = page

            # Check if we know what the cover is
            cover_set = page.type == PageType.FrontCover or cover_set
        self.pages = sorted(pages.values())

        self.page_count = len(self.pages)
        if self.page_count != len(page_list):
            logger.warning("Wrong count of pages: expected %d got %d", len(self.pages), len(page_list))
        # Set the cover to the first image according to the display index if we don't know what the cover is
        if not cover_set:
            first_page = self.get_archive_page_index(0)
            self.pages[first_page].type = PageType.FrontCover

    def get_archive_page_index(self, pagenum: int) -> int:
        """convert the displayed page number to the page index of the file in the archive"""
        if pagenum < len(self.pages):
            return int(sorted(self.pages, key=lambda p: p.display_index)[pagenum].archive_index)

        return 0

    def get_cover_page_index_list(self) -> list[int]:
        # return a list of archive page indices of cover pages
        if not self.pages:
            return [0]
        coverlist = []
        for p in self.pages:
            if p.type == PageType.FrontCover:
                coverlist.append(p.archive_index)

        if len(coverlist) == 0:
            coverlist.append(self.get_archive_page_index(0))

        return coverlist

    @overload
    def add_credit(self, person: Credit) -> None: ...

    @overload
    def add_credit(self, person: str, role: str, primary: bool = False, language: str = "") -> None: ...

    def add_credit(
        self, person: str | Credit, role: str | None = None, primary: bool = False, language: str = ""
    ) -> None:

        credit: Credit
        if isinstance(person, Credit):
            credit = person
        else:
            assert role is not None
            credit = Credit(person=person, role=role, primary=primary, language=language)

        if credit.role is None:
            raise TypeError("GenericMetadata.add_credit takes either a Credit object or a person name and role")
        if credit.person == "":
            return

        person = norm_fold(credit.person)
        role = norm_fold(credit.role)

        # look to see if it's not already there...
        found = False
        for c in self.credits:
            if norm_fold(c.person) == person and norm_fold(c.role) == role:
                # no need to add it. just adjust the "primary" flag as needed
                c.primary = c.primary or primary
                found = True
                break

        if not found:
            self.credits.append(credit)

    def get_primary_credit(self, role: str) -> str:
        primary = ""
        for credit in self.credits:
            if (primary == "" and credit.role.casefold() == role.casefold()) or (
                credit.role.casefold() == role.casefold() and credit.primary
            ):
                primary = credit.person
        return primary

    def __str__(self) -> str:
        vals: list[tuple[str, Any]] = []
        if self.is_empty:
            return "No metadata"

        def add_string(tag: str, val: Any) -> None:
            if isinstance(val, (Sequence, set)):
                if val:
                    vals.append((tag, val))
            elif val is not None:
                vals.append((tag, val))

        add_string("data_origin", self.data_origin)
        add_string("series", self.series)
        add_string("original_hash", self.original_hash)
        add_string("series_aliases", ",".join(self.series_aliases))
        add_string("issue", self.issue)
        add_string("issue_count", self.issue_count)
        add_string("title", self.title)
        add_string("title_aliases", ",".join(self.title_aliases))
        add_string("publisher", self.publisher)
        add_string("year", self.year)
        add_string("month", self.month)
        add_string("day", self.day)
        add_string("volume", self.volume)
        add_string("volume_count", self.volume_count)
        add_string("genres", ", ".join(self.genres))
        add_string("language", self.language)
        add_string("country", self.country)
        add_string("critical_rating", self.critical_rating)
        add_string("alternate_series", self.alternate_series)
        add_string("alternate_number", self.alternate_number)
        add_string("alternate_count", self.alternate_count)
        add_string("imprint", self.imprint)
        add_string("web_links", [str(x) for x in self.web_links])
        add_string("format", self.format)
        add_string("manga", self.manga)

        add_string("price", self.price)
        add_string("is_version_of", self.is_version_of)
        add_string("rights", self.rights)
        add_string("identifier", self.identifier)
        add_string("last_mark", self.last_mark)

        if self.black_and_white:
            add_string("black_and_white", self.black_and_white)
        add_string("maturity_rating", self.maturity_rating)
        add_string("story_arcs", self.story_arcs)
        add_string("series_groups", self.series_groups)
        add_string("scan_info", self.scan_info)
        add_string("characters", ", ".join(self.characters))
        add_string("teams", ", ".join(self.teams))
        add_string("locations", ", ".join(self.locations))
        add_string("description", self.description)
        add_string("notes", self.notes)

        add_string("tags", ", ".join(self.tags))

        for c in self.credits:
            primary = ""
            if c.primary:
                primary = " [P]"
            add_string("credit", f"{c}{primary}")

        # find the longest field name
        flen = 0
        for i in vals:
            flen = max(flen, len(i[0]))
        flen += 1

        # format the data nicely
        outstr = ""
        fmt_str = "{0: <" + str(flen) + "} {1}\n"
        for i in vals:
            outstr += fmt_str.format(i[0] + ":", i[1])

        return outstr

    def fix_publisher(self) -> None:
        if self.publisher is None:
            return
        if self.imprint is None:
            self.imprint = ""

        imprint, publisher = utils.get_publisher(self.publisher)

        self.publisher = publisher

        if self.imprint.casefold() in publisher.casefold():
            self.imprint = None

        if self.imprint is None or self.imprint == "":
            self.imprint = imprint
        elif self.imprint.casefold() in imprint.casefold():
            self.imprint = imprint


md_test: GenericMetadata = GenericMetadata(
    is_empty=False,
    data_origin=MetadataOrigin("comicvine", "Comic Vine"),
    series="Cory Doctorow's Futuristic Tales of the Here and Now",
    series_id="23437",
    issue="1",
    issue_id="140529",
    title="Anda's Game",
    publisher="IDW Publishing",
    month=10,
    year=2007,
    day=1,
    issue_count=6,
    volume=1,
    genres={"Sci-Fi"},
    language="en",
    description=(
        "For 12-year-old Anda, getting paid real money to kill the characters of players who were cheating"
        " in her favorite online computer game was a win-win situation. Until she found out who was paying her,"
        " and what those characters meant to the livelihood of children around the world."
    ),
    volume_count=None,
    critical_rating=3.0,
    country=None,
    alternate_series="Tales",
    alternate_number="2",
    alternate_count=7,
    imprint="craphound.com",
    notes="Tagged with ComicTagger 1.3.2a5 using info from Comic Vine on 2022-04-16 15:52:26. [Issue ID 140529]",
    web_links=[
        parse_url("https://comicvine.gamespot.com/cory-doctorows-futuristic-tales-of-the-here-and-no/4000-140529/")
    ],
    format="Series",
    manga="No",
    black_and_white=None,
    page_count=24,
    maturity_rating="Everyone 10+",
    story_arcs=["Here and Now"],
    series_groups=["Futuristic Tales"],
    scan_info="(CC BY-NC-SA 3.0)",
    characters={"Anda"},
    teams={"Fahrenheit"},
    locations=set(utils.split("lonely cottage ", ",")),
    credits=[
        Credit(primary=False, person="Dara Naraghi", role="Writer"),
        Credit(primary=False, person="Esteve Polls", role="Penciller"),
        Credit(primary=False, person="Esteve Polls", role="Inker"),
        Credit(primary=False, person="Neil Uyetake", role="Letterer"),
        Credit(primary=False, person="Sam Kieth", role="Cover"),
        Credit(primary=False, person="Ted Adams", role="Editor"),
    ],
    tags=set(),
    pages=[
        PageMetadata(
            archive_index=0,
            display_index=0,
            height=1280,
            byte_size=195977,
            width=800,
            type=PageType.FrontCover,
            filename="!cover.jpg",
            bookmark="",
        ),
        PageMetadata(
            archive_index=1,
            display_index=1,
            height=2039,
            byte_size=611993,
            width=1327,
            filename="01.jpg",
            bookmark="",
            type="",
        ),
        PageMetadata(
            archive_index=2,
            display_index=2,
            height=2039,
            byte_size=783726,
            width=1327,
            filename="02.jpg",
            bookmark="",
            type="",
        ),
        PageMetadata(
            archive_index=3,
            display_index=3,
            height=2039,
            byte_size=679584,
            width=1327,
            filename="03.jpg",
            bookmark="",
            type="",
        ),
        PageMetadata(
            archive_index=4,
            display_index=4,
            height=2039,
            byte_size=788179,
            width=1327,
            filename="04.jpg",
            bookmark="",
            type="",
        ),
        PageMetadata(
            archive_index=5,
            display_index=5,
            height=2039,
            byte_size=864433,
            width=1327,
            filename="05.jpg",
            bookmark="",
            type="",
        ),
        PageMetadata(
            archive_index=6,
            display_index=6,
            height=2039,
            byte_size=765606,
            width=1327,
            filename="06.jpg",
            bookmark="",
            type="",
        ),
        PageMetadata(
            archive_index=7,
            display_index=7,
            height=2039,
            byte_size=876427,
            width=1327,
            filename="07.jpg",
            bookmark="",
            type="",
        ),
        PageMetadata(
            archive_index=8,
            display_index=8,
            height=2039,
            byte_size=852622,
            width=1327,
            filename="08.jpg",
            bookmark="",
            type="",
        ),
        PageMetadata(
            archive_index=9,
            display_index=9,
            height=2039,
            byte_size=800205,
            width=1327,
            filename="09.jpg",
            bookmark="",
            type="",
        ),
        PageMetadata(
            archive_index=10,
            display_index=10,
            height=2039,
            byte_size=746243,
            width=1326,
            filename="10.jpg",
            bookmark="",
            type="",
        ),
        PageMetadata(
            archive_index=11,
            display_index=11,
            height=2039,
            byte_size=718062,
            width=1327,
            filename="11.jpg",
            bookmark="",
            type="",
        ),
        PageMetadata(
            archive_index=12,
            display_index=12,
            height=2039,
            byte_size=532179,
            width=1326,
            filename="12.jpg",
            bookmark="",
            type="",
        ),
        PageMetadata(
            archive_index=13,
            display_index=13,
            height=2039,
            byte_size=686708,
            width=1327,
            filename="13.jpg",
            bookmark="",
            type="",
        ),
        PageMetadata(
            archive_index=14,
            display_index=14,
            height=2039,
            byte_size=641907,
            width=1327,
            filename="14.jpg",
            bookmark="",
            type="",
        ),
        PageMetadata(
            archive_index=15,
            display_index=15,
            height=2039,
            byte_size=805388,
            width=1327,
            filename="15.jpg",
            bookmark="",
            type="",
        ),
        PageMetadata(
            archive_index=16,
            display_index=16,
            height=2039,
            byte_size=668927,
            width=1326,
            filename="16.jpg",
            bookmark="",
            type="",
        ),
        PageMetadata(
            archive_index=17,
            display_index=17,
            height=2039,
            byte_size=710605,
            width=1327,
            filename="17.jpg",
            bookmark="",
            type="",
        ),
        PageMetadata(
            archive_index=18,
            display_index=18,
            height=2039,
            byte_size=761398,
            width=1326,
            filename="18.jpg",
            bookmark="",
            type="",
        ),
        PageMetadata(
            archive_index=19,
            display_index=19,
            height=2039,
            byte_size=743807,
            width=1327,
            filename="19.jpg",
            bookmark="",
            type="",
        ),
        PageMetadata(
            archive_index=20,
            display_index=20,
            height=2039,
            byte_size=552911,
            width=1326,
            filename="20.jpg",
            bookmark="",
            type="",
        ),
        PageMetadata(
            archive_index=21,
            display_index=21,
            height=2039,
            byte_size=556827,
            width=1327,
            filename="21.jpg",
            bookmark="",
            type="",
        ),
        PageMetadata(
            archive_index=22,
            display_index=22,
            height=2039,
            byte_size=675078,
            width=1326,
            filename="22.jpg",
            bookmark="",
            type="",
        ),
        PageMetadata(
            bookmark="Interview",
            archive_index=23,
            display_index=23,
            height=2032,
            byte_size=800965,
            width=1338,
            type=PageType.Letters,
            filename="23.jpg",
        ),
    ],
    price=None,
    is_version_of=None,
    rights=None,
    identifier=None,
    last_mark=None,
    _cover_image=None,
)


__all__ = (
    "Url",
    "parse_url",
    "PageType",
    "PageMetadata",
    "Credit",
    "ComicSeries",
    "MetadataOrigin",
    "GenericMetadata",
)
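
# Illustrative (not part of the original diff): md_test appears intended as a
# shared fixture; md_test.replace(issue="2") returns a modified deep copy, so
# the shared instance is never mutated.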
130
comicapi/issuestring.py
Normal file
@@ -0,0 +1,130 @@
"""Support for mixed digit/string type Issue field

Class for handling the odd permutations of an 'issue number' that the
comics industry throws at us.
e.g.: "12", "12.1", "0", "-1", "5AU", "100-2"
"""

# Copyright 2012-2014 ComicTagger Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

import logging
import unicodedata

logger = logging.getLogger(__name__)


class IssueString:
    def __init__(self, text: str | None) -> None:
        # break up the issue number string into 2 parts: the numeric and suffix string.
        # (assumes that the numeric portion is always first)

        self.num = None
        self.suffix = ""
        self.prefix = ""

        if text is None:
            return

        text = str(text)

        if len(text) == 0:
            return

        for idx, r in enumerate(text):
            if not r.isalpha():
                break
        self.prefix = text[:idx]
        self.num, self.suffix = self.get_number(text[idx:])

    def get_number(self, text: str) -> tuple[float | None, str]:
        num, suffix = None, ""
        start = 0
        # skip a leading sign character if it's first
        if text[0] in ("-", "+"):
            start = 1

        # if it's still not numeric at the start, the whole text is the suffix
        if text[start].isdigit() or text[start] == ".":
            # walk through the string, look for split point (the first non-numeric)
            decimal_count = 0
            for idx in range(start, len(text)):
                if not (text[idx].isdigit() or text[idx] in "."):
                    break
                # special case: also split on second "."
                if text[idx] == ".":
                    decimal_count += 1
                    if decimal_count > 1:
                        break
            else:
                idx = len(text)

            # move trailing numeric decimal to suffix
            # (only if there is other junk after)
            if text[idx - 1] == "." and len(text) != idx:
                idx = idx - 1

            # if there is no numeric after the minus, make the minus part of the suffix
            if idx == 1 and start == 1:
                idx = 0

            if text[0:idx]:
                num = float(text[0:idx])
            suffix = text[idx : len(text)]
        else:
            suffix = text
        return num, suffix

    def as_string(self, pad: int = 0) -> str:
        """return the number, left side zero-padded, with suffix attached"""

        # if there is no number return the text
        if self.num is None:
            return self.prefix + self.suffix

        # the negative sign is added back in last
        negative = self.num < 0
        num_f = abs(self.num)

        # used for padding
        num_int = int(num_f)

        if num_f.is_integer():
            num_s = str(num_int)
        else:
            num_s = str(num_f)

        # create padding
        padding = ""
        # we only pad the whole number part, we don't care about the decimal
        length = len(str(num_int))
        if length < pad:
            padding = "0" * (pad - length)

        # add the padding to the front
        num_s = padding + num_s

        # finally add the negative back in
        if negative:
            num_s = "-" + num_s

        # return the prefix + formatted number + suffix
        return self.prefix + num_s + self.suffix

    def as_float(self) -> float | None:
        # return the float, with no suffix
        if len(self.suffix) == 1 and self.suffix.isnumeric():
            return (self.num or 0) + unicodedata.numeric(self.suffix)
        return self.num
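
# Illustrative (not part of the original diff): IssueString splits "5AU" into
# num=5.0 and suffix="AU", and as_string() pads only the integer part:
#
#     assert IssueString("5AU").as_string(pad=3) == "005AU"
#     assert IssueString("12.1").as_float() == 12.1
#     assert IssueString("-1").as_string() == "-1"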
72
comicapi/merge.py
Normal file
@@ -0,0 +1,72 @@
from __future__ import annotations

import dataclasses
from collections.abc import Collection
from enum import auto
from typing import Any, Callable

from comicapi.utils import DefaultDict, StrEnum, norm_fold


@dataclasses.dataclass
class Credit:
    person: str = ""
    role: str = ""
    primary: bool = False
    language: str = ""  # Should be ISO 639 language code

    def __str__(self) -> str:
        lang = ""
        if self.language:
            lang = f" [{self.language}]"
        return f"{self.role}: {self.person}{lang}"


class Mode(StrEnum):
    OVERLAY = auto()
    ADD_MISSING = auto()


def merge_lists(old: Collection[Any], new: Collection[Any]) -> list[Any] | set[Any]:
    """Dedupes normalised (NFKD), casefolded values using 'new' values on collisions"""
    if len(new) == 0:
        return old if isinstance(old, set) else list(old)
    if len(old) == 0:
        return new if isinstance(new, set) else list(new)

    # Create dict to preserve case
    new_dict = {norm_fold(str(n)): n for n in new}
    old_dict = {norm_fold(str(c)): c for c in old}

    old_dict.update(new_dict)

    if isinstance(old, set):
        return set(old_dict.values())

    return list(old_dict.values())


def overlay(old: Any, new: Any) -> Any:
    """overlay - When the `new` object is not empty, replace `old` with `new`."""
    if new is None or (isinstance(new, Collection) and len(new) == 0):
        return old

    return new


attribute: DefaultDict[Mode, Callable[[Any, Any], Any]] = DefaultDict(
    {
        Mode.OVERLAY: overlay,
        Mode.ADD_MISSING: lambda old, new: overlay(new, old),
    },
    default=lambda x: overlay,
)


lists: DefaultDict[Mode, Callable[[Collection[Any], Collection[Any]], list[Any] | set[Any]]] = DefaultDict(
    {
        Mode.OVERLAY: merge_lists,
        Mode.ADD_MISSING: lambda old, new: merge_lists(new, old),
    },
    default=lambda x: overlay,
)
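
# Illustrative (not part of the original diff): merge_lists dedupes on the
# normalised casefold and lets 'new' win on collisions:
#
#     merge_lists(["Mr Smith"], ["mr smith", "Jane"])   # -> ["mr smith", "Jane"]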
5
comicapi/tags/__init__.py
Normal file
@@ -0,0 +1,5 @@
from __future__ import annotations

from comicapi.tags.tag import Tag

__all__ = ["Tag"]
416
comicapi/tags/comicrack.py
Normal file
@@ -0,0 +1,416 @@
"""A class to encapsulate ComicRack's ComicInfo.xml data"""

# Copyright 2012-2014 ComicTagger Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

import logging
import xml.etree.ElementTree as ET
from typing import Any

from comicapi import utils
from comicapi.archivers import Archiver
from comicapi.genericmetadata import FileHash, GenericMetadata, PageMetadata
from comicapi.tags import Tag

logger = logging.getLogger(__name__)


class ComicRack(Tag):
    enabled = True

    id = "cr"

    def __init__(self, version: str) -> None:
        super().__init__(version)

        self.file = "ComicInfo.xml"
        self.supported_attributes = {
            "original_hash",
            "series",
            "issue",
            "issue_count",
            "title",
            "volume",
            "genres",
            "description",
            "notes",
            "alternate_series",
            "alternate_number",
            "alternate_count",
            "story_arcs",
            "series_groups",
            "publisher",
            "imprint",
            "day",
            "month",
            "year",
            "language",
            "web_links",
            "format",
            "manga",
            "black_and_white",
            "maturity_rating",
            "critical_rating",
            "scan_info",
            "pages",
            "pages.bookmark",
            "pages.double_page",
            "pages.height",
            "pages.image_index",
            "pages.size",
            "pages.type",
            "pages.width",
            "page_count",
            "characters",
            "teams",
            "locations",
            "credits",
            "credits.person",
            "credits.role",
        }

    def supports_credit_role(self, role: str) -> bool:
        return role.casefold() in self._get_parseable_credits()

    def supports_tags(self, archive: Archiver) -> bool:
        return archive.supports_files()

    def has_tags(self, archive: Archiver) -> bool:
        try:  # read_file can cause an exception
            return (
                self.supports_tags(archive)
                and self.file in archive.get_filename_list()
                and self._validate_bytes(archive.read_file(self.file))
            )
        except Exception:
            return False

    def remove_tags(self, archive: Archiver) -> bool:
        return self.has_tags(archive) and archive.remove_file(self.file)

    def read_tags(self, archive: Archiver) -> GenericMetadata:
        if self.has_tags(archive):
            try:  # read_file can cause an exception
                metadata = archive.read_file(self.file) or b""
                if self._validate_bytes(metadata):
                    return self._metadata_from_bytes(metadata)
            except Exception:
                ...
        return GenericMetadata()

    def read_raw_tags(self, archive: Archiver) -> str:
        try:  # read_file can cause an exception
            if self.has_tags(archive):
                b = archive.read_file(self.file)
                # ET.fromstring is used as xml can declare the encoding
                return ET.tostring(ET.fromstring(b), encoding="unicode", xml_declaration=True)
        except Exception:
            ...
        return ""

    def write_tags(self, metadata: GenericMetadata, archive: Archiver) -> bool:
        if self.supports_tags(archive):
            xml = b""
            try:  # read_file can cause an exception
                if self.has_tags(archive):
                    xml = archive.read_file(self.file)
                return archive.write_file(self.file, self._bytes_from_metadata(metadata, xml))
            except Exception:
                ...
        else:
            logger.warning(f"Archive ({archive.name()}) does not support {self.name()} metadata")
        return False
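
    # Illustrative (not part of the original diff): a typical round trip, where
    # `archive` is any comicapi Archiver and `version` a version string:
    #
    #     tag = ComicRack(version)
    #     if tag.has_tags(archive):
    #         md = tag.read_tags(archive)
    #         md.issue = "2"
    #         tag.write_tags(md, archive)
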
    def name(self) -> str:
        return "Comic Rack"

    @classmethod
    def _get_parseable_credits(cls) -> list[str]:
        parsable_credits: list[str] = []
        parsable_credits.extend(GenericMetadata.writer_synonyms)
        parsable_credits.extend(GenericMetadata.penciller_synonyms)
        parsable_credits.extend(GenericMetadata.inker_synonyms)
        parsable_credits.extend(GenericMetadata.colorist_synonyms)
        parsable_credits.extend(GenericMetadata.letterer_synonyms)
        parsable_credits.extend(GenericMetadata.cover_synonyms)
        parsable_credits.extend(GenericMetadata.editor_synonyms)
        return parsable_credits

    def _metadata_from_bytes(self, string: bytes) -> GenericMetadata:
        root = ET.fromstring(string)
        return self._convert_xml_to_metadata(root)

    def _bytes_from_metadata(self, metadata: GenericMetadata, xml: bytes = b"") -> bytes:
        root = self._convert_metadata_to_xml(metadata, xml)
        return ET.tostring(root, encoding="utf-8", xml_declaration=True)

    def _convert_metadata_to_xml(self, metadata: GenericMetadata, xml: bytes = b"") -> ET.Element:
        # shorthand for the metadata
        md = metadata

        if xml:
            root = ET.fromstring(xml)
        else:
            # build a tree structure
            root = ET.Element("ComicInfo")
            root.attrib["xmlns:xsi"] = "http://www.w3.org/2001/XMLSchema-instance"
            root.attrib["xmlns:xsd"] = "http://www.w3.org/2001/XMLSchema"
        # helper func

        def assign(cr_entry: str, md_entry: Any) -> None:
            if md_entry:
                text = str(md_entry)
                if isinstance(md_entry, (list, set)):
                    text = ",".join(md_entry)
                et_entry = root.find(cr_entry)
                if et_entry is not None:
                    et_entry.text = text
                else:
                    ET.SubElement(root, cr_entry).text = text
            else:
                et_entry = root.find(cr_entry)
                if et_entry is not None:
                    root.remove(et_entry)
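
        # Illustrative (not part of the original diff): assign("Series", "Tales")
        # creates or updates <Series>Tales</Series>, while a falsy value, e.g.
        # assign("Series", None), removes any existing <Series> element.
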
        # need to specially process the credits, since they are structured
        # differently than CIX
        credit_writer_list = []
        credit_penciller_list = []
        credit_inker_list = []
        credit_colorist_list = []
        credit_letterer_list = []
        credit_cover_list = []
        credit_editor_list = []

        # first, loop thru credits, and build a list for each role that CIX
        # supports
        for credit in metadata.credits:
            if credit.role.casefold() in set(GenericMetadata.writer_synonyms):
                credit_writer_list.append(credit.person.replace(",", ""))

            if credit.role.casefold() in set(GenericMetadata.penciller_synonyms):
                credit_penciller_list.append(credit.person.replace(",", ""))

            if credit.role.casefold() in set(GenericMetadata.inker_synonyms):
                credit_inker_list.append(credit.person.replace(",", ""))

            if credit.role.casefold() in set(GenericMetadata.colorist_synonyms):
                credit_colorist_list.append(credit.person.replace(",", ""))

            if credit.role.casefold() in set(GenericMetadata.letterer_synonyms):
                credit_letterer_list.append(credit.person.replace(",", ""))

            if credit.role.casefold() in set(GenericMetadata.cover_synonyms):
                credit_cover_list.append(credit.person.replace(",", ""))

            if credit.role.casefold() in set(GenericMetadata.editor_synonyms):
                credit_editor_list.append(credit.person.replace(",", ""))

        assign("Series", md.series)
        assign("Number", md.issue)
        assign("Count", md.issue_count)
        assign("Title", md.title)
        assign("Volume", md.volume)
        assign("Genre", md.genres)
        assign("Summary", md.description)
        assign("Notes", md.notes)

        assign("AlternateSeries", md.alternate_series)
        assign("AlternateNumber", md.alternate_number)
        assign("AlternateCount", md.alternate_count)
        assign("StoryArc", md.story_arcs)
        assign("SeriesGroup", md.series_groups)

        assign("Publisher", md.publisher)
        assign("Imprint", md.imprint)
        assign("Day", md.day)
        assign("Month", md.month)
        assign("Year", md.year)
        assign("LanguageISO", md.language)
        assign("Web", " ".join(u.url for u in md.web_links))
        assign("Format", md.format)
        assign("Manga", md.manga)
        assign("BlackAndWhite", "Yes" if md.black_and_white else None)
        assign("AgeRating", md.maturity_rating)
        assign("CommunityRating", md.critical_rating)

        scan_info = md.scan_info or ""
        if md.original_hash:
            scan_info += f" sum:{md.original_hash}"
        assign("ScanInformation", scan_info)

        assign("PageCount", md.page_count)

        assign("Characters", md.characters)
        assign("Teams", md.teams)
        assign("Locations", md.locations)
        assign("Writer", ", ".join(credit_writer_list))
        assign("Penciller", ", ".join(credit_penciller_list))
        assign("Inker", ", ".join(credit_inker_list))
        assign("Colorist", ", ".join(credit_colorist_list))
        assign("Letterer", ", ".join(credit_letterer_list))
        assign("CoverArtist", ", ".join(credit_cover_list))
        assign("Editor", ", ".join(credit_editor_list))

        # loop and add the page entries under pages node
        pages_node = root.find("Pages")
        if pages_node is not None:
            pages_node.clear()
        else:
            pages_node = ET.SubElement(root, "Pages")

        for page in sorted(md.pages, key=lambda x: x.archive_index):
            page_node = ET.SubElement(pages_node, "Page")
            page_node.attrib = {"Image": str(page.display_index)}
            if page.bookmark:
                page_node.attrib["Bookmark"] = page.bookmark
            if page.type:
                page_node.attrib["Type"] = page.type

            if page.double_page is not None:
                page_node.attrib["DoublePage"] = str(page.double_page)
            if page.height is not None:
                page_node.attrib["ImageHeight"] = str(page.height)
            if page.byte_size is not None:
                page_node.attrib["ImageSize"] = str(page.byte_size)
            if page.width is not None:
                page_node.attrib["ImageWidth"] = str(page.width)
            page_node.attrib = dict(sorted(page_node.attrib.items()))

        ET.indent(root)

        return root

    def _convert_xml_to_metadata(self, root: ET.Element) -> GenericMetadata:
        if root.tag != "ComicInfo":
            raise Exception("Not a ComicInfo file")

        def get(name: str) -> str | None:
            tag = root.find(name)
            if tag is None:
                return None
            return tag.text

        md = GenericMetadata()

        md.series = utils.xlate(get("Series"))
        md.issue = utils.xlate(get("Number"))
        md.issue_count = utils.xlate_int(get("Count"))
        md.title = utils.xlate(get("Title"))
        md.volume = utils.xlate_int(get("Volume"))
        md.genres = set(utils.split(get("Genre"), ","))
        md.description = utils.xlate(get("Summary"))
        md.notes = utils.xlate(get("Notes"))

        md.alternate_series = utils.xlate(get("AlternateSeries"))
        md.alternate_number = utils.xlate(get("AlternateNumber"))
        md.alternate_count = utils.xlate_int(get("AlternateCount"))
        md.story_arcs = utils.split(get("StoryArc"), ",")
        md.series_groups = utils.split(get("SeriesGroup"), ",")

        md.publisher = utils.xlate(get("Publisher"))
        md.imprint = utils.xlate(get("Imprint"))
        md.day = utils.xlate_int(get("Day"))
        md.month = utils.xlate_int(get("Month"))
        md.year = utils.xlate_int(get("Year"))
        md.language = utils.xlate(get("LanguageISO"))
        md.web_links = utils.split_urls(utils.xlate(get("Web")))
        md.format = utils.xlate(get("Format"))
        md.manga = utils.xlate(get("Manga"))
        md.maturity_rating = utils.xlate(get("AgeRating"))
        md.critical_rating = utils.xlate_float(get("CommunityRating"))
        scan_info_list = (utils.xlate(get("ScanInformation")) or "").split()
        for word in scan_info_list.copy():
            if not word.startswith("sum:"):
                continue
            original_hash = FileHash.parse(word[4:])
            if original_hash:
                md.original_hash = original_hash
            scan_info_list.remove(word)
            break
        if scan_info_list:
            md.scan_info = " ".join(scan_info_list)
        md.is_empty = False

        md.page_count = utils.xlate_int(get("PageCount"))

        md.characters = set(utils.split(get("Characters"), ","))
        md.teams = set(utils.split(get("Teams"), ","))
        md.locations = set(utils.split(get("Locations"), ","))

        tmp = utils.xlate(get("BlackAndWhite"))
        if tmp is not None:
            md.black_and_white = tmp.casefold() in ["yes", "true", "1"]

        # Now extract the credit info
        for n in root:
            if any(
                [
                    n.tag == "Writer",
                    n.tag == "Penciller",
                    n.tag == "Inker",
                    n.tag == "Colorist",
                    n.tag == "Letterer",
                    n.tag == "Editor",
                ]
            ):
                if n.text is not None:
                    for name in utils.split(n.text, ","):
                        md.add_credit(name.strip(), n.tag)

            if n.tag == "CoverArtist":
                if n.text is not None:
                    for name in utils.split(n.text, ","):
                        md.add_credit(name.strip(), "Cover")

        # parse page data now
        pages_node = root.find("Pages")
        if pages_node is not None:
            for i, page in enumerate(pages_node):
                p: dict[str, Any] = page.attrib
                md_page = PageMetadata(
                    filename="",  # cr doesn't record the filename; it just assumes it's always ordered the same
                    display_index=int(p.get("Image", i)),
                    archive_index=i,
                    bookmark=p.get("Bookmark", ""),
                    type="",
                )
                md_page.set_type(p.get("Type", ""))

                if isinstance(p.get("DoublePage", None), str):
                    md_page.double_page = p["DoublePage"].casefold() in ("yes", "true", "1")
                if p.get("ImageHeight", "").isnumeric():
                    md_page.height = int(float(p["ImageHeight"]))
                if p.get("ImageWidth", "").isnumeric():
|
||||
md_page.width = int(float(p["ImageWidth"]))
|
||||
if p.get("ImageSize", "").isnumeric():
|
||||
md_page.byte_size = int(float(p["ImageSize"]))
|
||||
|
||||
md.pages.append(md_page)
|
||||
|
||||
md.is_empty = False
|
||||
|
||||
return md
|
||||
|
||||
def _validate_bytes(self, string: bytes) -> bool:
|
||||
"""verify that the string actually contains CIX data in XML format"""
|
||||
try:
|
||||
root = ET.fromstring(string)
|
||||
if root.tag != "ComicInfo":
|
||||
return False
|
||||
except ET.ParseError:
|
||||
return False
|
||||
|
||||
return True
|
||||
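As a sanity check, the converter and validator above can be exercised together. A minimal sketch (illustrative only: `ComicInfoTags` is a stand-in for the class that owns these methods, whose definition lies outside this hunk; element names mirror the assign()/get() calls above):

    import xml.etree.ElementTree as ET

    sample = b"""<?xml version='1.0'?>
    <ComicInfo>
      <Series>Example Series</Series>
      <Number>3</Number>
      <Year>2020</Year>
      <Writer>Jane Doe, John Roe</Writer>
    </ComicInfo>"""

    tags = ComicInfoTags()  # hypothetical; not a real class in this diff
    if tags._validate_bytes(sample):  # cheap guard: parses and checks for a <ComicInfo> root
        md = tags._convert_xml_to_metadata(ET.fromstring(sample))
        # "Jane Doe" and "John Roe" each become a separate Writer credit via utils.split()
        print(md.series, md.issue, md.year)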
125 comicapi/tags/tag.py Normal file
@@ -0,0 +1,125 @@
from __future__ import annotations

from comicapi.archivers import Archiver
from comicapi.genericmetadata import GenericMetadata


class Tag:
    enabled: bool = False
    id: str = ""

    def __init__(self, version: str) -> None:
        self.version: str = version
        self.supported_attributes = {
            "data_origin",
            "issue_id",
            "series_id",
            "original_hash",
            "series",
            "series_aliases",
            "issue",
            "issue_count",
            "title",
            "title_aliases",
            "volume",
            "volume_count",
            "genres",
            "description",
            "notes",
            "alternate_series",
            "alternate_number",
            "alternate_count",
            "story_arcs",
            "series_groups",
            "publisher",
            "imprint",
            "day",
            "month",
            "year",
            "language",
            "country",
            "web_link",
            "format",
            "manga",
            "black_and_white",
            "maturity_rating",
            "critical_rating",
            "scan_info",
            "tags",
            "pages",
            "pages.type",
            "pages.bookmark",
            "pages.double_page",
            "pages.image_index",
            "pages.size",
            "pages.height",
            "pages.width",
            "page_count",
            "characters",
            "teams",
            "locations",
            "credits",
            "credits.person",
            "credits.role",
            "credits.primary",
            "credits.language",
            "price",
            "is_version_of",
            "rights",
            "identifier",
            "last_mark",
        }

    def supports_credit_role(self, role: str) -> bool:
        return False

    def supports_tags(self, archive: Archiver) -> bool:
        """
        Checks the given archive for the ability to save these tags.
        Should always return a bool. Failures should return False.
        Typically consists of a call to either `archive.supports_comment` or `archive.supports_file`.
        """
        return False

    def has_tags(self, archive: Archiver) -> bool:
        """
        Checks the given archive for tags.
        Should always return a bool. Failures should return False.
        """
        return False

    def remove_tags(self, archive: Archiver) -> bool:
        """
        Removes the tags from the given archive.
        Should always return a bool. Failures should return False.
        """
        return False

    def read_tags(self, archive: Archiver) -> GenericMetadata:
        """
        Returns a GenericMetadata representing the tags saved in the given archive.
        Should always return a GenericMetadata. Failures should return an empty metadata object.
        """
        return GenericMetadata()

    def read_raw_tags(self, archive: Archiver) -> str:
        """
        Returns the raw tags as a string.
        If the tags are in a binary format, a roughly similar text format should be used.
        Should always return a string. Failures should return the empty string.
        """
        return ""

    def write_tags(self, metadata: GenericMetadata, archive: Archiver) -> bool:
        """
        Saves the given metadata to the given archive.
        Should always return a bool. Failures should return False.
        """
        return False

    def name(self) -> str:
        """
        Returns the name of these tags for display purposes, e.g. "Comic Rack".
        Should always return a string. Failures should return the empty string.
        """
        return ""
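For orientation, a plugin only overrides the hooks it actually supports; everything else falls back to the safe defaults above. A minimal sketch (hypothetical format: the `Archiver` methods used here, `supports_file`, `get_filename_list`, and `read_file`, are assumed from the wider comicapi codebase and are not defined in this diff):

    from comicapi.archivers import Archiver
    from comicapi.genericmetadata import GenericMetadata
    from comicapi.tags.tag import Tag


    class SidecarTag(Tag):
        """Hypothetical tag format stored in a 'tags.txt' file inside the archive."""

        enabled = True
        id = "sidecar"

        def name(self) -> str:
            return "Sidecar"

        def supports_tags(self, archive: Archiver) -> bool:
            return archive.supports_file()  # per the base docstring; exact signature assumed

        def has_tags(self, archive: Archiver) -> bool:
            return "tags.txt" in archive.get_filename_list()

        def read_tags(self, archive: Archiver) -> GenericMetadata:
            md = GenericMetadata()
            if self.has_tags(archive):
                md.notes = archive.read_file("tags.txt").decode("utf-8")
                md.is_empty = False
            return md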
714 comicapi/utils.py Normal file
@@ -0,0 +1,714 @@
"""Some generic utilities"""

# Copyright 2012-2014 ComicTagger Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

import difflib
import hashlib
import json
import logging
import os
import pathlib
import platform
import sys
import unicodedata
from collections.abc import Iterable, Mapping, Sequence
from enum import Enum, auto
from shutil import which  # noqa: F401
from typing import Any, Callable, TypeVar, cast

from comicfn2dict import comicfn2dict

import comicapi.data
from comicapi import filenamelexer, filenameparser
from comicapi._url import LocationParseError as LocationParseError  # noqa: F401
from comicapi._url import Url as Url
from comicapi._url import parse_url as parse_url

try:
    import icu

    del icu
    icu_available = True
except ImportError:
    icu_available = False


if sys.version_info < (3, 11):

    def file_digest(fileobj, digest, /, *, _bufsize=2**18):  # type: ignore[no-untyped-def]
        """Hash the contents of a file-like object. Returns a digest object.

        *fileobj* must be a file-like object opened for reading in binary mode.
        It accepts file objects from open(), io.BytesIO(), and SocketIO objects.
        The function may bypass Python's I/O and use the file descriptor *fileno*
        directly.

        *digest* must either be a hash algorithm name as a *str*, a hash
        constructor, or a callable that returns a hash object.
        """
        # On Linux we could use AF_ALG sockets and sendfile() to achieve zero-copy
        # hashing with hardware acceleration.
        if isinstance(digest, str):
            digestobj = hashlib.new(digest)
        else:
            digestobj = digest()

        if hasattr(fileobj, "getbuffer"):
            # io.BytesIO object, use zero-copy buffer
            digestobj.update(fileobj.getbuffer())
            return digestobj

        # Only binary files implement readinto().
        if not (hasattr(fileobj, "readinto") and hasattr(fileobj, "readable") and fileobj.readable()):
            raise ValueError(f"'{fileobj!r}' is not a file-like object in binary reading mode.")

        # binary file, socket.SocketIO object
        # Note: socket I/O uses different syscalls than file I/O.
        buf = bytearray(_bufsize)  # Reusable buffer to reduce allocations.
        view = memoryview(buf)
        while True:
            size = fileobj.readinto(buf)
            if size == 0:
                break  # EOF
            digestobj.update(view[:size])

        return digestobj

    class StrEnum(str, Enum):
        """
        Enum where members are also (and must be) strings
        """

        def __new__(cls, *values: Any) -> Any:
            "values must already be of type `str`"
            if len(values) > 3:
                raise TypeError(f"too many arguments for str(): {values!r}")
            if len(values) == 1:
                # it must be a string
                if not isinstance(values[0], str):
                    raise TypeError(f"{values[0]!r} is not a string")
            if len(values) >= 2:
                # check that encoding argument is a string
                if not isinstance(values[1], str):
                    raise TypeError(f"encoding must be a string, not {values[1]!r}")
            if len(values) == 3:
                # check that errors argument is a string
                if not isinstance(values[2], str):
                    raise TypeError("errors must be a string, not %r" % (values[2]))
            value = str(*values)
            member = str.__new__(cls, value)
            member._value_ = value
            return member

        @staticmethod
        def _generate_next_value_(name: str, start: int, count: int, last_values: Any) -> str:
            """
            Return the lower-cased version of the member name.
            """
            return name.lower()

        @classmethod
        def _missing_(cls, value: Any) -> str | None:
            if not isinstance(value, str):
                return None
            if not hasattr(cls, "_lower_members"):
                cls._lower_members = {x.casefold(): x for x in cls}  # type: ignore[attr-defined]
            return cls._lower_members.get(value.casefold(), None)  # type: ignore[attr-defined]

        def __str__(self) -> str:
            return self.value

else:
    from enum import StrEnum as _StrEnum
    from hashlib import file_digest

    class StrEnum(_StrEnum):
        @classmethod
        def _missing_(cls, value: Any) -> str | None:
            if not isinstance(value, str):
                return None
            if not hasattr(cls, "_lower_members"):
                cls._lower_members = {x.casefold(): x for x in cls}  # type: ignore[attr-defined]
            return cls._lower_members.get(value.casefold(), None)  # type: ignore[attr-defined]


logger = logging.getLogger(__name__)


_KT = TypeVar("_KT")
_VT = TypeVar("_VT")


class DefaultDict(dict[_KT, _VT]):
    def __init__(self, *args, default: Callable[[_KT], _VT | _KT] | None = None, **kwargs) -> None:  # type: ignore[no-untyped-def]
        super().__init__(*args, **kwargs)
        self.default = default

    def __missing__(self, key: _KT) -> _VT | _KT:
        if self.default is None:
            return key
        return self.default(key)


class Parser(StrEnum):
    ORIGINAL = auto()
    COMPLICATED = auto()
    COMICFN2DICT = auto()


def _custom_key(tup: Any) -> Any:
    import natsort

    lst = []
    for x in natsort.os_sort_keygen()(tup):
        ret = x
        if isinstance(x, Sequence) and len(x) > 1 and isinstance(x[1], int) and isinstance(x[0], str) and x[0] == "":
            ret = ("a", *x[1:])

        lst.append(ret)
    return tuple(lst)


T = TypeVar("T")


def os_sorted(lst: Iterable[T]) -> list[T]:
    import natsort

    key = _custom_key
    if icu_available or platform.system() == "Windows":
        key = natsort.os_sort_keygen()
    return sorted(sorted(lst), key=key)  # type: ignore[type-var]


KNOWN_IMAGE_EXTENSIONS = {".jpg", ".jpeg", ".png", ".gif", ".webp", ".avif"}


def parse_filename(
    filename: str,
    parser: Parser = Parser.ORIGINAL,
    remove_c2c: bool = False,
    remove_fcbd: bool = False,
    remove_publisher: bool = False,
    split_words: bool = False,
    allow_issue_start_with_letter: bool = False,
    protofolius_issue_number_scheme: bool = False,
) -> filenameparser.FilenameInfo:
    fni = filenameparser.FilenameInfo(
        alternate="",
        annual=False,
        archive="",
        c2c=False,
        fcbd=False,
        format="",
        issue="",
        issue_count="",
        publisher="",
        remainder="",
        series="",
        title="",
        volume="",
        volume_count="",
        year="",
    )
    if not filename:
        return fni
    if split_words:
        import wordninja

        filename, ext = os.path.splitext(filename)
        filename = " ".join(wordninja.split(filename)) + ext

    if parser == Parser.COMPLICATED:
        lex = filenamelexer.Lex(filename, allow_issue_start_with_letter)
        p = filenameparser.Parse(
            lex.items,
            remove_c2c=remove_c2c,
            remove_fcbd=remove_fcbd,
            remove_publisher=remove_publisher,
            protofolius_issue_number_scheme=protofolius_issue_number_scheme,
        )
        if p.error:
            logger.info("Issue parsing filename: '%s': %s", filename, p.error.val)
        fni = p.filename_info
    elif parser == Parser.COMICFN2DICT:
        fn2d = comicfn2dict(filename)
        fni = filenameparser.FilenameInfo(
            alternate="",
            annual=False,
            archive=fn2d.get("ext", ""),
            c2c=False,
            fcbd=False,
            issue=fn2d.get("issue", ""),
            issue_count=fn2d.get("issue_count", ""),
            publisher=fn2d.get("publisher", ""),
            remainder=fn2d.get("scan_info", ""),
            series=fn2d.get("series", ""),
            title=fn2d.get("title", ""),
            volume=fn2d.get("volume", ""),
            volume_count=fn2d.get("volume_count", ""),
            year=fn2d.get("year", ""),
            format=fn2d.get("original_format", ""),
        )
    else:
        fnp = filenameparser.FileNameParser()
        fnp.parse_filename(filename)
        fni = filenameparser.FilenameInfo(
            alternate="",
            annual=False,
            archive="",
            c2c=False,
            fcbd=False,
            issue=fnp.issue,
            issue_count=fnp.issue_count,
            publisher="",
            remainder=fnp.remainder,
            series=fnp.series,
            title="",
            volume=fnp.volume,
            volume_count="",
            year=fnp.year,
            format="",
        )
    return fni

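A quick usage sketch for the parser dispatch above (illustrative; which fields get populated depends on the backend chosen):

    from comicapi import utils

    fni = utils.parse_filename("Amazing Example v2 #003 (of 12) (2019).cbz", parser=utils.Parser.COMPLICATED)
    print(fni)  # FilenameInfo with series/volume/issue/issue_count/year populated where recognized
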
def norm_fold(string: str) -> str:
    """Normalise and casefold a string."""
    return unicodedata.normalize("NFKD", string).casefold()


def combine_notes(existing_notes: str | None, new_notes: str | None, split: str) -> str:
    split_notes, split_str, untouched_notes = (existing_notes or "").rpartition(split)
    if split_notes or split_str:
        return (split_notes + (new_notes or "")).strip()
    else:
        return (untouched_notes + "\n" + (new_notes or "")).strip()


def parse_date_str(date_str: str | None) -> tuple[int | None, int | None, int | None]:
    day = None
    month = None
    year = None
    if date_str:
        parts = date_str.split("-")
        year = xlate_int(parts[0])
        if len(parts) > 1:
            month = xlate_int(parts[1])
        if len(parts) > 2:
            day = xlate_int(parts[2])
    return day, month, year


def shorten_path(path: pathlib.Path, path2: pathlib.Path | None = None) -> tuple[pathlib.Path, pathlib.Path]:
    if path2:
        path2 = path2.absolute()

    path = path.absolute()
    shortened_path: pathlib.Path = path
    relative_path = pathlib.Path(path.anchor)

    if path.is_relative_to(path.home()):
        relative_path = path.home()
        shortened_path = path.relative_to(path.home())
    if path.is_relative_to(path.cwd()):
        relative_path = path.cwd()
        shortened_path = path.relative_to(path.cwd())

    if path2 and shortened_path.is_relative_to(path2.parent):
        relative_path = path2
        shortened_path = shortened_path.relative_to(path2)

    return relative_path, shortened_path


def path_to_short_str(original_path: pathlib.Path, renamed_path: pathlib.Path | None = None) -> str:
    rel, _original_path = shorten_path(original_path)
    path_str = str(_original_path)
    if rel.samefile(rel.cwd()):
        path_str = f"./{_original_path}"
    elif rel.samefile(rel.home()):
        path_str = f"~/{_original_path}"

    if renamed_path:
        rel, path = shorten_path(renamed_path, original_path.parent)
        rename_str = f" -> {path}"
        if rel.samefile(rel.cwd()):
            rename_str = f" -> ./{_original_path}"
        elif rel.samefile(rel.home()):
            rename_str = f" -> ~/{_original_path}"
        path_str += rename_str

    return path_str


def get_page_name_list(files: list[str]) -> list[str]:
    # get the list of file names in the archive, and sort
    files = cast(list[str], os_sorted(files))

    # make a sub-list of image files
    page_list = []
    for name in files:
        if os.path.splitext(name)[1].casefold() in KNOWN_IMAGE_EXTENSIONS and os.path.basename(name)[0] != ".":
            page_list.append(name)
    return page_list


def get_recursive_filelist(pathlist: list[str]) -> list[str]:
    """Get a recursive list of all files under all path items in the list."""

    filelist: list[str] = []
    for p in pathlist:
        if os.path.isdir(p):
            for root, _, files in os.walk(p):
                for f in files:
                    filelist.append(os.path.join(root, f))
        elif os.path.exists(p):
            filelist.append(p)

    return filelist


def add_to_path(dirname: str) -> None:
    if dirname:
        dirname = os.path.abspath(dirname)
        paths = [os.path.normpath(x) for x in split(os.environ["PATH"], os.pathsep)]

        if dirname not in paths:
            paths.insert(0, dirname)
            os.environ["PATH"] = os.pathsep.join(paths)


def remove_from_path(dirname: str) -> None:
    if dirname:
        dirname = os.path.abspath(dirname)
        paths = [os.path.normpath(x) for x in split(os.environ["PATH"], os.pathsep) if dirname != os.path.normpath(x)]

        os.environ["PATH"] = os.pathsep.join(paths)


def xlate_int(data: Any) -> int | None:
    data = xlate_float(data)
    if data is None:
        return None
    return int(data)


def xlate_float(data: Any) -> float | None:
    if isinstance(data, str):
        data = data.strip()
    if data is None or data == "":
        return None
    i: str | int | float
    if isinstance(data, (int, float)):
        i = data
    else:
        i = str(data).translate(
            DefaultDict(zip((ord(c) for c in "1234567890."), "1234567890."), default=lambda x: None)
        )
    if i == "":
        return None
    try:
        return float(i)
    except ValueError:
        return None


def xlate(data: Any) -> str | None:
    if data is None or isinstance(data, str) and data.strip() == "":
        return None

    return str(data).strip()


def split(s: str | None, c: str) -> list[str]:
    s = xlate(s)
    if s:
        return [x.strip() for x in s.strip().split(c) if x.strip()]
    return []

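The xlate family normalizes "empty" inputs to None, and the numeric variants drop any character that is not a digit or a dot before converting. A short sketch of the behavior:

    from comicapi import utils

    assert utils.xlate("  ") is None
    assert utils.xlate(5) == "5"
    assert utils.xlate_int("64 pages") == 64   # non-digit characters are dropped by the translate table
    assert utils.xlate_float("1,5") == 15.0    # note: the comma is dropped, not treated as a decimal point
    assert utils.split("a, b,, c", ",") == ["a", "b", "c"]
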
def split_urls(s: str | None) -> list[Url]:
    if s is None:
        return []
    # Find occurrences of ' http'
    if s.count("http") > 1 and s.count(" http") >= 1:
        urls = []
        # Split the urls apart
        url_strings = split(s, " http")
        # Restore the scheme 'http' and parse the url
        for i, url_string in enumerate(url_strings):
            if not url_string.startswith("http"):
                url_string = "http" + url_string
            urls.append(parse_url(url_string))
        return urls
    else:
        return [parse_url(s)]

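This is the counterpart of joining md.web_links with spaces when writing ComicInfo's <Web> field: two URLs jammed into one string come back as separate parsed URLs. For example:

    from comicapi import utils

    urls = utils.split_urls("https://example.com/a https://example.com/b")
    print([u.url for u in urls])  # both URLs, each parsed individually
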
def remove_articles(text: str) -> str:
    text = text.casefold()
    articles = [
        "&",
        "a",
        "am",
        "an",
        "and",
        "as",
        "at",
        "be",
        "but",
        "by",
        "for",
        "if",
        "is",
        "issue",
        "it",
        "it's",
        "its",
        "itself",
        "of",
        "or",
        "so",
        "the",
        "with",
    ]
    new_text = ""
    for word in text.split():
        if word not in articles:
            new_text += word + " "

    new_text = new_text[:-1]

    return new_text


def sanitize_title(text: str, basic: bool = False) -> str:
    # normalize unicode and convert to ascii. Does not work for everything, e.g. ½ becomes 1⁄2, not 1/2
    text = unicodedata.normalize("NFKD", text).casefold()
    # comicvine keeps apostrophes as part of the word
    text = text.replace("'", "")
    text = text.replace('"', "")
    if not basic:
        # comicvine ignores punctuation and accents
        # remove all characters that are not a letter, separator (space) or number
        # replace any "dash punctuation" with a space
        # makes sure that batman-superman and self-proclaimed stay separate words
        text = "".join(
            c if unicodedata.category(c)[0] not in "P" else " " for c in text if unicodedata.category(c)[0] in "LZNP"
        )
        # remove extra spaces and articles; everything is already lower case
        text = remove_articles(text).strip()

    return text


def titles_match(search_title: str, record_title: str, threshold: int = 90) -> bool:
    log_msg = "search title: %s ; record title: %s ; ratio: %d ; match threshold: %d"
    thresh = threshold / 100

    sanitized_search = sanitize_title(search_title)
    sanitized_record = sanitize_title(record_title)
    s = difflib.SequenceMatcher(None, sanitized_search, sanitized_record)

    ratio = s.real_quick_ratio()
    if ratio < thresh:
        logger.debug(log_msg, search_title, record_title, ratio * 100, threshold)
        return False

    ratio = s.quick_ratio()
    if ratio < thresh:
        logger.debug(log_msg, search_title, record_title, ratio * 100, threshold)
        return False

    ratio = s.ratio()
    if ratio < thresh:
        logger.debug(log_msg, search_title, record_title, ratio * 100, threshold)
        return False

    logger.debug(log_msg, search_title, record_title, ratio * 100, threshold)
    return True

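titles_match escalates through difflib's three ratio estimates (cheapest first), bailing out as soon as one falls below the threshold. A quick illustration:

    from comicapi import utils

    # sanitize_title() strips punctuation, articles, and case before difflib compares the two
    print(utils.titles_match("The Amazing Spider-Man", "amazing spider man"))  # True
    print(utils.titles_match("Batman", "Superman", threshold=90))              # False
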
def unique_file(file_name: pathlib.Path) -> pathlib.Path:
    name = file_name.stem
    counter = 1
    while True:
        if not file_name.exists():
            return file_name
        file_name = file_name.with_stem(name + " (" + str(counter) + ")")
        counter += 1


def parse_version(s: str) -> tuple[int, int, int]:
    str_parts = s.split(".")[:3]
    parts = [int(x) if x.isdigit() else 0 for x in str_parts]
    parts.extend([0] * (3 - len(parts)))  # Ensure exactly three elements in the resulting list

    return (parts[0], parts[1], parts[2])


_languages: dict[str | None, str | None] = DefaultDict(default=lambda x: None)

_countries: dict[str | None, str | None] = DefaultDict(default=lambda x: None)


def countries() -> dict[str | None, str | None]:
    if not _countries:
        import isocodes

        for alpha_2, c in isocodes.countries.by_alpha_2:
            _countries[alpha_2] = c["name"]
    return _countries.copy()


def languages() -> dict[str | None, str | None]:
    if not _languages:
        import isocodes

        for alpha_2, lng in isocodes.extendend_languages._sorted_by_index(index="alpha_2"):
            _languages[alpha_2] = lng["name"]
    return _languages.copy()


def get_language_from_iso(iso: str | None) -> str | None:
    if not _languages:
        return languages()[iso]
    return _languages[iso]


def get_language_iso(string: str | None) -> str | None:
    if string is None:
        return None
    import isocodes

    # Return the original string if all else fails
    lang = string.casefold()

    found = None

    for lng in isocodes.extendend_languages.items:
        for x in ("alpha_2", "alpha_3", "bibliographic", "common_name", "name"):
            if x in lng and lng[x].casefold() == lang:
                found = lng
                # break
        if found:
            break

    if found:
        return found.get("alpha_2", None)
    return lang


def get_country_from_iso(iso: str | None) -> str | None:
    if not _countries:
        return countries()[iso]
    return _countries[iso]


def get_publisher(publisher: str) -> tuple[str, str]:
    imprint = ""

    for pub in publishers.values():
        imprint, publisher, ok = pub[publisher]
        if ok:
            break

    return imprint, publisher


def update_publishers(new_publishers: Mapping[str, Mapping[str, str]]) -> None:
    for publisher in new_publishers:
        if publisher in publishers:
            publishers[publisher].update(new_publishers[publisher])
        else:
            publishers[publisher] = ImprintDict(publisher, new_publishers[publisher])


class ImprintDict(dict[str, str]):
    """
    ImprintDict takes a publisher and a dict or mapping of lowercased
    imprint names to the proper imprint name. Retrieving a value from an
    ImprintDict returns a tuple of (imprint, publisher, keyExists).
    If the key does not exist, the key is returned as the publisher unchanged.
    """

    def __init__(self, publisher: str, mapping: Mapping[str, str] = {}, **kwargs) -> None:  # type: ignore[no-untyped-def]
        super().__init__(mapping, **kwargs)
        self.publisher = publisher

    def __missing__(self, key: str) -> None:
        return None

    def __getitem__(self, k: str) -> tuple[str, str, bool]:  # type: ignore[override]
        item = super().__getitem__(k.casefold())
        if k.casefold() == self.publisher.casefold():
            return "", self.publisher, True
        if item is None:
            return "", k, False
        else:
            return item, self.publisher, True

    def copy(self) -> ImprintDict:
        return ImprintDict(self.publisher, super().copy())

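A sketch of the imprint lookup in action (the table contents here are a hypothetical example, not shipped data; note the mapping keys must be lowercased, per the ImprintDict docstring):

    from comicapi import utils

    utils.update_publishers({"DC Comics": {"vertigo": "Vertigo"}})

    print(utils.get_publisher("Vertigo"))    # ('Vertigo', 'DC Comics') - imprint resolved to its parent
    print(utils.get_publisher("DC Comics"))  # ('', 'DC Comics')        - the publisher itself is not an imprint
    print(utils.get_publisher("Image"))      # ('', 'Image')            - unknown names pass through unchanged
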
publishers: dict[str, ImprintDict] = {}


def load_publishers() -> None:
    try:
        update_publishers(json.loads((comicapi.data.data_path / "publishers.json").read_text("utf-8")))
    except Exception:
        logger.exception("Failed to load publishers.json; there are no publishers or imprints loaded")


__all__ = (
    "load_publishers",
    "file_digest",
    "Parser",
    "ImprintDict",
    "os_sorted",
    "parse_filename",
    "norm_fold",
    "combine_notes",
    "parse_date_str",
    "shorten_path",
    "path_to_short_str",
    "get_page_name_list",
    "get_recursive_filelist",
    "add_to_path",
    "remove_from_path",
    "xlate_int",
    "xlate_float",
    "xlate",
    "split",
    "split_urls",
    "remove_articles",
    "sanitize_title",
    "titles_match",
    "unique_file",
    "parse_version",
    "countries",
    "languages",
    "get_language_from_iso",
    "get_language_iso",
    "get_country_from_iso",
    "get_publisher",
    "update_publishers",
)
@@ -1,5 +0,0 @@
#!/usr/bin/env python
from comictaggerlib.main import ctmain

if __name__ == '__main__':
    ctmain()
@@ -1,18 +0,0 @@
The unrar.dll library is freeware. This means:

   1. All copyrights to RAR and the unrar.dll are exclusively
      owned by the author - Alexander Roshal.

   2. The unrar.dll library may be used in any software to handle RAR
      archives without limitations free of charge.

   3. THE RAR ARCHIVER AND THE UNRAR.DLL LIBRARY ARE DISTRIBUTED "AS IS".
      NO WARRANTY OF ANY KIND IS EXPRESSED OR IMPLIED. YOU USE AT
      YOUR OWN RISK. THE AUTHOR WILL NOT BE LIABLE FOR DATA LOSS,
      DAMAGES, LOSS OF PROFITS OR ANY OTHER KIND OF LOSS WHILE USING
      OR MISUSING THIS SOFTWARE.

Thank you for your interest in RAR and unrar.dll.


                                        Alexander L. Roshal
Binary file not shown.
@@ -1,140 +0,0 @@
#ifndef _UNRAR_DLL_
#define _UNRAR_DLL_

#define ERAR_END_ARCHIVE      10
#define ERAR_NO_MEMORY        11
#define ERAR_BAD_DATA         12
#define ERAR_BAD_ARCHIVE      13
#define ERAR_UNKNOWN_FORMAT   14
#define ERAR_EOPEN            15
#define ERAR_ECREATE          16
#define ERAR_ECLOSE           17
#define ERAR_EREAD            18
#define ERAR_EWRITE           19
#define ERAR_SMALL_BUF        20
#define ERAR_UNKNOWN          21
#define ERAR_MISSING_PASSWORD 22

#define RAR_OM_LIST           0
#define RAR_OM_EXTRACT        1
#define RAR_OM_LIST_INCSPLIT  2

#define RAR_SKIP     0
#define RAR_TEST     1
#define RAR_EXTRACT  2

#define RAR_VOL_ASK     0
#define RAR_VOL_NOTIFY  1

#define RAR_DLL_VERSION 4

#ifdef _UNIX
#define CALLBACK
#define PASCAL
#define LONG long
#define HANDLE void *
#define LPARAM long
#define UINT unsigned int
#endif

struct RARHeaderData
{
  char         ArcName[260];
  char         FileName[260];
  unsigned int Flags;
  unsigned int PackSize;
  unsigned int UnpSize;
  unsigned int HostOS;
  unsigned int FileCRC;
  unsigned int FileTime;
  unsigned int UnpVer;
  unsigned int Method;
  unsigned int FileAttr;
  char         *CmtBuf;
  unsigned int CmtBufSize;
  unsigned int CmtSize;
  unsigned int CmtState;
};


struct RARHeaderDataEx
{
  char         ArcName[1024];
  wchar_t      ArcNameW[1024];
  char         FileName[1024];
  wchar_t      FileNameW[1024];
  unsigned int Flags;
  unsigned int PackSize;
  unsigned int PackSizeHigh;
  unsigned int UnpSize;
  unsigned int UnpSizeHigh;
  unsigned int HostOS;
  unsigned int FileCRC;
  unsigned int FileTime;
  unsigned int UnpVer;
  unsigned int Method;
  unsigned int FileAttr;
  char         *CmtBuf;
  unsigned int CmtBufSize;
  unsigned int CmtSize;
  unsigned int CmtState;
  unsigned int Reserved[1024];
};


struct RAROpenArchiveData
{
  char         *ArcName;
  unsigned int OpenMode;
  unsigned int OpenResult;
  char         *CmtBuf;
  unsigned int CmtBufSize;
  unsigned int CmtSize;
  unsigned int CmtState;
};

struct RAROpenArchiveDataEx
{
  char         *ArcName;
  wchar_t      *ArcNameW;
  unsigned int OpenMode;
  unsigned int OpenResult;
  char         *CmtBuf;
  unsigned int CmtBufSize;
  unsigned int CmtSize;
  unsigned int CmtState;
  unsigned int Flags;
  unsigned int Reserved[32];
};

enum UNRARCALLBACK_MESSAGES {
  UCM_CHANGEVOLUME, UCM_PROCESSDATA, UCM_NEEDPASSWORD
};

typedef int (CALLBACK *UNRARCALLBACK)(UINT msg, LPARAM UserData, LPARAM P1, LPARAM P2);

typedef int (PASCAL *CHANGEVOLPROC)(char *ArcName, int Mode);
typedef int (PASCAL *PROCESSDATAPROC)(unsigned char *Addr, int Size);

#ifdef __cplusplus
extern "C" {
#endif

HANDLE PASCAL RAROpenArchive(struct RAROpenArchiveData *ArchiveData);
HANDLE PASCAL RAROpenArchiveEx(struct RAROpenArchiveDataEx *ArchiveData);
int    PASCAL RARCloseArchive(HANDLE hArcData);
int    PASCAL RARReadHeader(HANDLE hArcData, struct RARHeaderData *HeaderData);
int    PASCAL RARReadHeaderEx(HANDLE hArcData, struct RARHeaderDataEx *HeaderData);
int    PASCAL RARProcessFile(HANDLE hArcData, int Operation, char *DestPath, char *DestName);
int    PASCAL RARProcessFileW(HANDLE hArcData, int Operation, wchar_t *DestPath, wchar_t *DestName);
void   PASCAL RARSetCallback(HANDLE hArcData, UNRARCALLBACK Callback, LPARAM UserData);
void   PASCAL RARSetChangeVolProc(HANDLE hArcData, CHANGEVOLPROC ChangeVolProc);
void   PASCAL RARSetProcessDataProc(HANDLE hArcData, PROCESSDATAPROC ProcessDataProc);
void   PASCAL RARSetPassword(HANDLE hArcData, char *Password);
int    PASCAL RARGetDllVersion();

#ifdef __cplusplus
}
#endif

#endif
Binary file not shown.
@@ -1,606 +0,0 @@

                          UnRAR.dll Manual
                          ~~~~~~~~~~~~~~~~

   UnRAR.dll is a 32-bit Windows dynamic-link library which provides
   file extraction from RAR archives.


                         Exported functions

====================================================================
HANDLE PASCAL RAROpenArchive(struct RAROpenArchiveData *ArchiveData)
====================================================================

Description
~~~~~~~~~~~
Open RAR archive and allocate memory structures

Parameters
~~~~~~~~~~
ArchiveData   Points to RAROpenArchiveData structure

struct RAROpenArchiveData
{
  char *ArcName;
  UINT OpenMode;
  UINT OpenResult;
  char *CmtBuf;
  UINT CmtBufSize;
  UINT CmtSize;
  UINT CmtState;
};

Structure fields:

ArcName
  Input parameter which should point to zero terminated string
  containing the archive name.

OpenMode
  Input parameter.

  Possible values

  RAR_OM_LIST
    Open archive for reading file headers only.

  RAR_OM_EXTRACT
    Open archive for testing and extracting files.

  RAR_OM_LIST_INCSPLIT
    Open archive for reading file headers only. If you open an archive
    in such mode, RARReadHeader[Ex] will return all file headers,
    including those with "file continued from previous volume" flag.
    In case of RAR_OM_LIST such headers are automatically skipped.
    So if you process RAR volumes in RAR_OM_LIST_INCSPLIT mode, you will
    get several file header records for same file if file is split between
    volumes. For such files only the last file header record will contain
    the correct file CRC and if you wish to get the correct packed size,
    you need to sum up packed sizes of all parts.

OpenResult
  Output parameter.

  Possible values

  0                    Success
  ERAR_NO_MEMORY       Not enough memory to initialize data structures
  ERAR_BAD_DATA        Archive header broken
  ERAR_BAD_ARCHIVE     File is not valid RAR archive
  ERAR_UNKNOWN_FORMAT  Unknown encryption used for archive headers
  ERAR_EOPEN           File open error

CmtBuf
  Input parameter which should point to the buffer for archive
  comments. Maximum comment size is limited to 64Kb. Comment text is
  zero terminated. If the comment text is larger than the buffer
  size, the comment text will be truncated. If CmtBuf is set to
  NULL, comments will not be read.

CmtBufSize
  Input parameter which should contain size of buffer for archive
  comments.

CmtSize
  Output parameter containing size of comments actually read into the
  buffer, cannot exceed CmtBufSize.

CmtState
  Output parameter.

  Possible values

  0                    Comments not present
  1                    Comments read completely
  ERAR_NO_MEMORY       Not enough memory to extract comments
  ERAR_BAD_DATA        Broken comment
  ERAR_UNKNOWN_FORMAT  Unknown comment format
  ERAR_SMALL_BUF       Buffer too small, comments not completely read

Return values
~~~~~~~~~~~~~
Archive handle or NULL in case of error


========================================================================
HANDLE PASCAL RAROpenArchiveEx(struct RAROpenArchiveDataEx *ArchiveData)
========================================================================

Description
~~~~~~~~~~~
Similar to RAROpenArchive, but uses RAROpenArchiveDataEx structure
allowing to specify Unicode archive name and returning information
about archive flags.

Parameters
~~~~~~~~~~
ArchiveData   Points to RAROpenArchiveDataEx structure

struct RAROpenArchiveDataEx
{
  char         *ArcName;
  wchar_t      *ArcNameW;
  unsigned int OpenMode;
  unsigned int OpenResult;
  char         *CmtBuf;
  unsigned int CmtBufSize;
  unsigned int CmtSize;
  unsigned int CmtState;
  unsigned int Flags;
  unsigned int Reserved[32];
};

Structure fields:

ArcNameW
  Input parameter which should point to zero terminated Unicode string
  containing the archive name or NULL if Unicode name is not specified.

Flags
  Output parameter. Combination of bit flags.

  Possible values

  0x0001  - Volume attribute (archive volume)
  0x0002  - Archive comment present
  0x0004  - Archive lock attribute
  0x0008  - Solid attribute (solid archive)
  0x0010  - New volume naming scheme ('volname.partN.rar')
  0x0020  - Authenticity information present
  0x0040  - Recovery record present
  0x0080  - Block headers are encrypted
  0x0100  - First volume (set only by RAR 3.0 and later)

Reserved[32]
  Reserved for future use. Must be zero.

Information on other structure fields and function return values
is available above, in the RAROpenArchive function description.


====================================================================
int PASCAL RARCloseArchive(HANDLE hArcData)
====================================================================

Description
~~~~~~~~~~~
Close RAR archive and release allocated memory. It must be called when
archive processing is finished, even if the archive processing was stopped
due to an error.

Parameters
~~~~~~~~~~
hArcData
  This parameter should contain the archive handle obtained from the
  RAROpenArchive function call.

Return values
~~~~~~~~~~~~~
0             Success
ERAR_ECLOSE   Archive close error


====================================================================
int PASCAL RARReadHeader(HANDLE hArcData,
                         struct RARHeaderData *HeaderData)
====================================================================

Description
~~~~~~~~~~~
Read header of file in archive.

Parameters
~~~~~~~~~~
hArcData
  This parameter should contain the archive handle obtained from the
  RAROpenArchive function call.

HeaderData
  It should point to RARHeaderData structure:

  struct RARHeaderData
  {
    char ArcName[260];
    char FileName[260];
    UINT Flags;
    UINT PackSize;
    UINT UnpSize;
    UINT HostOS;
    UINT FileCRC;
    UINT FileTime;
    UINT UnpVer;
    UINT Method;
    UINT FileAttr;
    char *CmtBuf;
    UINT CmtBufSize;
    UINT CmtSize;
    UINT CmtState;
  };

Structure fields:

ArcName
  Output parameter which contains a zero terminated string of the
  current archive name. May be used to determine the current volume
  name.

FileName
  Output parameter which contains a zero terminated string of the
  file name in OEM (DOS) encoding.

Flags
  Output parameter which contains file flags:

  0x01 - file continued from previous volume
  0x02 - file continued on next volume
  0x04 - file encrypted with password
  0x08 - file comment present
  0x10 - compression of previous files is used (solid flag)

  bits 7 6 5

       0 0 0 - dictionary size   64 KB
       0 0 1 - dictionary size  128 KB
       0 1 0 - dictionary size  256 KB
       0 1 1 - dictionary size  512 KB
       1 0 0 - dictionary size 1024 KB
       1 0 1 - dictionary size 2048 KB
       1 1 0 - dictionary size 4096 KB
       1 1 1 - file is directory

  Other bits are reserved.

PackSize
  Output parameter: packed file size, or size of the
  file part if the file was split between volumes.

UnpSize
  Output parameter - unpacked file size.

HostOS
  Output parameter - operating system used for archiving:

  0 - MS DOS
  1 - OS/2
  2 - Win32
  3 - Unix

FileCRC
  Output parameter which contains unpacked file CRC. In case of file parts
  split between volumes only the last part contains the correct CRC
  and it is accessible only in RAR_OM_LIST_INCSPLIT listing mode.

FileTime
  Output parameter - contains date and time in standard MS DOS format.

UnpVer
  Output parameter - RAR version needed to extract file.
  It is encoded as 10 * Major version + minor version.

Method
  Output parameter - packing method.

FileAttr
  Output parameter - file attributes.

CmtBuf
  File comments support is not implemented in the new DLL version yet.
  Now CmtState is always 0.

  /*
   * Input parameter which should point to the buffer for file
   * comments. Maximum comment size is limited to 64Kb. Comment text is
   * a zero terminated string in OEM encoding. If the comment text is
   * larger than the buffer size, the comment text will be truncated.
   * If CmtBuf is set to NULL, comments will not be read.
   */

CmtBufSize
  Input parameter which should contain size of buffer for archive
  comments.

CmtSize
  Output parameter containing size of comments actually read into the
  buffer, should not exceed CmtBufSize.

CmtState
  Output parameter.

  Possible values

  0                    Absent comments
  1                    Comments read completely
  ERAR_NO_MEMORY       Not enough memory to extract comments
  ERAR_BAD_DATA        Broken comment
  ERAR_UNKNOWN_FORMAT  Unknown comment format
  ERAR_SMALL_BUF       Buffer too small, comments not completely read

Return values
~~~~~~~~~~~~~

0                  Success
ERAR_END_ARCHIVE   End of archive
ERAR_BAD_DATA      File header broken


====================================================================
int PASCAL RARReadHeaderEx(HANDLE hArcData,
                           struct RARHeaderDataEx *HeaderData)
====================================================================

Description
~~~~~~~~~~~
Similar to RARReadHeader, but uses RARHeaderDataEx structure,
containing information about Unicode file names and 64 bit file sizes.

struct RARHeaderDataEx
{
  char         ArcName[1024];
  wchar_t      ArcNameW[1024];
  char         FileName[1024];
  wchar_t      FileNameW[1024];
  unsigned int Flags;
  unsigned int PackSize;
  unsigned int PackSizeHigh;
  unsigned int UnpSize;
  unsigned int UnpSizeHigh;
  unsigned int HostOS;
  unsigned int FileCRC;
  unsigned int FileTime;
  unsigned int UnpVer;
  unsigned int Method;
  unsigned int FileAttr;
  char         *CmtBuf;
  unsigned int CmtBufSize;
  unsigned int CmtSize;
  unsigned int CmtState;
  unsigned int Reserved[1024];
};


====================================================================
int PASCAL RARProcessFile(HANDLE hArcData,
                          int Operation,
                          char *DestPath,
                          char *DestName)
====================================================================

Description
~~~~~~~~~~~
Performs the action and moves the current position in the archive to
the next file. Extracts or tests the current file from the archive
opened in RAR_OM_EXTRACT mode. If the mode RAR_OM_LIST is set,
then a call to this function will simply skip the archive position
to the next file.

Parameters
~~~~~~~~~~
hArcData
  This parameter should contain the archive handle obtained from the
  RAROpenArchive function call.

Operation
  File operation.

  Possible values

  RAR_SKIP     Move to the next file in the archive. If the
               archive is solid and RAR_OM_EXTRACT mode was set
               when the archive was opened, the current file will
               be processed - the operation will be performed
               slower than a simple seek.

  RAR_TEST     Test the current file and move to the next file in
               the archive. If the archive was opened with
               RAR_OM_LIST mode, the operation is equal to
               RAR_SKIP.

  RAR_EXTRACT  Extract the current file and move to the next file.
               If the archive was opened with RAR_OM_LIST mode,
               the operation is equal to RAR_SKIP.

DestPath
  This parameter should point to a zero terminated string containing the
  destination directory to which to extract files. If DestPath is equal
  to NULL, it means extract to the current directory. This parameter has
  meaning only if DestName is NULL.

DestName
  This parameter should point to a string containing the full path and name
  to assign to extracted file or it can be NULL to use the default name.
  If DestName is defined (not NULL), it overrides both the original file
  name saved in the archive and the path specified in the DestPath setting.

  Both DestPath and DestName must be in OEM encoding. If necessary,
  use CharToOem to convert text to OEM before passing to this function.

Return values
~~~~~~~~~~~~~
0                    Success
ERAR_BAD_DATA        File CRC error
ERAR_BAD_ARCHIVE     Volume is not valid RAR archive
ERAR_UNKNOWN_FORMAT  Unknown archive format
ERAR_EOPEN           Volume open error
ERAR_ECREATE         File create error
ERAR_ECLOSE          File close error
ERAR_EREAD           Read error
ERAR_EWRITE          Write error


Note: if you wish to cancel extraction, return -1 when processing
the UCM_PROCESSDATA callback message.


====================================================================
int PASCAL RARProcessFileW(HANDLE hArcData,
                           int Operation,
                           wchar_t *DestPath,
                           wchar_t *DestName)
====================================================================

Description
~~~~~~~~~~~
Unicode version of RARProcessFile. It uses Unicode DestPath
and DestName parameters; other parameters and return values
are the same as in RARProcessFile.


====================================================================
void PASCAL RARSetCallback(HANDLE hArcData,
    int PASCAL (*CallbackProc)(UINT msg, LPARAM UserData, LPARAM P1, LPARAM P2),
    LPARAM UserData);
====================================================================

Description
~~~~~~~~~~~
Set a user-defined callback function to process Unrar events.

Parameters
~~~~~~~~~~
hArcData
  This parameter should contain the archive handle obtained from the
  RAROpenArchive function call.

CallbackProc
  It should point to a user-defined callback function.

  The function will be passed four parameters:

  msg         Type of event. Described below.

  UserData    User defined value passed to RARSetCallback.

  P1 and P2   Event dependent parameters. Described below.

  Possible events

  UCM_CHANGEVOLUME    Process volume change.

    P1   Points to the zero terminated name
         of the next volume.

    P2   The function call mode:

    RAR_VOL_ASK     Required volume is absent. The function should
                    prompt user and return a positive value
                    to retry or return -1 value to terminate
                    operation. The function may also specify a new
                    volume name, placing it to the address specified
                    by P1 parameter.

    RAR_VOL_NOTIFY  Required volume is successfully opened.
                    This is a notification call and volume name
                    modification is not allowed. The function should
                    return a positive value to continue or -1
                    to terminate operation.

  UCM_PROCESSDATA     Process unpacked data. It may be used to read
                      a file while it is being extracted or tested
                      without actually extracting the file to disk.
                      Return a positive value to continue the process
                      or -1 to cancel the archive operation.

    P1   Address pointing to the unpacked data.
         Function may refer to the data but must not
         change it.

    P2   Size of the unpacked data. It is guaranteed
         only that the size will not exceed the maximum
         dictionary size (4 MB in RAR 3.0).

  UCM_NEEDPASSWORD    DLL needs a password to process archive.
                      This message must be processed if you wish
                      to be able to handle archives with encrypted
                      file names. It can also be used as a replacement
                      for the RARSetPassword function even for usual
                      encrypted files with non-encrypted names.

    P1   Address pointing to the buffer for a password.
         You need to copy a password here.

    P2   Size of the password buffer.

UserData
  User data passed to callback function.

Other functions of UnRAR.dll should not be called from the callback
function.

Return values
~~~~~~~~~~~~~
None


====================================================================
void PASCAL RARSetChangeVolProc(HANDLE hArcData,
    int PASCAL (*ChangeVolProc)(char *ArcName, int Mode));
====================================================================

Obsoleted, use RARSetCallback instead.


====================================================================
void PASCAL RARSetProcessDataProc(HANDLE hArcData,
    int PASCAL (*ProcessDataProc)(unsigned char *Addr, int Size))
====================================================================

Obsoleted, use RARSetCallback instead.


====================================================================
void PASCAL RARSetPassword(HANDLE hArcData,
                           char *Password);
====================================================================

Description
~~~~~~~~~~~
Set a password to decrypt files.

Parameters
~~~~~~~~~~
hArcData
  This parameter should contain the archive handle obtained from the
  RAROpenArchive function call.

Password
  It should point to a string containing a zero terminated password.

Return values
~~~~~~~~~~~~~
None


====================================================================
int PASCAL RARGetDllVersion();
====================================================================

Description
~~~~~~~~~~~
Returns the API version.

Parameters
~~~~~~~~~~
None.

Return values
~~~~~~~~~~~~~
Returns an integer value denoting the UnRAR.dll API version, which is also
defined in unrar.h as RAR_DLL_VERSION. The API version number is incremented
only in case of noticeable changes in the UnRAR.dll API. Do not confuse it
with the version of UnRAR.dll stored in the DLL resources, which is incremented
with every DLL rebuild.

If RARGetDllVersion() returns a value lower than the one your
application was designed for, it may indicate that the DLL version is too old
and it will fail to provide all necessary functions to your application.

This function is absent in old versions of UnRAR.dll, so it is safer
to use LoadLibrary and GetProcAddress to access this function.

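A minimal sketch of the version probe the manual recommends, in Python via ctypes (Windows-only; assumes unrar.dll is on the DLL search path). ctypes resolves exports lazily, so a missing RARGetDllVersion surfaces as an AttributeError on an old DLL:

    import ctypes

    unrar = ctypes.WinDLL("unrar.dll")
    try:
        version = unrar.RARGetDllVersion()  # encoded as RAR_DLL_VERSION in unrar.h
        print("unrar.dll API version:", version)
    except AttributeError:
        print("Old unrar.dll: RARGetDllVersion is not exported")
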
@@ -1,80 +0,0 @@
List of unrar.dll API changes. We do not include performance and
reliability improvements in this list, but this library and the
RAR/UnRAR tools share the same source code, so the latest version of
unrar.dll usually contains the same decompression algorithm changes
as the latest UnRAR version.
============================================================================

-- 18 January 2008

   All LONG parameters of the CallbackProc function were changed
   to the LPARAM type for 64-bit mode compatibility.


-- 12 December 2007

   Added the new RAR_OM_LIST_INCSPLIT open mode for the
   RAROpenArchive function.


-- 14 August 2007

   Added NoCrypt\unrar_nocrypt.dll without decryption code for those
   applications where the presence of encryption or decryption code
   is not allowed because of legal restrictions.


-- 14 December 2006

   Added the ERAR_MISSING_PASSWORD error type. This error is returned
   if an empty password is specified for an encrypted file.


-- 12 June 2003

   Added the RARProcessFileW function, a Unicode version of
   RARProcessFile.


-- 9 August 2002

   Added the RAROpenArchiveEx function, allowing you to specify a
   Unicode archive name and get archive flags.


-- 24 January 2002

   Added the RARReadHeaderEx function, allowing you to read Unicode
   file names and 64-bit file sizes.


-- 23 January 2002

   Added the ERAR_UNKNOWN error type (used for all errors which do
   not have a special ERAR code yet) and the UCM_NEEDPASSWORD
   callback message.

   Unrar.dll now automatically opens all subsequent volumes not only
   when extracting, but also in RAR_OM_LIST mode.


-- 27 November 2001

   RARSetChangeVolProc and RARSetProcessDataProc are replaced by the
   single callback function installed with RARSetCallback. Unlike the
   old style callbacks, the new function accepts a user-defined
   parameter. Unrar.dll still supports RARSetChangeVolProc and
   RARSetProcessDataProc for compatibility purposes, but if you are
   writing a new application, it is better to use RARSetCallback.

   File comment support is not implemented in the new DLL version
   yet; for now, CmtState is always 0.


-- 13 August 2001

   Added the RARGetDllVersion function, so you may distinguish the
   old unrar.dll, which used C style callback functions, from the new
   one with PASCAL callbacks.


-- 10 May 2001

   Callback functions in RARSetChangeVolProc and RARSetProcessDataProc
   now use the PASCAL calling convention.
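
For reference, the callback mechanism described in the 27 November
2001 entry looks roughly like this from Python. The prototype and
message constants below mirror the pyUnRAR2 Windows backend later in
this changeset; the archive handle is assumed to come from
RAROpenArchiveEx:

    import ctypes

    UCM_CHANGEVOLUME, UCM_PROCESSDATA, UCM_NEEDPASSWORD = 0, 1, 2

    # PASCAL (stdcall) callback taking a user-defined parameter
    UNRARCALLBACK = ctypes.WINFUNCTYPE(
        ctypes.c_int, ctypes.c_uint, ctypes.c_long, ctypes.c_long, ctypes.c_long
    )

    def _callback(msg, user_data, p1, p2):
        if msg == UCM_PROCESSDATA:
            chunk = (ctypes.c_char * p2).from_address(p1).raw  # unpacked bytes
            print("got %d bytes" % len(chunk))
        return 1  # a non-negative result tells UnRAR.dll to continue

    c_callback = UNRARCALLBACK(_callback)  # keep a reference alive for the DLL
    # unrar.RARSetCallback(handle, c_callback, 0)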
@@ -1 +0,0 @@
This is the x64 version of unrar.dll.
Binary file not shown.
Binary file not shown.
@@ -1,177 +0,0 @@
# Copyright (c) 2003-2005 Jimmy Retzlaff, 2008 Konstantin Yegupov
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

"""
pyUnRAR2 is a ctypes-based wrapper around the free UnRAR.dll.

It is a modified version of Jimmy Retzlaff's pyUnRAR - simpler,
more stable and foolproof.
Notice that it has an INCOMPATIBLE interface.

It enables reading and unpacking of archives created with the
RAR/WinRAR archivers. There is a low-level interface which is very
similar to the C interface provided by UnRAR. There is also a
higher level interface which makes some common operations easier.
"""

__version__ = '0.99.3'

try:
    WindowsError
    in_windows = True
except NameError:
    in_windows = False

if in_windows:
    from windows import RarFileImplementation
else:
    from unix import RarFileImplementation


import fnmatch, time, weakref

class RarInfo(object):
    """Represents a file header in an archive. Don't instantiate directly.
    Use only to obtain information about a file.
    YOU CANNOT EXTRACT FILE CONTENTS USING THIS OBJECT.
    USE METHODS OF THE RarFile CLASS INSTEAD.

    Properties:
        index - index of the file within the archive
        filename - name of the file in the archive, including path (if any)
        datetime - file date/time as a struct_time suitable for time.strftime
        isdir - True if the file is a directory
        size - size in bytes of the uncompressed file
        comment - comment associated with the file

    Note - this is not currently intended to be a Python file-like object.
    """

    def __init__(self, rarfile, data):
        self.rarfile = weakref.proxy(rarfile)
        self.index = data['index']
        self.filename = data['filename']
        self.isdir = data['isdir']
        self.size = data['size']
        self.datetime = data['datetime']
        self.comment = data['comment']

    def __str__(self):
        try:
            arcName = self.rarfile.archiveName
        except ReferenceError:
            arcName = "[ARCHIVE_NO_LONGER_LOADED]"
        return '<RarInfo "%s" in "%s">' % (self.filename, arcName)

class RarFile(RarFileImplementation):

    def __init__(self, archiveName, password=None):
        """Instantiate the archive.

        archiveName is the name of the RAR file.
        password is used to decrypt the files in the archive.

        Properties:
            comment - comment associated with the archive

        >>> print RarFile('test.rar').comment
        This is a test.
        """
        self.archiveName = archiveName
        RarFileImplementation.init(self, password)

    def __del__(self):
        self.destruct()

    def infoiter(self):
        """Iterate over all the files in the archive, generating RarInfos.

        >>> import os
        >>> for fileInArchive in RarFile('test.rar').infoiter():
        ...     print os.path.split(fileInArchive.filename)[-1],
        ...     print fileInArchive.isdir,
        ...     print fileInArchive.size,
        ...     print fileInArchive.comment,
        ...     print tuple(fileInArchive.datetime)[0:5],
        ...     print time.strftime('%a, %d %b %Y %H:%M', fileInArchive.datetime)
        test True 0 None (2003, 6, 30, 1, 59) Mon, 30 Jun 2003 01:59
        test.txt False 20 None (2003, 6, 30, 2, 1) Mon, 30 Jun 2003 02:01
        this.py False 1030 None (2002, 2, 8, 16, 47) Fri, 08 Feb 2002 16:47
        """
        for params in RarFileImplementation.infoiter(self):
            yield RarInfo(self, params)

    def infolist(self):
        """Return a list of RarInfos, describing the contents of the archive."""
        return list(self.infoiter())

    def read_files(self, condition='*'):
        """Read specific files from the archive into memory.
        If "condition" is a list of numbers, return the files which have those
            positions in infolist.
        If "condition" is a string, it is treated as a wildcard for the names
            of files to extract.
        If "condition" is a function, it is treated as a callback function,
            which accepts a RarInfo object and returns boolean True (extract)
            or False (skip).
        If "condition" is omitted, all files are returned.

        Returns a list of tuples (RarInfo info, str contents)
        """
        checker = condition2checker(condition)
        return RarFileImplementation.read_files(self, checker)

    def extract(self, condition='*', path='.', withSubpath=True, overwrite=True):
        """Extract specific files from the archive to disk.

        If "condition" is a list of numbers, extract the files which have
            those positions in infolist.
        If "condition" is a string, it is treated as a wildcard for the names
            of files to extract.
        If "condition" is a function, it is treated as a callback function,
            which accepts a RarInfo object and returns either boolean True
            (extract) or boolean False (skip).
        DEPRECATED: If the "condition" callback returns a string (only
            supported on Windows), that string is used as the new name to
            save the file under.
        If "condition" is omitted, all files are extracted.

        "path" is the directory to extract to.
        "withSubpath" denotes whether files are extracted with their full
            path in the archive.
        "overwrite" denotes whether extracted files will overwrite old ones.
            Defaults to True.

        Returns a list of RarInfos for the extracted files."""
        checker = condition2checker(condition)
        return RarFileImplementation.extract(self, checker, path, withSubpath, overwrite)

def condition2checker(condition):
    """Converts the different condition types to a callback."""
    if type(condition) in [str, unicode]:
        def smatcher(info):
            return fnmatch.fnmatch(info.filename, condition)
        return smatcher
    elif type(condition) in [list, tuple] and type(condition[0]) in [int, long]:
        def imatcher(info):
            return info.index in condition
        return imatcher
    elif callable(condition):
        return condition
    else:
        raise TypeError
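
As a quick illustration of the three condition forms accepted above
(the archive name is hypothetical):

    import UnRAR2

    rf = UnRAR2.RarFile('example.rar')

    rf.extract('*.txt')                                # wildcard on file names
    rf.extract([0, 2], path='out', withSubpath=False)  # positions in infolist()
    rf.extract(lambda info: info.size < 4096)          # per-file callback

    # read into memory instead of extracting to disk
    for info, contents in rf.read_files('*.txt'):
        print info.filename, len(contents)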
@@ -1,30 +0,0 @@
# Copyright (c) 2003-2005 Jimmy Retzlaff, 2008 Konstantin Yegupov
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# Low level interface - see UnRARDLL\UNRARDLL.TXT


class ArchiveHeaderBroken(Exception): pass
class InvalidRARArchive(Exception): pass
class FileOpenError(Exception): pass
class IncorrectRARPassword(Exception): pass
class InvalidRARArchiveUsage(Exception): pass
@@ -1,138 +0,0 @@
import os, sys

import UnRAR2
from UnRAR2.rar_exceptions import *


def cleanup(dir='test'):
    for path, dirs, files in os.walk(dir):
        for fn in files:
            os.remove(os.path.join(path, fn))
        for dir in dirs:
            os.removedirs(os.path.join(path, dir))


# basic test
cleanup()
rarc = UnRAR2.RarFile('test.rar')
rarc.infolist()
assert rarc.comment == "This is a test."
for info in rarc.infoiter():
    saveinfo = info
    assert (str(info)=="""<RarInfo "test" in "test.rar">""")
    break
rarc.extract()
assert os.path.exists('test'+os.sep+'test.txt')
assert os.path.exists('test'+os.sep+'this.py')
del rarc
assert (str(saveinfo)=="""<RarInfo "test" in "[ARCHIVE_NO_LONGER_LOADED]">""")
cleanup()

# extract all the files in test.rar
cleanup()
UnRAR2.RarFile('test.rar').extract()
assert os.path.exists('test'+os.sep+'test.txt')
assert os.path.exists('test'+os.sep+'this.py')
cleanup()

# extract all the files in test.rar matching the wildcard *.txt
cleanup()
UnRAR2.RarFile('test.rar').extract('*.txt')
assert os.path.exists('test'+os.sep+'test.txt')
assert not os.path.exists('test'+os.sep+'this.py')
cleanup()


# check the name and size of each file, extracting small ones
cleanup()
archive = UnRAR2.RarFile('test.rar')
assert archive.comment == 'This is a test.'
archive.extract(lambda rarinfo: rarinfo.size <= 1024)
for rarinfo in archive.infoiter():
    if rarinfo.size <= 1024 and not rarinfo.isdir:
        assert rarinfo.size == os.stat(rarinfo.filename).st_size
assert file('test'+os.sep+'test.txt', 'rt').read() == 'This is only a test.'
assert not os.path.exists('test'+os.sep+'this.py')
cleanup()


# extract this.py, overriding its destination
cleanup('test2')
archive = UnRAR2.RarFile('test.rar')
archive.extract('*.py', 'test2', False)
assert os.path.exists('test2'+os.sep+'this.py')
cleanup('test2')


# extract test.txt to memory
cleanup()
archive = UnRAR2.RarFile('test.rar')
entries = UnRAR2.RarFile('test.rar').read_files('*test.txt')
assert len(entries)==1
assert entries[0][0].filename.endswith('test.txt')
assert entries[0][1]=='This is only a test.'


# extract all the files in test.rar with overwriting
cleanup()
fo = open('test'+os.sep+'test.txt',"wt")
fo.write("blah")
fo.close()
UnRAR2.RarFile('test.rar').extract('*.txt')
assert open('test'+os.sep+'test.txt',"rt").read()!="blah"
cleanup()

# extract all the files in test.rar without overwriting
cleanup()
fo = open('test'+os.sep+'test.txt',"wt")
fo.write("blahblah")
fo.close()
UnRAR2.RarFile('test.rar').extract('*.txt', overwrite = False)
assert open('test'+os.sep+'test.txt',"rt").read()=="blahblah"
cleanup()

# list big file in an archive
list(UnRAR2.RarFile('test_nulls.rar').infoiter())

# extract files from an archive with protected files
cleanup()
rarc = UnRAR2.RarFile('test_protected_files.rar', password="protected")
rarc.extract()
assert os.path.exists('test'+os.sep+'top_secret_xxx_file.txt')
cleanup()
errored = False
try:
    UnRAR2.RarFile('test_protected_files.rar', password="proteqted").extract()
except IncorrectRARPassword:
    errored = True
assert not os.path.exists('test'+os.sep+'top_secret_xxx_file.txt')
assert errored
cleanup()

# extract files from an archive with protected headers
cleanup()
UnRAR2.RarFile('test_protected_headers.rar', password="secret").extract()
assert os.path.exists('test'+os.sep+'top_secret_xxx_file.txt')
cleanup()
errored = False
try:
    UnRAR2.RarFile('test_protected_headers.rar', password="seqret").extract()
except IncorrectRARPassword:
    errored = True
assert not os.path.exists('test'+os.sep+'top_secret_xxx_file.txt')
assert errored
cleanup()

# make sure docstring examples are working
import doctest
doctest.testmod(UnRAR2)

# update documentation
import pydoc
pydoc.writedoc(UnRAR2)

# cleanup
try:
    os.remove('__init__.pyc')
except:
    pass
@@ -1,218 +0,0 @@
# Copyright (c) 2003-2005 Jimmy Retzlaff, 2008 Konstantin Yegupov
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# The Unix version uses the unrar command line executable

import subprocess
import gc

import os, os.path
import time, re

from rar_exceptions import *

class UnpackerNotInstalled(Exception): pass

rar_executable_cached = None
rar_executable_version = None

def call_unrar(params):
    "Calls the rar/unrar command line executable and returns the Popen object"
    global rar_executable_cached
    if rar_executable_cached is None:
        for command in ('unrar', 'rar'):
            try:
                subprocess.Popen([command], stdout=subprocess.PIPE)
                rar_executable_cached = command
                break
            except OSError:
                pass
        if rar_executable_cached is None:
            raise UnpackerNotInstalled("No suitable RAR unpacker installed")

    assert type(params) == list, "params must be list"
    args = [rar_executable_cached] + params
    try:
        gc.disable()  # See http://bugs.python.org/issue1336
        return subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    finally:
        gc.enable()

class RarFileImplementation(object):

    def init(self, password=None):
        global rar_executable_version
        self.password = password

        stdoutdata, stderrdata = self.call('v', []).communicate()

        for line in stderrdata.splitlines():
            if line.strip().startswith("Cannot open"):
                raise FileOpenError
            if line.find("CRC failed") >= 0:
                raise IncorrectRARPassword
        accum = []
        source = iter(stdoutdata.splitlines())
        line = ''
        while not (line.startswith('UNRAR')):
            line = source.next()
        signature = line
        # The code below is mighty flaky
        # and will probably crash on localized versions of RAR
        # but I see no safe way to rewrite it using a CLI tool
        if signature.startswith("UNRAR 4"):
            rar_executable_version = 4
            while not (line.startswith('Comment:') or line.startswith('Pathname/Comment')):
                if line.strip().endswith('is not RAR archive'):
                    raise InvalidRARArchive
                line = source.next()
            while not line.startswith('Pathname/Comment'):
                accum.append(line.rstrip('\n'))
                line = source.next()
            if len(accum):
                accum[0] = accum[0][9:]  # strip out the "Comment:" prefix
                self.comment = '\n'.join(accum[:-1])
            else:
                self.comment = None
        elif signature.startswith("UNRAR 5"):
            rar_executable_version = 5
            line = source.next()
            while not line.startswith('Archive:'):
                if line.strip().endswith('is not RAR archive'):
                    raise InvalidRARArchive
                accum.append(line.rstrip('\n'))
                line = source.next()
            if len(accum):
                self.comment = '\n'.join(accum[:-1]).strip()
            else:
                self.comment = None
        else:
            raise UnpackerNotInstalled("Unsupported RAR version, expected 4.x or 5.x, found: "
                                       + signature.split(" ")[1])

    def escaped_password(self):
        return '-' if self.password == None else self.password

    def call(self, cmd, options=[], files=[]):
        options2 = options + ['p' + self.escaped_password()]
        soptions = ['-' + x for x in options2]
        return call_unrar([cmd] + soptions + ['--', self.archiveName] + files)
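
For illustration, this is the command line that call('v', ['c-'])
assembles when no password is set; the archive name is hypothetical:

    # mirrors the list-building in call() and call_unrar() above
    cmd, options, files = 'v', ['c-'], []
    password = None
    options2 = options + ['p' + ('-' if password is None else password)]
    args = ['unrar', cmd] + ['-' + x for x in options2] + ['--', 'example.rar'] + files
    print args  # ['unrar', 'v', '-c-', '-p-', '--', 'example.rar']

The '-p-' switch suppresses the password prompt and '--' ends option
parsing, so archive names beginning with '-' are handled safely.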

    def infoiter(self):

        command = "v" if rar_executable_version == 4 else "l"
        stdoutdata, stderrdata = self.call(command, ['c-']).communicate()

        for line in stderrdata.splitlines():
            if line.strip().startswith("Cannot open"):
                raise FileOpenError

        accum = []
        source = iter(stdoutdata.splitlines())
        line = ''
        while not line.startswith('-----------'):
            if line.strip().endswith('is not RAR archive'):
                raise InvalidRARArchive
            if line.startswith("CRC failed") or line.startswith("Checksum error"):
                raise IncorrectRARPassword
            line = source.next()
        line = source.next()
        i = 0
        re_spaces = re.compile(r"\s+")
        if rar_executable_version == 4:
            while not line.startswith('-----------'):
                accum.append(line)
                if len(accum) == 2:
                    data = {}
                    data['index'] = i
                    # asterisks mark password-encrypted files
                    data['filename'] = accum[0].strip().lstrip("*")
                    fields = re_spaces.split(accum[1].strip())
                    data['size'] = int(fields[0])
                    attr = fields[5]
                    data['isdir'] = 'd' in attr.lower()
                    data['datetime'] = time.strptime(fields[3] + " " + fields[4], '%d-%m-%y %H:%M')
                    data['comment'] = None
                    yield data
                    accum = []
                    i += 1
                line = source.next()
        elif rar_executable_version == 5:
            while not line.startswith('-----------'):
                fields = line.strip().lstrip("*").split()
                data = {}
                data['index'] = i
                data['filename'] = " ".join(fields[4:])
                data['size'] = int(fields[1])
                attr = fields[0]
                data['isdir'] = 'd' in attr.lower()
                data['datetime'] = time.strptime(fields[2] + " " + fields[3], '%d-%m-%y %H:%M')
                data['comment'] = None
                yield data
                i += 1
                line = source.next()

    def read_files(self, checker):
        res = []
        for info in self.infoiter():
            checkres = checker(info)
            if checkres == True and not info.isdir:
                pipe = self.call('p', ['inul'], [info.filename]).stdout
                res.append((info, pipe.read()))
        return res

    def extract(self, checker, path, withSubpath, overwrite):
        res = []
        command = 'x'
        if not withSubpath:
            command = 'e'
        options = []
        if overwrite:
            options.append('o+')
        else:
            options.append('o-')
        if not path.endswith(os.sep):
            path += os.sep
        names = []
        for info in self.infoiter():
            checkres = checker(info)
            if type(checkres) in [str, unicode]:
                raise NotImplementedError("Condition callbacks returning strings are deprecated and only supported on Windows")
            if checkres == True and not info.isdir:
                names.append(info.filename)
                res.append(info)
        names.append(path)
        proc = self.call(command, options, names)
        stdoutdata, stderrdata = proc.communicate()
        if stderrdata.find("CRC failed") >= 0 or stderrdata.find("Checksum error") >= 0:
            raise IncorrectRARPassword
        return res

    def destruct(self):
        pass
@@ -1,309 +0,0 @@
# Copyright (c) 2003-2005 Jimmy Retzlaff, 2008 Konstantin Yegupov
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# Low level interface - see UnRARDLL\UNRARDLL.TXT

from __future__ import generators

import ctypes, ctypes.wintypes
import os, os.path, sys
import Queue
import time

from rar_exceptions import *

ERAR_END_ARCHIVE = 10
ERAR_NO_MEMORY = 11
ERAR_BAD_DATA = 12
ERAR_BAD_ARCHIVE = 13
ERAR_UNKNOWN_FORMAT = 14
ERAR_EOPEN = 15
ERAR_ECREATE = 16
ERAR_ECLOSE = 17
ERAR_EREAD = 18
ERAR_EWRITE = 19
ERAR_SMALL_BUF = 20
ERAR_UNKNOWN = 21

RAR_OM_LIST = 0
RAR_OM_EXTRACT = 1

RAR_SKIP = 0
RAR_TEST = 1
RAR_EXTRACT = 2

RAR_VOL_ASK = 0
RAR_VOL_NOTIFY = 1

RAR_DLL_VERSION = 3

# enum UNRARCALLBACK_MESSAGES
UCM_CHANGEVOLUME = 0
UCM_PROCESSDATA = 1
UCM_NEEDPASSWORD = 2

architecture_bits = ctypes.sizeof(ctypes.c_voidp) * 8
dll_name = "unrar.dll"
if architecture_bits == 64:
    dll_name = "x64\\unrar64.dll"


try:
    unrar = ctypes.WinDLL(os.path.join(os.path.split(__file__)[0], 'UnRARDLL', dll_name))
except WindowsError:
    unrar = ctypes.WinDLL(dll_name)


class RAROpenArchiveDataEx(ctypes.Structure):
    def __init__(self, ArcName=None, ArcNameW=u'', OpenMode=RAR_OM_LIST):
        self.CmtBuf = ctypes.c_buffer(64 * 1024)
        ctypes.Structure.__init__(self, ArcName=ArcName, ArcNameW=ArcNameW, OpenMode=OpenMode,
                                  _CmtBuf=ctypes.addressof(self.CmtBuf), CmtBufSize=ctypes.sizeof(self.CmtBuf))

    _fields_ = [
        ('ArcName', ctypes.c_char_p),
        ('ArcNameW', ctypes.c_wchar_p),
        ('OpenMode', ctypes.c_uint),
        ('OpenResult', ctypes.c_uint),
        ('_CmtBuf', ctypes.c_voidp),
        ('CmtBufSize', ctypes.c_uint),
        ('CmtSize', ctypes.c_uint),
        ('CmtState', ctypes.c_uint),
        ('Flags', ctypes.c_uint),
        ('Reserved', ctypes.c_uint * 32),
    ]

class RARHeaderDataEx(ctypes.Structure):
    def __init__(self):
        self.CmtBuf = ctypes.c_buffer(64 * 1024)
        ctypes.Structure.__init__(self, _CmtBuf=ctypes.addressof(self.CmtBuf), CmtBufSize=ctypes.sizeof(self.CmtBuf))

    _fields_ = [
        ('ArcName', ctypes.c_char * 1024),
        ('ArcNameW', ctypes.c_wchar * 1024),
        ('FileName', ctypes.c_char * 1024),
        ('FileNameW', ctypes.c_wchar * 1024),
        ('Flags', ctypes.c_uint),
        ('PackSize', ctypes.c_uint),
        ('PackSizeHigh', ctypes.c_uint),
        ('UnpSize', ctypes.c_uint),
        ('UnpSizeHigh', ctypes.c_uint),
        ('HostOS', ctypes.c_uint),
        ('FileCRC', ctypes.c_uint),
        ('FileTime', ctypes.c_uint),
        ('UnpVer', ctypes.c_uint),
        ('Method', ctypes.c_uint),
        ('FileAttr', ctypes.c_uint),
        ('_CmtBuf', ctypes.c_voidp),
        ('CmtBufSize', ctypes.c_uint),
        ('CmtSize', ctypes.c_uint),
        ('CmtState', ctypes.c_uint),
        ('Reserved', ctypes.c_uint * 1024),
    ]

def DosDateTimeToTimeTuple(dosDateTime):
    """Convert an MS-DOS format date time to a Python time tuple."""
    dosDate = dosDateTime >> 16
    dosTime = dosDateTime & 0xffff
    day = dosDate & 0x1f
    month = (dosDate >> 5) & 0xf
    year = 1980 + (dosDate >> 9)
    second = 2 * (dosTime & 0x1f)
    minute = (dosTime >> 5) & 0x3f
    hour = dosTime >> 11
    return time.localtime(time.mktime((year, month, day, hour, minute, second, 0, 1, -1)))
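The DOS timestamp packs the date into the high 16 bits and the time
into the low 16 bits; a small worked example with a made-up value:

    # date word: (year-1980) in bits 9-15, month in bits 5-8, day in bits 0-4
    # time word: hour in bits 11-15, minute in bits 5-10, seconds/2 in bits 0-4
    stamp = ((28 << 9) | (1 << 5) | 18) << 16 | (12 << 11) | (30 << 5)
    print DosDateTimeToTimeTuple(stamp)[:6]  # (2008, 1, 18, 12, 30, 0)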
def _wrap(restype, function, argtypes):
    result = function
    result.argtypes = argtypes
    result.restype = restype
    return result

RARGetDllVersion = _wrap(ctypes.c_int, unrar.RARGetDllVersion, [])

RAROpenArchiveEx = _wrap(ctypes.wintypes.HANDLE, unrar.RAROpenArchiveEx, [ctypes.POINTER(RAROpenArchiveDataEx)])

RARReadHeaderEx = _wrap(ctypes.c_int, unrar.RARReadHeaderEx, [ctypes.wintypes.HANDLE, ctypes.POINTER(RARHeaderDataEx)])

_RARSetPassword = _wrap(ctypes.c_int, unrar.RARSetPassword, [ctypes.wintypes.HANDLE, ctypes.c_char_p])
def RARSetPassword(*args, **kwargs):
    _RARSetPassword(*args, **kwargs)

RARProcessFile = _wrap(ctypes.c_int, unrar.RARProcessFile, [ctypes.wintypes.HANDLE, ctypes.c_int, ctypes.c_char_p, ctypes.c_char_p])

RARCloseArchive = _wrap(ctypes.c_int, unrar.RARCloseArchive, [ctypes.wintypes.HANDLE])

UNRARCALLBACK = ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.c_uint, ctypes.c_long, ctypes.c_long, ctypes.c_long)
RARSetCallback = _wrap(ctypes.c_int, unrar.RARSetCallback, [ctypes.wintypes.HANDLE, UNRARCALLBACK, ctypes.c_long])


RARExceptions = {
    ERAR_NO_MEMORY: MemoryError,
    ERAR_BAD_DATA: ArchiveHeaderBroken,
    ERAR_BAD_ARCHIVE: InvalidRARArchive,
    ERAR_EOPEN: FileOpenError,
}

class PassiveReader:
    """Used for reading files to memory"""
    def __init__(self, usercallback=None):
        self.buf = []
        self.ucb = usercallback

    def _callback(self, msg, UserData, P1, P2):
        if msg == UCM_PROCESSDATA:
            data = (ctypes.c_char * P2).from_address(P1).raw
            if self.ucb != None:
                self.ucb(data)
            else:
                self.buf.append(data)
        return 1

    def get_result(self):
        return ''.join(self.buf)

class RarInfoIterator(object):
    def __init__(self, arc):
        self.arc = arc
        self.index = 0
        self.headerData = RARHeaderDataEx()
        self.res = RARReadHeaderEx(self.arc._handle, ctypes.byref(self.headerData))
        if self.res == ERAR_BAD_DATA:
            raise IncorrectRARPassword
        self.arc.lockStatus = "locked"
        self.arc.needskip = False

    def __iter__(self):
        return self

    def next(self):
        if self.index > 0:
            if self.arc.needskip:
                RARProcessFile(self.arc._handle, RAR_SKIP, None, None)
            self.res = RARReadHeaderEx(self.arc._handle, ctypes.byref(self.headerData))

        if self.res:
            raise StopIteration
        self.arc.needskip = True

        data = {}
        data['index'] = self.index
        data['filename'] = self.headerData.FileName
        data['datetime'] = DosDateTimeToTimeTuple(self.headerData.FileTime)
        data['isdir'] = ((self.headerData.Flags & 0xE0) == 0xE0)
        data['size'] = self.headerData.UnpSize + (self.headerData.UnpSizeHigh << 32)
        if self.headerData.CmtState == 1:
            data['comment'] = self.headerData.CmtBuf.value
        else:
            data['comment'] = None
        self.index += 1
        return data

    def __del__(self):
        self.arc.lockStatus = "finished"

def generate_password_provider(password):
    def password_provider_callback(msg, UserData, P1, P2):
        if msg == UCM_NEEDPASSWORD and password != None:
            (ctypes.c_char * P2).from_address(P1).value = password
        return 1
    return password_provider_callback

class RarFileImplementation(object):

    def init(self, password=None):
        self.password = password
        archiveData = RAROpenArchiveDataEx(ArcNameW=self.archiveName, OpenMode=RAR_OM_EXTRACT)
        self._handle = RAROpenArchiveEx(ctypes.byref(archiveData))
        self.c_callback = UNRARCALLBACK(generate_password_provider(self.password))
        RARSetCallback(self._handle, self.c_callback, 1)

        if archiveData.OpenResult != 0:
            raise RARExceptions[archiveData.OpenResult]

        if archiveData.CmtState == 1:
            self.comment = archiveData.CmtBuf.value
        else:
            self.comment = None

        if password:
            RARSetPassword(self._handle, password)

        self.lockStatus = "ready"

    def destruct(self):
        if self._handle and RARCloseArchive:
            RARCloseArchive(self._handle)

    def make_sure_ready(self):
        if self.lockStatus == "locked":
            raise InvalidRARArchiveUsage("cannot execute infoiter() without finishing previous one")
        if self.lockStatus == "finished":
            self.destruct()
            self.init(self.password)

    def infoiter(self):
        self.make_sure_ready()
        return RarInfoIterator(self)

    def read_files(self, checker):
        res = []
        for info in self.infoiter():
            if checker(info) and not info.isdir:
                reader = PassiveReader()
                c_callback = UNRARCALLBACK(reader._callback)
                RARSetCallback(self._handle, c_callback, 1)
                tmpres = RARProcessFile(self._handle, RAR_TEST, None, None)
                if tmpres == ERAR_BAD_DATA:
                    raise IncorrectRARPassword
                self.needskip = False
                res.append((info, reader.get_result()))
        return res

    def extract(self, checker, path, withSubpath, overwrite):
        res = []
        for info in self.infoiter():
            checkres = checker(info)
            if checkres != False and not info.isdir:
                if checkres == True:
                    fn = info.filename
                    if not withSubpath:
                        fn = os.path.split(fn)[-1]
                    target = os.path.join(path, fn)
                else:
                    raise DeprecationWarning("Condition callbacks returning strings are deprecated and only supported on Windows")
                    target = checkres
                if overwrite or (not os.path.exists(target)):
                    tmpres = RARProcessFile(self._handle, RAR_EXTRACT, None, target)
                    if tmpres == ERAR_BAD_DATA:
                        raise IncorrectRARPassword

                self.needskip = False
                res.append(info)
        return res
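
A short sketch of how PassiveReader and the callback machinery above
cooperate; it assumes `handle` came from RAROpenArchiveEx and that
RARReadHeaderEx has just positioned the archive at the file of
interest:

    def stream_current_file(handle, on_chunk):
        reader = PassiveReader(usercallback=on_chunk)  # chunks go to on_chunk
        c_callback = UNRARCALLBACK(reader._callback)   # hold a reference while processing
        RARSetCallback(handle, c_callback, 1)
        if RARProcessFile(handle, RAR_TEST, None, None) == ERAR_BAD_DATA:
            raise IncorrectRARPassword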
@@ -0,0 +1 @@
from __future__ import annotations

comictaggerlib/__main__.py (new file, 5 lines)
@@ -0,0 +1,5 @@
from __future__ import annotations

from comictaggerlib.main import main

main()

comictaggerlib/__pyinstaller/__init__.py (new file, 11 lines)
@@ -0,0 +1,11 @@
from __future__ import annotations

import os

import comicapi.__pyinstaller


def get_hook_dirs() -> list[str]:
    hooks = [os.path.dirname(__file__)]
    hooks.extend(comicapi.__pyinstaller.get_hook_dirs())
    return hooks

comictaggerlib/__pyinstaller/hook-comictaggerlib.py (new file, 8 lines)
@@ -0,0 +1,8 @@
from __future__ import annotations

from PyInstaller.utils.hooks import collect_data_files, collect_entry_point, collect_submodules

datas, hiddenimports = collect_entry_point("comictagger.talker")
hiddenimports += collect_submodules("comictaggerlib")
datas += collect_data_files("comictaggerlib.ui")
datas += collect_data_files("comictaggerlib.graphics")

comictaggerlib/__pyinstaller/hook-wordninja.py (new file, 7 lines)
@@ -0,0 +1,7 @@
from __future__ import annotations

import os

from PyInstaller.utils.hooks import get_module_file_attribute

datas = [(os.path.join(os.path.dirname(get_module_file_attribute("wordninja")), "wordninja"), "wordninja")]

comictaggerlib/applicationlogwindow.py (new file, 57 lines)
@@ -0,0 +1,57 @@
from __future__ import annotations

import logging
import pathlib

from PyQt6 import QtCore, QtGui, QtWidgets, uic

from comictaggerlib.ui import ui_path

logger = logging.getLogger(__name__)


class QTextEditLogger(QtCore.QObject, logging.Handler):
    qlog = QtCore.pyqtSignal(str)

    def __init__(self, formatter: logging.Formatter, level: int) -> None:
        super().__init__()
        self.setFormatter(formatter)
        self.setLevel(level)

    def emit(self, record: logging.LogRecord) -> None:
        msg = self.format(record)
        self.qlog.emit(msg.strip())


class ApplicationLogWindow(QtWidgets.QDialog):
    def __init__(
        self, log_folder: pathlib.Path, log_handler: QTextEditLogger, parent: QtCore.QObject | None = None
    ) -> None:
        super().__init__(parent)
        with (ui_path / "applicationlogwindow.ui").open(encoding="utf-8") as uifile:
            uic.loadUi(uifile, self)

        self.log_handler = log_handler
        self.log_handler.qlog.connect(self.textEdit.append)

        f = QtGui.QFont("menlo")
        f.setStyleHint(QtGui.QFont.StyleHint.Monospace)
        self.setFont(f)
        self._button = QtWidgets.QPushButton(self)
        self._button.setText("Test Me")

        self.log_folder = log_folder
        self.lblLogLocation.setText(f'Log Location: <a href="file://{log_folder}">{log_folder}</a>')

        layout = self.layout()
        layout.addWidget(self._button)

        # Connect signal to slot
        self._button.clicked.connect(self.test)
        self.textEdit.setTabStopDistance(self.textEdit.tabStopDistance() * 2)

    def test(self) -> None:
        logger.debug("damn, a bug")
        logger.info("something to remember")
        logger.warning("that's not right")
        logger.error("foobar")
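
A hedged sketch of wiring QTextEditLogger into the logging tree; the
format string is illustrative, not the application's actual format:

    import logging

    fmt = logging.Formatter("%(asctime)s %(levelname)s %(name)s %(message)s")
    qt_handler = QTextEditLogger(fmt, logging.DEBUG)
    logging.getLogger().addHandler(qt_handler)
    # every record now also reaches ApplicationLogWindow via the qlog signal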
@@ -1,233 +1,278 @@
"""
A PyQT4 dialog to select from automated issue matches
"""
"""A PyQT4 dialog to select from automated issue matches"""

"""
Copyright 2012-2014 Anthony Beville
#
# Copyright 2012-2014 ComicTagger Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

import sys
import logging
import os
from PyQt4 import QtCore, QtGui, uic
from typing import Callable

from PyQt4.QtCore import QUrl, pyqtSignal, QByteArray
from PyQt6 import QtCore, QtGui, QtWidgets, uic

from imagefetcher import ImageFetcher
from settings import ComicTaggerSettings
from comicarchive import MetaDataStyle
from coverimagewidget import CoverImageWidget
from comicvinetalker import ComicVineTalker
import utils
from comicapi.comicarchive import ComicArchive, tags
from comicapi.genericmetadata import GenericMetadata
from comictaggerlib.coverimagewidget import CoverImageWidget
from comictaggerlib.ctsettings import ct_ns
from comictaggerlib.md import prepare_metadata
from comictaggerlib.resulttypes import IssueResult, Result
from comictaggerlib.ui import ui_path
from comictalker.comictalker import ComicTalker, TalkerError

class AutoTagMatchWindow(QtGui.QDialog):

    volume_id = 0

    def __init__(self, parent, match_set_list, style, fetch_func):
        super(AutoTagMatchWindow, self).__init__(parent)

        uic.loadUi(ComicTaggerSettings.getUIFile('matchselectionwindow.ui'), self)
logger = logging.getLogger(__name__)

        self.altCoverWidget = CoverImageWidget(self.altCoverContainer, CoverImageWidget.AltCoverMode)
        gridlayout = QtGui.QGridLayout(self.altCoverContainer)
        gridlayout.addWidget(self.altCoverWidget)
        gridlayout.setContentsMargins(0, 0, 0, 0)

        self.archiveCoverWidget = CoverImageWidget(self.archiveCoverContainer, CoverImageWidget.ArchiveMode)
        gridlayout = QtGui.QGridLayout(self.archiveCoverContainer)
        gridlayout.addWidget(self.archiveCoverWidget)
        gridlayout.setContentsMargins(0, 0, 0, 0)
class AutoTagMatchWindow(QtWidgets.QDialog):
    def __init__(
        self,
        parent: QtWidgets.QWidget,
        match_set_list: list[Result],
        read_tags: list[str],
        fetch_func: Callable[[IssueResult], GenericMetadata],
        config: ct_ns,
        talker: ComicTalker,
    ) -> None:
        super().__init__(parent)

        utils.reduceWidgetFontSize(self.twList)
        utils.reduceWidgetFontSize(self.teDescription, 1)
        with (ui_path / "matchselectionwindow.ui").open(encoding="utf-8") as uifile:
            uic.loadUi(uifile, self)

        self.setWindowFlags(self.windowFlags() |
                            QtCore.Qt.WindowSystemMenuHint |
                            QtCore.Qt.WindowMaximizeButtonHint)

        self.skipButton = QtGui.QPushButton(self.tr("Skip to Next"))
        self.buttonBox.addButton(self.skipButton, QtGui.QDialogButtonBox.ActionRole)
        self.buttonBox.button(QtGui.QDialogButtonBox.Ok).setText("Accept and Write Tags")
        self.config = config

        self.match_set_list = match_set_list
        self.style = style
        self.fetch_func = fetch_func
        self.current_match_set: Result = match_set_list[0]

        self.current_match_set_idx = 0

        self.twList.currentItemChanged.connect(self.currentItemChanged)
        self.twList.cellDoubleClicked.connect(self.cellDoubleClicked)
        self.skipButton.clicked.connect(self.skipToNext)

        self.updateData()
        self.altCoverWidget = CoverImageWidget(
            self.altCoverContainer, CoverImageWidget.AltCoverMode, config.Runtime_Options__config.user_cache_dir
        )
        gridlayout = QtWidgets.QGridLayout(self.altCoverContainer)
        gridlayout.addWidget(self.altCoverWidget)
        gridlayout.setContentsMargins(0, 0, 0, 0)

    def updateData(self):
        self.archiveCoverWidget = CoverImageWidget(self.archiveCoverContainer, CoverImageWidget.ArchiveMode, None)
        gridlayout = QtWidgets.QGridLayout(self.archiveCoverContainer)
        gridlayout.addWidget(self.archiveCoverWidget)
        gridlayout.setContentsMargins(0, 0, 0, 0)

        self.current_match_set = self.match_set_list[self.current_match_set_idx]
        self.setWindowFlags(
            QtCore.Qt.WindowType(
                self.windowFlags()
                | QtCore.Qt.WindowType.WindowSystemMenuHint
                | QtCore.Qt.WindowType.WindowMaximizeButtonHint
            )
        )

        if self.current_match_set_idx + 1 == len(self.match_set_list):
            self.buttonBox.button(QtGui.QDialogButtonBox.Cancel).setDisabled(True)
            #self.buttonBox.button(QtGui.QDialogButtonBox.Ok).setText("Accept")
            self.skipButton.setText(self.tr("Skip"))

        self.setCoverImage()
        self.populateTable()
        self.twList.resizeColumnsToContents()
        self.twList.selectRow(0)

        path = self.current_match_set.ca.path
        self.setWindowTitle(u"Select correct match or skip ({0} of {1}): {2}".format(
            self.current_match_set_idx+1,
            len(self.match_set_list),
            os.path.split(path)[1]))

    def populateTable(self):
        self.skipButton = QtWidgets.QPushButton("Skip to Next")
        self.buttonBox.addButton(self.skipButton, QtWidgets.QDialogButtonBox.ButtonRole.ActionRole)
        self.buttonBox.button(QtWidgets.QDialogButtonBox.StandardButton.Ok).setText("Accept and Write Tags")

        while self.twList.rowCount() > 0:
            self.twList.removeRow(0)

        self.twList.setSortingEnabled(False)
        self.match_set_list = match_set_list
        self._tags = read_tags
        self.fetch_func = fetch_func

        row = 0
        for match in self.current_match_set.matches:
            self.twList.insertRow(row)

            item_text = match['series']
            item = QtGui.QTableWidgetItem(item_text)
            item.setData(QtCore.Qt.ToolTipRole, item_text)
            item.setData(QtCore.Qt.UserRole, (match,))
            item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
            self.twList.setItem(row, 0, item)
        self.current_match_set_idx = 0

            if match['publisher'] is not None:
                item_text = u"{0}".format(match['publisher'])
            else:
                item_text = u"Unknown"
            item = QtGui.QTableWidgetItem(item_text)
            item.setData(QtCore.Qt.ToolTipRole, item_text)
            item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
            self.twList.setItem(row, 1, item)

            month_str = u""
            year_str = u"????"
            if match['month'] is not None:
                month_str = u"-{0:02d}".format(int(match['month']))
            if match['year'] is not None:
                year_str = u"{0}".format(match['year'])
        self.twList.currentItemChanged.connect(self.current_item_changed)
        self.twList.cellDoubleClicked.connect(self.cell_double_clicked)
        self.skipButton.clicked.connect(self.skip_to_next)

            item_text = year_str + month_str
            item = QtGui.QTableWidgetItem(item_text)
            item.setData(QtCore.Qt.ToolTipRole, item_text)
            item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
            self.twList.setItem(row, 2, item)
        self.update_data()

            item_text = match['issue_title']
            if item_text is None:
                item_text = ""
            item = QtGui.QTableWidgetItem(item_text)
            item.setData(QtCore.Qt.ToolTipRole, item_text)
            item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
            self.twList.setItem(row, 3, item)

            row += 1
    def update_data(self) -> None:
        self.current_match_set = self.match_set_list[self.current_match_set_idx]

        self.twList.resizeColumnsToContents()
        self.twList.setSortingEnabled(True)
        self.twList.sortItems(2, QtCore.Qt.AscendingOrder)
        self.twList.selectRow(0)
        self.twList.resizeColumnsToContents()
        self.twList.horizontalHeader().setStretchLastSection(True)

        if self.current_match_set_idx + 1 == len(self.match_set_list):
            self.buttonBox.button(QtWidgets.QDialogButtonBox.StandardButton.Cancel).setDisabled(True)
            self.skipButton.setText("Skip")

    def cellDoubleClicked(self, r, c):
        self.accept()

    def currentItemChanged(self, curr, prev):
        self.set_cover_image()
        self.populate_table()
        self.twList.resizeColumnsToContents()
        self.twList.selectRow(0)

        if curr is None:
            return
        if prev is not None and prev.row() == curr.row():
            return

        self.altCoverWidget.setIssueID(self.currentMatch()['issue_id'])
        if self.currentMatch()['description'] is None:
            self.teDescription.setText("")
        else:
            self.teDescription.setText(self.currentMatch()['description'])

    def setCoverImage(self):
        ca = self.current_match_set.ca
        self.archiveCoverWidget.setArchive(ca)
        path = self.current_match_set.original_path
        self.setWindowTitle(
            "Select correct match or skip ({} of {}): {}".format(
                self.current_match_set_idx + 1,
                len(self.match_set_list),
                os.path.split(path)[1],
            )
        )

    def currentMatch(self):
        row = self.twList.currentRow()
        match = self.twList.item(row, 0).data(QtCore.Qt.UserRole).toPyObject()[0]
        return match

    def accept(self):
    def populate_table(self) -> None:
        if not self.current_match_set:
            return

        self.saveMatch()
        self.current_match_set_idx += 1

        if self.current_match_set_idx == len(self.match_set_list):
            # no more items
            QtGui.QDialog.accept(self)
        else:
            self.updateData()
        self.twList.setRowCount(0)

    def skipToNext(self):
        self.current_match_set_idx += 1

        if self.current_match_set_idx == len(self.match_set_list):
            # no more items
            QtGui.QDialog.reject(self)
        else:
            self.updateData()

    def reject(self):
        reply = QtGui.QMessageBox.question(self,
            self.tr("Cancel Matching"),
            self.tr("Are you sure you wish to cancel the matching process?"),
            QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)

        if reply == QtGui.QMessageBox.No:
            return
        self.twList.setSortingEnabled(False)

        QtGui.QDialog.reject(self)

    def saveMatch(self):

        match = self.currentMatch()
        ca = self.current_match_set.ca
        for row, match in enumerate(self.current_match_set.online_results):
            self.twList.insertRow(row)

        md = ca.readMetadata(self.style)
        if md.isEmpty:
            md = ca.metadataFromFilename()

        # now get the particular issue data
        cv_md = self.fetch_func(match)
        if cv_md is None:
            QtGui.QMessageBox.critical(self, self.tr("Network Issue"), self.tr("Could not connect to ComicVine to get issue details!"))
            return
            item_text = match.series
            item = QtWidgets.QTableWidgetItem(item_text)
            item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
            item.setData(QtCore.Qt.ItemDataRole.UserRole, (match,))
            item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
            self.twList.setItem(row, 0, item)

        QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
        md.overlay(cv_md)
        success = ca.writeMetadata(md, self.style)
        ca.loadCache([MetaDataStyle.CBI, MetaDataStyle.CIX])

        QtGui.QApplication.restoreOverrideCursor()

        if not success:
            QtGui.QMessageBox.warning(self, self.tr("Write Error"), self.tr("Saving the tags to the archive seemed to fail!"))
            if match.publisher is not None:
                item_text = str(match.publisher)
            else:
                item_text = "Unknown"
            item = QtWidgets.QTableWidgetItem(item_text)
            item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
            item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
            self.twList.setItem(row, 1, item)

            month_str = ""
            year_str = "????"
            if match.month is not None:
                month_str = f"-{int(match.month):02d}"
            if match.year is not None:
                year_str = str(match.year)

            item_text = year_str + month_str
            item = QtWidgets.QTableWidgetItem(item_text)
            item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
            item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
            self.twList.setItem(row, 2, item)

            item_text = match.issue_title
            if item_text is None:
                item_text = ""
            item = QtWidgets.QTableWidgetItem(item_text)
            item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
            item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
            self.twList.setItem(row, 3, item)

        self.twList.resizeColumnsToContents()
        self.twList.setSortingEnabled(True)
        self.twList.sortItems(2, QtCore.Qt.SortOrder.AscendingOrder)
        self.twList.selectRow(0)
        self.twList.resizeColumnsToContents()
        self.twList.horizontalHeader().setStretchLastSection(True)

    def cell_double_clicked(self, r: int, c: int) -> None:
        self.accept()

    def current_item_changed(self, curr: QtCore.QModelIndex, prev: QtCore.QModelIndex) -> None:
        if curr is None:
            return None
        if prev is not None and prev.row() == curr.row():
            return None

        match = self.current_match()
        self.altCoverWidget.set_issue_details(match.issue_id, [match.image_url, *match.alt_image_urls])
        if match.description is None:
            self.teDescription.setText("")
        else:
            self.teDescription.setText(match.description)

    def set_cover_image(self) -> None:
        ca = ComicArchive(
            self.current_match_set.original_path, hash_archive=self.config.Runtime_Options__preferred_hash
        )
        self.archiveCoverWidget.set_archive(ca)

    def current_match(self) -> IssueResult:
        row = self.twList.currentRow()
        match: IssueResult = self.twList.item(row, 0).data(QtCore.Qt.ItemDataRole.UserRole)[0]
        return match

    def accept(self) -> None:
        self.save_match()
        self.current_match_set_idx += 1

        if self.current_match_set_idx == len(self.match_set_list):
            # no more items
            QtWidgets.QDialog.accept(self)
        else:
            self.update_data()

    def skip_to_next(self) -> None:
        self.current_match_set_idx += 1

        if self.current_match_set_idx == len(self.match_set_list):
            # no more items
            QtWidgets.QDialog.reject(self)
        else:
            self.update_data()

    def reject(self) -> None:
        reply = QtWidgets.QMessageBox.question(
            self,
            "Cancel Matching",
            "Are you sure you wish to cancel the matching process?",
            QtWidgets.QMessageBox.StandardButton.Yes,
            QtWidgets.QMessageBox.StandardButton.No,
        )

        if reply == QtWidgets.QMessageBox.StandardButton.No:
            return

        QtWidgets.QDialog.reject(self)

    def save_match(self) -> None:
        match = self.current_match()
        ca = ComicArchive(
            self.current_match_set.original_path, hash_archive=self.config.Runtime_Options__preferred_hash
        )
        md, error = self.parent().read_selected_tags(self._tags, ca)
        if error is not None:
            logger.error("Failed to load tags for %s: %s", ca.path, error)
            QtWidgets.QApplication.restoreOverrideCursor()
            QtWidgets.QMessageBox.critical(
                self,
                "Read Failed!",
                f"One or more of the read tags failed to load for {ca.path}, check log for details",
            )
            return

        if md.is_empty:
            md = ca.metadata_from_filename(
                self.config.Filename_Parsing__filename_parser,
                self.config.Filename_Parsing__remove_c2c,
                self.config.Filename_Parsing__remove_fcbd,
                self.config.Filename_Parsing__remove_publisher,
            )

        # now get the particular issue data

        try:
            self.current_match_set.md = ct_md = self.fetch_func(match)
        except TalkerError as e:
            QtWidgets.QApplication.restoreOverrideCursor()
            QtWidgets.QMessageBox.critical(self, f"{e.source} {e.code_name} Error", f"{e}")
            return

        if ct_md is None or ct_md.is_empty:
            QtWidgets.QMessageBox.critical(self, "Network Issue", "Could not retrieve issue details!")
            return

        QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.CursorShape.WaitCursor))
        md = prepare_metadata(md, ct_md, self.config)
        for tag_id in self._tags:
            success = ca.write_tags(md, tag_id)
            QtWidgets.QApplication.restoreOverrideCursor()
            if not success:
                QtWidgets.QMessageBox.warning(
                    self,
                    "Write Error",
                    f"Saving {tags[tag_id].name()} the tags to the archive seemed to fail!",
                )
                break

        ca.reset_cache()
@@ -1,69 +1,71 @@
-"""
-A PyQT4 dialog to show ID log and progress
-"""
-
-"""
-Copyright 2012-2014 Anthony Beville
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import sys
-from PyQt4 import QtCore, QtGui, uic
-import os
-from settings import ComicTaggerSettings
-from coverimagewidget import CoverImageWidget
-import utils
-
-class AutoTagProgressWindow(QtGui.QDialog):
-
-    def __init__(self, parent):
-        super(AutoTagProgressWindow, self).__init__(parent)
-
-        uic.loadUi(ComicTaggerSettings.getUIFile('autotagprogresswindow.ui' ), self)
-
-        self.archiveCoverWidget = CoverImageWidget( self.archiveCoverContainer, CoverImageWidget.DataMode, False )
-        gridlayout = QtGui.QGridLayout( self.archiveCoverContainer )
-        gridlayout.addWidget( self.archiveCoverWidget )
-        gridlayout.setContentsMargins(0,0,0,0)
-
-        self.testCoverWidget = CoverImageWidget( self.testCoverContainer, CoverImageWidget.DataMode, False )
-        gridlayout = QtGui.QGridLayout( self.testCoverContainer )
-        gridlayout.addWidget( self.testCoverWidget )
-        gridlayout.setContentsMargins(0,0,0,0)
-
-        self.isdone = False
-
-        self.setWindowFlags(self.windowFlags() |
-                            QtCore.Qt.WindowSystemMenuHint |
-                            QtCore.Qt.WindowMaximizeButtonHint)
-
-        utils.reduceWidgetFontSize( self.textEdit )
-
-    def setArchiveImage( self, img_data):
-        self.setCoverImage( img_data, self.archiveCoverWidget)
-
-    def setTestImage( self, img_data):
-        self.setCoverImage( img_data, self.testCoverWidget)
-
-    def setCoverImage( self, img_data , widget):
-        widget.setImageData( img_data )
-        QtCore.QCoreApplication.processEvents()
-        QtCore.QCoreApplication.processEvents()
-
-    def reject(self):
-        QtGui.QDialog.reject(self)
-        self.isdone = True
+"""A PyQT4 dialog to show ID log and progress"""
+
+#
+# Copyright 2012-2014 ComicTagger Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations
+
+import logging
+
+from PyQt6 import QtCore, QtWidgets, uic
+
+from comictaggerlib.coverimagewidget import CoverImageWidget
+from comictaggerlib.ui import ui_path
+from comictalker.comictalker import ComicTalker
+
+logger = logging.getLogger(__name__)
+
+
+class AutoTagProgressWindow(QtWidgets.QDialog):
+    def __init__(self, parent: QtWidgets.QWidget, talker: ComicTalker) -> None:
+        super().__init__(parent)
+
+        with (ui_path / "autotagprogresswindow.ui").open(encoding="utf-8") as uifile:
+            uic.loadUi(uifile, self)
+
+        self.lblSourceName.setText(talker.attribution)
+
+        self.archiveCoverWidget = CoverImageWidget(self.archiveCoverContainer, CoverImageWidget.DataMode, None, False)
+        gridlayout = QtWidgets.QGridLayout(self.archiveCoverContainer)
+        gridlayout.addWidget(self.archiveCoverWidget)
+        gridlayout.setContentsMargins(0, 0, 0, 0)
+
+        self.testCoverWidget = CoverImageWidget(self.testCoverContainer, CoverImageWidget.DataMode, None, False)
+        gridlayout = QtWidgets.QGridLayout(self.testCoverContainer)
+        gridlayout.addWidget(self.testCoverWidget)
+        gridlayout.setContentsMargins(0, 0, 0, 0)
+
+        self.isdone = False
+
+        self.setWindowFlags(
+            QtCore.Qt.WindowType(
+                self.windowFlags()
+                | QtCore.Qt.WindowType.WindowSystemMenuHint
+                | QtCore.Qt.WindowType.WindowMaximizeButtonHint
+            )
+        )
+
+    def set_archive_image(self, img_data: bytes) -> None:
+        self.set_cover_image(img_data, self.archiveCoverWidget)
+
+    def set_test_image(self, img_data: bytes) -> None:
+        self.set_cover_image(img_data, self.testCoverWidget)
+
+    def set_cover_image(self, img_data: bytes, widget: CoverImageWidget) -> None:
+        widget.set_image_data(img_data)
+        QtCore.QCoreApplication.processEvents()
+
+    def reject(self) -> None:
+        QtWidgets.QDialog.reject(self)
+        self.isdone = True
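The rewritten progress window is driven from the auto-tag loop: the caller pushes cover images into it and polls isdone, which reject() sets when the user closes the dialog, while processEvents() keeps the UI painting during the long-running loop. A minimal driving-loop sketch; the archives iterable and get_cover_bytes() helper are hypothetical stand-ins for code not shown in this diff:

# Sketch only: drives AutoTagProgressWindow the way an auto-tag loop would.
# `archives` and `get_cover_bytes()` are hypothetical stand-ins; the window
# and talker classes are the ones defined in the diff above.
from PyQt6 import QtCore

def run_auto_tag(parent, talker, archives):
    progress = AutoTagProgressWindow(parent, talker)
    progress.show()
    for ca in archives:
        if progress.isdone:  # set by reject() when the user cancels
            break
        progress.set_archive_image(get_cover_bytes(ca))  # hypothetical helper
        # ... identify the issue, then show the candidate cover ...
        QtCore.QCoreApplication.processEvents()  # keep the dialog responsive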
@@ -1,122 +1,104 @@
-"""
-A PyQT4 dialog to confirm and set options for auto-tag
-"""
-
-"""
-Copyright 2012-2014 Anthony Beville
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from PyQt4 import QtCore, QtGui, uic
-from settings import ComicTaggerSettings
-from settingswindow import SettingsWindow
-from filerenamer import FileRenamer
-import os
-import utils
-
-class AutoTagStartWindow(QtGui.QDialog):
-
-    def __init__( self, parent, settings, msg ):
-        super(AutoTagStartWindow, self).__init__(parent)
-
-        uic.loadUi(ComicTaggerSettings.getUIFile('autotagstartwindow.ui' ), self)
-        self.label.setText( msg )
-
-        self.setWindowFlags(self.windowFlags() &
-                            ~QtCore.Qt.WindowContextHelpButtonHint )
-
-        self.settings = settings
-
-        self.cbxSaveOnLowConfidence.setCheckState( QtCore.Qt.Unchecked )
-        self.cbxDontUseYear.setCheckState( QtCore.Qt.Unchecked )
-        self.cbxAssumeIssueOne.setCheckState( QtCore.Qt.Unchecked )
-        self.cbxIgnoreLeadingDigitsInFilename.setCheckState( QtCore.Qt.Unchecked )
-        self.cbxRemoveAfterSuccess.setCheckState( QtCore.Qt.Unchecked )
-        self.cbxSpecifySearchString.setCheckState( QtCore.Qt.Unchecked )
-        self.leNameLengthMatchTolerance.setText( str(self.settings.id_length_delta_thresh) )
-        self.leSearchString.setEnabled( False )
-
-        if self.settings.save_on_low_confidence:
-            self.cbxSaveOnLowConfidence.setCheckState( QtCore.Qt.Checked)
-        if self.settings.dont_use_year_when_identifying:
-            self.cbxDontUseYear.setCheckState( QtCore.Qt.Checked)
-        if self.settings.assume_1_if_no_issue_num:
-            self.cbxAssumeIssueOne.setCheckState( QtCore.Qt.Checked)
-        if self.settings.ignore_leading_numbers_in_filename:
-            self.cbxIgnoreLeadingDigitsInFilename.setCheckState( QtCore.Qt.Checked)
-        if self.settings.remove_archive_after_successful_match:
-            self.cbxRemoveAfterSuccess.setCheckState( QtCore.Qt.Checked)
-
-        nlmtTip = (
-            """ <html>The <b>Name Length Match Tolerance</b> is for eliminating automatic
-            search matches that are too long compared to your series name search. The higher
-            it is, the more likely to have a good match, but each search will take longer and
-            use more bandwidth. Too low, and only the very closest lexical matches will be
-            explored.</html>""" )
-
-        self.leNameLengthMatchTolerance.setToolTip(nlmtTip)
-
-        ssTip = (
-            """<html>
-            The <b>series search string</b> specifies the search string to be used for all selected archives.
-            Use this when trying to match archives with hard-to-parse or incorrect filenames. All archives selected
-            should be from the same series.
-            </html>"""
-        )
-        self.leSearchString.setToolTip(ssTip)
-        self.cbxSpecifySearchString.setToolTip(ssTip)
-
-        validator = QtGui.QIntValidator(0, 99, self)
-        self.leNameLengthMatchTolerance.setValidator(validator)
-
-        self.cbxSpecifySearchString.stateChanged.connect(self.searchStringToggle)
-
-        self.autoSaveOnLow = False
-        self.dontUseYear = False
-        self.assumeIssueOne = False
-        self.ignoreLeadingDigitsInFilename = False
-        self.removeAfterSuccess = False
-        self.searchString = None
-        self.nameLengthMatchTolerance = self.settings.id_length_delta_thresh
-
-    def searchStringToggle(self):
-        enable = self.cbxSpecifySearchString.isChecked()
-        self.leSearchString.setEnabled( enable )
-
-    def accept( self ):
-        QtGui.QDialog.accept(self)
-
-        self.autoSaveOnLow = self.cbxSaveOnLowConfidence.isChecked()
-        self.dontUseYear = self.cbxDontUseYear.isChecked()
-        self.assumeIssueOne = self.cbxAssumeIssueOne.isChecked()
-        self.ignoreLeadingDigitsInFilename = self.cbxIgnoreLeadingDigitsInFilename.isChecked()
-        self.removeAfterSuccess = self.cbxRemoveAfterSuccess.isChecked()
-        self.nameLengthMatchTolerance = int(self.leNameLengthMatchTolerance.text())
-
-        #persist some settings
-        self.settings.save_on_low_confidence = self.autoSaveOnLow
-        self.settings.dont_use_year_when_identifying = self.dontUseYear
-        self.settings.assume_1_if_no_issue_num = self.assumeIssueOne
-        self.settings.ignore_leading_numbers_in_filename = self.ignoreLeadingDigitsInFilename
-        self.settings.remove_archive_after_successful_match = self.removeAfterSuccess
-
-        if self.cbxSpecifySearchString.isChecked():
-            self.searchString = unicode(self.leSearchString.text())
-            if len(self.searchString) == 0:
-                self.searchString = None
+"""A PyQT4 dialog to confirm and set config for auto-tag"""
+
+#
+# Copyright 2012-2014 ComicTagger Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations
+
+import logging
+
+from PyQt6 import QtCore, QtWidgets, uic
+
+from comictaggerlib.ctsettings import ct_ns
+from comictaggerlib.ui import ui_path
+
+logger = logging.getLogger(__name__)
+
+
+class AutoTagStartWindow(QtWidgets.QDialog):
+    def __init__(self, parent: QtWidgets.QWidget, config: ct_ns, msg: str) -> None:
+        super().__init__(parent)
+
+        with (ui_path / "autotagstartwindow.ui").open(encoding="utf-8") as uifile:
+            uic.loadUi(uifile, self)
+        self.label.setText(msg)
+
+        self.setWindowFlags(
+            QtCore.Qt.WindowType(self.windowFlags() & ~QtCore.Qt.WindowType.WindowContextHelpButtonHint)
+        )
+
+        self.config = config
+
+        self.cbxSpecifySearchString.setChecked(False)
+        self.cbxSplitWords.setChecked(False)
+        self.sbNameMatchSearchThresh.setValue(self.config.Issue_Identifier__series_match_identify_thresh)
+        self.leSearchString.setEnabled(False)
+
+        self.cbxSaveOnLowConfidence.setChecked(self.config.Auto_Tag__save_on_low_confidence)
+        self.cbxDontUseYear.setChecked(not self.config.Auto_Tag__use_year_when_identifying)
+        self.cbxAssumeIssueOne.setChecked(self.config.Auto_Tag__assume_issue_one)
+        self.cbxIgnoreLeadingDigitsInFilename.setChecked(self.config.Auto_Tag__ignore_leading_numbers_in_filename)
+        self.cbxRemoveAfterSuccess.setChecked(self.config.internal__remove_archive_after_successful_match)
+        self.cbxAutoImprint.setChecked(self.config.Auto_Tag__auto_imprint)
+
+        nlmt_tip = """<html>The <b>Name Match Ratio Threshold: Auto-Identify</b> is for eliminating automatic
+        search matches that are too long compared to your series name search. The lower
+        it is, the more likely to have a good match, but each search will take longer and
+        use more bandwidth. Too high, and only the very closest matches will be explored.</html>"""
+
+        self.sbNameMatchSearchThresh.setToolTip(nlmt_tip)
+
+        ss_tip = """<html>
+        The <b>series search string</b> specifies the search string to be used for all selected archives.
+        Use this when trying to match archives with hard-to-parse or incorrect filenames. All archives selected
+        should be from the same series.
+        </html>"""
+        self.leSearchString.setToolTip(ss_tip)
+        self.cbxSpecifySearchString.setToolTip(ss_tip)
+
+        self.cbxSpecifySearchString.stateChanged.connect(self.search_string_toggle)
+
+        self.auto_save_on_low = False
+        self.dont_use_year = False
+        self.assume_issue_one = False
+        self.ignore_leading_digits_in_filename = False
+        self.remove_after_success = False
+        self.search_string = ""
+        self.name_length_match_tolerance = self.config.Issue_Identifier__series_match_search_thresh
+        self.split_words = self.cbxSplitWords.isChecked()
+
+    def search_string_toggle(self) -> None:
+        enable = self.cbxSpecifySearchString.isChecked()
+        self.leSearchString.setEnabled(enable)
+
+    def accept(self) -> None:
+        QtWidgets.QDialog.accept(self)
+
+        self.auto_save_on_low = self.cbxSaveOnLowConfidence.isChecked()
+        self.dont_use_year = self.cbxDontUseYear.isChecked()
+        self.assume_issue_one = self.cbxAssumeIssueOne.isChecked()
+        self.ignore_leading_digits_in_filename = self.cbxIgnoreLeadingDigitsInFilename.isChecked()
+        self.remove_after_success = self.cbxRemoveAfterSuccess.isChecked()
+        self.name_length_match_tolerance = self.sbNameMatchSearchThresh.value()
+        self.split_words = self.cbxSplitWords.isChecked()
+
+        # persist some settings
+        self.config.Auto_Tag__save_on_low_confidence = self.auto_save_on_low
+        self.config.Auto_Tag__use_year_when_identifying = not self.dont_use_year
+        self.config.Auto_Tag__assume_issue_one = self.assume_issue_one
+        self.config.Auto_Tag__ignore_leading_numbers_in_filename = self.ignore_leading_digits_in_filename
+        self.config.internal__remove_archive_after_successful_match = self.remove_after_success
+
+        if self.cbxSpecifySearchString.isChecked():
+            self.search_string = self.leSearchString.text()
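The start window follows the standard modal-dialog pattern: accept() snapshots the checkbox states onto plain attributes and writes most of them back into config, so callers read the results off the dialog object after exec(). A minimal usage sketch, assuming parent and config come from the surrounding application:

# Sketch only: how a caller would consume AutoTagStartWindow's results.
# `parent` and `config` are assumed to exist in the calling code.
dlg = AutoTagStartWindow(parent, config, "About to auto-tag the selected archives.")
if dlg.exec():  # accept() has already persisted most options into config
    if dlg.search_string:
        print("overriding series search with:", dlg.search_string)
    if dlg.assume_issue_one:
        print("treating missing issue numbers as #1")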
@@ -1,102 +1,90 @@
-"""
-Class to manage modifying metadata specifically for CBL/CBI
-"""
-
-"""
-Copyright 2012-2014 Anthony Beville
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import os
-import utils
-
-class CBLTransformer:
-    def __init__( self, metadata, settings ):
-        self.metadata = metadata
-        self.settings = settings
-
-    def apply( self ):
-        # helper funcs
-        def append_to_tags_if_unique( item ):
-            if item.lower() not in (tag.lower() for tag in self.metadata.tags):
-                self.metadata.tags.append( item )
-
-        def add_string_list_to_tags( str_list ):
-            if str_list is not None and str_list != "":
-                items = [ s.strip() for s in str_list.split(',') ]
-                for item in items:
-                    append_to_tags_if_unique( item )
-
-        if self.settings.assume_lone_credit_is_primary:
-
-            # helper
-            def setLonePrimary( role_list ):
-                lone_credit = None
-                count = 0
-                for c in self.metadata.credits:
-                    if c['role'].lower() in role_list:
-                        count += 1
-                        lone_credit = c
-                    if count > 1:
-                        lone_credit = None
-                        break
-                if lone_credit is not None:
-                    lone_credit['primary'] = True
-                return lone_credit, count
-
-            #need to loop three times, once for 'writer', 'artist', and then 'penciler' if no artist
-            setLonePrimary( ['writer'] )
-            c, count = setLonePrimary( ['artist'] )
-            if c is None and count == 0:
-                c, count = setLonePrimary( ['penciler', 'penciller'] )
-                if c is not None:
-                    c['primary'] = False
-                    self.metadata.addCredit( c['person'], 'Artist', True )
-
-        if self.settings.copy_characters_to_tags:
-            add_string_list_to_tags( self.metadata.characters )
-
-        if self.settings.copy_teams_to_tags:
-            add_string_list_to_tags( self.metadata.teams )
-
-        if self.settings.copy_locations_to_tags:
-            add_string_list_to_tags( self.metadata.locations )
-
-        if self.settings.copy_storyarcs_to_tags:
-            add_string_list_to_tags( self.metadata.storyArc )
-
-        if self.settings.copy_notes_to_comments:
-            if self.metadata.notes is not None:
-                if self.metadata.comments is None:
-                    self.metadata.comments = ""
-                else:
-                    self.metadata.comments += "\n\n"
-                if self.metadata.notes not in self.metadata.comments:
-                    self.metadata.comments += self.metadata.notes
-
-        if self.settings.copy_weblink_to_comments:
-            if self.metadata.webLink is not None:
-                if self.metadata.comments is None:
-                    self.metadata.comments = ""
-                else:
-                    self.metadata.comments += "\n\n"
-                if self.metadata.webLink not in self.metadata.comments:
-                    self.metadata.comments += self.metadata.webLink
-
-        return self.metadata
+"""A class to manage modifying metadata specifically for CBL/CBI"""
+
+#
+# Copyright 2012-2014 ComicTagger Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations
+
+import logging
+
+from comicapi.genericmetadata import Credit, GenericMetadata
+from comictaggerlib.ctsettings import ct_ns
+
+logger = logging.getLogger(__name__)
+
+
+class CBLTransformer:
+    def __init__(self, metadata: GenericMetadata, config: ct_ns) -> None:
+        self.metadata = metadata.copy()
+        self.config = config
+
+    def apply(self) -> GenericMetadata:
+        if self.config.Metadata_Options__assume_lone_credit_is_primary:
+            # helper
+            def set_lone_primary(role_list: list[str]) -> tuple[Credit | None, int]:
+                lone_credit: Credit | None = None
+                count = 0
+                for c in self.metadata.credits:
+                    if c.role.casefold() in role_list:
+                        count += 1
+                        lone_credit = c
+                    if count > 1:
+                        lone_credit = None
+                        break
+                if lone_credit is not None:
+                    lone_credit.primary = True
+                return lone_credit, count
+
+            # need to loop three times, once for 'writer', 'artist', and then
+            # 'penciler' if no artist
+            set_lone_primary(["writer"])
+            c, count = set_lone_primary(["artist"])
+            if c is None and count == 0:
+                c, count = set_lone_primary(["penciler", "penciller"])
+                if c is not None:
+                    c.primary = False
+                    self.metadata.add_credit(c.person, "Artist", True)
+
+        if self.config.Metadata_Options__copy_characters_to_tags:
+            self.metadata.tags.update(x for x in self.metadata.characters)
+
+        if self.config.Metadata_Options__copy_teams_to_tags:
+            self.metadata.tags.update(x for x in self.metadata.teams)
+
+        if self.config.Metadata_Options__copy_locations_to_tags:
+            self.metadata.tags.update(x for x in self.metadata.locations)
+
+        if self.config.Metadata_Options__copy_storyarcs_to_tags:
+            self.metadata.tags.update(x for x in self.metadata.story_arcs)
+
+        if self.config.Metadata_Options__copy_notes_to_comments:
+            if self.metadata.notes is not None:
+                if self.metadata.description is None:
+                    self.metadata.description = ""
+                else:
+                    self.metadata.description += "\n\n"
+                if self.metadata.notes not in self.metadata.description:
+                    self.metadata.description += self.metadata.notes
+
+        if self.config.Metadata_Options__copy_weblink_to_comments:
+            for web_link in self.metadata.web_links:
+                temp_desc = self.metadata.description
+                if temp_desc is None:
+                    temp_desc = ""
+                else:
+                    temp_desc += "\n\n"
+                if web_link.url and web_link.url not in temp_desc:
+                    self.metadata.description = temp_desc + web_link.url
+
+        return self.metadata
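One behavioural change worth noting in this rewrite: __init__ now operates on metadata.copy(), and the tag fields are updated as sets (tags.update(...)) rather than appended as comma-split strings, so apply() returns a transformed copy instead of mutating the caller's object. A hedged sketch of the call pattern, assuming set-like metadata fields as implied by tags.update() above and a config with copy_characters_to_tags enabled:

# Sketch only: the rewritten CBLTransformer transforms a copy and returns it.
# Assumes config.Metadata_Options__copy_characters_to_tags is enabled and that
# `config` is the application's ct_ns settings namespace.
md = GenericMetadata()
md.characters = {"Aang", "Katara"}  # set-like fields, matching tags.update() above
new_md = CBLTransformer(md, config).apply()
print("Aang" in new_md.tags)  # True; `md` itself is left unmodified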
File diff suppressed because it is too large
@@ -1,260 +0,0 @@
"""
A python class to encapsulate CoMet data
"""

"""
Copyright 2012-2014 Anthony Beville

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

from datetime import datetime
import zipfile
from pprint import pprint
import xml.etree.ElementTree as ET
from genericmetadata import GenericMetadata
import utils

class CoMet:

    writer_synonyms = ['writer', 'plotter', 'scripter']
    penciller_synonyms = [ 'artist', 'penciller', 'penciler', 'breakdowns' ]
    inker_synonyms = [ 'inker', 'artist', 'finishes' ]
    colorist_synonyms = [ 'colorist', 'colourist', 'colorer', 'colourer' ]
    letterer_synonyms = [ 'letterer']
    cover_synonyms = [ 'cover', 'covers', 'coverartist', 'cover artist' ]
    editor_synonyms = [ 'editor']

    def metadataFromString( self, string ):

        tree = ET.ElementTree(ET.fromstring( string ))
        return self.convertXMLToMetadata( tree )

    def stringFromMetadata( self, metadata ):

        header = '<?xml version="1.0" encoding="UTF-8"?>\n'

        tree = self.convertMetadataToXML( self, metadata )
        return header + ET.tostring(tree.getroot())

    def indent( self, elem, level=0 ):
        # for making the XML output readable
        i = "\n" + level*"  "
        if len(elem):
            if not elem.text or not elem.text.strip():
                elem.text = i + "  "
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
            for elem in elem:
                self.indent( elem, level+1 )
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
        else:
            if level and (not elem.tail or not elem.tail.strip()):
                elem.tail = i

    def convertMetadataToXML( self, filename, metadata ):

        #shorthand for the metadata
        md = metadata

        # build a tree structure
        root = ET.Element("comet")
        root.attrib['xmlns:comet'] = "http://www.denvog.com/comet/"
        root.attrib['xmlns:xsi'] = "http://www.w3.org/2001/XMLSchema-instance"
        root.attrib['xsi:schemaLocation'] = "http://www.denvog.com http://www.denvog.com/comet/comet.xsd"

        #helper func
        def assign( comet_entry, md_entry):
            if md_entry is not None:
                ET.SubElement(root, comet_entry).text = u"{0}".format(md_entry)

        # title is manditory
        if md.title is None:
            md.title = ""
        assign( 'title', md.title )
        assign( 'series', md.series )
        assign( 'issue', md.issue )  #must be int??
        assign( 'volume', md.volume )
        assign( 'description', md.comments )
        assign( 'publisher', md.publisher )
        assign( 'pages', md.pageCount )
        assign( 'format', md.format )
        assign( 'language', md.language )
        assign( 'rating', md.maturityRating )
        assign( 'price', md.price )
        assign( 'isVersionOf', md.isVersionOf )
        assign( 'rights', md.rights )
        assign( 'identifier', md.identifier )
        assign( 'lastMark', md.lastMark )
        assign( 'genre', md.genre ) # TODO repeatable

        if md.characters is not None:
            char_list = [ c.strip() for c in md.characters.split(',') ]
            for c in char_list:
                assign( 'character', c )

        if md.manga is not None and md.manga == "YesAndRightToLeft":
            assign( 'readingDirection', "rtl")

        date_str = ""
        if md.year is not None:
            date_str = str(md.year).zfill(4)
            if md.month is not None:
                date_str += "-" + str(md.month).zfill(2)
            assign( 'date', date_str )

        assign( 'coverImage', md.coverImage )

        # need to specially process the credits, since they are structured differently than CIX
        credit_writer_list = list()
        credit_penciller_list = list()
        credit_inker_list = list()
        credit_colorist_list = list()
        credit_letterer_list = list()
        credit_cover_list = list()
        credit_editor_list = list()

        # loop thru credits, and build a list for each role that CoMet supports
        for credit in metadata.credits:

            if credit['role'].lower() in set( self.writer_synonyms ):
                ET.SubElement(root, 'writer').text = u"{0}".format(credit['person'])

            if credit['role'].lower() in set( self.penciller_synonyms ):
                ET.SubElement(root, 'penciller').text = u"{0}".format(credit['person'])

            if credit['role'].lower() in set( self.inker_synonyms ):
                ET.SubElement(root, 'inker').text = u"{0}".format(credit['person'])

            if credit['role'].lower() in set( self.colorist_synonyms ):
                ET.SubElement(root, 'colorist').text = u"{0}".format(credit['person'])

            if credit['role'].lower() in set( self.letterer_synonyms ):
                ET.SubElement(root, 'letterer').text = u"{0}".format(credit['person'])

            if credit['role'].lower() in set( self.cover_synonyms ):
                ET.SubElement(root, 'coverDesigner').text = u"{0}".format(credit['person'])

            if credit['role'].lower() in set( self.editor_synonyms ):
                ET.SubElement(root, 'editor').text = u"{0}".format(credit['person'])

        # self pretty-print
        self.indent(root)

        # wrap it in an ElementTree instance, and save as XML
        tree = ET.ElementTree(root)
        return tree

    def convertXMLToMetadata( self, tree ):

        root = tree.getroot()

        if root.tag != 'comet':
            raise 1
            return None

        metadata = GenericMetadata()
        md = metadata

        # Helper function
        def xlate( tag ):
            node = root.find( tag )
            if node is not None:
                return node.text
            else:
                return None

        md.series = xlate( 'series' )
        md.title = xlate( 'title' )
        md.issue = xlate( 'issue' )
        md.volume = xlate( 'volume' )
        md.comments = xlate( 'description' )
        md.publisher = xlate( 'publisher' )
        md.language = xlate( 'language' )
        md.format = xlate( 'format' )
        md.pageCount = xlate( 'pages' )
        md.maturityRating = xlate( 'rating' )
        md.price = xlate( 'price' )
        md.isVersionOf = xlate( 'isVersionOf' )
        md.rights = xlate( 'rights' )
        md.identifier = xlate( 'identifier' )
        md.lastMark = xlate( 'lastMark' )
        md.genre = xlate( 'genre' ) # TODO - repeatable field

        date = xlate( 'date' )
        if date is not None:
            parts = date.split('-')
            if len( parts) > 0:
                md.year = parts[0]
            if len( parts) > 1:
                md.month = parts[1]

        md.coverImage = xlate( 'coverImage' )

        readingDirection = xlate( 'readingDirection' )
        if readingDirection is not None and readingDirection == "rtl":
            md.manga = "YesAndRightToLeft"

        # loop for character tags
        char_list = []
        for n in root:
            if n.tag == 'character':
                char_list.append(n.text.strip())
        md.characters = utils.listToString( char_list )

        # Now extract the credit info
        for n in root:
            if ( n.tag == 'writer' or
                 n.tag == 'penciller' or
                 n.tag == 'inker' or
                 n.tag == 'colorist' or
                 n.tag == 'letterer' or
                 n.tag == 'editor'
               ):
                metadata.addCredit( n.text.strip(), n.tag.title() )

            if n.tag == 'coverDesigner':
                metadata.addCredit( n.text.strip(), "Cover" )

        metadata.isEmpty = False

        return metadata

    #verify that the string actually contains CoMet data in XML format
    def validateString( self, string ):
        try:
            tree = ET.ElementTree(ET.fromstring( string ))
            root = tree.getroot()
            if root.tag != 'comet':
                raise Exception
        except:
            return False

        return True

    def writeToExternalFile( self, filename, metadata ):

        tree = self.convertMetadataToXML( self, metadata )
        #ET.dump(tree)
        tree.write(filename, encoding='utf-8')

    def readFromExternalFile( self, filename ):

        tree = ET.parse( filename )
        return self.convertXMLToMetadata( tree )
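The deleted class round-trips the CoMet vocabulary: title, series, issue and character elements, plus one element per credit role. For reference, a minimal document of the shape validateString() and metadataFromString() above handled, parsed here with plain ElementTree (the field values are illustrative):

# Sketch only: a minimal CoMet document of the shape the deleted class handled.
import xml.etree.ElementTree as ET

COMET_SAMPLE = """<?xml version="1.0" encoding="UTF-8"?>
<comet xmlns:comet="http://www.denvog.com/comet/">
  <title>The Promise, Part 1</title>
  <series>Avatar: The Last Airbender</series>
  <issue>1</issue>
  <character>Aang</character>
  <writer>Gene Luen Yang</writer>
</comet>"""

root = ET.fromstring(COMET_SAMPLE)
print(root.tag == "comet")      # the same check validateString() made
print(root.findtext("series"))  # Avatar: The Last Airbender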
File diff suppressed because it is too large
@@ -1,152 +0,0 @@
"""
A python class to encapsulate the ComicBookInfo data
"""

"""
Copyright 2012-2014 Anthony Beville

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

import json
from datetime import datetime
import zipfile

from genericmetadata import GenericMetadata
import utils
import ctversion

class ComicBookInfo:

    def metadataFromString( self, string ):

        cbi_container = json.loads( unicode(string, 'utf-8') )

        metadata = GenericMetadata()

        cbi = cbi_container[ 'ComicBookInfo/1.0' ]

        #helper func
        # If item is not in CBI, return None
        def xlate( cbi_entry):
            if cbi_entry in cbi:
                return cbi[cbi_entry]
            else:
                return None

        metadata.series = xlate( 'series' )
        metadata.title = xlate( 'title' )
        metadata.issue = xlate( 'issue' )
        metadata.publisher = xlate( 'publisher' )
        metadata.month = xlate( 'publicationMonth' )
        metadata.year = xlate( 'publicationYear' )
        metadata.issueCount = xlate( 'numberOfIssues' )
        metadata.comments = xlate( 'comments' )
        metadata.credits = xlate( 'credits' )
        metadata.genre = xlate( 'genre' )
        metadata.volume = xlate( 'volume' )
        metadata.volumeCount = xlate( 'numberOfVolumes' )
        metadata.language = xlate( 'language' )
        metadata.country = xlate( 'country' )
        metadata.criticalRating = xlate( 'rating' )
        metadata.tags = xlate( 'tags' )

        # make sure credits and tags are at least empty lists and not None
        if metadata.credits is None:
            metadata.credits = []
        if metadata.tags is None:
            metadata.tags = []

        #need to massage the language string to be ISO
        if metadata.language is not None:
            # reverse look-up
            pattern = metadata.language
            metadata.language = None
            for key in utils.getLanguageDict():
                if utils.getLanguageDict()[ key ] == pattern.encode('utf-8'):
                    metadata.language = key
                    break

        metadata.isEmpty = False

        return metadata

    def stringFromMetadata( self, metadata ):

        cbi_container = self.createJSONDictionary( metadata )
        return json.dumps( cbi_container )

    #verify that the string actually contains CBI data in JSON format
    def validateString( self, string ):

        try:
            cbi_container = json.loads( string )
        except:
            return False

        return ( 'ComicBookInfo/1.0' in cbi_container )

    def createJSONDictionary( self, metadata ):

        # Create the dictionary that we will convert to JSON text
        cbi = dict()
        cbi_container = {'appID' : 'ComicTagger/' + ctversion.version,
                         'lastModified' : str(datetime.now()),
                         'ComicBookInfo/1.0' : cbi }

        #helper func
        def assign( cbi_entry, md_entry):
            if md_entry is not None:
                cbi[cbi_entry] = md_entry

        #helper func
        def toInt(s):
            i = None
            if type(s) in [ str, unicode, int ]:
                try:
                    i = int(s)
                except ValueError:
                    pass
            return i

        assign( 'series', metadata.series )
        assign( 'title', metadata.title )
        assign( 'issue', metadata.issue )
        assign( 'publisher', metadata.publisher )
        assign( 'publicationMonth', toInt(metadata.month) )
        assign( 'publicationYear', toInt(metadata.year) )
        assign( 'numberOfIssues', toInt(metadata.issueCount) )
        assign( 'comments', metadata.comments )
        assign( 'genre', metadata.genre )
        assign( 'volume', toInt(metadata.volume) )
        assign( 'numberOfVolumes', toInt(metadata.volumeCount) )
        assign( 'language', utils.getLanguageFromISO(metadata.language) )
        assign( 'country', metadata.country )
        assign( 'rating', metadata.criticalRating )
        assign( 'credits', metadata.credits )
        assign( 'tags', metadata.tags )

        return cbi_container

    def writeToExternalFile( self, filename, metadata ):

        cbi_container = self.createJSONDictionary(metadata)

        f = open(filename, 'w')
        f.write(json.dumps(cbi_container, indent=4))
        f.close
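The JSON side is easier to see with a concrete container. This sketch shows the overall shape createJSONDictionary() above produced; the field values are illustrative only:

# Sketch only: the container shape produced by createJSONDictionary() above.
import json

cbi_container = {
    "appID": "ComicTagger/1.1.x",          # 'ComicTagger/' + ctversion.version in the original
    "lastModified": "2014-01-01 12:00:00",  # str(datetime.now()) in the original
    "ComicBookInfo/1.0": {
        "series": "Avatar: The Last Airbender",
        "issue": "1",
        "publicationYear": 2012,
        "credits": [{"person": "Gene Luen Yang", "role": "Writer"}],
        "tags": [],
    },
}
print("ComicBookInfo/1.0" in cbi_container)  # the same check validateString() made
print(json.dumps(cbi_container, indent=4)[:60])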
@@ -1,293 +0,0 @@
"""
A python class to encapsulate ComicRack's ComicInfo.xml data
"""

"""
Copyright 2012-2014 Anthony Beville

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

from datetime import datetime
import zipfile
from pprint import pprint
import xml.etree.ElementTree as ET
from genericmetadata import GenericMetadata
import utils

class ComicInfoXml:

    writer_synonyms = ['writer', 'plotter', 'scripter']
    penciller_synonyms = [ 'artist', 'penciller', 'penciler', 'breakdowns' ]
    inker_synonyms = [ 'inker', 'artist', 'finishes' ]
    colorist_synonyms = [ 'colorist', 'colourist', 'colorer', 'colourer' ]
    letterer_synonyms = [ 'letterer']
    cover_synonyms = [ 'cover', 'covers', 'coverartist', 'cover artist' ]
    editor_synonyms = [ 'editor']

    def getParseableCredits( self ):
        parsable_credits = []
        parsable_credits.extend( self.writer_synonyms )
        parsable_credits.extend( self.penciller_synonyms )
        parsable_credits.extend( self.inker_synonyms )
        parsable_credits.extend( self.colorist_synonyms )
        parsable_credits.extend( self.letterer_synonyms )
        parsable_credits.extend( self.cover_synonyms )
        parsable_credits.extend( self.editor_synonyms )
        return parsable_credits

    def metadataFromString( self, string ):

        tree = ET.ElementTree(ET.fromstring( string ))
        return self.convertXMLToMetadata( tree )

    def stringFromMetadata( self, metadata ):

        header = '<?xml version="1.0"?>\n'

        tree = self.convertMetadataToXML( self, metadata )
        return header + ET.tostring(tree.getroot())

    def indent( self, elem, level=0 ):
        # for making the XML output readable
        i = "\n" + level*"  "
        if len(elem):
            if not elem.text or not elem.text.strip():
                elem.text = i + "  "
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
            for elem in elem:
                self.indent( elem, level+1 )
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
        else:
            if level and (not elem.tail or not elem.tail.strip()):
                elem.tail = i

    def convertMetadataToXML( self, filename, metadata ):

        #shorthand for the metadata
        md = metadata

        # build a tree structure
        root = ET.Element("ComicInfo")
        root.attrib['xmlns:xsi']="http://www.w3.org/2001/XMLSchema-instance"
        root.attrib['xmlns:xsd']="http://www.w3.org/2001/XMLSchema"
        #helper func
        def assign( cix_entry, md_entry):
            if md_entry is not None:
                ET.SubElement(root, cix_entry).text = u"{0}".format(md_entry)

        assign( 'Title', md.title )
        assign( 'Series', md.series )
        assign( 'Number', md.issue )
        assign( 'Count', md.issueCount )
        assign( 'Volume', md.volume )
        assign( 'AlternateSeries', md.alternateSeries )
        assign( 'AlternateNumber', md.alternateNumber )
        assign( 'StoryArc', md.storyArc )
        assign( 'SeriesGroup', md.seriesGroup )
        assign( 'AlternateCount', md.alternateCount )
        assign( 'Summary', md.comments )
        assign( 'Notes', md.notes )
        assign( 'Year', md.year )
        assign( 'Month', md.month )
        assign( 'Day', md.day )

        # need to specially process the credits, since they are structured differently than CIX
        credit_writer_list = list()
        credit_penciller_list = list()
        credit_inker_list = list()
        credit_colorist_list = list()
        credit_letterer_list = list()
        credit_cover_list = list()
        credit_editor_list = list()

        # first, loop thru credits, and build a list for each role that CIX supports
        for credit in metadata.credits:

            if credit['role'].lower() in set( self.writer_synonyms ):
                credit_writer_list.append(credit['person'].replace(",",""))

            if credit['role'].lower() in set( self.penciller_synonyms ):
                credit_penciller_list.append(credit['person'].replace(",",""))

            if credit['role'].lower() in set( self.inker_synonyms ):
                credit_inker_list.append(credit['person'].replace(",",""))

            if credit['role'].lower() in set( self.colorist_synonyms ):
                credit_colorist_list.append(credit['person'].replace(",",""))

            if credit['role'].lower() in set( self.letterer_synonyms ):
                credit_letterer_list.append(credit['person'].replace(",",""))

            if credit['role'].lower() in set( self.cover_synonyms ):
                credit_cover_list.append(credit['person'].replace(",",""))

            if credit['role'].lower() in set( self.editor_synonyms ):
                credit_editor_list.append(credit['person'].replace(",",""))

        # second, convert each list to string, and add to XML struct
        if len( credit_writer_list ) > 0:
            node = ET.SubElement(root, 'Writer')
            node.text = utils.listToString( credit_writer_list )

        if len( credit_penciller_list ) > 0:
            node = ET.SubElement(root, 'Penciller')
            node.text = utils.listToString( credit_penciller_list )

        if len( credit_inker_list ) > 0:
            node = ET.SubElement(root, 'Inker')
            node.text = utils.listToString( credit_inker_list )

        if len( credit_colorist_list ) > 0:
            node = ET.SubElement(root, 'Colorist')
            node.text = utils.listToString( credit_colorist_list )

        if len( credit_letterer_list ) > 0:
            node = ET.SubElement(root, 'Letterer')
            node.text = utils.listToString( credit_letterer_list )

        if len( credit_cover_list ) > 0:
            node = ET.SubElement(root, 'CoverArtist')
            node.text = utils.listToString( credit_cover_list )

        if len( credit_editor_list ) > 0:
            node = ET.SubElement(root, 'Editor')
            node.text = utils.listToString( credit_editor_list )

        assign( 'Publisher', md.publisher )
        assign( 'Imprint', md.imprint )
        assign( 'Genre', md.genre )
        assign( 'Web', md.webLink )
        assign( 'PageCount', md.pageCount )
        assign( 'LanguageISO', md.language )
        assign( 'Format', md.format )
        assign( 'AgeRating', md.maturityRating )
        if md.blackAndWhite is not None and md.blackAndWhite:
            ET.SubElement(root, 'BlackAndWhite').text = "Yes"
        assign( 'Manga', md.manga )
        assign( 'Characters', md.characters )
        assign( 'Teams', md.teams )
        assign( 'Locations', md.locations )
        assign( 'ScanInformation', md.scanInfo )

        # loop and add the page entries under pages node
        if len( md.pages ) > 0:
            pages_node = ET.SubElement(root, 'Pages')
            for page_dict in md.pages:
                page_node = ET.SubElement(pages_node, 'Page')
                page_node.attrib = page_dict

        # self pretty-print
        self.indent(root)

        # wrap it in an ElementTree instance, and save as XML
        tree = ET.ElementTree(root)
        return tree

    def convertXMLToMetadata( self, tree ):

        root = tree.getroot()

        if root.tag != 'ComicInfo':
            raise 1
            return None

        metadata = GenericMetadata()
        md = metadata

        # Helper function
        def xlate( tag ):
            node = root.find( tag )
            if node is not None:
                return node.text
            else:
                return None

        md.series = xlate( 'Series' )
        md.title = xlate( 'Title' )
        md.issue = xlate( 'Number' )
        md.issueCount = xlate( 'Count' )
        md.volume = xlate( 'Volume' )
        md.alternateSeries = xlate( 'AlternateSeries' )
        md.alternateNumber = xlate( 'AlternateNumber' )
        md.alternateCount = xlate( 'AlternateCount' )
        md.comments = xlate( 'Summary' )
        md.notes = xlate( 'Notes' )
        md.year = xlate( 'Year' )
        md.month = xlate( 'Month' )
        md.day = xlate( 'Day' )
        md.publisher = xlate( 'Publisher' )
        md.imprint = xlate( 'Imprint' )
        md.genre = xlate( 'Genre' )
        md.webLink = xlate( 'Web' )
        md.language = xlate( 'LanguageISO' )
        md.format = xlate( 'Format' )
        md.manga = xlate( 'Manga' )
        md.characters = xlate( 'Characters' )
        md.teams = xlate( 'Teams' )
        md.locations = xlate( 'Locations' )
        md.pageCount = xlate( 'PageCount' )
        md.scanInfo = xlate( 'ScanInformation' )
        md.storyArc = xlate( 'StoryArc' )
        md.seriesGroup = xlate( 'SeriesGroup' )
        md.maturityRating = xlate( 'AgeRating' )

        tmp = xlate( 'BlackAndWhite' )
        md.blackAndWhite = False
        if tmp is not None and tmp.lower() in [ "yes", "true", "1" ]:
            md.blackAndWhite = True
        # Now extract the credit info
        for n in root:
            if ( n.tag == 'Writer' or
                 n.tag == 'Penciller' or
                 n.tag == 'Inker' or
                 n.tag == 'Colorist' or
                 n.tag == 'Letterer' or
                 n.tag == 'Editor'
               ):
                if n.text is not None:
                    for name in n.text.split(','):
                        metadata.addCredit( name.strip(), n.tag )

            if n.tag == 'CoverArtist':
                if n.text is not None:
                    for name in n.text.split(','):
                        metadata.addCredit( name.strip(), "Cover" )

        # parse page data now
        pages_node = root.find( "Pages" )
        if pages_node is not None:
            for page in pages_node:
                metadata.pages.append( page.attrib )
                #print page.attrib

        metadata.isEmpty = False

        return metadata

    def writeToExternalFile( self, filename, metadata ):

        tree = self.convertMetadataToXML( self, metadata )
        #ET.dump(tree)
        tree.write(filename, encoding='utf-8')

    def readFromExternalFile( self, filename ):

        tree = ET.parse( filename )
        return self.convertXMLToMetadata( tree )
@@ -1,459 +0,0 @@
|
||||
"""
|
||||
A python class to manage caching of data from Comic Vine
|
||||
"""
|
||||
|
||||
"""
|
||||
Copyright 2012-2014 Anthony Beville
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
"""
|
||||
|
||||
from pprint import pprint
|
||||
|
||||
import sqlite3 as lite
|
||||
import sys
|
||||
import os
|
||||
import datetime
|
||||
|
||||
import ctversion
|
||||
from settings import ComicTaggerSettings
|
||||
import utils
|
||||
|
||||
class ComicVineCacher:
|
||||
|
||||
def __init__(self ):
|
||||
self.settings_folder = ComicTaggerSettings.getSettingsFolder()
|
||||
self.db_file = os.path.join( self.settings_folder, "cv_cache.db")
|
||||
self.version_file = os.path.join( self.settings_folder, "cache_version.txt")
|
||||
|
||||
#verify that cache is from same version as this one
|
||||
data = ""
|
||||
try:
|
||||
with open( self.version_file, 'rb' ) as f:
|
||||
data = f.read()
|
||||
f.close()
|
||||
except:
|
||||
pass
|
||||
if data != ctversion.version:
|
||||
self.clearCache()
|
||||
|
||||
if not os.path.exists( self.db_file ):
|
||||
self.create_cache_db()
|
||||
|
||||
def clearCache( self ):
|
||||
try:
|
||||
os.unlink( self.db_file )
|
||||
except:
|
||||
pass
|
||||
try:
|
||||
os.unlink( self.version_file )
|
||||
except:
|
||||
pass
|
||||
|
||||
def create_cache_db( self ):
|
||||
|
||||
#create the version file
|
||||
with open( self.version_file, 'w' ) as f:
|
||||
f.write( ctversion.version )
|
||||
|
||||
# this will wipe out any existing version
|
||||
open( self.db_file, 'w').close()
|
||||
|
||||
con = lite.connect( self.db_file )
|
||||
|
||||
# create tables
|
||||
with con:
|
||||
|
||||
cur = con.cursor()
|
||||
#name,id,start_year,publisher,image,description,count_of_issues
|
||||
cur.execute("CREATE TABLE VolumeSearchCache(" +
|
||||
"search_term TEXT," +
|
||||
"id INT," +
|
||||
"name TEXT," +
|
||||
"start_year INT," +
|
||||
"publisher TEXT," +
|
||||
"count_of_issues INT," +
|
||||
"image_url TEXT," +
|
||||
"description TEXT," +
|
||||
"timestamp DATE DEFAULT (datetime('now','localtime')) ) "
|
||||
)
|
||||
|
||||
cur.execute("CREATE TABLE Volumes(" +
|
||||
"id INT," +
|
||||
"name TEXT," +
|
||||
"publisher TEXT," +
|
||||
"count_of_issues INT," +
|
||||
"start_year INT," +
|
||||
"timestamp DATE DEFAULT (datetime('now','localtime')), " +
|
||||
"PRIMARY KEY (id) )"
|
||||
)
|
||||
|
||||
cur.execute("CREATE TABLE AltCovers(" +
|
||||
"issue_id INT," +
|
||||
"url_list TEXT," +
|
||||
"timestamp DATE DEFAULT (datetime('now','localtime')), " +
|
||||
"PRIMARY KEY (issue_id) )"
|
||||
)
|
||||
|
||||
cur.execute("CREATE TABLE Issues(" +
|
||||
"id INT," +
|
||||
"volume_id INT," +
|
||||
"name TEXT," +
|
||||
"issue_number TEXT," +
|
||||
"super_url TEXT," +
|
||||
"thumb_url TEXT," +
|
||||
"cover_date TEXT," +
|
||||
"site_detail_url TEXT," +
|
||||
"description TEXT," +
|
||||
"timestamp DATE DEFAULT (datetime('now','localtime')), " +
|
||||
"PRIMARY KEY (id ) )"
|
||||
)
|
||||
|
||||
def add_search_results( self, search_term, cv_search_results ):
|
||||
|
||||
con = lite.connect( self.db_file )
|
||||
|
||||
with con:
|
||||
con.text_factory = unicode
|
||||
cur = con.cursor()
|
||||
|
||||
# remove all previous entries with this search term
|
||||
cur.execute("DELETE FROM VolumeSearchCache WHERE search_term = ?", [ search_term.lower() ])
|
||||
|
||||
# now add in new results
|
||||
for record in cv_search_results:
|
||||
timestamp = datetime.datetime.now()
|
||||
|
||||
if record['publisher'] is None:
|
||||
pub_name = ""
|
||||
else:
|
||||
pub_name = record['publisher']['name']
|
||||
|
||||
if record['image'] is None:
|
||||
url = ""
|
||||
else:
|
||||
url = record['image']['super_url']
|
||||
|
||||
cur.execute("INSERT INTO VolumeSearchCache " +
|
||||
"(search_term, id, name, start_year, publisher, count_of_issues, image_url, description ) " +
|
||||
"VALUES( ?, ?, ?, ?, ?, ?, ?, ? )" ,
|
||||
( search_term.lower(),
|
||||
record['id'],
|
||||
record['name'],
|
||||
record['start_year'],
|
||||
pub_name,
|
||||
record['count_of_issues'],
|
||||
url,
|
||||
record['description'])
|
||||
)
|
||||
|
||||
def get_search_results( self, search_term ):
|
||||
|
||||
results = list()
|
||||
con = lite.connect( self.db_file )
|
||||
with con:
|
||||
con.text_factory = unicode
|
||||
cur = con.cursor()
|
||||
|
||||
|
||||
# purge stale search results
|
||||
a_day_ago = datetime.datetime.today()-datetime.timedelta(days=1)
|
||||
cur.execute( "DELETE FROM VolumeSearchCache WHERE timestamp < ?", [ str(a_day_ago) ] )
|
||||
|
||||
# fetch
|
||||
cur.execute("SELECT * FROM VolumeSearchCache WHERE search_term=?", [ search_term.lower() ] )
|
||||
rows = cur.fetchall()
|
||||
# now process the results
|
||||
for record in rows:
|
||||
|
||||
result = dict()
|
||||
result['id'] = record[1]
|
||||
result['name'] = record[2]
|
||||
result['start_year'] = record[3]
|
||||
result['publisher'] = dict()
|
||||
result['publisher']['name'] = record[4]
|
||||
result['count_of_issues'] = record[5]
|
||||
result['image'] = dict()
|
||||
result['image']['super_url'] = record[6]
|
||||
result['description'] = record[7]
|
||||
|
||||
results.append(result)
|
||||
|
||||
return results
|
||||
|
||||
def add_alt_covers( self, issue_id, url_list ):
|
||||
|
||||
con = lite.connect( self.db_file )
|
||||
|
||||
with con:
|
||||
con.text_factory = unicode
|
||||
cur = con.cursor()
|
||||
|
||||
# remove all previous entries with this search term
|
||||
cur.execute("DELETE FROM AltCovers WHERE issue_id = ?", [ issue_id ])
|
||||
|
||||
url_list_str = utils.listToString(url_list)
|
||||
# now add in new record
|
||||
cur.execute("INSERT INTO AltCovers " +
|
||||
"(issue_id, url_list ) " +
|
||||
"VALUES( ?, ? )" ,
|
||||
( issue_id,
|
||||
url_list_str)
|
||||
)
|
||||
|
||||
|
||||
def get_alt_covers( self, issue_id ):
|
||||
|
||||
con = lite.connect( self.db_file )
|
||||
with con:
|
||||
cur = con.cursor()
|
||||
con.text_factory = unicode
|
||||
|
||||
# purge stale issue info - probably issue data won't change much....
|
||||
a_month_ago = datetime.datetime.today()-datetime.timedelta(days=30)
|
||||
cur.execute( "DELETE FROM AltCovers WHERE timestamp < ?", [ str(a_month_ago) ] )
|
||||
|
||||
cur.execute("SELECT url_list FROM AltCovers WHERE issue_id=?", [ issue_id ])
|
||||
row = cur.fetchone()
|
||||
if row is None :
|
||||
return None
|
||||
else:
|
||||
url_list_str = row[0]
|
||||
if len(url_list_str) == 0:
|
||||
return []
|
||||
raw_list = url_list_str.split(",")
|
||||
url_list = []
|
||||
for item in raw_list:
|
||||
url_list.append( str(item).strip())
|
||||
return url_list
|
||||
|
||||
def add_volume_info( self, cv_volume_record ):
|
||||
|
||||
con = lite.connect( self.db_file )
|
||||
|
||||
with con:
|
||||
|
||||
cur = con.cursor()
|
||||
|
||||
timestamp = datetime.datetime.now()
|
||||
|
||||
if cv_volume_record['publisher'] is None:
|
||||
pub_name = ""
|
||||
else:
|
||||
pub_name = cv_volume_record['publisher']['name']
|
||||
|
||||
data = {
|
||||
"name": cv_volume_record['name'],
|
||||
"publisher": pub_name,
|
||||
"count_of_issues": cv_volume_record['count_of_issues'],
|
||||
"start_year": cv_volume_record['start_year'],
|
||||
"timestamp": timestamp
|
||||
}
|
||||
self.upsert( cur, "volumes", "id", cv_volume_record['id'], data)
|
||||
|
||||
|
||||
def add_volume_issues_info( self, volume_id, cv_volume_issues ):
|
||||
|
||||
con = lite.connect( self.db_file )
|
||||
|
||||
with con:
|
||||
|
||||
cur = con.cursor()
|
||||
|
||||
timestamp = datetime.datetime.now()
|
||||
|
||||
# add in issues
|
||||
|
||||
for issue in cv_volume_issues:
|
||||
|
||||
data = {
|
||||
"volume_id": volume_id,
|
||||
"name": issue['name'],
|
||||
"issue_number": issue['issue_number'],
|
||||
"site_detail_url": issue['site_detail_url'],
|
||||
"cover_date": issue['cover_date'],
|
||||
"super_url": issue['image']['super_url'],
|
||||
"thumb_url": issue['image']['thumb_url'],
|
||||
"description": issue['description'],
|
||||
"timestamp": timestamp
|
||||
}
|
||||
self.upsert( cur, "issues" , "id", issue['id'], data)

    def get_volume_info( self, volume_id ):

        result = None

        con = lite.connect( self.db_file )
        with con:
            cur = con.cursor()
            con.text_factory = unicode

            # purge stale volume info
            a_week_ago = datetime.datetime.today()-datetime.timedelta(days=7)
            cur.execute( "DELETE FROM Volumes WHERE timestamp < ?", [ str(a_week_ago) ] )

            # fetch
            cur.execute("SELECT id,name,publisher,count_of_issues,start_year FROM Volumes WHERE id = ?", [ volume_id ] )

            row = cur.fetchone()

            if row is None :
                return result

            result = dict()

            #since ID is primary key, there is only one row
            result['id'] = row[0]
            result['name'] = row[1]
            result['publisher'] = dict()
            result['publisher']['name'] = row[2]
            result['count_of_issues'] = row[3]
            result['start_year'] = row[4]
            result['issues'] = list()

        return result

    def get_volume_issues_info( self, volume_id ):

        result = None

        con = lite.connect( self.db_file )
        with con:
            cur = con.cursor()
            con.text_factory = unicode

            # purge stale issue info - probably issue data won't change much....
            a_week_ago = datetime.datetime.today()-datetime.timedelta(days=7)
            cur.execute( "DELETE FROM Issues WHERE timestamp < ?", [ str(a_week_ago) ] )

            # fetch
            results = list()

            cur.execute("SELECT id,name,issue_number,site_detail_url,cover_date,super_url,thumb_url,description FROM Issues WHERE volume_id = ?", [ volume_id ] )
            rows = cur.fetchall()

            # now process the results
            for row in rows:
                record = dict()

                record['id'] = row[0]
                record['name'] = row[1]
                record['issue_number'] = row[2]
                record['site_detail_url'] = row[3]
                record['cover_date'] = row[4]
                record['image'] = dict()
                record['image']['super_url'] = row[5]
                record['image']['thumb_url'] = row[6]
                record['description'] = row[7]

                results.append(record)

        if len(results) == 0:
            return None

        return results

    def add_issue_select_details( self, issue_id, image_url, thumb_image_url, cover_date, site_detail_url ):

        con = lite.connect( self.db_file )

        with con:
            cur = con.cursor()
            con.text_factory = unicode
            timestamp = datetime.datetime.now()

            data = {
                "super_url": image_url,
                "thumb_url": thumb_image_url,
                "cover_date": cover_date,
                "site_detail_url": site_detail_url,
                "timestamp": timestamp
                }
            self.upsert( cur, "issues" , "id", issue_id, data)

    def get_issue_select_details( self, issue_id ):

        con = lite.connect( self.db_file )
        with con:
            cur = con.cursor()
            con.text_factory = unicode

            cur.execute("SELECT super_url,thumb_url,cover_date,site_detail_url FROM Issues WHERE id=?", [ issue_id ])
            row = cur.fetchone()

            details = dict()
            if row is None or row[0] is None :
                details['image_url'] = None
                details['thumb_image_url'] = None
                details['cover_date'] = None
                details['site_detail_url'] = None

            else:
                details['image_url'] = row[0]
                details['thumb_image_url'] = row[1]
                details['cover_date'] = row[2]
                details['site_detail_url'] = row[3]

            return details

    def upsert( self, cur, tablename, pkname, pkval, data):
        """
        This does an insert if the given PK doesn't exist, and an update if it does
        """

        # TODO - look into checking if UPDATE is needed
        # TODO - should the cursor be created here, and not up the stack?

        ins_count = len(data) + 1

        keys = ""
        vals = list()
        ins_slots = ""
        set_slots = ""

        for key in data:

            if keys != "":
                keys += ", "
            if ins_slots != "":
                ins_slots += ", "
            if set_slots != "":
                set_slots += ", "

            keys += key
            vals.append( data[key] )
            ins_slots += "?"
            set_slots += key + " = ?"

        keys += ", " + pkname
        vals.append( pkval )
        ins_slots += ", ?"
        condition = pkname + " = ?"

        sql_ins = ( "INSERT OR IGNORE INTO " + tablename +
                    " ( " + keys + " ) " +
                    " VALUES ( " + ins_slots + " )" )
        cur.execute( sql_ins , vals )

        sql_upd = ( "UPDATE " + tablename +
                    " SET " + set_slots + " WHERE " + condition )
        cur.execute( sql_upd , vals )
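    # Illustrative sketch (not from the original source): a call such as
    #   self.upsert(cur, "volumes", "id", 1234, {"name": "Hellboy", "start_year": "1994"})
    # builds, with the dict keys in whatever order Python 2 happens to iterate them,
    # statements equivalent to:
    #   INSERT OR IGNORE INTO volumes ( name, start_year, id ) VALUES ( ?, ?, ? )
    #   UPDATE volumes SET name = ?, start_year = ? WHERE id = ?
    # binding vals == ["Hellboy", "1994", 1234] to the placeholders both times.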


@@ -1,741 +0,0 @@
"""
|
||||
A python class to manage communication with Comic Vine's REST API
|
||||
"""
|
||||
|
||||
"""
|
||||
Copyright 2012-2014 Anthony Beville
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
"""
|
||||
|
||||
|
||||
import json
|
||||
from pprint import pprint
|
||||
import urllib2, urllib
|
||||
import math
|
||||
import re
|
||||
import time
|
||||
import datetime
|
||||
import ctversion
|
||||
import sys
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
try:
    from PyQt4.QtNetwork import QNetworkAccessManager, QNetworkRequest
    from PyQt4.QtCore import QUrl, pyqtSignal, QObject, QByteArray
except ImportError:
    # No Qt, so define a few dummy QObjects to help us compile
    class QObject():
        def __init__(self,*args):
            pass
    class pyqtSignal():
        def __init__(self,*args):
            pass
        def emit(a,b,c):
            pass

import utils
from settings import ComicTaggerSettings
from comicvinecacher import ComicVineCacher
from genericmetadata import GenericMetadata
from issuestring import IssueString

class CVTypeID:
    Volume = "4050"
    Issue = "4000"

class ComicVineTalkerException(Exception):
    pass

class ComicVineTalker(QObject):

    logo_url = "http://static.comicvine.com/bundles/comicvinesite/images/logo.png"

    def __init__(self, api_key=""):
        QObject.__init__(self)

        self.api_base_url = "http://www.comicvine.com/api"

        # key that is registered to comictagger
        self.api_key = '27431e6787042105bd3e47e169a624521f89f3a4'

        self.log_func = None

    def setLogFunc( self , log_func ):
        self.log_func = log_func

    def writeLog( self , text ):
        if self.log_func is None:
            #sys.stdout.write(text.encode( errors='replace') )
            #sys.stdout.flush()
            print >> sys.stderr, text
        else:
            self.log_func( text )

    def parseDateStr( self, date_str):
        day = None
        month = None
        year = None
        if date_str is not None:
            parts = date_str.split('-')
            year = parts[0]
            if len(parts) > 1:
                month = parts[1]
                if len(parts) > 2:
                    day = parts[2]
        return day, month, year
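    # Illustrative examples (not from the original source):
    #   parseDateStr("2008-11-01") -> ("01", "11", "2008")   # strings, not ints
    #   parseDateStr("2008")       -> (None, None, "2008")
    #   parseDateStr(None)         -> (None, None, None)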

    def testKey( self ):

        test_url = self.api_base_url + "/issue/1/?api_key=" + self.api_key + "&format=json&field_list=name"
        resp = urllib2.urlopen( test_url )
        content = resp.read()

        cv_response = json.loads( content )

        # Bogus request, but if the key is wrong, you get error 100: "Invalid API Key"
        return cv_response[ 'status_code' ] != 100

    def getUrlContent( self, url ):
        # connect to server:
        #    if there is a 500 error, try a few more times before giving up
        #    any other error, just bail
        #print "ATB---", url
        for tries in range(3):
            try:
                resp = urllib2.urlopen( url )
                return resp.read()
            except urllib2.HTTPError as e:
                if e.getcode() == 500:
                    self.writeLog( "Try #{0}: ".format(tries+1) )
                    time.sleep(1)
                self.writeLog( str(e) + "\n" )

                if e.getcode() != 500:
                    break

            except Exception as e:
                self.writeLog( str(e) + "\n" )
                raise ComicVineTalkerException("Network Error!")

        raise ComicVineTalkerException("Error on Comic Vine server")

    def searchForSeries( self, series_name , callback=None, refresh_cache=False ):

        # remove cruft from the search string
        series_name = utils.removearticles( series_name ).lower().strip()

        # before we search online, look in our cache, since we might have
        # done this same search recently
        cvc = ComicVineCacher( )
        if not refresh_cache:
            cached_search_results = cvc.get_search_results( series_name )

            if len (cached_search_results) > 0:
                return cached_search_results

        original_series_name = series_name

        # We need to make the series name into an "AND"ed query list
        query_word_list = series_name.split()
        and_list = ['AND'] * (len(query_word_list)-1)
        and_list.append('')
        # zipper up the two lists
        query_list = zip(query_word_list, and_list)
        # flatten the list
        query_list = [ item for sublist in query_list for item in sublist]
        # convert back to a string
        query_string = " ".join( query_list ).strip()
        #print "Query string = ", query_string

        query_string = urllib.quote_plus(query_string.encode("utf-8"))

        search_url = self.api_base_url + "/search/?api_key=" + self.api_key + "&format=json&resources=volume&query=" + query_string + "&field_list=name,id,start_year,publisher,image,description,count_of_issues"
        content = self.getUrlContent(search_url + "&page=1")

        cv_response = json.loads(content)

        if cv_response[ 'status_code' ] != 1:
            self.writeLog( "Comic Vine query failed with error: [{0}]. \n".format( cv_response[ 'error' ] ))
            return None

        search_results = list()

        # see http://api.comicvine.com/documentation/#handling_responses

        limit = cv_response['limit']
        current_result_count = cv_response['number_of_page_results']
        total_result_count = cv_response['number_of_total_results']

        if callback is None:
            self.writeLog( "Found {0} of {1} results\n".format( cv_response['number_of_page_results'], cv_response['number_of_total_results']))
        search_results.extend( cv_response['results'])
        page = 1

        if callback is not None:
            callback( current_result_count, total_result_count )

        # see if we need to keep asking for more pages...
        while ( current_result_count < total_result_count ):
            if callback is None:
                self.writeLog("getting another page of results {0} of {1}...\n".format( current_result_count, total_result_count))
            page += 1

            content = self.getUrlContent(search_url + "&page="+str(page))

            cv_response = json.loads(content)

            if cv_response[ 'status_code' ] != 1:
                self.writeLog( "Comic Vine query failed with error: [{0}]. \n".format( cv_response[ 'error' ] ))
                return None
            search_results.extend( cv_response['results'])
            current_result_count += cv_response['number_of_page_results']

            if callback is not None:
                callback( current_result_count, total_result_count )

        #for record in search_results:
        #    #print( u"{0}: {1} ({2})".format(record['id'], record['name'] , record['start_year'] ) )
        #    #print record
        #    #record['count_of_issues'] = record['count_of_isssues']
        #print u"{0}: {1} ({2})".format(search_results['results'][0]['id'], search_results['results'][0]['name'] , search_results['results'][0]['start_year'] )

        # cache these search results
        cvc.add_search_results( original_series_name, search_results )

        return search_results

    def fetchVolumeData( self, series_id ):

        # before we search online, look in our cache, since we might already
        # have this info
        cvc = ComicVineCacher( )
        cached_volume_result = cvc.get_volume_info( series_id )

        if cached_volume_result is not None:
            return cached_volume_result

        volume_url = self.api_base_url + "/volume/" + CVTypeID.Volume + "-" + str(series_id) + "/?api_key=" + self.api_key + "&field_list=name,id,start_year,publisher,count_of_issues&format=json"

        content = self.getUrlContent(volume_url)
        cv_response = json.loads(content)

        if cv_response[ 'status_code' ] != 1:
            print >> sys.stderr, "Comic Vine query failed with error: [{0}]. ".format( cv_response[ 'error' ] )
            return None

        volume_results = cv_response['results']

        cvc.add_volume_info( volume_results )

        return volume_results

    def fetchIssuesByVolume( self, series_id ):

        # before we search online, look in our cache, since we might already
        # have this info
        cvc = ComicVineCacher( )
        cached_volume_issues_result = cvc.get_volume_issues_info( series_id )

        if cached_volume_issues_result is not None:
            return cached_volume_issues_result

        #---------------------------------
        issues_url = self.api_base_url + "/issues/" + "?api_key=" + self.api_key + "&filter=volume:" + str(series_id) + "&field_list=id,volume,issue_number,name,image,cover_date,site_detail_url,description&format=json"
        content = self.getUrlContent(issues_url)
        cv_response = json.loads(content)

        if cv_response[ 'status_code' ] != 1:
            print >> sys.stderr, "Comic Vine query failed with error: [{0}]. ".format( cv_response[ 'error' ] )
            return None
        #------------------------------------

        limit = cv_response['limit']
        current_result_count = cv_response['number_of_page_results']
        total_result_count = cv_response['number_of_total_results']
        #print "ATB total_result_count", total_result_count

        #print "ATB Found {0} of {1} results".format( cv_response['number_of_page_results'], cv_response['number_of_total_results'])
        volume_issues_result = cv_response['results']
        page = 1
        offset = 0

        # see if we need to keep asking for more pages...
        while ( current_result_count < total_result_count ):
            #print "ATB getting another page of issue results {0} of {1}...".format( current_result_count, total_result_count)
            page += 1
            offset += cv_response['number_of_page_results']

            #print issues_url+ "&offset="+str(offset)
            content = self.getUrlContent(issues_url + "&offset="+str(offset))
            cv_response = json.loads(content)

            if cv_response[ 'status_code' ] != 1:
                self.writeLog( "Comic Vine query failed with error: [{0}]. \n".format( cv_response[ 'error' ] ))
                return None
            volume_issues_result.extend( cv_response['results'])
            current_result_count += cv_response['number_of_page_results']

        self.repairUrls( volume_issues_result )

        cvc.add_volume_issues_info( series_id, volume_issues_result )

        return volume_issues_result

    def fetchIssuesByVolumeIssueNumAndYear( self, volume_id_list, issue_number, year ):
        volume_filter = "volume:"
        for vid in volume_id_list:
            volume_filter += str(vid) + "|"

        year_filter = ""
        if year is not None and str(year).isdigit():
            year_filter = ",cover_date:{0}-1-1|{1}-1-1".format(year, int(year)+1)

        issue_number = urllib.quote_plus(unicode(issue_number).encode("utf-8"))

        filter = "&filter=" + volume_filter + year_filter + ",issue_number:" + issue_number
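        # Illustrative example (not from the original source): volume_id_list=[1234, 5678],
        # issue_number="5", year=2010 yields
        #   &filter=volume:1234|5678|,cover_date:2010-1-1|2011-1-1,issue_number:5
        # (the loop above leaves a trailing "|", which the API appears to tolerate).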

        issues_url = self.api_base_url + "/issues/" + "?api_key=" + self.api_key + filter + "&field_list=id,volume,issue_number,name,image,cover_date,site_detail_url,description&format=json"

        content = self.getUrlContent(issues_url)
        cv_response = json.loads(content)

        if cv_response[ 'status_code' ] != 1:
            print >> sys.stderr, "Comic Vine query failed with error: [{0}]. ".format( cv_response[ 'error' ] )
            return None
        #------------------------------------

        limit = cv_response['limit']
        current_result_count = cv_response['number_of_page_results']
        total_result_count = cv_response['number_of_total_results']
        #print "ATB total_result_count", total_result_count

        #print "ATB Found {0} of {1} results\n".format( cv_response['number_of_page_results'], cv_response['number_of_total_results'])
        filtered_issues_result = cv_response['results']
        page = 1
        offset = 0

        # see if we need to keep asking for more pages...
        while ( current_result_count < total_result_count ):
            #print "ATB getting another page of issue results {0} of {1}...\n".format( current_result_count, total_result_count)
            page += 1
            offset += cv_response['number_of_page_results']

            #print issues_url+ "&offset="+str(offset)
            content = self.getUrlContent(issues_url + "&offset="+str(offset))
            cv_response = json.loads(content)

            if cv_response[ 'status_code' ] != 1:
                self.writeLog( "Comic Vine query failed with error: [{0}]. \n".format( cv_response[ 'error' ] ))
                return None
            filtered_issues_result.extend( cv_response['results'])
            current_result_count += cv_response['number_of_page_results']

        self.repairUrls( filtered_issues_result )

        return filtered_issues_result

    def fetchIssueData( self, series_id, issue_number, settings ):

        volume_results = self.fetchVolumeData( series_id )
        issues_list_results = self.fetchIssuesByVolume( series_id )

        found = False
        for record in issues_list_results:
            if IssueString(issue_number).asString() is None:
                issue_number = 1
            if IssueString(record['issue_number']).asString().lower() == IssueString(issue_number).asString().lower():
                found = True
                break

        if (found):
            issue_url = self.api_base_url + "/issue/" + CVTypeID.Issue + "-" + str(record['id']) + "/?api_key=" + self.api_key + "&format=json"

            content = self.getUrlContent(issue_url)
            cv_response = json.loads(content)
            if cv_response[ 'status_code' ] != 1:
                print >> sys.stderr, "Comic Vine query failed with error: [{0}]. ".format( cv_response[ 'error' ] )
                return None
            issue_results = cv_response['results']

        else:
            return None

        # now, map the comicvine data to generic metadata
        return self.mapCVDataToMetadata( volume_results, issue_results, settings )

    def fetchIssueDataByIssueID( self, issue_id, settings ):

        issue_url = self.api_base_url + "/issue/" + CVTypeID.Issue + "-" + str(issue_id) + "/?api_key=" + self.api_key + "&format=json"
        content = self.getUrlContent(issue_url)
        cv_response = json.loads(content)
        if cv_response[ 'status_code' ] != 1:
            print >> sys.stderr, "Comic Vine query failed with error: [{0}]. ".format( cv_response[ 'error' ] )
            return None

        issue_results = cv_response['results']

        volume_results = self.fetchVolumeData( issue_results['volume']['id'] )

        # now, map the comicvine data to generic metadata
        md = self.mapCVDataToMetadata( volume_results, issue_results, settings )
        md.isEmpty = False
        return md

    def mapCVDataToMetadata(self, volume_results, issue_results, settings ):

        # now, map the comicvine data to generic metadata
        metadata = GenericMetadata()

        metadata.series = issue_results['volume']['name']

        num_s = IssueString(issue_results['issue_number']).asString()
        metadata.issue = num_s
        metadata.title = issue_results['name']

        metadata.publisher = volume_results['publisher']['name']
        metadata.day, metadata.month, metadata.year = self.parseDateStr( issue_results['cover_date'] )

        #metadata.issueCount = volume_results['count_of_issues']
        metadata.comments = self.cleanup_html(issue_results['description'], settings.remove_html_tables)
        if settings.use_series_start_as_volume:
            metadata.volume = volume_results['start_year']

        metadata.notes = "Tagged with ComicTagger {0} using info from Comic Vine on {1}. [Issue ID {2}]".format(
            ctversion.version,
            datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            issue_results['id'])
        #metadata.notes += issue_results['site_detail_url']

        metadata.webLink = issue_results['site_detail_url']

        person_credits = issue_results['person_credits']
        for person in person_credits:
            if person.has_key('role'):
                roles = person['role'].split(',')
                for role in roles:
                    # can we determine 'primary' from CV??
                    metadata.addCredit( person['name'], role.title().strip(), False )

        character_credits = issue_results['character_credits']
        character_list = list()
        for character in character_credits:
            character_list.append( character['name'] )
        metadata.characters = utils.listToString( character_list )

        team_credits = issue_results['team_credits']
        team_list = list()
        for team in team_credits:
            team_list.append( team['name'] )
        metadata.teams = utils.listToString( team_list )

        location_credits = issue_results['location_credits']
        location_list = list()
        for location in location_credits:
            location_list.append( location['name'] )
        metadata.locations = utils.listToString( location_list )

        story_arc_credits = issue_results['story_arc_credits']
        arc_list = []
        for arc in story_arc_credits:
            arc_list.append(arc['name'])
        if len(arc_list) > 0:
            metadata.storyArc = utils.listToString(arc_list)

        return metadata

    def cleanup_html( self, string, remove_html_tables):
        """
        converter = html2text.HTML2Text()
        #converter.emphasis_mark = '*'
        #converter.ignore_links = True
        converter.body_width = 0

        print html2text.html2text(string)
        return string
        #return converter.handle(string)
        """

        if string is None:
            return ""
        # find any tables
        soup = BeautifulSoup(string)
        tables = soup.findAll('table')

        # remove all newlines first
        string = string.replace("\n", "")

        #put in our own
        string = string.replace("<br>", "\n")
        string = string.replace("</p>", "\n\n")
        string = string.replace("<h4>", "*")
        string = string.replace("</h4>", "*\n")

        #remove the tables
        p = re.compile(r'<table[^<]*?>.*?<\/table>')
        if remove_html_tables:
            string = p.sub('',string)
            string = string.replace("*List of covers and their creators:*","")
        else:
            string = p.sub('{}',string)

        # now strip all other tags
        p = re.compile(r'<[^<]*?>')
        newstring = p.sub('',string)

        newstring = newstring.replace('&nbsp;',' ')
        newstring = newstring.replace('&amp;','&')

        newstring = newstring.strip()

        if not remove_html_tables:
            # now rebuild the tables into text from BSoup
            try:
                table_strings = []
                for table in tables:
                    rows = []
                    hdrs = []
                    col_widths = []
                    for hdr in table.findAll('th'):
                        item = hdr.string.strip()
                        hdrs.append(item)
                        col_widths.append(len(item))
                    rows.append(hdrs)

                    for row in table.findAll('tr'):
                        cols = []
                        col = row.findAll('td')
                        i = 0
                        for c in col:
                            item = c.string.strip()
                            cols.append(item)
                            if len(item) > col_widths[i]:
                                col_widths[i] = len(item)
                            i += 1
                        if len(cols) != 0:
                            rows.append(cols)
                    # now we have the data, make it into text
                    fmtstr = ""
                    for w in col_widths:
                        fmtstr += " {{:{}}}|".format(w+1)
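                    # Illustrative example (not from the original source):
                    # col_widths == [6, 4] yields fmtstr == " {:7}| {:5}|",
                    # i.e. each column is padded to one more than its widest cell.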
                    width = sum(col_widths) + len(col_widths)*2
                    print "width=" , width
                    table_text = ""
                    counter = 0
                    for row in rows:
                        table_text += fmtstr.format(*row) + "\n"
                        if counter == 0 and len(hdrs)!= 0:
                            table_text += "-" * width + "\n"
                        counter += 1

                    table_strings.append(table_text)

                newstring = newstring.format(*table_strings)
            except:
                # we caught an error rebuilding the table.
                # just bail and remove the formatting
                print "table parse error"
                newstring = newstring.replace("{}", "")

        return newstring

    def fetchIssueDate( self, issue_id ):
        details = self.fetchIssueSelectDetails( issue_id )
        day, month, year = self.parseDateStr( details['cover_date'] )
        return month, year

    def fetchIssueCoverURLs( self, issue_id ):
        details = self.fetchIssueSelectDetails( issue_id )
        return details['image_url'], details['thumb_image_url']

    def fetchIssuePageURL( self, issue_id ):
        details = self.fetchIssueSelectDetails( issue_id )
        return details['site_detail_url']

    def fetchIssueSelectDetails( self, issue_id ):

        #cached_image_url,cached_thumb_url,cached_month,cached_year = self.fetchCachedIssueSelectDetails( issue_id )
        cached_details = self.fetchCachedIssueSelectDetails( issue_id )
        if cached_details['image_url'] is not None:
            return cached_details

        issue_url = self.api_base_url + "/issue/" + CVTypeID.Issue + "-" + str(issue_id) + "/?api_key=" + self.api_key + "&format=json&field_list=image,cover_date,site_detail_url"

        content = self.getUrlContent(issue_url)

        details = dict()
        details['image_url'] = None
        details['thumb_image_url'] = None
        details['cover_date'] = None
        details['site_detail_url'] = None

        cv_response = json.loads(content)
        if cv_response[ 'status_code' ] != 1:
            print >> sys.stderr, "Comic Vine query failed with error: [{0}]. ".format( cv_response[ 'error' ] )
            return details

        details['image_url'] = cv_response['results']['image']['super_url']
        details['thumb_image_url'] = cv_response['results']['image']['thumb_url']
        details['cover_date'] = cv_response['results']['cover_date']
        details['site_detail_url'] = cv_response['results']['site_detail_url']

        if details['image_url'] is not None:
            self.cacheIssueSelectDetails( issue_id,
                                          details['image_url'],
                                          details['thumb_image_url'],
                                          details['cover_date'],
                                          details['site_detail_url'] )
        #print details['site_detail_url']
        return details

    def fetchCachedIssueSelectDetails( self, issue_id ):

        # before we search online, look in our cache, since we might already
        # have this info
        cvc = ComicVineCacher( )
        return cvc.get_issue_select_details( issue_id )

    def cacheIssueSelectDetails( self, issue_id, image_url, thumb_url, cover_date, page_url ):
        cvc = ComicVineCacher( )
        cvc.add_issue_select_details( issue_id, image_url, thumb_url, cover_date, page_url )

    def fetchAlternateCoverURLs(self, issue_id, issue_page_url):
        url_list = self.fetchCachedAlternateCoverURLs( issue_id )
        if url_list is not None:
            return url_list

        # scrape the CV issue page URL to get the alternate cover URLs
        resp = urllib2.urlopen( issue_page_url )
        content = resp.read()
        alt_cover_url_list = self.parseOutAltCoverUrls( content)

        # cache this alt cover URL list
        self.cacheAlternateCoverURLs( issue_id, alt_cover_url_list )

        return alt_cover_url_list

    def parseOutAltCoverUrls( self, page_html ):
        soup = BeautifulSoup( page_html )

        alt_cover_url_list = []

        # Using knowledge of the layout of the Comic Vine issue page here:
        # look for the divs whose classes include 'imgboxart' and 'issue-cover'
        div_list = soup.find_all( 'div')
        covers_found = 0
        for d in div_list:
            if d.has_key('class'):
                c = d['class']
                if 'imgboxart' in c and 'issue-cover' in c:
                    covers_found += 1
                    if covers_found != 1:
                        alt_cover_url_list.append( d.img['src'] )

        return alt_cover_url_list
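    # Illustrative note (not from the original source), under the page-layout
    # assumption documented above: a div such as
    #   <div class="imgboxart issue-cover"><img src="http://example/alt2.jpg"></div>
    # contributes "http://example/alt2.jpg", while the first matching div (the
    # primary cover) is skipped by the covers_found check.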

    def fetchCachedAlternateCoverURLs( self, issue_id ):

        # before we search online, look in our cache, since we might already
        # have this info
        cvc = ComicVineCacher( )
        url_list = cvc.get_alt_covers( issue_id )
        if url_list is not None:
            return url_list
        else:
            return None

    def cacheAlternateCoverURLs( self, issue_id, url_list ):
        cvc = ComicVineCacher( )
        cvc.add_alt_covers( issue_id, url_list )

    #---------------------------------------------------------------------------
    urlFetchComplete = pyqtSignal( str , str, int)

    def asyncFetchIssueCoverURLs( self, issue_id ):

        self.issue_id = issue_id
        details = self.fetchCachedIssueSelectDetails( issue_id )
        if details['image_url'] is not None:
            self.urlFetchComplete.emit( details['image_url'],details['thumb_image_url'], self.issue_id )
            return

        issue_url = self.api_base_url + "/issue/" + CVTypeID.Issue + "-" + str(issue_id) + "/?api_key=" + self.api_key + "&format=json&field_list=image,cover_date,site_detail_url"
        self.nam = QNetworkAccessManager()
        self.nam.finished.connect( self.asyncFetchIssueCoverURLComplete )
        self.nam.get(QNetworkRequest(QUrl(issue_url)))

    def asyncFetchIssueCoverURLComplete( self, reply ):

        # read in the response
        data = reply.readAll()

        try:
            cv_response = json.loads(str(data))
        except:
            print >> sys.stderr, "Comic Vine query failed to get JSON data"
            print >> sys.stderr, str(data)
            return

        if cv_response[ 'status_code' ] != 1:
            print >> sys.stderr, "Comic Vine query failed with error: [{0}]. ".format( cv_response[ 'error' ] )
            return

        image_url = cv_response['results']['image']['super_url']
        thumb_url = cv_response['results']['image']['thumb_url']
        cover_date = cv_response['results']['cover_date']
        page_url = cv_response['results']['site_detail_url']

        self.cacheIssueSelectDetails( self.issue_id, image_url, thumb_url, cover_date, page_url )

        self.urlFetchComplete.emit( image_url, thumb_url, self.issue_id )

    altUrlListFetchComplete = pyqtSignal( list, int)

    def asyncFetchAlternateCoverURLs( self, issue_id, issue_page_url ):
        # This async version requires the issue page url to be provided!
        self.issue_id = issue_id
        url_list = self.fetchCachedAlternateCoverURLs( issue_id )
        if url_list is not None:
            self.altUrlListFetchComplete.emit( url_list, int(self.issue_id) )
            return

        self.nam = QNetworkAccessManager()
        self.nam.finished.connect( self.asyncFetchAlternateCoverURLsComplete )
        self.nam.get(QNetworkRequest(QUrl(str(issue_page_url))))

    def asyncFetchAlternateCoverURLsComplete( self, reply ):
        # read in the response
        html = str(reply.readAll())
        alt_cover_url_list = self.parseOutAltCoverUrls( html )

        # cache this alt cover URL list
        self.cacheAlternateCoverURLs( self.issue_id, alt_cover_url_list )

        self.altUrlListFetchComplete.emit( alt_cover_url_list, int(self.issue_id) )

    def repairUrls(self, issue_list):
        #make sure there are URLs for the image fields
        for issue in issue_list:
            if issue['image'] is None:
                issue['image'] = dict()
                issue['image']['super_url'] = ComicVineTalker.logo_url
                issue['image']['thumb_url'] = ComicVineTalker.logo_url

@@ -1,312 +1,308 @@
"""
|
||||
A PyQt4 widget display cover images from either local archive, or from ComicVine
|
||||
"""A PyQt6 widget to display cover images
|
||||
|
||||
(TODO: This should be re-factored using subclasses!)
|
||||
Display cover images from either a local archive, or from comic source metadata.
|
||||
TODO: This should be re-factored using subclasses!
|
||||
"""

"""
Copyright 2012-2014 Anthony Beville
#
# Copyright 2012-2014 ComicTagger Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
import logging
import pathlib

    http://www.apache.org/licenses/LICENSE-2.0
from PyQt6 import QtCore, QtGui, QtWidgets, uic

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from comicapi.comicarchive import ComicArchive
from comictaggerlib.imagefetcher import ImageFetcher
from comictaggerlib.imagepopup import ImagePopup
from comictaggerlib.pageloader import PageLoader
from comictaggerlib.ui import ui_path
from comictaggerlib.ui.qtutils import get_qimage_from_data

import os

from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4 import uic

from settings import ComicTaggerSettings
from genericmetadata import GenericMetadata, PageType
from comicarchive import MetaDataStyle
from comicvinetalker import ComicVineTalker, ComicVineTalkerException
from imagefetcher import ImageFetcher
from pageloader import PageLoader
from imagepopup import ImagePopup
import utils

# helper func to allow a label to be clickable
def clickable(widget):

    class Filter(QObject):

        dblclicked = pyqtSignal()

        def eventFilter(self, obj, event):

            if obj == widget:
                if event.type() == QEvent.MouseButtonDblClick:
                    self.dblclicked.emit()
                    return True

            return False

    filter = Filter(widget)
    widget.installEventFilter(filter)
    return filter.dblclicked
logger = logging.getLogger(__name__)


class CoverImageWidget(QWidget):

    ArchiveMode = 0
    AltCoverMode = 1
    URLMode = 1
    DataMode = 3

    def __init__(self, parent, mode, expand_on_click = True ):
        super(CoverImageWidget, self).__init__(parent)

        uic.loadUi(ComicTaggerSettings.getUIFile('coverimagewidget.ui' ), self)
def clickable(widget: QtWidgets.QWidget) -> QtCore.pyqtBoundSignal:
    """Allow a label to be clickable"""

        utils.reduceWidgetFontSize( self.label )
    class Filter(QtCore.QObject):
        dblclicked = QtCore.pyqtSignal()

        self.mode = mode
        self.comicVine = ComicVineTalker()
        self.page_loader = None
        self.showControls = True
        def eventFilter(self, obj: QtCore.QObject, event: QtCore.QEvent) -> bool:
            if obj == widget:
                if event.type() == QtCore.QEvent.Type.MouseButtonDblClick:
                    self.dblclicked.emit()
                    return True
            return False

        self.btnLeft.setIcon(QIcon(ComicTaggerSettings.getGraphic('left.png')))
        self.btnRight.setIcon(QIcon(ComicTaggerSettings.getGraphic('right.png')))

        self.btnLeft.clicked.connect( self.decrementImage )
        self.btnRight.clicked.connect( self.incrementImage )
        self.resetWidget()
        if expand_on_click:
            clickable(self.lblImage).connect(self.showPopup)
        else:
            self.lblImage.setToolTip( "" )
    flt = Filter(widget)
    widget.installEventFilter(flt)
    return flt.dblclicked

        self.updateContent()

    def resetWidget(self):
        self.comic_archive = None
        self.issue_id = None
        self.comicVine = None
        self.cover_fetcher = None
        self.url_list = []
        if self.page_loader is not None:
            self.page_loader.abandoned = True
        self.page_loader = None
        self.imageIndex = -1
        self.imageCount = 1
        self.imageData = None

    def clear( self ):
        self.resetWidget()
        self.updateContent()

    def incrementImage( self ):
        self.imageIndex += 1
        if self.imageIndex == self.imageCount:
            self.imageIndex = 0
        self.updateContent()
class CoverImageWidget(QtWidgets.QWidget):
    ArchiveMode = 0
    AltCoverMode = 1
    URLMode = 1
    DataMode = 3

    def decrementImage( self ):
        self.imageIndex -= 1
        if self.imageIndex == -1:
            self.imageIndex = self.imageCount -1
        self.updateContent()

    def setArchive( self, ca, page=0 ):
        if self.mode == CoverImageWidget.ArchiveMode:
            self.resetWidget()
            self.comic_archive = ca
            self.imageIndex = page
            self.imageCount = ca.getNumberOfPages()
            self.updateContent()
    image_fetch_complete = QtCore.pyqtSignal(str, QtCore.QByteArray)

    def setURL( self, url ):
        if self.mode == CoverImageWidget.URLMode:
            self.resetWidget()
            self.updateContent()

            self.url_list = [ url ]
            self.imageIndex = 0
            self.imageCount = 1
            self.updateContent()
    def __init__(
        self,
        parent: QtWidgets.QWidget,
        mode: int,
        cache_folder: pathlib.Path | None,
        blur: bool = False,
        expand_on_click: bool = True,
    ) -> None:
        super().__init__(parent)

    def setIssueID( self, issue_id ):
        if self.mode == CoverImageWidget.AltCoverMode:
            self.resetWidget()
            self.updateContent()

            self.issue_id = issue_id
        if mode not in (self.AltCoverMode, self.URLMode) or cache_folder is None:
            self.cover_fetcher = None
            self.talker = None
        else:
            self.cover_fetcher = ImageFetcher(cache_folder)
            self.talker = None
        with (ui_path / "coverimagewidget.ui").open(encoding="utf-8") as uifile:
            uic.loadUi(uifile, self)

            self.comicVine = ComicVineTalker()
            self.comicVine.urlFetchComplete.connect( self.primaryUrlFetchComplete )
            self.comicVine.asyncFetchIssueCoverURLs( int(self.issue_id) )
        self.cache_folder = cache_folder
        self.mode: int = mode
        self.page_loader: PageLoader | None = None
        self.showControls = True
        self.blur = blur
        self.scene = QtWidgets.QGraphicsScene(parent=self)

    def setImageData( self, image_data ):
        if self.mode == CoverImageWidget.DataMode:
            self.resetWidget()

            if image_data is None:
                self.imageIndex = -1
            else:
                self.imageIndex = 0
                self.imageData = image_data

            self.updateContent()

    def primaryUrlFetchComplete( self, primary_url, thumb_url, issue_id ):
        self.url_list.append(str(primary_url))
        self.imageIndex = 0
        self.imageCount = len(self.url_list)
        self.updateContent()
        self.current_pixmap = QtGui.QPixmap()

        #defer the alt cover search
        QTimer.singleShot(1, self.startAltCoverSearch)
        self.comic_archive: ComicArchive | None = None
        self.issue_id: str = ""
        self.issue_url: str | None = None
        self.url_list: list[str] = []
        if self.page_loader is not None:
            self.page_loader.abandoned = True
        self.page_loader = None
        self.imageIndex = -1
        self.imageCount = 1
        self.imageData = b""

    def startAltCoverSearch( self ):
        self.btnLeft.setIcon(QtGui.QIcon(":/graphics/left.png"))
        self.btnRight.setIcon(QtGui.QIcon(":/graphics/right.png"))

        # now we need to get the list of alt cover URLs
        self.label.setText("Searching for alt. covers...")

        # page URL should already be cached, so no need to defer
        self.comicVine = ComicVineTalker()
        issue_page_url = self.comicVine.fetchIssuePageURL( self.issue_id )
        self.comicVine.altUrlListFetchComplete.connect( self.altCoverUrlListFetchComplete )
        self.comicVine.asyncFetchAlternateCoverURLs( int(self.issue_id), issue_page_url)

    def altCoverUrlListFetchComplete( self, url_list, issue_id ):
        if len(url_list) > 0:
            self.url_list.extend(url_list)
            self.imageCount = len(self.url_list)
        self.updateControls()
        self.btnLeft.clicked.connect(self.decrement_image)
        self.btnRight.clicked.connect(self.increment_image)
        self.image_fetch_complete.connect(self.cover_remote_fetch_complete)
        if expand_on_click:
            clickable(self.graphicsView).connect(self.show_popup)
        else:
            self.graphicsView.setToolTip("")
        self.graphicsView.setScene(self.scene)

    def setPage( self, pagenum ):
        if self.mode == CoverImageWidget.ArchiveMode:
            self.imageIndex = pagenum
            self.updateContent()

    def updateContent( self ):
        self.updateImage()
        self.updateControls()

    def updateImage( self ):
        if self.imageIndex == -1:
            self.loadDefault()
        elif self.mode in [ CoverImageWidget.AltCoverMode, CoverImageWidget.URLMode ]:
            self.loadURL()
        elif self.mode == CoverImageWidget.DataMode:
            self.coverRemoteFetchComplete( self.imageData, 0 )
        else:
            self.loadPage()

    def updateControls( self ):
        if not self.showControls or self.mode == CoverImageWidget.DataMode:
            self.btnLeft.hide()
            self.btnRight.hide()
            self.label.hide()
            return

        if self.imageIndex == -1 or self.imageCount == 1:
            self.btnLeft.setEnabled(False)
            self.btnRight.setEnabled(False)
            self.btnLeft.hide()
            self.btnRight.hide()
        else:
            self.btnLeft.setEnabled(True)
            self.btnRight.setEnabled(True)
            self.btnLeft.show()
            self.btnRight.show()

        if self.imageIndex == -1 or self.imageCount == 1:
            self.label.setText("")
        elif self.mode == CoverImageWidget.AltCoverMode:
            self.label.setText("Cover {0} ( of {1} )".format(self.imageIndex+1, self.imageCount))
        else:
            self.label.setText("Page {0} ( of {1} )".format(self.imageIndex+1, self.imageCount))

    def loadURL( self ):
        self.loadDefault()
        self.cover_fetcher = ImageFetcher( )
        self.cover_fetcher.fetchComplete.connect(self.coverRemoteFetchComplete)
        self.cover_fetcher.fetch( self.url_list[self.imageIndex] )
        #print "ATB cover fetch started...."

    # called when the image is done loading from internet
    def coverRemoteFetchComplete( self, image_data, issue_id ):
        img = QImage()
        img.loadFromData( image_data )
        self.current_pixmap = QPixmap(img)
        self.setDisplayPixmap( 0, 0)
        #print "ATB cover fetch complete!"
        self.update_content()

    def loadPage( self ):
        if self.comic_archive is not None:
            if self.page_loader is not None:
                self.page_loader.abandoned = True
            self.page_loader = PageLoader( self.comic_archive, self.imageIndex )
            self.page_loader.loadComplete.connect( self.pageLoadComplete )
            self.page_loader.start()
    def reset_widget(self) -> None:
        self.comic_archive = None
        self.issue_id = ""
        self.issue_url = None
        self.url_list = []
        if self.page_loader is not None:
            self.page_loader.abandoned = True
        self.page_loader = None
        self.imageIndex = -1
        self.imageCount = 1
        self.imageData = b""

    def pageLoadComplete( self, img ):
        self.current_pixmap = QPixmap(img)
        self.setDisplayPixmap( 0, 0)
        self.page_loader = None

    def loadDefault( self ):
        self.current_pixmap = QPixmap(ComicTaggerSettings.getGraphic('nocover.png'))
        #print "loadDefault called"
        self.setDisplayPixmap( 0, 0)
    def clear(self) -> None:
        self.reset_widget()
        self.update_content()

    def resizeEvent( self, resize_event ):
        if self.current_pixmap is not None:
            delta_w = resize_event.size().width() - resize_event.oldSize().width()
            delta_h = resize_event.size().height() - resize_event.oldSize().height()
            #print "ATB resizeEvent deltas", resize_event.size().width(), resize_event.size().height()
            self.setDisplayPixmap( delta_w , delta_h )

    def setDisplayPixmap( self, delta_w , delta_h ):
        # the deltas let us know what the new width and height of the label will be
        """
        new_h = self.frame.height() + delta_h
        new_w = self.frame.width() + delta_w
        print "ATB setDisplayPixmap deltas", delta_w , delta_h
        print "ATB self.frame", self.frame.width(), self.frame.height()
        print "ATB self.", self.width(), self.height()

        frame_w = new_w
        frame_h = new_h
        """
        new_h = self.frame.height()
        new_w = self.frame.width()
        frame_w = self.frame.width()
        frame_h = self.frame.height()
    def increment_image(self) -> None:
        self.imageIndex += 1
        if self.imageIndex == self.imageCount:
            self.imageIndex = 0
        self.update_content()

        new_h -= 4
        new_w -= 4

        if new_h < 0:
            new_h = 0;
        if new_w < 0:
            new_w = 0;
    def decrement_image(self) -> None:
        self.imageIndex -= 1
        if self.imageIndex == -1:
            self.imageIndex = self.imageCount - 1
        self.update_content()
#print "ATB setDisplayPixmap deltas", delta_w , delta_h
|
||||
#print "ATB self.frame", frame_w, frame_h
|
||||
#print "ATB new size", new_w, new_h
|
||||
|
||||
# scale the pixmap to fit in the frame
|
||||
scaled_pixmap = self.current_pixmap.scaled(new_w, new_h, Qt.KeepAspectRatio)
|
||||
self.lblImage.setPixmap( scaled_pixmap )
|
||||
|
||||
# move and resize the label to be centered in the fame
|
||||
img_w = scaled_pixmap.width()
|
||||
img_h = scaled_pixmap.height()
|
||||
self.lblImage.resize( img_w, img_h )
|
||||
self.lblImage.move( (frame_w - img_w)/2, (frame_h - img_h)/2 )
|
||||
|
||||

    def showPopup( self ):
        self.popup = ImagePopup(self, self.current_pixmap)
    def set_archive(self, ca: ComicArchive, page: int = 0) -> None:
        if self.mode == CoverImageWidget.ArchiveMode:
            self.reset_widget()
            self.comic_archive = ca
            self.imageIndex = page
            self.imageCount = ca.get_number_of_pages()
            self.update_content()

    def set_url(self, url: str) -> None:
        if self.mode == CoverImageWidget.URLMode:
            self.reset_widget()
            self.update_content()

            self.url_list = [url]
            self.imageIndex = 0
            self.imageCount = 1
            self.update_content()

    def set_issue_details(self, issue_id: str, url_list: list[str]) -> None:
        if self.mode == CoverImageWidget.AltCoverMode:
            self.reset_widget()
            self.update_content()
            self.issue_id = issue_id

            self.set_url_list(url_list)

    def set_image_data(self, image_data: bytes) -> None:
        if self.mode == CoverImageWidget.DataMode:
            self.reset_widget()

            if image_data:
                self.imageIndex = 0
                self.imageData = image_data
            else:
                self.imageIndex = -1

            self.update_content()

    def set_url_list(self, url_list: list[str]) -> None:
        self.url_list = url_list
        self.imageIndex = 0
        self.imageCount = len(self.url_list)
        self.update_content()
        self.update_controls()

    def set_page(self, pagenum: int) -> None:
        if self.mode == CoverImageWidget.ArchiveMode:
            self.imageIndex = pagenum
            self.update_content()

    def update_content(self) -> None:
        self.update_image()
        self.update_controls()

    def update_image(self) -> None:
        if self.imageIndex == -1:
            self.load_default()
        elif self.mode in [CoverImageWidget.AltCoverMode, CoverImageWidget.URLMode]:
            self.load_url()
        elif self.mode == CoverImageWidget.DataMode:
            self.cover_remote_fetch_complete("", self.imageData)
        else:
            self.load_page()

    def update_controls(self) -> None:
        if not self.showControls or self.mode == CoverImageWidget.DataMode:
            self.btnLeft.hide()
            self.btnRight.hide()
            self.label.hide()
            return

        if self.imageIndex == -1 or self.imageCount == 1:
            self.btnLeft.setEnabled(False)
            self.btnRight.setEnabled(False)
            self.btnLeft.hide()
            self.btnRight.hide()
        else:
            self.btnLeft.setEnabled(True)
            self.btnRight.setEnabled(True)
            self.btnLeft.show()
            self.btnRight.show()

        if self.imageIndex == -1 or self.imageCount == 1:
            self.label.setText("")
        elif self.mode == CoverImageWidget.AltCoverMode:
            self.label.setText(f"Cover {self.imageIndex + 1} (of {self.imageCount})")
        else:
            self.label.setText(f"Page {self.imageIndex + 1} (of {self.imageCount})")

    def load_url(self) -> None:
        assert isinstance(self.cache_folder, pathlib.Path)
        self.load_default()
        self.cover_fetcher = ImageFetcher(self.cache_folder)
        ImageFetcher.image_fetch_complete = self.image_fetch_complete.emit
        data = self.cover_fetcher.fetch(self.url_list[self.imageIndex])
        if data:
            self.cover_remote_fetch_complete(self.url_list[self.imageIndex], data)

    # called when the image is done loading from internet
    def cover_remote_fetch_complete(self, url: str, image_data: bytes) -> None:
        if url and url not in self.url_list:
            return
        img = get_qimage_from_data(image_data)
        self.current_pixmap = QtGui.QPixmap.fromImage(img)
        self.set_display_pixmap()

    def load_page(self) -> None:
        if self.comic_archive is not None:
            if self.page_loader is not None:
                self.page_loader.abandoned = True
            self.page_loader = PageLoader(self.comic_archive, self.imageIndex)
            self.page_loader.loadComplete.connect(self.page_load_complete)
            self.page_loader.start()

    def page_load_complete(self, image_data: bytes) -> None:
        img = get_qimage_from_data(image_data)
        self.current_pixmap = QtGui.QPixmap.fromImage(img)
        self.set_display_pixmap()
        self.page_loader = None

    def load_default(self) -> None:
        self.current_pixmap = QtGui.QPixmap(":/graphics/nocover.png")
        self.set_display_pixmap()

    def resizeEvent(self, resize_event: QtGui.QResizeEvent) -> None:
        if self.current_pixmap is not None:
            self.set_display_pixmap()

    def set_display_pixmap(self) -> None:
        """The deltas let us know what the new width and height of the label will be"""

        new_w = self.frame.width()
        new_h = self.frame.height()
        frame_w = self.frame.width()
        frame_h = self.frame.height()

        new_h -= 8
        new_w -= 8

        new_h = max(new_h, 0)
        new_w = max(new_w, 0)

        # scale the pixmap to fit in the frame
        scaled_pixmap = self.current_pixmap.scaled(
            new_w, new_h, QtCore.Qt.AspectRatioMode.KeepAspectRatio, QtCore.Qt.TransformationMode.SmoothTransformation
        )
        self.scene.clear()
        qpix = self.scene.addPixmap(scaled_pixmap)
        assert qpix
        if self.blur:
            blur = QtWidgets.QGraphicsBlurEffect(parent=self)
            blur.setBlurHints(QtWidgets.QGraphicsBlurEffect.BlurHint.PerformanceHint)
            blur.setBlurRadius(30)
            qpix.setGraphicsEffect(blur)

        # move and resize the label to be centered in the frame
        img_w = scaled_pixmap.width()
        img_h = scaled_pixmap.height()
        self.scene.setSceneRect(0, 0, img_w, img_h)
        self.graphicsView.resize(img_w + 2, img_h + 2)
        self.graphicsView.move(int((frame_w - img_w) / 2), int((frame_h - img_h) / 2))

    def show_popup(self) -> None:
        ImagePopup(self, self.current_pixmap)

@@ -1,99 +1,98 @@
"""
|
||||
A PyQT4 dialog to edit credits
|
||||
"""
|
||||
"""A PyQT4 dialog to edit credits"""
|
||||
|
||||
"""
|
||||
Copyright 2012-2014 Anthony Beville
|
||||
#
|
||||
# Copyright 2012-2014 ComicTagger Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
import logging
|
||||
import operator
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
import natsort
|
||||
from PyQt6 import QtCore, QtWidgets, uic
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
"""
|
||||
from comicapi import utils
|
||||
from comicapi.genericmetadata import Credit
|
||||
from comictaggerlib.ui import ui_path
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
from PyQt4 import QtCore, QtGui, uic
from settings import ComicTaggerSettings
import os
class CreditEditorWindow(QtWidgets.QDialog):
    ModeEdit = 0
    ModeNew = 1

class CreditEditorWindow(QtGui.QDialog):

    ModeEdit = 0
    ModeNew = 1

    def __init__(self, parent, mode, role, name, primary ):
        super(CreditEditorWindow, self).__init__(parent)

        uic.loadUi(ComicTaggerSettings.getUIFile('crediteditorwindow.ui' ), self)

        self.mode = mode

        if self.mode == self.ModeEdit:
            self.setWindowTitle("Edit Credit")
        else:
            self.setWindowTitle("New Credit")
    def __init__(self, parent: QtWidgets.QWidget, mode: int, credit: Credit) -> None:
        super().__init__(parent)

        # Add the entries to the role combobox
        self.cbRole.addItem( "" )
        self.cbRole.addItem( "Writer" )
        self.cbRole.addItem( "Artist" )
        self.cbRole.addItem( "Penciller" )
        self.cbRole.addItem( "Inker" )
        self.cbRole.addItem( "Colorist" )
        self.cbRole.addItem( "Letterer" )
        self.cbRole.addItem( "Cover Artist" )
        self.cbRole.addItem( "Editor" )
        self.cbRole.addItem( "Other" )
        self.cbRole.addItem( "Plotter" )
        self.cbRole.addItem( "Scripter" )

        self.leName.setText( name )

        if role is not None and role != "":
            i = self.cbRole.findText( role )
            if i == -1:
                self.cbRole.setEditText( role )
            else:
                self.cbRole.setCurrentIndex( i )
        with (ui_path / "crediteditorwindow.ui").open(encoding="utf-8") as uifile:
            uic.loadUi(uifile, self)

        if primary:
            self.cbPrimary.setCheckState( QtCore.Qt.Checked )

        self.cbRole.currentIndexChanged.connect(self.roleChanged)
        self.cbRole.editTextChanged.connect(self.roleChanged)

        self.updatePrimaryButton()
        self.mode = mode
def updatePrimaryButton( self ):
|
||||
enabled =self.currentRoleCanBePrimary()
|
||||
self.cbPrimary.setEnabled( enabled )
|
||||
if self.mode == self.ModeEdit:
|
||||
self.setWindowTitle("Edit Credit")
|
||||
else:
|
||||
self.setWindowTitle("New Credit")
|
||||
|
||||
def currentRoleCanBePrimary( self ):
|
||||
role = self.cbRole.currentText()
|
||||
if str(role).lower() == "writer" or str(role).lower() == "artist":
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def roleChanged( self, s ):
|
||||
self.updatePrimaryButton()
|
||||
|
||||
def getCredits( self ):
|
||||
primary = self.currentRoleCanBePrimary() and self.cbPrimary.isChecked()
|
||||
return self.cbRole.currentText(), self.leName.text(), primary
|
||||
# Add the entries to the role combobox
|
||||
self.cbRole.addItem("")
|
||||
self.cbRole.addItem("Artist")
|
||||
self.cbRole.addItem("Colorist")
|
||||
self.cbRole.addItem("Cover Artist")
|
||||
self.cbRole.addItem("Editor")
|
||||
self.cbRole.addItem("Inker")
|
||||
self.cbRole.addItem("Letterer")
|
||||
self.cbRole.addItem("Penciller")
|
||||
self.cbRole.addItem("Plotter")
|
||||
self.cbRole.addItem("Scripter")
|
||||
self.cbRole.addItem("Translator")
|
||||
self.cbRole.addItem("Writer")
|
||||
self.cbRole.addItem("Other")
|
||||
|
||||
self.cbLanguage.addItem("", "")
|
||||
for f in natsort.humansorted(utils.languages().items(), operator.itemgetter(1)):
|
||||
self.cbLanguage.addItem(f[1], f[0])
|
||||
|
||||
def accept( self ):
|
||||
if self.cbRole.currentText() == "" or self.leName.text() == "":
|
||||
QtGui.QMessageBox.warning(self, self.tr("Whoops"), self.tr("You need to enter both role and name for a credit."))
|
||||
else:
|
||||
QtGui.QDialog.accept(self)
|
||||
self.leName.setText(credit.person)
|
||||
|
||||
if credit.role is not None and credit.role != "":
|
||||
i = self.cbRole.findText(credit.role)
|
||||
if i == -1:
|
||||
self.cbRole.setEditText(credit.role)
|
||||
else:
|
||||
self.cbRole.setCurrentIndex(i)
|
||||
|
||||
if credit.language != "":
|
||||
i = (
|
||||
self.cbLanguage.findData(credit.language, QtCore.Qt.ItemDataRole.UserRole)
|
||||
if self.cbLanguage.findData(credit.language, QtCore.Qt.ItemDataRole.UserRole) > -1
|
||||
else self.cbLanguage.findText(credit.language)
|
||||
)
|
||||
if i == -1:
|
||||
self.cbLanguage.setEditText(credit.language)
|
||||
else:
|
||||
self.cbLanguage.setCurrentIndex(i)
|
||||
|
||||
self.cbPrimary.setChecked(credit.primary)
|
||||
|
||||
def get_credit(self) -> Credit:
|
||||
lang = self.cbLanguage.currentData() or self.cbLanguage.currentText()
|
||||
return Credit(self.leName.text(), self.cbRole.currentText(), self.cbPrimary.isChecked(), lang)
|
||||
|
||||
def accept(self) -> None:
|
||||
if self.leName.text() == "":
|
||||
QtWidgets.QMessageBox.warning(self, "Whoops", "You need to enter a name for a credit.")
|
||||
else:
|
||||
QtWidgets.QDialog.accept(self)
|
||||
|
||||
122 comictaggerlib/ctsettings/__init__.py Normal file
@@ -0,0 +1,122 @@
from __future__ import annotations

import json
import logging
import pathlib
from enum import Enum
from typing import Any

import settngs

from comictaggerlib.ctsettings.commandline import (
    initial_commandline_parser,
    register_commandline_settings,
    validate_commandline_settings,
)
from comictaggerlib.ctsettings.file import register_file_settings, validate_file_settings
from comictaggerlib.ctsettings.plugin import group_for_plugin, register_plugin_settings, validate_plugin_settings
from comictaggerlib.ctsettings.settngs_namespace import SettngsNS as ct_ns
from comictaggerlib.ctsettings.types import ComicTaggerPaths
from comictalker import ComicTalker

logger = logging.getLogger(__name__)

talkers: dict[str, ComicTalker] = {}

__all__ = [
    "initial_commandline_parser",
    "register_commandline_settings",
    "register_file_settings",
    "register_plugin_settings",
    "validate_commandline_settings",
    "validate_file_settings",
    "validate_plugin_settings",
    "ComicTaggerPaths",
    "ct_ns",
    "group_for_plugin",
]


class SettingsEncoder(json.JSONEncoder):
    def default(self, obj: Any) -> Any:
        if isinstance(obj, pathlib.Path):
            return str(obj)

        # Let the base class default method raise the TypeError
        return json.JSONEncoder.default(self, obj)
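Note: a minimal sketch of how SettingsEncoder behaves (the dictionary here is illustrative, not a real ComicTagger config). Paths serialize as plain strings; unsupported types still raise TypeError via the base class:

import json
import pathlib

print(json.dumps({"config_dir": pathlib.Path("/tmp/comictagger")}, cls=SettingsEncoder))
# {"config_dir": "/tmp/comictagger"}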

def validate_types(config: settngs.Config[settngs.Values]) -> settngs.Config[settngs.Values]:
    # Go through each setting
    for group in config.definitions.values():
        for setting in group.v.values():
            # Get the value and if it is the default
            value, default = settngs.get_option(config.values, setting)
            if not default and setting.type is not None:
                # If it is not the default and the type attribute is not None
                # use it to convert the loaded string into the expected value
                if (
                    isinstance(value, str)
                    or isinstance(default, Enum)
                    or (isinstance(setting.type, type) and issubclass(setting.type, Enum))
                ):
                    if isinstance(setting.type, type) and issubclass(setting.type, Enum) and isinstance(value, list):
                        config.values[setting.group][setting.dest] = [setting.type(x) for x in value]
                    else:
                        config.values[setting.group][setting.dest] = setting.type(value)
    return config
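The conversion matters because JSON round-trips enums and similar typed values as plain strings. A minimal illustration of the idea, independent of settngs:

from enum import Enum

class Mode(Enum):
    OVERLAY = "overlay"
    ADD_MISSING = "add_missing"

loaded = "overlay"           # what json.load hands back
restored = Mode(loaded)      # what validate_types effectively does per setting
assert restored is Mode.OVERLAY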

def parse_config(
    manager: settngs.Manager,
    config_path: pathlib.Path,
    args: list[str] | None = None,
) -> tuple[settngs.Config[settngs.Values], bool]:
    """
    Parse options from a json file and pass the resulting Config object to parse_cmdline.

    Args:
        manager: settngs Manager object
        config_path: A `pathlib.Path` object
        args: Passed to argparse.ArgumentParser.parse_args
    """
    file_options, success = settngs.parse_file(manager.definitions, config_path)
    file_options = validate_types(file_options)
    cmdline_options = settngs.parse_cmdline(
        manager.definitions,
        manager.description,
        manager.epilog,
        args,
        file_options,
    )

    final_options = settngs.normalize_config(cmdline_options, file=True, cmdline=True)
    return final_options, success
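Typical call shape, assuming a manager that has already had its setting groups registered (the file name is illustrative):

manager = settngs.Manager(description="ComicTagger")
register_file_settings(manager)  # groups defined elsewhere in this package
config, success = parse_config(manager, pathlib.Path("settings.json"), args=[])
if not success:
    logger.warning("Config file could not be read; using defaults")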

def save_file(
    config: settngs.Config[settngs.T],
    filename: pathlib.Path,
) -> bool:
    """
    Helper function to save options from a json dictionary to a file

    Args:
        config: The options to save to a json dictionary
        filename: A pathlib.Path object to save the json dictionary to
    """
    file_options = settngs.clean_config(config, file=True)
    if "Quick Tag" in file_options and "url" in file_options["Quick Tag"]:
        file_options["Quick Tag"]["url"] = str(file_options["Quick Tag"]["url"])

    try:
        if not filename.exists():
            filename.parent.mkdir(exist_ok=True, parents=True)
            filename.touch()

        json_str = json.dumps(file_options, cls=SettingsEncoder, indent=2)
        filename.write_text(json_str + "\n", encoding="utf-8")
    except Exception:
        logger.exception("Failed to save config file: %s", filename)
        return False
    return True
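save_file and parse_config together give a simple round trip; a sketch, reusing the hypothetical manager and config names from the sketch above:

# Persist current values, then read them back on a later start.
if save_file(config, pathlib.Path("settings.json")):
    config2, ok = parse_config(manager, pathlib.Path("settings.json"))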
382 comictaggerlib/ctsettings/commandline.py Normal file
@@ -0,0 +1,382 @@
"""CLI settings for ComicTagger"""

#
# Copyright 2012-2014 ComicTagger Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

import argparse
import hashlib
import logging
import os
import platform
import shlex
import subprocess

import settngs

from comicapi import comicarchive, utils
from comicapi.comicarchive import tags
from comictaggerlib import ctversion, quick_tag
from comictaggerlib.ctsettings.settngs_namespace import SettngsNS as ct_ns
from comictaggerlib.ctsettings.types import ComicTaggerPaths, tag
from comictaggerlib.resulttypes import Action

logger = logging.getLogger(__name__)


def initial_commandline_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(add_help=False)
    # Ensure this stays up to date with register_runtime
    parser.add_argument(
        "--config",
        help="Config directory for ComicTagger to use.\ndefault: %(default)s\n\n",
        type=ComicTaggerPaths,
        default=ComicTaggerPaths(),
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        default=0,
        help="Be noisy when doing what it does. Use a second time to enable debug logs.\nShort option cannot be combined with other options.",
    )
    parser.add_argument(
        "--enable-quick-tag",
        action=argparse.BooleanOptionalAction,
        default=False,
        help='Enable the experimental "quick tagger"',
    )
    return parser
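This add_help=False parser exists so the config directory and verbosity can be read before the full option set is registered. A sketch of the two-phase pattern (argv is illustrative):

argv = ["--enable-quick-tag", "-s", "comic.cbz"]
early, remaining = initial_commandline_parser().parse_known_args(argv)
# early.config and early.enable_quick_tag can steer logging setup and which
# setting groups get registered; `remaining` is parsed by the full parser later.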

def register_runtime(parser: settngs.Manager) -> None:
    parser.add_setting(
        "--config",
        help="Config directory for ComicTagger to use.\ndefault: %(default)s\n\n",
        type=ComicTaggerPaths,
        default=ComicTaggerPaths(),
        file=False,
    )
    parser.add_setting(
        "-v",
        "--verbose",
        action="count",
        default=0,
        help="Be noisy when doing what it does. Use a second time to enable debug logs.\nShort option cannot be combined with other options.",
        file=False,
    )
    parser.add_setting(
        "--enable-quick-tag",
        action=argparse.BooleanOptionalAction,
        default=False,
        help='Enable the experimental "quick tagger"',
        file=False,
    )
    parser.add_setting(
        "--enable-embedding-hashes",
        action=argparse.BooleanOptionalAction,
        default=False,
        help="Enable embedding hashes in metadata (currently only CR/CIX has support)",
        file=False,
    )
    parser.add_setting(
        "--preferred-hash",
        default="shake_256",
        choices=hashlib.algorithms_available,
        help="The type of embedded hash to save when --enable-embedding-hashes is set\n\n",
        file=False,
    )
    parser.add_setting("-q", "--quiet", action="store_true", help="Don't say much (for print mode).", file=False)
    parser.add_setting(
        "-j",
        "--json",
        action="store_true",
        help="Output json on stdout. Ignored in interactive mode.\n\n",
        file=False,
    )
    parser.add_setting(
        "--raw",
        action="store_true",
        help="""With -p, will print out the raw tag block(s) from the file.""",
        file=False,
    )
    parser.add_setting(
        "-i",
        "--interactive",
        action="store_true",
        help="""Interactively query the user when there are\nmultiple matches for an online search. Disables json output\n\n""",
        file=False,
    )
    parser.add_setting(
        "--abort",
        dest="abort_on_low_confidence",
        action=argparse.BooleanOptionalAction,
        default=True,
        help="""Abort save operation when online match is of low confidence.\ndefault: %(default)s""",
        file=False,
    )
    parser.add_setting(
        "-n",
        "--dryrun",
        action="store_true",
        help="Don't actually modify file (only relevant for -d, -s, or -r).\n\n",
        file=False,
    )
    parser.add_setting(
        "--summary",
        default=True,
        action=argparse.BooleanOptionalAction,
        help="Show the summary after a save operation.\ndefault: %(default)s",
        file=False,
    )
    parser.add_setting(
        "-R",
        "--recursive",
        action="store_true",
        help="Recursively include files in sub-folders.",
        file=False,
    )
    parser.add_setting("-g", "--glob", action="store_true", help="Windows only. Enable globbing", file=False)
    parser.add_setting("--darkmode", action="store_true", help="Windows only. Force a dark palette", file=False)
    parser.add_setting("--no-gui", action="store_true", help="Do not open the GUI, force the commandline", file=False)

    parser.add_setting(
        "--abort-on-conflict",
        action="store_true",
        help="""Don't export to zip if intended new filename exists\n(otherwise, creates a new unique filename).\n\n""",
        file=False,
    )
    parser.add_setting(
        "--delete-original",
        action="store_true",
        help="""Delete original archive after successful export to Zip.\n(only relevant for -e)\n\n""",
        file=False,
    )
    parser.add_setting(
        "-t",
        "--tags-read",
        metavar=f"{{{','.join(tags).upper()}}}",
        default=[],
        type=tag,
        help="""Specify the tags to read.\nUse commas for multiple tags.\nSee --list-plugins for the available tags.\nThe tags used will be 'overlaid' in order:\ne.g. '-t cbl,cr' with no CBL tags, CR will be used if they exist and CR will overwrite any shared CBL tags.\n\n""",
        file=False,
    )
    parser.add_setting(
        "--tags-write",
        metavar=f"{{{','.join(tags).upper()}}}",
        default=[],
        type=tag,
        help="""Specify the tags to write.\nUse commas for multiple tags.\nRead tags will be used if unspecified\nSee --list-plugins for the available tags.\n\n""",
        file=False,
    )
    parser.add_setting(
        "--skip-existing-tags",
        action=argparse.BooleanOptionalAction,
        default=False,
        help="""Skip archives that already have tags specified with -t,\notherwise merges new tags with existing tags (relevant for -s or -c).\ndefault: %(default)s""",
        file=False,
    )
    parser.add_setting("files", nargs="*", default=[], file=False)


def register_commands(parser: settngs.Manager) -> None:
    parser.add_setting("--version", action="store_true", help="Display version.", file=False)

    parser.add_setting(
        "-p",
        "--print",
        dest="command",
        action="store_const",
        const=Action.print,
        default=Action.gui,
        help="""Print out tag info from file. Specify via -t to only print specific tags.\n\n""",
        file=False,
    )
    parser.add_setting(
        "-d",
        "--delete",
        dest="command",
        action="store_const",
        const=Action.delete,
        help="Deletes the tags specified via -t.",
        file=False,
    )
    parser.add_setting(
        "-c",
        "--copy",
        type=tag,
        default=[],
        metavar=f"{{{','.join(tags).upper()}}}",
        help="Copy the specified source tags to\ndestination tags specified via --tags-write\n(potentially lossy operation).\n\n",
        file=False,
    )
    parser.add_setting(
        "-s",
        "--save",
        dest="command",
        action="store_const",
        const=Action.save,
        help="Save out tags as specified tags (via --tags-write).\nMust specify also at least -o, -f, or -m.\n\n",
        file=False,
    )
    parser.add_setting(
        "-r",
        "--rename",
        dest="command",
        action="store_const",
        const=Action.rename,
        help="Rename the file based on specified tags.",
        file=False,
    )
    parser.add_setting(
        "-e",
        "--export-to-zip",
        dest="command",
        action="store_const",
        const=Action.export,
        help="Export archive to Zip format.",
        file=False,
    )
    parser.add_setting(
        "--only-save-config",
        dest="command",
        action="store_const",
        const=Action.save_config,
        help="Only save the configuration (eg, Comic Vine API key) and quit.",
        file=False,
    )
    parser.add_setting(
        "--list-plugins",
        dest="command",
        action="store_const",
        const=Action.list_plugins,
        default=Action.gui,
        help="List the available plugins.\n\n",
        file=False,
    )


def register_commandline_settings(parser: settngs.Manager, enable_quick_tag: bool) -> None:
    parser.add_group("Commands", register_commands, True)
    parser.add_persistent_group("Runtime Options", register_runtime)
    if enable_quick_tag:
        parser.add_group("Quick Tag", quick_tag.settings)
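The registration functions only declare options; parsing happens later. A sketch of the wiring, assuming parse_config from this package's __init__ (file and argument values illustrative):

import pathlib

import settngs

manager = settngs.Manager(description="ComicTagger")
register_commandline_settings(manager, enable_quick_tag=False)
config, ok = parse_config(manager, pathlib.Path("settings.json"), args=["-p", "comic.cbz"])
config = validate_commandline_settings(config, manager)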

def validate_commandline_settings(config: settngs.Config[ct_ns], parser: settngs.Manager) -> settngs.Config[ct_ns]:
    if config[0].Commands__version:
        parser.exit(
            status=1,
            message=f"ComicTagger {ctversion.version}: Copyright (c) 2012-2022 ComicTagger Team\n"
            + "Distributed under Apache License 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n",
        )

    enabled_tags = {tag for tag in comicarchive.tags if comicarchive.tags[tag].enabled}
    if (
        (not config[0].Metadata_Options__cr)
        and "cr" in comicarchive.tags
        and comicarchive.tags["cr"].enabled
        and len(enabled_tags) > 1
    ):
        comicarchive.tags["cr"].enabled = False

    config[0].Runtime_Options__no_gui = any(
        (config[0].Commands__command != Action.gui, config[0].Runtime_Options__no_gui, config[0].Commands__copy)
    )

    if platform.system() == "Windows" and config[0].Runtime_Options__glob:
        # no globbing on windows shell, so do it for them
        import glob

        globs = config[0].Runtime_Options__files
        config[0].Runtime_Options__files = []
        for item in globs:
            config[0].Runtime_Options__files.extend(glob.glob(item))

    if config[0].Runtime_Options__json and config[0].Runtime_Options__interactive:
        config[0].Runtime_Options__json = False

    if config[0].Runtime_Options__tags_read and not config[0].Runtime_Options__tags_write:
        config[0].Runtime_Options__tags_write = config[0].Runtime_Options__tags_read

    disabled_tags = {tag for tag in comicarchive.tags if not comicarchive.tags[tag].enabled}
    to_be_removed = (
        set(config[0].Runtime_Options__tags_read)
        .union(config[0].Runtime_Options__tags_write)
        .intersection(disabled_tags)
    )
    if to_be_removed:
        logger.debug("Removing disabled tags: %s", to_be_removed)
        config[0].Runtime_Options__tags_read = [
            tag for tag in config[0].Runtime_Options__tags_read if tag not in to_be_removed
        ]
        config[0].Runtime_Options__tags_write = [
            tag for tag in config[0].Runtime_Options__tags_write if tag not in to_be_removed
        ]

    if (
        config[0].Runtime_Options__no_gui
        and not [tag.id for tag in tags.values() if tag.enabled]
        and config[0].Commands__command != Action.list_plugins
    ):
        parser.exit(status=1, message="There are no tags enabled. See --list-plugins\n")

    if config[0].Runtime_Options__no_gui and not config[0].Runtime_Options__files:
        if config[0].Commands__command == Action.print and not config[0].Auto_Tag__metadata.is_empty:
            ...  # allow printing the metadata provided on the commandline
        elif config[0].Commands__command not in (Action.save_config, Action.list_plugins):
            parser.exit(message="Command requires at least one filename!\n", status=1)

    if config[0].Commands__command == Action.delete and not config[0].Runtime_Options__tags_write:
        parser.exit(message="Please specify the tags to delete with --tags-write\n", status=1)

    if config[0].Commands__command == Action.save and not config[0].Runtime_Options__tags_write:
        parser.exit(message="Please specify the tags to save with --tags-write\n", status=1)

    if config[0].Commands__copy:
        config[0].Commands__command = Action.copy
        if not config[0].Runtime_Options__tags_write:
            parser.exit(message="Please specify the tags to copy to with --tags-write\n", status=1)

    if config[0].Runtime_Options__recursive:
        config[0].Runtime_Options__files = utils.os_sorted(
            set(utils.get_recursive_filelist(config[0].Runtime_Options__files))
        )

    if not config[0].Runtime_Options__enable_embedding_hashes:
        config[0].Runtime_Options__preferred_hash = ""

    # take a crack at finding rar exe if it's not in the path
    if not utils.which("rar"):
        if platform.system() == "Windows":
            letters = ["C"]
            letters.extend({f"{d}" for d in "ABDEFGHIJKLMNOPQRSTUVWXYZ" if os.path.exists(f"{d}:\\")})
            for letter in letters:
                # look in some likely places for Windows machines
                utils.add_to_path(rf"{letter}:\Program Files\WinRAR")
                utils.add_to_path(rf"{letter}:\Program Files (x86)\WinRAR")
        else:
            if platform.system() == "Darwin":
                result = subprocess.run(("/usr/libexec/path_helper", "-s"), capture_output=True)
                for path in reversed(
                    shlex.split(result.stdout.decode("utf-8", errors="ignore"))[0]
                    .partition("=")[2]
                    .rstrip(";")
                    .split(os.pathsep)
                ):
                    utils.add_to_path(path)
            utils.add_to_path("/opt/homebrew/bin")

    return config
398 comictaggerlib/ctsettings/file.py Normal file
@@ -0,0 +1,398 @@
from __future__ import annotations

import argparse
import uuid

import settngs

from comicapi import merge, utils
from comicapi.genericmetadata import GenericMetadata
from comictaggerlib.ctsettings.settngs_namespace import SettngsNS as ct_ns
from comictaggerlib.ctsettings.types import parse_metadata_from_string
from comictaggerlib.defaults import DEFAULT_REPLACEMENTS, Replacement, Replacements


def general(parser: settngs.Manager) -> None:
    # General Settings
    parser.add_setting("check_for_new_version", default=False, cmdline=False)
    parser.add_setting("blur", default=False, cmdline=False)
    parser.add_setting(
        "--prompt-on-save",
        default=True,
        action=argparse.BooleanOptionalAction,
        help="Prompts the user to confirm saving tags when using the GUI.\ndefault: %(default)s",
    )


def internal(parser: settngs.Manager) -> None:
    # automatic settings
    parser.add_setting("install_id", default=uuid.uuid4().hex, cmdline=False)
    parser.add_setting("embedded_hash_type", default="shake_256", cmdline=False)
    parser.add_setting("write_tags", default=["cr"], cmdline=False)
    parser.add_setting("read_tags", default=["cr"], cmdline=False)
    parser.add_setting("last_opened_folder", default="", cmdline=False)
    parser.add_setting("window_width", default=0, cmdline=False)
    parser.add_setting("window_height", default=0, cmdline=False)
    parser.add_setting("window_x", default=0, cmdline=False)
    parser.add_setting("window_y", default=0, cmdline=False)
    parser.add_setting("form_width", default=-1, cmdline=False)
    parser.add_setting("list_width", default=-1, cmdline=False)
    parser.add_setting("sort_column", default=-1, cmdline=False)
    parser.add_setting("sort_direction", default=0, cmdline=False)
    parser.add_setting("remove_archive_after_successful_match", default=False, cmdline=False)


def identifier(parser: settngs.Manager) -> None:
    parser.add_setting(
        "--series-match-identify-thresh",
        default=91,
        type=int,
        help="The minimum Series name similarity needed to auto-identify an issue default: %(default)s",
    )
    parser.add_setting(
        "--series-match-search-thresh",
        default=90,
        type=int,
        help="The minimum Series name similarity to return from a search result default: %(default)s",
    )
    parser.add_setting(
        "-b",
        "--border-crop-percent",
        default=10,
        type=int,
        help="ComicTagger will automatically add an additional cover that has any black borders cropped.\nIf the difference in height is less than %(default)s%% the cover will not be cropped.\ndefault: %(default)s\n\n",
    )

    parser.add_setting(
        "--sort-series-by-year",
        default=True,
        action=argparse.BooleanOptionalAction,
        help="Sorts series by year default: %(default)s",
    )
    parser.add_setting(
        "--exact-series-matches-first",
        default=True,
        action=argparse.BooleanOptionalAction,
        help="Puts series that are an exact match at the top of the list default: %(default)s",
    )


def dialog(parser: settngs.Manager) -> None:
    parser.add_setting("show_disclaimer", default=True, cmdline=False)
    parser.add_setting("dont_notify_about_this_version", default="", cmdline=False)
    parser.add_setting("notify_plugin_changes", default=True, cmdline=False)


def filename(parser: settngs.Manager) -> None:
    parser.add_setting(
        "--filename-parser",
        default=utils.Parser.ORIGINAL,
        metavar=f"{{{','.join(utils.Parser)}}}",
        type=utils.Parser,
        choices=utils.Parser,
        help="Select the filename parser.\ndefault: %(default)s",
    )
    parser.add_setting(
        "--remove-c2c",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Removes c2c from filenames.\nRequires --complicated-parser\ndefault: %(default)s\n\n",
    )
    parser.add_setting(
        "--remove-fcbd",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Removes FCBD/free comic book day from filenames.\nRequires --complicated-parser\ndefault: %(default)s\n\n",
    )
    parser.add_setting(
        "--remove-publisher",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Attempts to remove publisher names from filenames, currently limited to Marvel and DC.\nRequires --complicated-parser\ndefault: %(default)s\n\n",
    )
    parser.add_setting(
        "--split-words",
        action="store_true",
        help="""Splits words before parsing the filename.\ne.g. 'judgedredd' to 'judge dredd'\ndefault: %(default)s\n\n""",
        file=False,
    )
    parser.add_setting(
        "--protofolius-issue-number-scheme",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Use an issue number scheme devised by protofolius for encoding format information as a letter in front of an issue number.\nImplies --allow-issue-start-with-letter. Requires --complicated-parser\ndefault: %(default)s\n\n",
    )
    parser.add_setting(
        "--allow-issue-start-with-letter",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Allows an issue number to start with a single letter (e.g. '#X01').\nRequires --complicated-parser\ndefault: %(default)s\n\n",
    )


def talker(parser: settngs.Manager) -> None:
    parser.add_setting(
        "--source",
        default="comicvine",
        help="Use a specified source by source ID (use --list-plugins to list all sources).\ndefault: %(default)s",
    )


def md_options(parser: settngs.Manager) -> None:
    # CBL Transform settings
    parser.add_setting("--assume-lone-credit-is-primary", default=False, action=argparse.BooleanOptionalAction)
    parser.add_setting("--copy-characters-to-tags", default=False, action=argparse.BooleanOptionalAction)
    parser.add_setting("--copy-teams-to-tags", default=False, action=argparse.BooleanOptionalAction)
    parser.add_setting("--copy-locations-to-tags", default=False, action=argparse.BooleanOptionalAction)
    parser.add_setting("--copy-storyarcs-to-tags", default=False, action=argparse.BooleanOptionalAction)
    parser.add_setting("--copy-notes-to-comments", default=False, action=argparse.BooleanOptionalAction)
    parser.add_setting("--copy-weblink-to-comments", default=False, action=argparse.BooleanOptionalAction)
    parser.add_setting("--apply-transform-on-import", default=False, action=argparse.BooleanOptionalAction)
    parser.add_setting("--apply-transform-on-bulk-operation", default=False, action=argparse.BooleanOptionalAction)

    parser.add_setting(
        "--remove-html-tables",
        default=False,
        action=argparse.BooleanOptionalAction,
        display_name="Remove HTML tables",
        help="Removes html tables instead of converting them to text",
    )
    parser.add_setting("use_short_tag_names", default=False, action=argparse.BooleanOptionalAction, cmdline=False)
    parser.add_setting(
        "--cr",
        default=True,
        action=argparse.BooleanOptionalAction,
        help="Enable ComicRack tags. Turn off to only use CIX tags.\ndefault: %(default)s",
    )
    parser.add_setting(
        "--tag-merge",
        metavar=f"{{{','.join(merge.Mode)}}}",
        default=merge.Mode.OVERLAY,
        choices=merge.Mode,
        type=merge.Mode,
        help="How to merge fields when reading enabled tags (CR, CBL, etc.) See -t, --tags-read default: %(default)s",
    )
    parser.add_setting(
        "--metadata-merge",
        metavar=f"{{{','.join(merge.Mode)}}}",
        default=merge.Mode.OVERLAY,
        choices=merge.Mode,
        type=merge.Mode,
        help="How to merge fields when downloading new metadata (CV, Metron, GCD, etc.) default: %(default)s",
    )
    parser.add_setting(
        "--tag-merge-lists",
        action=argparse.BooleanOptionalAction,
        default=True,
        help="Merge lists when reading enabled tags (genres, characters, etc.) default: %(default)s",
    )
    parser.add_setting(
        "--metadata-merge-lists",
        action=argparse.BooleanOptionalAction,
        default=True,
        help="Merge lists when downloading new metadata (genres, characters, etc.) default: %(default)s",
    )


def rename(parser: settngs.Manager) -> None:
    parser.add_setting(
        "--template",
        default="{series} #{issue} ({year})",
        help="The template to use when renaming.\ndefault: %(default)s",
    )
    parser.add_setting(
        "--issue-number-padding",
        default=3,
        type=int,
        help="The minimum number of digits to use for the issue number when renaming.\ndefault: %(default)s",
    )
    parser.add_setting(
        "--use-smart-string-cleanup",
        default=True,
        action=argparse.BooleanOptionalAction,
        help="Attempts to intelligently cleanup whitespace when renaming.\ndefault: %(default)s",
    )
    parser.add_setting(
        "--auto-extension",
        default=True,
        action=argparse.BooleanOptionalAction,
        help="Automatically sets the extension based on the archive type e.g. cbr for rar, cbz for zip.\ndefault: %(default)s",
    )
    parser.add_setting("--dir", default="", help="The directory to move renamed files to.")
    parser.add_setting(
        "--move",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Enables moving renamed files to a separate directory.\ndefault: %(default)s",
    )
    parser.add_setting(
        "--only-move",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Ignores the filename when moving renamed files to a separate directory.\ndefault: %(default)s",
    )
    parser.add_setting(
        "--strict-filenames",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Ensures that filenames are valid for all OSs.\ndefault: %(default)s",
    )
    parser.add_setting("replacements", default=DEFAULT_REPLACEMENTS, cmdline=False)


def autotag(parser: settngs.Manager) -> None:
    parser.add_setting(
        "-o",
        "--online",
        action="store_true",
        help="""Search online and attempt to identify file\nusing existing tags and images in archive.\nMay be used in conjunction with -f and -m.\n\n""",
        file=False,
    )
    parser.add_setting(
        "--save-on-low-confidence",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Automatically save tags on low-confidence matches.\ndefault: %(default)s",
        cmdline=False,
    )
    parser.add_setting(
        "--use-year-when-identifying",
        default=True,
        action=argparse.BooleanOptionalAction,
        help="Use the year metadata attribute when auto-tagging a comic.\ndefault: %(default)s",
    )
    parser.add_setting(
        "-1",
        "--assume-issue-one",
        action=argparse.BooleanOptionalAction,
        help="Assume issue number is 1 if not found (relevant for -s).\ndefault: %(default)s\n\n",
        default=False,
    )
    parser.add_setting(
        "--ignore-leading-numbers-in-filename",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="When searching ignore leading numbers in the filename.\ndefault: %(default)s",
    )
    parser.add_setting(
        "-f",
        "--parse-filename",
        action="store_true",
        help="""Parse the filename to get some info,\nspecifically series name, issue number,\nvolume, and publication year.\n\n""",
        file=False,
    )
    parser.add_setting(
        "--prefer-filename",
        action="store_true",
        help="""Prefer metadata parsed from the filename. CLI only.\n\n""",
        file=False,
    )
    parser.add_setting(
        "--id",
        dest="issue_id",
        type=str,
        help="""Use the issue ID when searching online.\nOverrides all other metadata.\n\n""",
        file=False,
    )
    parser.add_setting(
        "-m",
        "--metadata",
        default=GenericMetadata(),
        type=parse_metadata_from_string,
        help="""Explicitly define some metadata to be used in YAML syntax. Use @file.yaml to read from a file. e.g.:\n"series: Plastic Man, publisher: Quality Comics, year: "\n"series: 'Kickers, Inc.', issue: '1', year: 1986"\nIf you want to erase a tag leave the value blank.\nSome names that can be used: series, issue, issue_count, year,\npublisher, title\n\n""",
        file=False,
    )
    parser.add_setting(
        "--clear-tags",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Clears all existing tags during import, default is to merge tags.\nMay be used in conjunction with -o, -f and -m.\ndefault: %(default)s\n\n",
    )
    parser.add_setting(
        "--publisher-filter",
        default=["Panini Comics", "Abril", "Planeta DeAgostini", "Editorial Televisa", "Dino Comics"],
        action="extend",
        nargs="+",
        help="When enabled, filters the listed publishers from all search results.\nEnding a publisher with a '-' removes a publisher from this list\ndefault: %(default)s\n\n",
    )
    parser.add_setting(
        "--use-publisher-filter",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Enables the publisher filter.\ndefault: %(default)s",
    )
    parser.add_setting(
        "-a",
        "--auto-imprint",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Enables the auto imprint functionality.\ne.g. if the publisher is set to 'vertigo' it\nwill be updated to 'DC Comics' and the imprint\nproperty will be set to 'Vertigo'.\ndefault: %(default)s\n\n",
    )


def parse_filter(config: settngs.Config[ct_ns]) -> settngs.Config[ct_ns]:
    new_filter = []
    remove = []
    for x in config[0].Auto_Tag__publisher_filter:
        x = x.strip()
        if x:  # ignore empty arguments
            if x[-1] == "-":  # this publisher needs to be removed. We remove after all publishers have been enumerated
                remove.append(x.strip("-"))
            else:
                if x not in new_filter:
                    new_filter.append(x)
    for x in remove:  # remove publishers
        if x in new_filter:
            new_filter.remove(x)
    config[0].Auto_Tag__publisher_filter = new_filter
    return config
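The '-' suffix convention means a single list can both add and subtract entries. A small illustration of the same logic in isolation (values illustrative; deduplication omitted):

raw = ["Panini Comics", "Dino Comics", "Panini Comics-", "  "]
keep, drop = [], []
for entry in (e.strip() for e in raw):
    if not entry:
        continue
    (drop if entry.endswith("-") else keep).append(entry.strip("-"))
result = [p for p in keep if p not in drop]
assert result == ["Dino Comics"]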

def migrate_settings(config: settngs.Config[ct_ns]) -> settngs.Config[ct_ns]:
    original_types = ("cbi", "cr", "comet")
    write_Tags = config[0].internal__write_tags
    if not isinstance(write_Tags, list):
        if isinstance(write_Tags, int) and write_Tags in (0, 1, 2):
            config[0].internal__write_tags = [original_types[write_Tags]]
        elif isinstance(write_Tags, str):
            config[0].internal__write_tags = [write_Tags]
        else:
            config[0].internal__write_tags = ["cr"]

    read_tags = config[0].internal__read_tags
    if not isinstance(read_tags, list):
        if isinstance(read_tags, int) and read_tags in (0, 1, 2):
            config[0].internal__read_tags = [original_types[read_tags]]
        elif isinstance(read_tags, str):
            config[0].internal__read_tags = [read_tags]
        else:
            config[0].internal__read_tags = ["cr"]

    return config
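Older configs stored these tag lists as a single int index or a bare string; the migration maps legacy values onto the current list-of-strings shape. A sketch of the effective mapping:

legacy = 2  # as an old config might store it: 0=cbi, 1=cr, 2=comet
migrated = [("cbi", "cr", "comet")[legacy]] if isinstance(legacy, int) else [legacy]
assert migrated == ["comet"]  # bare strings are wrapped; anything else falls back to ["cr"]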

def validate_file_settings(config: settngs.Config[ct_ns]) -> settngs.Config[ct_ns]:
    config = parse_filter(config)

    config = migrate_settings(config)

    if config[0].Filename_Parsing__protofolius_issue_number_scheme:
        config[0].Filename_Parsing__allow_issue_start_with_letter = True

    config[0].File_Rename__replacements = Replacements(
        [Replacement(x[0], x[1], x[2]) for x in config[0].File_Rename__replacements[0]],
        [Replacement(x[0], x[1], x[2]) for x in config[0].File_Rename__replacements[1]],
    )
    return config


def register_file_settings(parser: settngs.Manager) -> None:
    parser.add_group("internal", internal, False)
    parser.add_group("Issue Identifier", identifier, False)
    parser.add_group("Filename Parsing", filename, False)
    parser.add_group("Sources", talker, False)
    parser.add_group("Metadata Options", md_options, False)
    parser.add_group("File Rename", rename, False)
    parser.add_group("Auto-Tag", autotag, False)
    parser.add_group("General", general, False)
    parser.add_group("Dialog Flags", dialog, False)
107 comictaggerlib/ctsettings/plugin.py Normal file
@@ -0,0 +1,107 @@
from __future__ import annotations

import logging
import os
from typing import Any, cast

import settngs

import comicapi.comicarchive
import comicapi.utils
import comictaggerlib.ctsettings
from comicapi.comicarchive import Archiver
from comictaggerlib.ctsettings.settngs_namespace import SettngsNS as ct_ns
from comictalker.comictalker import ComicTalker

logger = logging.getLogger("comictagger")


def group_for_plugin(plugin: Archiver | ComicTalker | type[Archiver]) -> str:
    if isinstance(plugin, ComicTalker):
        return f"Source {plugin.id}"
    if isinstance(plugin, Archiver) or plugin == Archiver:
        return "Archive"
    raise NotImplementedError(f"Invalid plugin received: {plugin=}")
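Group names derived this way key the per-plugin sections of the config. A sketch (the talker id is illustrative):

assert group_for_plugin(Archiver) == "Archive"
# a ComicTalker instance whose id is "comicvine" would map to "Source comicvine"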

def archiver(manager: settngs.Manager) -> None:
    for archiver in comicapi.comicarchive.archivers:
        if archiver.exe:
            # add_setting will overwrite anything with the same name.
            # So we only end up with one option even if multiple archivers use the same exe.
            manager.add_setting(
                f"--{settngs.sanitize_name(archiver.exe)}",
                default=archiver.exe,
                help="Path to the %(default)s executable",
            )


def register_talker_settings(manager: settngs.Manager, talkers: dict[str, ComicTalker]) -> None:
    for talker in talkers.values():

        def api_options(manager: settngs.Manager) -> None:
            # The default needs to be unset or None.
            # This allows this setting to be unset with the empty string, allowing the default to change
            manager.add_setting(
                f"--{talker.id}-key",
                display_name="API Key",
                help=f"API Key for {talker.name} (default: {talker.default_api_key})",
            )
            manager.add_setting(
                f"--{talker.id}-url",
                display_name="URL",
                help=f"URL for {talker.name} (default: {talker.default_api_url})",
            )

        try:
            manager.add_persistent_group(group_for_plugin(talker), api_options, False)
            if hasattr(talker, "register_settings"):
                manager.add_persistent_group(group_for_plugin(talker), talker.register_settings, False)
        except Exception:
            logger.exception("Failed to register settings for %s", talker.id)


def validate_archive_settings(config: settngs.Config[ct_ns]) -> settngs.Config[ct_ns]:
    cfg = settngs.normalize_config(config, file=True, cmdline=True, default=False)
    for archiver in comicapi.comicarchive.archivers:
        group = group_for_plugin(archiver())
        exe_name = settngs.sanitize_name(archiver.exe)
        if not exe_name:
            continue

        if exe_name in cfg[0][group] and cfg[0][group][exe_name]:
            path = cfg[0][group][exe_name]
            name = os.path.basename(path)
            # If the path is not the basename then this is a relative or absolute path.
            # Ensure it is absolute
            if path != name:
                path = os.path.abspath(path)

            archiver.exe = path

    return config


def validate_talker_settings(config: settngs.Config[ct_ns], talkers: dict[str, ComicTalker]) -> settngs.Config[ct_ns]:
    # Apply talker settings from config file
    cfg = cast(settngs.Config[dict[str, Any]], settngs.normalize_config(config, True, True))
    for talker in list(talkers.values()):
        try:
            cfg[0][group_for_plugin(talker)] = talker.parse_settings(cfg[0][group_for_plugin(talker)])
        except Exception as e:
            # Remove talker as we failed to apply the settings
            del comictaggerlib.ctsettings.talkers[talker.id]
            logger.exception("Failed to initialize talker settings: %s", e)

    return cast(settngs.Config[ct_ns], settngs.get_namespace(cfg, file=True, cmdline=True))


def validate_plugin_settings(config: settngs.Config[ct_ns], talkers: dict[str, ComicTalker]) -> settngs.Config[ct_ns]:
    config = validate_archive_settings(config)
    config = validate_talker_settings(config, talkers)
    return config


def register_plugin_settings(manager: settngs.Manager, talkers: dict[str, ComicTalker]) -> None:
    manager.add_persistent_group("Archive", archiver, False)
    register_talker_settings(manager, talkers)
186 comictaggerlib/ctsettings/plugin_finder.py Normal file
@@ -0,0 +1,186 @@
"""Functions related to finding and loading plugins."""

# Lifted from flake8 https://github.com/PyCQA/flake8/blob/main/src/flake8/plugins/finder.py#L127

from __future__ import annotations

import importlib.util
import logging
import pathlib
import platform
import re
import sys
from collections.abc import Generator, Iterable, Sequence
from typing import Any, NamedTuple, TypeVar

if sys.version_info < (3, 10):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata

logger = logging.getLogger(__name__)

NORMALIZE_PACKAGE_NAME_RE = re.compile(r"[-_.]+")
PLUGIN_GROUPS = frozenset(("comictagger.talker", "comicapi.archiver", "comicapi.tags"))
icu_available = importlib.util.find_spec("icu") is not None


def _custom_key(tup: Any) -> Any:
    import natsort

    lst = []
    for x in natsort.os_sort_keygen()(tup):
        ret = x
        if isinstance(x, Sequence) and len(x) > 1 and isinstance(x[1], int) and isinstance(x[0], str) and x[0] == "":
            ret = ("a", *x[1:])

        lst.append(ret)
    return tuple(lst)


T = TypeVar("T")


def os_sorted(lst: Iterable[T]) -> Iterable[T]:
    import natsort

    key = _custom_key
    if icu_available or platform.system() == "Windows":
        key = natsort.os_sort_keygen()
    return sorted(lst, key=key)
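os_sorted only falls back to the _custom_key shim when PyICU is unavailable on a non-Windows host; usage is ordinary sorting (filenames illustrative):

names = ["file10.cbz", "file2.cbz", "File1.cbz"]
print(os_sorted(names))  # natural, OS-style order, e.g. ['File1.cbz', 'file2.cbz', 'file10.cbz']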

class FailedToLoadPlugin(Exception):
    """Exception raised when a plugin fails to load."""

    FORMAT = 'ComicTagger failed to load local plugin "{name}" due to {exc}.'

    def __init__(self, plugin_name: str, exception: Exception) -> None:
        """Initialize our FailedToLoadPlugin exception."""
        self.plugin_name = plugin_name
        self.original_exception = exception
        super().__init__(plugin_name, exception)

    def __str__(self) -> str:
        """Format our exception message."""
        return self.FORMAT.format(
            name=self.plugin_name,
            exc=self.original_exception,
        )


def normalize_pypi_name(s: str) -> str:
    """Normalize a distribution name according to PEP 503."""
    return NORMALIZE_PACKAGE_NAME_RE.sub("-", s).lower()
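PEP 503 normalization collapses runs of '-', '_', and '.' into a single hyphen and lowercases the result, for example:

assert normalize_pypi_name("My_Plugin..Name") == "my-plugin-name"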

class Plugin(NamedTuple):
    """A plugin before loading."""

    package: str
    version: str
    entry_point: importlib_metadata.EntryPoint
    path: pathlib.Path

    def load(self) -> LoadedPlugin:
        return LoadedPlugin(self, self.entry_point.load())


class LoadedPlugin(NamedTuple):
    """Represents a plugin after being imported."""

    plugin: Plugin
    obj: Any

    @property
    def entry_name(self) -> str:
        """Return the name given in the packaging metadata."""
        return self.plugin.entry_point.name

    @property
    def display_name(self) -> str:
        """Return the name for use in user-facing / error messages."""
        return f"{self.plugin.package}[{self.entry_name}]"


class Plugins(NamedTuple):
    """Classified plugins."""

    archivers: list[LoadedPlugin]
    tags: list[LoadedPlugin]
    talkers: list[LoadedPlugin]

    def all_plugins(self) -> Generator[LoadedPlugin]:
        """Return an iterator over all :class:`LoadedPlugin`s."""
        yield from self.archivers
        yield from self.tags
        yield from self.talkers

    def versions_str(self) -> str:
        """Return a user-displayed list of plugin versions."""
        return ", ".join(sorted({f"{plugin.plugin.package}: {plugin.plugin.version}" for plugin in self.all_plugins()}))


def _find_local_plugins(plugin_path: pathlib.Path) -> Generator[Plugin]:
    logger.debug("Checking for distributions in %s", plugin_path)
    for dist in importlib_metadata.distributions(path=[str(plugin_path)]):
        logger.debug("found distribution %s", dist.name)
        eps = dist.entry_points
        for group in PLUGIN_GROUPS:
            for ep in eps.select(group=group):
                logger.debug("found EntryPoint group %s %s=%s", group, ep.name, ep.value)
                yield Plugin(plugin_path.name, dist.version, ep, plugin_path)


def find_plugins(plugin_folder: pathlib.Path) -> Plugins:
    """Discovers all plugins (but does not load them)."""
    ret: list[LoadedPlugin] = []
    if not plugin_folder.is_dir():
        return _classify_plugins(ret)

    zips = [x for x in plugin_folder.iterdir() if x.is_file() and x.suffix in (".zip", ".whl")]

    for plugin_path in os_sorted(zips):
        logger.debug("looking for plugins in %s", plugin_path)
        sys_path = sys.path.copy()
        try:
            sys.path.append(str(plugin_path))
            for plugin in _find_local_plugins(plugin_path):
                logger.debug("Attempting to load %s from %s", plugin.entry_point.name, plugin.path)
                ret.append(plugin.load())
        except Exception as err:
            logger.exception(FailedToLoadPlugin(plugin_path.name, err))
        finally:
            sys.path = sys_path
            for mod in list(sys.modules.values()):
                if (
                    mod is not None
                    and hasattr(mod, "__spec__")
                    and mod.__spec__
                    and str(plugin_path) in (mod.__spec__.origin or "")
                ):
                    sys.modules.pop(mod.__name__)

    return _classify_plugins(ret)
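A sketch of discovery at startup (the folder path is illustrative); each zip or wheel is put on sys.path only long enough to read its metadata and load its entry points:

plugins = find_plugins(pathlib.Path.home() / ".ComicTagger" / "plugins")
for p in plugins.all_plugins():
    print(p.display_name, p.plugin.version)
print(plugins.versions_str())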

def _classify_plugins(plugins: list[LoadedPlugin]) -> Plugins:
    archivers = []
    tags = []
    talkers = []

    for p in plugins:
        if p.plugin.entry_point.group == "comictagger.talker":
            talkers.append(p)
        elif p.plugin.entry_point.group == "comicapi.tags":
            tags.append(p)
        elif p.plugin.entry_point.group == "comicapi.archiver":
            archivers.append(p)
        else:
            logger.warning(NotImplementedError(f"what plugin type? {p}"))

    return Plugins(
        tags=tags,
        archivers=archivers,
        talkers=talkers,
    )
304 comictaggerlib/ctsettings/settngs_namespace.py Normal file
@@ -0,0 +1,304 @@
from __future__ import annotations

import typing

import settngs
import urllib3.util.url

import comicapi.genericmetadata
import comicapi.merge
import comicapi.utils
import comictaggerlib.ctsettings.types
import comictaggerlib.defaults
import comictaggerlib.resulttypes


class SettngsNS(settngs.TypedNS):
    Commands__version: bool
    Commands__command: comictaggerlib.resulttypes.Action
    Commands__copy: list[str]

    Runtime_Options__config: comictaggerlib.ctsettings.types.ComicTaggerPaths
    Runtime_Options__verbose: int
    Runtime_Options__enable_quick_tag: bool
    Runtime_Options__enable_embedding_hashes: bool
    Runtime_Options__preferred_hash: str
    Runtime_Options__quiet: bool
    Runtime_Options__json: bool
    Runtime_Options__raw: bool
    Runtime_Options__interactive: bool
    Runtime_Options__abort_on_low_confidence: bool
    Runtime_Options__dryrun: bool
    Runtime_Options__summary: bool
    Runtime_Options__recursive: bool
    Runtime_Options__glob: bool
    Runtime_Options__darkmode: bool
    Runtime_Options__no_gui: bool
    Runtime_Options__abort_on_conflict: bool
    Runtime_Options__delete_original: bool
    Runtime_Options__tags_read: list[str]
    Runtime_Options__tags_write: list[str]
    Runtime_Options__skip_existing_tags: bool
    Runtime_Options__files: list[str]

    Quick_Tag__url: urllib3.util.url.Url
    Quick_Tag__max: int
    Quick_Tag__aggressive_filtering: bool
    Quick_Tag__hash: list[comictaggerlib.quick_tag.HashType]
    Quick_Tag__exact_only: bool

    internal__install_id: str
    internal__embedded_hash_type: str
    internal__write_tags: list[str]
    internal__read_tags: list[str]
    internal__last_opened_folder: str
    internal__window_width: int
    internal__window_height: int
    internal__window_x: int
    internal__window_y: int
    internal__form_width: int
    internal__list_width: int
    internal__sort_column: int
    internal__sort_direction: int
    internal__remove_archive_after_successful_match: bool

    Issue_Identifier__series_match_identify_thresh: int
    Issue_Identifier__series_match_search_thresh: int
    Issue_Identifier__border_crop_percent: int
    Issue_Identifier__sort_series_by_year: bool
    Issue_Identifier__exact_series_matches_first: bool

    Filename_Parsing__filename_parser: comicapi.utils.Parser
    Filename_Parsing__remove_c2c: bool
    Filename_Parsing__remove_fcbd: bool
    Filename_Parsing__remove_publisher: bool
    Filename_Parsing__split_words: bool
    Filename_Parsing__protofolius_issue_number_scheme: bool
    Filename_Parsing__allow_issue_start_with_letter: bool

    Sources__source: str

    Metadata_Options__assume_lone_credit_is_primary: bool
    Metadata_Options__copy_characters_to_tags: bool
    Metadata_Options__copy_teams_to_tags: bool
    Metadata_Options__copy_locations_to_tags: bool
    Metadata_Options__copy_storyarcs_to_tags: bool
    Metadata_Options__copy_notes_to_comments: bool
    Metadata_Options__copy_weblink_to_comments: bool
    Metadata_Options__apply_transform_on_import: bool
    Metadata_Options__apply_transform_on_bulk_operation: bool
    Metadata_Options__remove_html_tables: bool
    Metadata_Options__use_short_tag_names: bool
    Metadata_Options__cr: bool
    Metadata_Options__tag_merge: comicapi.merge.Mode
    Metadata_Options__metadata_merge: comicapi.merge.Mode
    Metadata_Options__tag_merge_lists: bool
    Metadata_Options__metadata_merge_lists: bool

    File_Rename__template: str
    File_Rename__issue_number_padding: int
    File_Rename__use_smart_string_cleanup: bool
    File_Rename__auto_extension: bool
    File_Rename__dir: str
    File_Rename__move: bool
    File_Rename__only_move: bool
    File_Rename__strict_filenames: bool
    File_Rename__replacements: comictaggerlib.defaults.Replacements

    Auto_Tag__online: bool
    Auto_Tag__save_on_low_confidence: bool
    Auto_Tag__use_year_when_identifying: bool
    Auto_Tag__assume_issue_one: bool
    Auto_Tag__ignore_leading_numbers_in_filename: bool
    Auto_Tag__parse_filename: bool
    Auto_Tag__prefer_filename: bool
    Auto_Tag__issue_id: str | None
    Auto_Tag__metadata: comicapi.genericmetadata.GenericMetadata
    Auto_Tag__clear_tags: bool
    Auto_Tag__publisher_filter: list[str]
    Auto_Tag__use_publisher_filter: bool
    Auto_Tag__auto_imprint: bool

    General__check_for_new_version: bool
    General__blur: bool
    General__prompt_on_save: bool

    Dialog_Flags__show_disclaimer: bool
    Dialog_Flags__dont_notify_about_this_version: str
    Dialog_Flags__notify_plugin_changes: bool

    Archive__rar: str

    Source_comicvine__comicvine_key: str | None
    Source_comicvine__comicvine_url: str | None
    Source_comicvine__cv_use_series_start_as_volume: bool
    Source_comicvine__comicvine_custom_parameters: str | None
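The attribute names follow a flattening convention: the group name joins the option's dest with a double underscore, with spaces mapped to underscores. An illustrative-only helper that mirrors the shape (not the settngs implementation):

def attr_name(group: str, dest: str) -> str:
    # hypothetical: shows how "Runtime Options"/"no_gui" becomes the attribute below
    return f"{group.replace(' ', '_')}__{dest}"

assert attr_name("Runtime Options", "no_gui") == "Runtime_Options__no_gui"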

class Commands(typing.TypedDict):
    version: bool
    command: comictaggerlib.resulttypes.Action
    copy: list[str]


class Runtime_Options(typing.TypedDict):
    config: comictaggerlib.ctsettings.types.ComicTaggerPaths
    verbose: int
    enable_quick_tag: bool
    enable_embedding_hashes: bool
    preferred_hash: str
    quiet: bool
    json: bool
    raw: bool
    interactive: bool
    abort_on_low_confidence: bool
    dryrun: bool
    summary: bool
    recursive: bool
    glob: bool
    darkmode: bool
    no_gui: bool
    abort_on_conflict: bool
    delete_original: bool
    tags_read: list[str]
    tags_write: list[str]
    skip_existing_tags: bool
    files: list[str]


class Quick_Tag(typing.TypedDict):
    url: urllib3.util.url.Url
    max: int
    aggressive_filtering: bool
    hash: list[comictaggerlib.quick_tag.HashType]
    exact_only: bool


class internal(typing.TypedDict):
    install_id: str
    embedded_hash_type: str
    write_tags: list[str]
    read_tags: list[str]
    last_opened_folder: str
    window_width: int
    window_height: int
    window_x: int
    window_y: int
    form_width: int
    list_width: int
    sort_column: int
    sort_direction: int
    remove_archive_after_successful_match: bool


class Issue_Identifier(typing.TypedDict):
    series_match_identify_thresh: int
    series_match_search_thresh: int
    border_crop_percent: int
    sort_series_by_year: bool
    exact_series_matches_first: bool


class Filename_Parsing(typing.TypedDict):
    filename_parser: comicapi.utils.Parser
    remove_c2c: bool
    remove_fcbd: bool
    remove_publisher: bool
    split_words: bool
    protofolius_issue_number_scheme: bool
    allow_issue_start_with_letter: bool


class Sources(typing.TypedDict):
    source: str


class Metadata_Options(typing.TypedDict):
    assume_lone_credit_is_primary: bool
    copy_characters_to_tags: bool
    copy_teams_to_tags: bool
    copy_locations_to_tags: bool
    copy_storyarcs_to_tags: bool
    copy_notes_to_comments: bool
    copy_weblink_to_comments: bool
    apply_transform_on_import: bool
    apply_transform_on_bulk_operation: bool
    remove_html_tables: bool
    use_short_tag_names: bool
    cr: bool
    tag_merge: comicapi.merge.Mode
    metadata_merge: comicapi.merge.Mode
    tag_merge_lists: bool
    metadata_merge_lists: bool


class File_Rename(typing.TypedDict):
    template: str
    issue_number_padding: int
    use_smart_string_cleanup: bool
    auto_extension: bool
    dir: str
    move: bool
    only_move: bool
    strict_filenames: bool
    replacements: comictaggerlib.defaults.Replacements


class Auto_Tag(typing.TypedDict):
    online: bool
    save_on_low_confidence: bool
    use_year_when_identifying: bool
    assume_issue_one: bool
    ignore_leading_numbers_in_filename: bool
    parse_filename: bool
    prefer_filename: bool
    issue_id: str | None
    metadata: comicapi.genericmetadata.GenericMetadata
    clear_tags: bool
    publisher_filter: list[str]
    use_publisher_filter: bool
    auto_imprint: bool


class General(typing.TypedDict):
    check_for_new_version: bool
    blur: bool
    prompt_on_save: bool


class Dialog_Flags(typing.TypedDict):
    show_disclaimer: bool
    dont_notify_about_this_version: str
    notify_plugin_changes: bool


class Archive(typing.TypedDict):
    rar: str


class Source_comicvine(typing.TypedDict):
    comicvine_key: str | None
    comicvine_url: str | None
    cv_use_series_start_as_volume: bool
    comicvine_custom_parameters: str | None


SettngsDict = typing.TypedDict(
    "SettngsDict",
    {
        "Commands": Commands,
        "Runtime Options": Runtime_Options,
        "Quick Tag": Quick_Tag,
        "internal": internal,
"Issue Identifier": Issue_Identifier,
|
||||
"Filename Parsing": Filename_Parsing,
|
||||
"Sources": Sources,
|
||||
"Metadata Options": Metadata_Options,
|
||||
"File Rename": File_Rename,
|
||||
"Auto-Tag": Auto_Tag,
|
||||
"General": General,
|
||||
"Dialog Flags": Dialog_Flags,
|
||||
"Archive": Archive,
|
||||
"Source comicvine": Source_comicvine,
|
||||
},
|
||||
)
|
||||
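A minimal access sketch for the dict form above (the helper function is hypothetical, not part of the diff): group names containing spaces or hyphens, such as "Auto-Tag", are only reachable through the functional TypedDict syntax, and a type checker can then verify both the group key and the field type.

def wants_online_tagging(settings: SettngsDict) -> bool:
    # "Auto-Tag" is a group key of SettngsDict; "online" is declared bool in Auto_Tag.
    return settings["Auto-Tag"]["online"]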
comictaggerlib/ctsettings/types.py (new file, 248 lines)
@@ -0,0 +1,248 @@
from __future__ import annotations

import argparse
import logging
import pathlib
import sys
import types
import typing
from collections.abc import Collection, Mapping
from typing import Any

import yaml
from appdirs import AppDirs

from comicapi import utils
from comicapi.comicarchive import tags
from comicapi.genericmetadata import REMOVE, GenericMetadata

logger = logging.getLogger(__name__)

if sys.version_info < (3, 10):

    @typing.no_type_check
    def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
        if getattr(obj, "__no_type_check__", None):
            return {}
        # Classes require a special treatment.
        if isinstance(obj, type):
            hints = {}
            for base in reversed(obj.__mro__):
                if globalns is None:
                    base_globals = getattr(sys.modules.get(base.__module__, None), "__dict__", {})
                else:
                    base_globals = globalns
                ann = base.__dict__.get("__annotations__", {})
                if isinstance(ann, types.GetSetDescriptorType):
                    ann = {}
                base_locals = dict(vars(base)) if localns is None else localns
                if localns is None and globalns is None:
                    # This is surprising, but required. Before Python 3.10,
                    # get_type_hints only evaluated the globalns of
                    # a class. To maintain backwards compatibility, we reverse
                    # the globalns and localns order so that eval() looks into
                    # *base_globals* first rather than *base_locals*.
                    # This only affects ForwardRefs.
                    base_globals, base_locals = base_locals, base_globals
                for name, value in ann.items():
                    if value is None:
                        value = type(None)
                    if isinstance(value, str):
                        if "|" in value:
                            value = "Union[" + value.replace(" |", ",") + "]"
                        value = typing.ForwardRef(value, is_argument=False, is_class=True)
                    value = typing._eval_type(value, base_globals, base_locals)
                    hints[name] = value
            return hints if include_extras else {k: typing._strip_annotations(t) for k, t in hints.items()}

        if globalns is None:
            if isinstance(obj, types.ModuleType):
                globalns = obj.__dict__
            else:
                nsobj = obj
                # Find globalns for the unwrapped object.
                while hasattr(nsobj, "__wrapped__"):
                    nsobj = nsobj.__wrapped__
                globalns = getattr(nsobj, "__globals__", {})
            if localns is None:
                localns = globalns
        elif localns is None:
            localns = globalns
        hints = getattr(obj, "__annotations__", None)
        if hints is None:
            # Return empty annotations for something that _could_ have them.
            if isinstance(obj, typing._allowed_types):
                return {}
            else:
                raise TypeError("{!r} is not a module, class, method, or function.".format(obj))
        hints = dict(hints)
        for name, value in hints.items():
            if value is None:
                value = type(None)
            if isinstance(value, str):
                if "|" in value:
                    value = "Union[" + value.replace(" |", ",") + "]"
                # class-level forward refs were handled above, this must be either
                # a module-level annotation or a function argument annotation
                value = typing.ForwardRef(
                    value,
                    is_argument=not isinstance(obj, types.ModuleType),
                    is_class=False,
                )
            hints[name] = typing._eval_type(value, globalns, localns)
        return hints if include_extras else {k: typing._strip_annotations(t) for k, t in hints.items()}

else:
    from typing import get_type_hints


class ComicTaggerPaths(AppDirs):
    def __init__(self, config_path: pathlib.Path | str | None = None) -> None:
        super().__init__("ComicTagger", None, None, False, False)
        self.path: pathlib.Path | None = None
        if config_path:
            self.path = pathlib.Path(config_path).absolute()

    @property
    def user_data_dir(self) -> pathlib.Path:
        if self.path:
            return self.path
        return pathlib.Path(super().user_data_dir)

    @property
    def user_config_dir(self) -> pathlib.Path:
        if self.path:
            return self.path
        return pathlib.Path(super().user_config_dir)

    @property
    def user_cache_dir(self) -> pathlib.Path:
        if self.path:
            return self.path / "cache"
        return pathlib.Path(super().user_cache_dir)

    @property
    def user_state_dir(self) -> pathlib.Path:
        if self.path:
            return self.path
        return pathlib.Path(super().user_state_dir)

    @property
    def user_log_dir(self) -> pathlib.Path:
        if self.path:
            return self.path / "log"
        return pathlib.Path(super().user_log_dir)

    @property
    def user_plugin_dir(self) -> pathlib.Path:
        if self.path:
            return self.path / "plugins"
        return pathlib.Path(super().user_config_dir) / "plugins"

    @property
    def site_data_dir(self) -> pathlib.Path:
        return pathlib.Path(super().site_data_dir)

    @property
    def site_config_dir(self) -> pathlib.Path:
        return pathlib.Path(super().site_config_dir)

    def __str__(self) -> str:
        return f"logs: {self.user_log_dir}, config: {self.user_config_dir}, cache: {self.user_cache_dir}"
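A short sketch of the override behavior above (the path shown is hypothetical): when a config path is supplied, the data, config, and state directories all collapse onto it, while cache, log, and plugins become subdirectories of it.

paths = ComicTaggerPaths("/tmp/ct-profile")  # hypothetical profile directory
str(paths.user_config_dir)  # '/tmp/ct-profile'
str(paths.user_cache_dir)   # '/tmp/ct-profile/cache'
str(paths.user_plugin_dir)  # '/tmp/ct-profile/plugins'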
def tag(types: str) -> list[str]:
    enabled_tags = [tag for tag in tags if tags[tag].enabled]
    result = []
    types = types.casefold()
    for typ in utils.split(types, ","):
        if typ not in enabled_tags:
            choices = ", ".join(enabled_tags)
            raise argparse.ArgumentTypeError(f"invalid choice: {typ} (choose from {choices.upper()})")
        result.append(tags[typ].id)
    return result


def parse_metadata_from_string(mdstr: str) -> GenericMetadata:

    def get_type(key: str, tt: Any = get_type_hints(GenericMetadata)) -> Any:
        t: Any = tt.get(key, None)
        if t is None:
            return None
        if getattr(t, "__origin__", None) is typing.Union and len(t.__args__) == 2 and t.__args__[1] is type(None):
            t = t.__args__[0]
        elif isinstance(t, types.GenericAlias) and issubclass(t.mro()[0], Collection):
            t = t.mro()[0], t.__args__[0]

        if isinstance(t, tuple) and issubclass(t[1], dict):
            return (t[0], dict)
        if isinstance(t, type) and issubclass(t, dict):
            return dict
        return t

    def convert_value(t: type, value: Any) -> Any:
        if isinstance(value, t):
            return value
        try:
            if isinstance(value, (Mapping)):
                value = t(**value)
            elif not isinstance(value, str) and isinstance(value, (Collection)):
                value = t(*value)
            else:
                if t is utils.Url and isinstance(value, str):
                    value = utils.parse_url(value)
                else:
                    value = t(value)
        except (ValueError, TypeError):
            raise argparse.ArgumentTypeError(f"Invalid syntax for tag {key!r}: {value!r}")
        return value

    md = GenericMetadata()

    try:
        if not mdstr:
            return md
        if mdstr[0] == "@":
            p = pathlib.Path(mdstr[1:])
            if not p.is_file():
                raise argparse.ArgumentTypeError("Invalid filepath")
            mdstr = p.read_text()
        if mdstr[0] != "{":
            mdstr = "{" + mdstr + "}"

        md_dict = yaml.safe_load(mdstr)

        empty = True
        # Map the dict to the metadata object
        for key, value in md_dict.items():
            if hasattr(md, key):
                t = get_type(key)
                if value is None:
                    value = REMOVE
                elif isinstance(t, tuple):
                    if value == "":
                        value = t[0]()
                    else:
                        if isinstance(value, str):
                            value = [value]
                        if not isinstance(value, Collection):
                            raise argparse.ArgumentTypeError(f"Invalid syntax for tag '{key}'")
                        values = list(value)
                        for idx, v in enumerate(values):
                            if not isinstance(v, t[1]):
                                values[idx] = convert_value(t[1], v)
                        value = t[0](values)
                else:
                    value = convert_value(t, value)

                empty = False
                setattr(md, key, value)
            else:
                raise argparse.ArgumentTypeError(f"'{key}' is not a valid tag name")
        md.is_empty = empty
    except argparse.ArgumentTypeError as e:
        raise e
    except Exception as e:
        logger.exception("Unable to read metadata from the commandline '%s'", mdstr)
        raise Exception("Unable to read metadata from the commandline") from e
    return md
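A minimal invocation sketch for parse_metadata_from_string (all values hypothetical): a bare key/value string gains surrounding braces and is parsed as a YAML flow mapping, while a leading "@" reads the string from a file instead.

# Keys must be GenericMetadata attribute names, otherwise
# argparse.ArgumentTypeError is raised.
md = parse_metadata_from_string("series: Example Series, issue: '2', year: 2024")
md_from_file = parse_metadata_from_string("@/tmp/md.yaml")  # hypothetical path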
@@ -1,3 +0,0 @@
# This file should contain only these comments, and the line below.
# Used by packaging makefiles and app
version="1.1.13-beta"
comictaggerlib/defaults.py (new file, 29 lines)
@@ -0,0 +1,29 @@
from __future__ import annotations

from typing import NamedTuple


class Replacement(NamedTuple):
    find: str
    replce: str
    strict_only: bool


class Replacements(NamedTuple):
    literal_text: list[Replacement]
    format_value: list[Replacement]


DEFAULT_REPLACEMENTS = Replacements(
    literal_text=[
        Replacement(": ", " - ", True),
        Replacement(":", "-", True),
    ],
    format_value=[
        Replacement(": ", " - ", True),
        Replacement(":", "-", True),
        Replacement("/", "-", False),
        Replacement("//", "--", False),
        Replacement("\\", "-", True),
    ],
)
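A small sketch of how these tables are consumed (a hypothetical standalone helper mirroring MetadataFormatter.handle_replacements further down in this diff): entries flagged strict_only fire only when the target filename rules are strict, as on Windows.

def apply_format_replacements(value: str, strict: bool) -> str:
    # Each Replacement is (find, replce, strict_only); unpacking is positional.
    for find, replace, strict_only in DEFAULT_REPLACEMENTS.format_value:
        if strict or not strict_only:
            value = value.replace(find, replace)
    return value

apply_format_replacements("Batman: Year One", strict=True)  # 'Batman - Year One'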
@@ -1,65 +1,62 @@
"""
A PyQT4 dialog to confirm and set options for export to zip
"""
"""A PyQT4 dialog to confirm and set options for export to zip"""

"""
Copyright 2012-2014 Anthony Beville
#
# Copyright 2012-2014 ComicTagger Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
import logging

    http://www.apache.org/licenses/LICENSE-2.0
from PyQt6 import QtCore, QtWidgets, uic

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from comictaggerlib.ui import ui_path

logger = logging.getLogger(__name__)

from PyQt4 import QtCore, QtGui, uic
from settings import ComicTaggerSettings
from settingswindow import SettingsWindow
from filerenamer import FileRenamer
import os
import utils

class ExportConflictOpts:
    dontCreate = 1
    overwrite = 2
    createUnique = 3

class ExportWindow(QtGui.QDialog):

    def __init__( self, parent, settings, msg ):
        super(ExportWindow, self).__init__(parent)

        uic.loadUi(ComicTaggerSettings.getUIFile('exportwindow.ui' ), self)
        self.label.setText( msg )
    dontCreate = 1
    overwrite = 2
    createUnique = 3

        self.setWindowFlags(self.windowFlags() &
                            ~QtCore.Qt.WindowContextHelpButtonHint )

        self.settings = settings

        self.cbxDeleteOriginal.setCheckState( QtCore.Qt.Unchecked )
        self.cbxAddToList.setCheckState( QtCore.Qt.Checked )
        self.radioDontCreate.setChecked( True )

        self.deleteOriginal = False
        self.addToList = True
        self.fileConflictBehavior = ExportConflictOpts.dontCreate
class ExportWindow(QtWidgets.QDialog):
    def __init__(self, parent: QtWidgets.QWidget, msg: str) -> None:
        super().__init__(parent)

    def accept( self ):
        QtGui.QDialog.accept(self)
        with (ui_path / "exportwindow.ui").open(encoding="utf-8") as uifile:
            uic.loadUi(uifile, self)
        self.label.setText(msg)

        self.deleteOriginal = self.cbxDeleteOriginal.isChecked()
        self.addToList = self.cbxAddToList.isChecked()
        if self.radioDontCreate.isChecked():
            self.fileConflictBehavior = ExportConflictOpts.dontCreate
        elif self.radioCreateNew.isChecked():
            self.fileConflictBehavior = ExportConflictOpts.createUnique
        #else:
        #    self.fileConflictBehavior = ExportConflictOpts.overwrite
        self.setWindowFlags(
            QtCore.Qt.WindowType(self.windowFlags() & ~QtCore.Qt.WindowType.WindowContextHelpButtonHint)
        )

        self.cbxDeleteOriginal.setChecked(False)
        self.cbxAddToList.setChecked(True)
        self.radioDontCreate.setChecked(True)

        self.deleteOriginal = False
        self.addToList = True
        self.fileConflictBehavior = ExportConflictOpts.dontCreate

    def accept(self) -> None:
        QtWidgets.QDialog.accept(self)

        self.deleteOriginal = self.cbxDeleteOriginal.isChecked()
        self.addToList = self.cbxAddToList.isChecked()
        if self.radioDontCreate.isChecked():
            self.fileConflictBehavior = ExportConflictOpts.dontCreate
        elif self.radioCreateNew.isChecked():
            self.fileConflictBehavior = ExportConflictOpts.createUnique
@@ -1,277 +0,0 @@
"""
Functions for parsing comic info from filename

This should probably be re-written, but, well, it mostly works!

"""

"""
Copyright 2012-2014 Anthony Beville

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""


# Some portions of this code were modified from pyComicMetaThis project
# http://code.google.com/p/pycomicmetathis/

import re
import os
from urllib import unquote

class FileNameParser:

    def repl(self, m):
        return ' ' * len(m.group())

    def fixSpaces( self, string, remove_dashes=True ):
        if remove_dashes:
            placeholders = ['[-_]',' +']
        else:
            placeholders = ['[_]',' +']
        for ph in placeholders:
            string = re.sub(ph, self.repl, string )
        return string #.strip()

    def getIssueCount( self,filename, issue_end ):

        count = ""
        filename = filename[issue_end:]

        # replace any name seperators with spaces
        tmpstr = self.fixSpaces(filename)
        found = False

        match = re.search('(?<=\sof\s)\d+(?=\s)', tmpstr, re.IGNORECASE)
        if match:
            count = match.group()
            found = True

        if not found:
            match = re.search('(?<=\(of\s)\d+(?=\))', tmpstr, re.IGNORECASE)
            if match:
                count = match.group()
                found = True

        count = count.lstrip("0")

        return count

    def getIssueNumber( self, filename ):

        # Returns a tuple of issue number string, and start and end indexs in the filename
        # (The indexes will be used to split the string up for further parsing)

        found = False
        issue = ''
        start = 0
        end = 0

        # first, look for multiple "--", this means it's formatted differently from most:
        if "--" in filename:
            # the pattern seems to be that anything to left of the first "--" is the series name followed by issue
            filename = re.sub("--.*", self.repl, filename)

        elif "__" in filename:
            # the pattern seems to be that anything to left of the first "__" is the series name followed by issue
            filename = re.sub("__.*", self.repl, filename)

        filename = filename.replace("+", " ")

        # replace parenthetical phrases with spaces
        filename = re.sub( "\(.*?\)", self.repl, filename)
        filename = re.sub( "\[.*?\]", self.repl, filename)

        # replace any name seperators with spaces
        filename = self.fixSpaces(filename)

        # remove any "of NN" phrase with spaces (problem: this could break on some titles)
        filename = re.sub( "of [\d]+", self.repl, filename)

        #print u"[{0}]".format(filename)

        # we should now have a cleaned up filename version with all the words in
        # the same positions as original filename

        # make a list of each word and its position
        word_list = list()
        for m in re.finditer("\S+", filename):
            word_list.append( (m.group(0), m.start(), m.end()) )

        # remove the first word, since it can't be the issue number
        if len(word_list) > 1:
            word_list = word_list[1:]
        else:
            #only one word?? just bail.
            return issue, start, end

        # Now try to search for the likely issue number word in the list

        # first look for a word with "#" followed by digits with optional sufix
        # this is almost certainly the issue number
        for w in reversed(word_list):
            if re.match("#[-]?(([0-9]*\.[0-9]+|[0-9]+)(\w*))", w[0]):
                found = True
                break

        # same as above but w/o a '#', and only look at the last word in the list
        if not found:
            w = word_list[-1]
            if re.match("[-]?(([0-9]*\.[0-9]+|[0-9]+)(\w*))", w[0]):
                found = True

        # now try to look for a # followed by any characters
        if not found:
            for w in reversed(word_list):
                if re.match("#\S+", w[0]):
                    found = True
                    break

        if found:
            issue = w[0]
            start = w[1]
            end = w[2]
            if issue[0] == '#':
                issue = issue[1:]

        return issue, start, end

    def getSeriesName(self, filename, issue_start ):

        # use the issue number string index to split the filename string

        if issue_start != 0:
            filename = filename[:issue_start]

        # in case there is no issue number, remove some obvious stuff
        if "--" in filename:
            # the pattern seems to be that anything to left of the first "--" is the series name followed by issue
            filename = re.sub("--.*", self.repl, filename)

        elif "__" in filename:
            # the pattern seems to be that anything to left of the first "__" is the series name followed by issue
            filename = re.sub("__.*", self.repl, filename)

        filename = filename.replace("+", " ")
        tmpstr = self.fixSpaces(filename, remove_dashes=False)

        series = tmpstr
        volume = ""

        #save the last word
        try:
            last_word = series.split()[-1]
        except:
            last_word = ""

        # remove any parenthetical phrases
        series = re.sub( "\(.*?\)", "", series)

        # search for volume number
        match = re.search('(.+)([vV]|[Vv][oO][Ll]\.?\s?)(\d+)\s*$', series)
        if match:
            series = match.group(1)
            volume = match.group(3)

        # if a volume wasn't found, see if the last word is a year in parentheses
        # since that's a common way to designate the volume
        if volume == "":
            #match either (YEAR), (YEAR-), or (YEAR-YEAR2)
            match = re.search("(\()(\d{4})(-(\d{4}|)|)(\))", last_word)
            if match:
                volume = match.group(2)

        series = series.strip()

        # if we don't have an issue number (issue_start==0), look
        # for hints i.e. "TPB", "one-shot", "OS", "OGN", etc that might
        # be removed to help search online
        if issue_start == 0:
            one_shot_words = [ "tpb", "os", "one-shot", "ogn", "gn" ]
            try:
                last_word = series.split()[-1]
                if last_word.lower() in one_shot_words:
                    series = series.rsplit(' ', 1)[0]
            except:
                pass

        return series, volume.strip()

    def getYear( self,filename, issue_end):

        filename = filename[issue_end:]

        year = ""
        # look for four digit number with "(" ")" or "--" around it
        match = re.search('(\(\d\d\d\d\))|(--\d\d\d\d--)', filename)
        if match:
            year = match.group()
            # remove non-numerics
            year = re.sub("[^0-9]", "", year)
        return year

    def getRemainder( self, filename, year, count, issue_end ):

        #make a guess at where the the non-interesting stuff begins
        remainder = ""

        if "--" in filename:
            remainder = filename.split("--",1)[1]
        elif "__" in filename:
            remainder = filename.split("__",1)[1]
        elif issue_end != 0:
            remainder = filename[issue_end:]

        remainder = self.fixSpaces(remainder, remove_dashes=False)
        if year != "":
            remainder = remainder.replace(year,"",1)
        if count != "":
            remainder = remainder.replace("of "+count,"",1)

        remainder = remainder.replace("()","")

        return remainder.strip()

    def parseFilename( self, filename ):

        # remove the path
        filename = os.path.basename(filename)

        # remove the extension
        filename = os.path.splitext(filename)[0]

        #url decode, just in case
        filename = unquote(filename)

        # sometimes archives get messed up names from too many decodings
        # often url encodings will break and leave "_28" and "_29" in place
        # of "(" and ")" see if there are a number of these, and replace them
        if filename.count("_28") > 1 and filename.count("_29") > 1:
            filename = filename.replace("_28", "(")
            filename = filename.replace("_29", ")")

        self.issue, issue_start, issue_end = self.getIssueNumber(filename)
        self.series, self.volume = self.getSeriesName(filename, issue_start)
        self.year = self.getYear(filename, issue_end)
        self.issue_count = self.getIssueCount(filename, issue_end)
        self.remainder = self.getRemainder( filename, self.year, self.issue_count, issue_end )

        if self.issue != "":
            # strip off leading zeros
            self.issue = self.issue.lstrip("0")
            if self.issue == "":
                self.issue = "0"
            if self.issue[0] == ".":
                self.issue = "0" + self.issue
@@ -1,150 +1,325 @@
"""
Functions for renaming files based on metadata
"""
"""Functions for renaming files based on metadata"""

"""
Copyright 2012-2014 Anthony Beville
#
# Copyright 2012-2014 ComicTagger Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

import os
import re
import calendar
import datetime
import utils
from issuestring import IssueString
import logging
import os
import pathlib
import string
from collections.abc import Collection, Iterable, Mapping, Sequence, Sized
from typing import Any, cast

from pathvalidate import Platform, normalize_platform, sanitize_filename

from comicapi.comicarchive import ComicArchive
from comicapi.genericmetadata import GenericMetadata
from comicapi.issuestring import IssueString
from comictaggerlib.defaults import DEFAULT_REPLACEMENTS, Replacement, Replacements

logger = logging.getLogger(__name__)


def get_rename_dir(ca: ComicArchive, rename_dir: str | pathlib.Path | None) -> pathlib.Path:
    folder = ca.path.parent.absolute()
    if rename_dir is not None:
        if isinstance(rename_dir, str):
            rename_dir = pathlib.Path(rename_dir.strip())
        folder = rename_dir.absolute()
    return folder


def _isnamedtupleinstance(x: Any) -> bool:  # pragma: no cover
    t = type(x)
    b = t.__bases__

    if len(b) != 1 or b[0] != tuple:
        return False

    f = getattr(t, "_fields", None)
    if not isinstance(f, tuple):
        return False

    return all(isinstance(n, str) for n in f)


class MetadataFormatter(string.Formatter):
    def __init__(
        self, smart_cleanup: bool = False, platform: str = "auto", replacements: Replacements = DEFAULT_REPLACEMENTS
    ) -> None:
        super().__init__()
        self.smart_cleanup = smart_cleanup
        self.platform = normalize_platform(platform)
        self.replacements = replacements

    def format_field(self, value: Any, format_spec: str) -> str:
        if value is None or value == "":
            return ""
        return cast(str, super().format_field(value, format_spec))

    def convert_field(self, value: Any, conversion: str | None) -> str:
        if value is None:
            return ""
        if isinstance(value, Iterable) and not isinstance(value, (str, tuple)):
            if conversion == "C":
                if isinstance(value, Sized):
                    return str(len(value))
                return ""
            if conversion and conversion.isdecimal():
                if not isinstance(value, Collection):
                    return ""
                i = int(conversion) - 1
                if i < 0:
                    i = 0
                if i < len(value):
                    try:
                        return sorted(value)[i]
                    except Exception:
                        ...
                    return list(value)[i]
                return ""
            if conversion == "j":
                conversion = "s"
            try:
                return ", ".join(list(self.convert_field(v, conversion) for v in sorted(value) if v is not None))
            except Exception:
                ...
            return ", ".join(list(self.convert_field(v, conversion) for v in value if v is not None))
        if not conversion:
            return cast(str, super().convert_field(value, conversion))
        if conversion == "u":
            return str(value).upper()
        if conversion == "l":
            return str(value).casefold()
        if conversion == "c":
            return str(value).capitalize()
        if conversion == "S":
            return str(value).swapcase()
        if conversion == "t":
            return str(value).title()
        if conversion.isdecimal():
            return ""
        return cast(str, super().convert_field(value, conversion))

    def handle_replacements(self, string: str, replacements: list[Replacement]) -> str:
        for find, replace, strict_only in replacements:
            if self.is_strict() or not strict_only:
                string = string.replace(find, replace)
        return string

    def none_replacement(self, value: Any, replacement: str, r: str) -> Any:
        if r == "-" and value is None or value == "":
            return replacement
        if r == "+" and value is not None:
            return replacement
        return value

    def split_replacement(self, field_name: str) -> tuple[str, str, str]:
        if "-" in field_name:
            return field_name.rpartition("-")
        if "+" in field_name:
            return field_name.rpartition("+")
        return field_name, "", ""

    def is_strict(self) -> bool:
        return self.platform in [Platform.UNIVERSAL, Platform.WINDOWS]

    def _vformat(
        self,
        format_string: str,
        args: Sequence[Any],
        kwargs: Mapping[str, Any],
        used_args: set[Any],
        recursion_depth: int,
        auto_arg_index: int = 0,
    ) -> tuple[str, int]:
        if recursion_depth < 0:
            raise ValueError("Max string recursion exceeded")
        result = []
        lstrip = False
        for literal_text, field_name, format_spec, conversion in self.parse(format_string):
            # output the literal text
            if literal_text:
                if lstrip:
                    literal_text = literal_text.lstrip("-_)}]#")
                if self.smart_cleanup:
                    literal_text = self.handle_replacements(literal_text, self.replacements.literal_text)
                    lspace = literal_text[0].isspace() if literal_text else False
                    rspace = literal_text[-1].isspace() if literal_text else False
                    literal_text = " ".join(literal_text.split())
                    if literal_text == "":
                        literal_text = " "
                    else:
                        if lspace:
                            literal_text = " " + literal_text
                        if rspace:
                            literal_text += " "
                result.append(literal_text)

            lstrip = False
            # if there's a field, output it
            if field_name is not None and field_name != "":
                field_name, r, replacement = self.split_replacement(field_name)
                field_name = field_name.casefold()
                # this is some markup, find the object and do the formatting

                # handle arg indexing when digit field_names are given.
                if field_name.isdigit():
                    raise ValueError("cannot use a number as a field name")

                # given the field_name, find the object it references
                # and the argument it came from
                try:
                    obj, arg_used = self.get_field(field_name, args, kwargs)
                    used_args.add(arg_used)
                except Exception:
                    obj = None

                obj = self.none_replacement(obj, replacement, r)
                # do any conversion on the resulting object
                obj = self.convert_field(obj, conversion)
                if r == "-":
                    obj = self.none_replacement(obj, replacement, r)

                # expand the format spec, if needed
                format_spec, _ = self._vformat(
                    cast(str, format_spec), args, kwargs, used_args, recursion_depth - 1, auto_arg_index=False
                )

                # format the object and append to the result
                fmt_obj = self.format_field(obj, format_spec)
                if fmt_obj == "" and result and self.smart_cleanup and literal_text:
                    if self.str_contains(result[-1], "({["):
                        lstrip = True
                    if result:
                        if " " in result[-1]:
                            result[-1], _, _ = result[-1].rstrip().rpartition(" ")
                        result[-1] = result[-1].rstrip("-_({[#")
                if self.smart_cleanup:
                    # colons and slashes get special treatment
                    fmt_obj = self.handle_replacements(fmt_obj, self.replacements.format_value)
                    fmt_obj = " ".join(fmt_obj.split())
                fmt_obj = str(sanitize_filename(fmt_obj, platform=self.platform))
                result.append(fmt_obj)

        return "".join(result), False

    def str_contains(self, chars: str, string: str) -> bool:
        for char in chars:
            if char in string:
                return True
        return False
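A usage sketch of the custom conversions defined above (template values hypothetical): "!u" upper-cases, "!j" joins a sorted collection, and a decimal conversion picks a 1-based element from a collection.

fmt = MetadataFormatter(smart_cleanup=True)
fmt.format("{title!u}", title="the dark age")         # 'THE DARK AGE'
fmt.format("{genres!j}", genres={"Crime", "Action"})  # 'Action, Crime'
fmt.format("{genres!2}", genres={"Crime", "Action"})  # 'Crime'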

class FileRenamer:
    def __init__( self, metadata ):
        self.setMetadata( metadata )
        self.setTemplate( "%series% v%volume% #%issue% (of %issuecount%) (%year%)" )
        self.smart_cleanup = True
        self.issue_zero_padding = 3
    def __init__(
        self,
        metadata: GenericMetadata | None,
        platform: str = "auto",
        replacements: Replacements = DEFAULT_REPLACEMENTS,
    ) -> None:
        self.template = "{publisher}/{series}/{series} v{volume} #{issue} (of {issue_count}) ({year})"
        self.smart_cleanup = True
        self.issue_zero_padding = 3
        self.metadata = metadata or GenericMetadata()
        self.move = False
        self.platform = platform
        self.replacements = replacements
        self.original_name = ""
        self.move_only = False

    def setMetadata( self, metadata ):
        self.metdata = metadata
    def set_metadata(self, metadata: GenericMetadata, original_name: str) -> None:
        self.metadata = metadata
        self.original_name = original_name

    def setIssueZeroPadding( self, count ):
        self.issue_zero_padding = count
    def set_issue_zero_padding(self, count: int) -> None:
        self.issue_zero_padding = count

    def setSmartCleanup( self, on ):
        self.smart_cleanup = on
    def set_smart_cleanup(self, on: bool) -> None:
        self.smart_cleanup = on

    def setTemplate( self, template ):
        self.template = template

    def replaceToken( self, text, value, token ):
        #helper func
        def isToken( word ):
            return (word[0] == "%" and word[-1:] == "%")
    def set_template(self, template: str) -> None:
        self.template = template

        if value is not None:
            return text.replace( token, unicode(value) )
        else:
            if self.smart_cleanup:
                # smart cleanup means we want to remove anything appended to token if it's empty
                # (e.g "#%issue%" or "v%volume%" )
                # (TODO: This could fail if there is more than one token appended together, I guess)
                text_list = text.split()

                #special case for issuecount, remove preceding non-token word, as in "...(of %issuecount%)..."
                if token == '%issuecount%':
                    for idx,word in enumerate( text_list ):
                        if token in word and not isToken(text_list[idx -1]) :
                            text_list[idx -1] = ""

                text_list = [ x for x in text_list if token not in x ]
                return " ".join( text_list )
            else:
                return text.replace( token, "" )

    def determineName( self, filename, ext=None ):
    def determine_name(self, ext: str) -> str:
        class Default(dict[str, Any]):
            def __missing__(self, key: str) -> str:
                return "{" + key + "}"

        md = self.metdata
        new_name = self.template
        preferred_encoding = utils.get_actual_preferred_encoding()
        md = self.metadata

        #print u"{0}".format(md)

        new_name = self.replaceToken( new_name, md.series, '%series%')
        new_name = self.replaceToken( new_name, md.volume, '%volume%')

        if md.issue is not None:
            issue_str = u"{0}".format( IssueString(md.issue).asString(pad=self.issue_zero_padding) )
        else:
            issue_str = None
        new_name = self.replaceToken( new_name, issue_str, '%issue%')

        new_name = self.replaceToken( new_name, md.issueCount, '%issuecount%')
        new_name = self.replaceToken( new_name, md.year, '%year%')
        new_name = self.replaceToken( new_name, md.publisher, '%publisher%')
        new_name = self.replaceToken( new_name, md.title, '%title%')
        new_name = self.replaceToken( new_name, md.month, '%month%')
        month_name = None
        if md.month is not None:
            if (type(md.month) == str and md.month.isdigit()) or type(md.month) == int:
                if int(md.month) in range(1,13):
                    dt = datetime.datetime( 1970, int(md.month), 1, 0, 0)
                    month_name = dt.strftime(u"%B".encode(preferred_encoding)).decode(preferred_encoding)
        new_name = self.replaceToken( new_name, month_name, '%month_name%')
        template = self.template

        new_name = self.replaceToken( new_name, md.genre, '%genre%')
        new_name = self.replaceToken( new_name, md.language, '%language_code%')
        new_name = self.replaceToken( new_name, md.criticalRating , '%criticalrating%')
        new_name = self.replaceToken( new_name, md.alternateSeries, '%alternateseries%')
        new_name = self.replaceToken( new_name, md.alternateNumber, '%alternatenumber%')
        new_name = self.replaceToken( new_name, md.alternateCount, '%alternatecount%')
        new_name = self.replaceToken( new_name, md.imprint, '%imprint%')
        new_name = self.replaceToken( new_name, md.format, '%format%')
        new_name = self.replaceToken( new_name, md.maturityRating, '%maturityrating%')
        new_name = self.replaceToken( new_name, md.storyArc, '%storyarc%')
        new_name = self.replaceToken( new_name, md.seriesGroup, '%seriesgroup%')
        new_name = self.replaceToken( new_name, md.scanInfo, '%scaninfo%')

        if self.smart_cleanup:

            # remove empty braces,brackets, parentheses
            new_name = re.sub("\(\s*[-:]*\s*\)", "", new_name )
            new_name = re.sub("\[\s*[-:]*\s*\]", "", new_name )
            new_name = re.sub("\{\s*[-:]*\s*\}", "", new_name )
        new_name = ""

            # remove duplicate spaces
            new_name = u" ".join(new_name.split())
        fmt = MetadataFormatter(self.smart_cleanup, platform=self.platform, replacements=self.replacements)
        md_dict = vars(md)
        md_dict.update(
            dict(
                month_name=None,
                month_abbr=None,
                date=None,
                genre=None,
                story_arc=None,
                series_group=None,
                web_link=None,
                character=None,
                team=None,
                location=None,
            )
        )

            # remove remove duplicate -, _,
            new_name = re.sub("[-_]{2,}\s+", "-- ", new_name )
            new_name = re.sub("(\s--)+", " --", new_name )
            new_name = re.sub("(\s-)+", " -", new_name )

            # remove dash or double dash at end of line
            new_name = re.sub("[-]{1,2}\s*$", "", new_name )

            # remove duplicate spaces (again!)
            new_name = u" ".join(new_name.split())
        md_dict["issue"] = IssueString(md.issue).as_string(pad=self.issue_zero_padding)
        for role in ["writer", "penciller", "inker", "colorist", "letterer", "cover artist", "editor", "translator"]:
            md_dict[role] = md.get_primary_credit(role)

        if ext is None:
            ext = os.path.splitext( filename )[1]
        if (isinstance(md.month, int) or isinstance(md.month, str) and md.month.isdigit()) and 0 < int(md.month) < 13:
            md_dict["month_name"] = calendar.month_name[int(md.month)]
            md_dict["month_abbr"] = calendar.month_abbr[int(md.month)]

        new_name += ext

        # some tweaks to keep various filesystems happy
        new_name = new_name.replace("/", "-")
        new_name = new_name.replace(" :", " -")
        new_name = new_name.replace(": ", " - ")
        new_name = new_name.replace(":", "-")
        new_name = new_name.replace("?", "")

        return new_name


        if md.year is not None and datetime.MINYEAR <= md.year <= datetime.MAXYEAR:
            md_dict["date"] = datetime.datetime(year=md.year, month=md.month or 1, day=md.day or 1)

        if md.genres:
            md_dict["genre"] = sorted(md.genres)[0]
        if md.story_arcs:
            md_dict["story_arc"] = md.story_arcs[0]
        if md.series_groups:
            md_dict["series_group"] = md.series_groups[0]
        if md.web_links:
            md_dict["web_link"] = md.web_links[0]
        if md.characters:
            md_dict["character"] = sorted(md.characters)[0]
        if md.teams:
            md_dict["team"] = sorted(md.teams)[0]
        if md.locations:
            md_dict["location"] = sorted(md.locations)[0]

        new_basename = ""
        for component in pathlib.PureWindowsPath(template).parts:
            new_basename = str(
                sanitize_filename(fmt.vformat(component, args=[], kwargs=Default(md_dict)), platform=self.platform)
            ).strip()
            new_name = os.path.join(new_name, new_basename)

        if self.move_only:
            new_folder = os.path.join(new_name, os.path.splitext(self.original_name)[0])
            return new_folder + ext
        if self.move:
            return new_name.strip() + ext
        return new_basename.strip() + ext
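A sketch of driving the new renamer (metadata values hypothetical; the expected output shape is shown in the comment):

md = GenericMetadata(series="Example Series", issue="2", year=2024)  # hypothetical values
renamer = FileRenamer(md)
renamer.set_template("{series} #{issue} ({year})")
renamer.set_issue_zero_padding(3)
renamer.determine_name(".cbz")  # 'Example Series #002 (2024).cbz'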
@@ -1,406 +1,418 @@
|
||||
# coding=utf-8
|
||||
"""
|
||||
A PyQt4 widget for managing list of comic archive files
|
||||
"""
|
||||
"""A PyQt6 widget for managing list of comic archive files"""
|
||||
|
||||
"""
|
||||
Copyright 2012-2014 Anthony Beville
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
"""
|
||||
#
|
||||
# Copyright 2012-2014 ComicTagger Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import pathlib
|
||||
import platform
|
||||
from typing import Callable, cast
|
||||
|
||||
from PyQt4.QtCore import *
|
||||
from PyQt4.QtGui import *
|
||||
from PyQt4 import uic
|
||||
from PyQt4.QtCore import pyqtSignal
|
||||
from PyQt6 import QtCore, QtGui, QtWidgets, uic
|
||||
|
||||
from settings import ComicTaggerSettings
|
||||
from comicarchive import ComicArchive
|
||||
from comicarchive import MetaDataStyle
|
||||
from genericmetadata import GenericMetadata, PageType
|
||||
import utils
|
||||
from comicapi import utils
|
||||
from comicapi.comicarchive import ComicArchive
|
||||
from comictaggerlib.ctsettings import ct_ns
|
||||
from comictaggerlib.graphics import graphics_path
|
||||
from comictaggerlib.optionalmsgdialog import OptionalMessageDialog
|
||||
from comictaggerlib.settingswindow import linuxRarHelp, macRarHelp, windowsRarHelp
|
||||
from comictaggerlib.ui import ui_path
|
||||
from comictaggerlib.ui.qtutils import center_window_on_parent
|
||||
|
||||
class FileTableWidget( QTableWidget ):
|
||||
|
||||
def __init__(self, parent ):
|
||||
super(FileTableWidget, self).__init__(parent)
|
||||
|
||||
|
||||
self.setColumnCount(5)
|
||||
self.setHorizontalHeaderLabels (["File", "Folder", "CR", "CBL", ""])
|
||||
self.horizontalHeader().setStretchLastSection( True )
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class FileTableWidgetItem(QTableWidgetItem):
|
||||
def __lt__(self, other):
|
||||
return (self.data(Qt.UserRole).toBool() <
|
||||
other.data(Qt.UserRole).toBool())
|
||||
class FileSelectionList(QtWidgets.QWidget):
|
||||
selectionChanged = QtCore.pyqtSignal(QtCore.QVariant)
|
||||
listCleared = QtCore.pyqtSignal()
|
||||
|
||||
fileColNum = 0
|
||||
MDFlagColNum = 1
|
||||
typeColNum = 2
|
||||
readonlyColNum = 3
|
||||
folderColNum = 4
|
||||
dataColNum = fileColNum
|
||||
|
||||
class FileInfo( ):
|
||||
def __init__(self, ca ):
|
||||
self.ca = ca
|
||||
def __init__(
|
||||
self, parent: QtWidgets.QWidget, config: ct_ns, dirty_flag_verification: Callable[[str, str], bool]
|
||||
) -> None:
|
||||
super().__init__(parent)
|
||||
|
||||
class FileSelectionList(QWidget):
|
||||
with (ui_path / "fileselectionlist.ui").open(encoding="utf-8") as uifile:
|
||||
uic.loadUi(uifile, self)
|
||||
|
||||
selectionChanged = pyqtSignal(QVariant)
|
||||
listCleared = pyqtSignal()
|
||||
|
||||
fileColNum = 0
|
||||
CRFlagColNum = 1
|
||||
CBLFlagColNum = 2
|
||||
typeColNum = 3
|
||||
readonlyColNum = 4
|
||||
folderColNum = 5
|
||||
dataColNum = fileColNum
|
||||
|
||||
self.config = config
|
||||
|
||||
def __init__(self, parent , settings ):
|
||||
super(FileSelectionList, self).__init__(parent)
|
||||
self.twList.horizontalHeader().setMinimumSectionSize(50)
|
||||
self.twList.currentItemChanged.connect(self.current_item_changed_cb)
|
||||
|
||||
uic.loadUi(ComicTaggerSettings.getUIFile('fileselectionlist.ui' ), self)
|
||||
|
||||
self.settings = settings
|
||||
self.currentItem = None
|
||||
self.setContextMenuPolicy(QtCore.Qt.ContextMenuPolicy.ActionsContextMenu)
|
||||
self.dirty_flag = False
|
||||
|
||||
utils.reduceWidgetFontSize( self.twList )
|
||||
|
||||
self.twList.currentItemChanged.connect( self.currentItemChangedCB )
|
||||
|
||||
self.currentItem = None
|
||||
self.setContextMenuPolicy(Qt.ActionsContextMenu)
|
||||
self.modifiedFlag = False
|
||||
|
||||
selectAllAction = QAction("Select All", self)
|
||||
removeAction = QAction("Remove Selected Items", self)
|
||||
self.separator = QAction("",self)
|
||||
self.separator.setSeparator(True)
|
||||
|
||||
selectAllAction.setShortcut( 'Ctrl+A' )
|
||||
removeAction.setShortcut( 'Ctrl+X' )
|
||||
|
||||
selectAllAction.triggered.connect(self.selectAll)
|
||||
removeAction.triggered.connect(self.removeSelection)
|
||||
select_all_action = QtGui.QAction("Select All", self)
|
||||
remove_action = QtGui.QAction("Remove Selected Items", self)
|
||||
self.separator = QtGui.QAction("", self)
|
||||
self.separator.setSeparator(True)
|
||||
|
||||
self.addAction(selectAllAction)
|
||||
self.addAction(removeAction)
|
||||
self.addAction(self.separator)
|
||||
select_all_action.setShortcut("Ctrl+A")
|
||||
remove_action.setShortcut("Backspace" if platform.system() == "Darwin" else "Delete")
|
||||
|
||||
def getSorting(self):
|
||||
col = self.twList.horizontalHeader().sortIndicatorSection()
|
||||
order = self.twList.horizontalHeader().sortIndicatorOrder()
|
||||
return col, order
|
||||
select_all_action.triggered.connect(self.select_all)
|
||||
remove_action.triggered.connect(self.remove_selection)
|
||||
|
||||
def setSorting(self, col, order):
|
||||
col = self.twList.horizontalHeader().setSortIndicator( col, order)
|
||||
self.addAction(select_all_action)
|
||||
self.addAction(remove_action)
|
||||
self.addAction(self.separator)
|
||||
|
||||
def addAppAction( self, action ):
|
||||
self.insertAction( None , action )
|
||||
|
||||
def setModifiedFlag( self, modified ):
|
||||
self.modifiedFlag = modified
|
||||
|
||||
def selectAll( self ):
|
||||
self.twList.setRangeSelected( QTableWidgetSelectionRange ( 0, 0, self.twList.rowCount()-1, 5 ), True )
|
||||
self.loaded_paths: set[pathlib.Path] = set()
|
||||
|
||||
def deselectAll( self ):
|
||||
self.twList.setRangeSelected( QTableWidgetSelectionRange ( 0, 0, self.twList.rowCount()-1, 5 ), False )
|
||||
self.dirty_flag_verification = dirty_flag_verification
|
||||
self.rar_ro_shown = False
|
||||
|
||||
def removeArchiveList( self, ca_list ):
|
||||
self.twList.setSortingEnabled(False)
|
||||
for ca in ca_list:
|
||||
for row in range(self.twList.rowCount()):
|
||||
row_ca = self.getArchiveByRow( row )
|
||||
if row_ca == ca:
|
||||
self.twList.removeRow(row)
|
||||
break
|
||||
self.twList.setSortingEnabled(True)
|
||||
|
||||
def getArchiveByRow( self, row):
|
||||
fi = self.twList.item(row, FileSelectionList.dataColNum).data( Qt.UserRole ).toPyObject()
|
||||
return fi.ca
|
||||
|
||||
def getCurrentArchive( self ):
|
||||
return self.getArchiveByRow( self.twList.currentRow() )
|
||||
|
||||
def removeSelection( self ):
|
||||
row_list = []
|
||||
for item in self.twList.selectedItems():
|
||||
if item.column() == 0:
|
||||
row_list.append(item.row())
|
||||
def get_sorting(self) -> tuple[int, int]:
|
||||
col = self.twList.horizontalHeader().sortIndicatorSection()
|
||||
order = self.twList.horizontalHeader().sortIndicatorOrder().value
|
||||
return int(col), int(order)
|
||||
|
||||
if len(row_list) == 0:
|
||||
return
|
||||
|
||||
if self.twList.currentRow() in row_list:
|
||||
if not self.modifiedFlagVerification( "Remove Archive",
|
||||
"If you close this archive, data in the form will be lost. Are you sure?"):
|
||||
return
|
||||
|
||||
row_list.sort()
|
||||
row_list.reverse()
|
||||
def set_sorting(self, col: int, order: QtCore.Qt.SortOrder) -> None:
|
||||
self.twList.horizontalHeader().setSortIndicator(col, order)
|
||||
|
||||
self.twList.currentItemChanged.disconnect( self.currentItemChangedCB )
|
||||
self.twList.setSortingEnabled(False)
|
||||
def add_app_action(self, action: QtGui.QAction) -> None:
|
||||
self.insertAction(QtGui.QAction(), action)
|
||||
|
||||
for i in row_list:
|
||||
self.twList.removeRow(i)
|
||||
|
||||
self.twList.setSortingEnabled(True)
|
||||
self.twList.currentItemChanged.connect( self.currentItemChangedCB )
|
||||
|
||||
if self.twList.rowCount() > 0:
|
||||
# since on a removal, we select row 0, make sure callback occurs if we're already there
|
||||
if self.twList.currentRow() == 0:
|
||||
self.currentItemChangedCB( self.twList.currentItem(), None)
|
||||
self.twList.selectRow(0)
|
||||
else:
|
||||
self.listCleared.emit()
|
||||
|
||||
def addPathList( self, pathlist ):
|
||||
|
||||
filelist = utils.get_recursive_filelist( pathlist )
|
||||
|
||||
# we now have a list of files to add
|
||||
def set_modified_flag(self, modified: bool) -> None:
|
||||
self.dirty_flag = modified
|
||||
|
||||
progdialog = QProgressDialog("", "Cancel", 0, len(filelist), self)
|
||||
progdialog.setWindowTitle( "Adding Files" )
|
||||
#progdialog.setWindowModality(Qt.WindowModal)
|
||||
progdialog.setWindowModality(Qt.ApplicationModal)
|
||||
progdialog.show()
|
||||
|
||||
firstAdded = None
|
||||
self.twList.setSortingEnabled(False)
|
||||
for idx,f in enumerate(filelist):
|
||||
QCoreApplication.processEvents()
|
||||
if progdialog.wasCanceled():
|
||||
break
|
||||
progdialog.setValue(idx)
|
||||
progdialog.setLabelText(f)
|
||||
utils.centerWindowOnParent( progdialog )
|
||||
QCoreApplication.processEvents()
|
||||
row = self.addPathItem( f )
|
||||
if firstAdded is None and row is not None:
|
||||
firstAdded = row
|
||||
|
||||
progdialog.close()
|
||||
if firstAdded is not None:
|
||||
self.twList.selectRow(firstAdded)
|
||||
else:
|
||||
if len(pathlist) == 1 and os.path.isfile(pathlist[0]):
|
||||
QMessageBox.information(self, self.tr("File Open"), self.tr("Selected file doesn't seem to be a comic archive."))
|
||||
else:
|
||||
QMessageBox.information(self, self.tr("File/Folder Open"), self.tr("No comic archives were found."))
|
||||
|
||||
|
        self.twList.setSortingEnabled(True)

        # Adjust column sizes
        self.twList.resizeColumnsToContents()
        self.twList.setColumnWidth(FileSelectionList.CRFlagColNum, 35)
        self.twList.setColumnWidth(FileSelectionList.CBLFlagColNum, 35)
        self.twList.setColumnWidth(FileSelectionList.readonlyColNum, 35)
        self.twList.setColumnWidth(FileSelectionList.typeColNum, 45)
        if self.twList.columnWidth(FileSelectionList.fileColNum) > 250:
            self.twList.setColumnWidth(FileSelectionList.fileColNum, 250)
        if self.twList.columnWidth(FileSelectionList.folderColNum) > 200:
            self.twList.setColumnWidth(FileSelectionList.folderColNum, 200)

    def select_all(self) -> None:
        self.twList.setRangeSelected(
            QtWidgets.QTableWidgetSelectionRange(0, 0, self.twList.rowCount() - 1, self.twList.columnCount() - 1), True
        )
    def isListDupe( self, path ):
        r = 0
        while r < self.twList.rowCount():
            ca = self.getArchiveByRow( r )
            if ca.path == path:
                return True
            r = r + 1

        return False
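`isListDupe` rescans every row for every file added, so a large import costs O(n²) comparisons. The newer code further down keeps a `loaded_paths` set instead; here is a hedged sketch of that idea (the `add_path` helper is illustrative, not part of the codebase):

```python
import os

# Illustrative set-based dedupe; loaded_paths mirrors the attribute used
# by the newer FileSelectionList code, add_path is a hypothetical helper.
loaded_paths: set[str] = set()

def add_path(path: str) -> bool:
    path = os.path.abspath(path)
    if path in loaded_paths:  # O(1) membership test instead of a row scan
        return False
    loaded_paths.add(path)
    return True

print(add_path("/tmp/x.cbz"))  # True: first time seen
print(add_path("/tmp/x.cbz"))  # False: duplicate rejected
```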
    def addPathItem( self, path ):
        path = unicode( path )
        path = os.path.abspath( path )
        #print "processing", path

        if self.isListDupe(path):
            return None

        ca = ComicArchive( path, self.settings.rar_exe_path )

        if ca.seemsToBeAComicArchive():
            row = self.twList.rowCount()
            self.twList.insertRow( row )

            fi = FileInfo( ca )

            filename_item = QTableWidgetItem()
            folder_item = QTableWidgetItem()
            cix_item = FileTableWidgetItem()
            cbi_item = FileTableWidgetItem()
            readonly_item = FileTableWidgetItem()
            type_item = QTableWidgetItem()

            filename_item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
            filename_item.setData( Qt.UserRole, fi )
            self.twList.setItem(row, FileSelectionList.fileColNum, filename_item)

            folder_item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
            self.twList.setItem(row, FileSelectionList.folderColNum, folder_item)

    def deselect_all(self) -> None:
        self.twList.setRangeSelected(
            QtWidgets.QTableWidgetSelectionRange(0, 0, self.twList.rowCount() - 1, self.twList.columnCount() - 1), False
        )
            type_item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
            self.twList.setItem(row, FileSelectionList.typeColNum, type_item)

    def remove_archive_list(self, ca_list: list[ComicArchive]) -> None:
        self.twList.setSortingEnabled(False)
        current_removed = False
        for ca in ca_list:
            for row in range(self.twList.rowCount()):
                row_ca = self.get_archive_by_row(row)
                if row_ca == ca:
                    if row == self.twList.currentRow():
                        current_removed = True
                    self.twList.removeRow(row)
                    self.loaded_paths -= {ca.path}
                    break
        self.twList.setSortingEnabled(True)
            cix_item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
            cix_item.setTextAlignment(Qt.AlignHCenter)
            self.twList.setItem(row, FileSelectionList.CRFlagColNum, cix_item)

        if self.twList.rowCount() > 0 and current_removed:
            # since on a removal, we select row 0, make sure callback occurs if
            # we're already there
            if self.twList.currentRow() == 0:
                self.current_item_changed_cb(self.twList.currentItem(), None)
            self.twList.selectRow(0)
        elif self.twList.rowCount() <= 0:
            self.listCleared.emit()
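Several methods here bracket bulk mutations with `currentItemChanged.disconnect(...)` and a matching `connect(...)`. If an exception escapes between the two calls, the slot stays disconnected. A hedged sketch of the same pattern as a context manager; the `Signal` class is a tiny stand-in so the snippet runs without Qt:

```python
from contextlib import contextmanager

class Signal:
    # Minimal stand-in for a Qt signal, only for this sketch.
    def __init__(self) -> None:
        self._slots = []
    def connect(self, slot) -> None:
        self._slots.append(slot)
    def disconnect(self, slot) -> None:
        self._slots.remove(slot)
    def emit(self, *args) -> None:
        for slot in list(self._slots):
            slot(*args)

@contextmanager
def slot_disconnected(signal, slot):
    # disconnect .. mutate .. reconnect, made exception-safe by finally
    signal.disconnect(slot)
    try:
        yield
    finally:
        signal.connect(slot)

sig = Signal()
sig.connect(print)
with slot_disconnected(sig, print):
    sig.emit("suppressed during bulk row removal")  # prints nothing
sig.emit("delivered again")  # prints normally
```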
            cbi_item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
            cbi_item.setTextAlignment(Qt.AlignHCenter)
            self.twList.setItem(row, FileSelectionList.CBLFlagColNum, cbi_item)

    def get_archive_by_row(self, row: int) -> ComicArchive | None:
        if row >= 0:
            ca: ComicArchive = self.twList.item(row, FileSelectionList.dataColNum).data(QtCore.Qt.ItemDataRole.UserRole)
            return ca
        return None
            readonly_item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
            readonly_item.setTextAlignment(Qt.AlignHCenter)
            self.twList.setItem(row, FileSelectionList.readonlyColNum, readonly_item)

            self.updateRow( row )

            return row

    def get_current_archive(self) -> ComicArchive | None:
        return self.get_archive_by_row(self.twList.currentRow())

    def updateRow( self, row ):
        fi = self.twList.item( row, FileSelectionList.dataColNum ).data( Qt.UserRole ).toPyObject()
    def remove_selection(self) -> None:
        row_list = []
        for item in self.twList.selectedItems():
            if item.column() == 0:
                row_list.append(item.row())

        filename_item = self.twList.item( row, FileSelectionList.fileColNum )
        folder_item = self.twList.item( row, FileSelectionList.folderColNum )
        cix_item = self.twList.item( row, FileSelectionList.CRFlagColNum )
        cbi_item = self.twList.item( row, FileSelectionList.CBLFlagColNum )
        type_item = self.twList.item( row, FileSelectionList.typeColNum )
        readonly_item = self.twList.item( row, FileSelectionList.readonlyColNum )

        if len(row_list) == 0:
            return
        item_text = os.path.split(fi.ca.path)[0]
        folder_item.setText( item_text )
        folder_item.setData( Qt.ToolTipRole, item_text )

        if self.twList.currentRow() in row_list:
            if not self.dirty_flag_verification(
                "Remove Archive", "If you close this archive, data in the form will be lost. Are you sure?"
            ):
                return

        item_text = os.path.split(fi.ca.path)[1]
        filename_item.setText( item_text )
        filename_item.setData( Qt.ToolTipRole, item_text )

        row_list.sort()
        row_list.reverse()

        if fi.ca.isZip():
            item_text = "ZIP"
        elif fi.ca.isRar():
            item_text = "RAR"
        else:
            item_text = ""
        type_item.setText( item_text )
        type_item.setData( Qt.ToolTipRole, item_text )

        self.twList.currentItemChanged.disconnect(self.current_item_changed_cb)
        self.twList.setSortingEnabled(False)
        for i in row_list:
            self.loaded_paths -= {self.get_archive_by_row(i).path}  # type: ignore[union-attr]
            self.twList.removeRow(i)

        if fi.ca.hasCIX():
            cix_item.setCheckState(Qt.Checked)
            cix_item.setData(Qt.UserRole, True)
        else:
            cix_item.setData(Qt.UserRole, False)
            cix_item.setCheckState(Qt.Unchecked)

        self.twList.setSortingEnabled(True)
        self.twList.currentItemChanged.connect(self.current_item_changed_cb)

        if fi.ca.hasCBI():
            cbi_item.setCheckState(Qt.Checked)
            cbi_item.setData(Qt.UserRole, True)
        else:
            cbi_item.setData(Qt.UserRole, False)
            cbi_item.setCheckState(Qt.Unchecked)

        if self.twList.rowCount() > 0:
            # since on a removal, we select row 0, make sure callback occurs if
            # we're already there
            if self.twList.currentRow() == 0:
                self.current_item_changed_cb(self.twList.currentItem(), None)
            self.twList.selectRow(0)
        else:
            self.listCleared.emit()

        if not fi.ca.isWritable():
            readonly_item.setCheckState(Qt.Checked)
            readonly_item.setData(Qt.UserRole, True)
        else:
            readonly_item.setData(Qt.UserRole, False)
            readonly_item.setCheckState(Qt.Unchecked)

    def add_path_list(self, pathlist: list[str]) -> None:
        if not pathlist:
            return
        filelist = utils.get_recursive_filelist(pathlist)
        # we now have a list of files to add
        # Reading these will force them into the ComicArchive's cache
        fi.ca.readCIX()
        fi.ca.hasCBI()

        progdialog = None
        if len(filelist) > 3:
            # The progress dialog on Linux flakes out for small ranges, so only
            # bother showing it for larger batches
            progdialog = QtWidgets.QProgressDialog("", "Cancel", 0, len(filelist), parent=self)
            progdialog.setWindowTitle("Adding Files")
            progdialog.setWindowModality(QtCore.Qt.WindowModality.WindowModal)
            progdialog.setMinimumDuration(300)
            progdialog.show()
            center_window_on_parent(progdialog)
    def getSelectedArchiveList( self ):
        ca_list = []
        for r in range( self.twList.rowCount() ):
            item = self.twList.item(r, FileSelectionList.dataColNum)
            if self.twList.isItemSelected(item):
                fi = item.data( Qt.UserRole ).toPyObject()
                ca_list.append(fi.ca)

        first_added = None
        rar_added_ro = False
        self.twList.setSortingEnabled(False)
        for idx, f in enumerate(filelist):
            if idx % 10 == 0:
                QtCore.QCoreApplication.processEvents()
            if progdialog is not None:
                if progdialog.wasCanceled():
                    break
                progdialog.setValue(idx + 1)
                progdialog.setLabelText(f)
||||
|
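The `idx % 10 == 0` guard above throttles the event pump: calling `processEvents()` on every iteration can dominate a long loop, while every tenth call keeps the UI responsive at a fraction of the cost. A plain-Python sketch of the shape of that loop (`pump_events` stands in for `QtCore.QCoreApplication.processEvents`):

```python
def process_items(items, pump_events, every=10):
    # Sketch of a throttled event pump inside a long-running loop.
    for idx, item in enumerate(items):
        if idx % every == 0:
            pump_events()  # in Qt: QtCore.QCoreApplication.processEvents()
        # ... per-item work goes here (e.g. adding one file to the list) ...

process_items(range(25), lambda: print("pump"))  # pumps at idx 0, 10, 20
```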
        return ca_list

    def updateCurrentRow( self ):
        self.updateRow( self.twList.currentRow() )

            row, ca = self.add_path_item(f)
            if row is not None:
                rar_added_ro = bool(ca and ca.archiver.name() == "RAR" and not ca.archiver.is_writable())
                if first_added is None and row != -1:
                    first_added = row
    def updateSelectedRows( self ):
        self.twList.setSortingEnabled(False)
        for r in range( self.twList.rowCount() ):
            item = self.twList.item(r, FileSelectionList.dataColNum)
            if self.twList.isItemSelected(item):
                self.updateRow( r )
        self.twList.setSortingEnabled(True)

    def currentItemChangedCB( self, curr, prev ):
        if progdialog is not None:
            progdialog.hide()
            QtCore.QCoreApplication.processEvents()
        new_idx = curr.row()
        old_idx = -1
        if prev is not None:
            old_idx = prev.row()
        #print "old {0} new {1}".format(old_idx, new_idx)

        if old_idx == new_idx:
            return

        # don't allow change if modified
        if prev is not None and new_idx != old_idx:
            if not self.modifiedFlagVerification( "Change Archive",
                                                  "If you change archives now, data in the form will be lost. Are you sure?"):
                self.twList.currentItemChanged.disconnect( self.currentItemChangedCB )
                self.twList.setCurrentItem( prev )
                self.twList.currentItemChanged.connect( self.currentItemChangedCB )
                # Need to defer this revert selection, for some reason
                QTimer.singleShot(1, self.revertSelection)
                return

        if first_added is not None:
            self.twList.selectRow(first_added)
        else:
            if len(pathlist) == 1 and os.path.isfile(pathlist[0]):
                QtWidgets.QMessageBox.information(
                    self, "File Open", "Selected file doesn't seem to be a comic archive."
                )
            else:
                QtWidgets.QMessageBox.information(self, "File/Folder Open", "No readable comic archives were found.")
        fi = self.twList.item( new_idx, FileSelectionList.dataColNum ).data( Qt.UserRole ).toPyObject()
        self.selectionChanged.emit( QVariant(fi) )

    def revertSelection( self ):
        self.twList.selectRow( self.twList.currentRow() )

    def modifiedFlagVerification( self, title, desc ):
        if self.modifiedFlag:
            reply = QMessageBox.question(self,
                                         self.tr(title),
                                         self.tr(desc),
                                         QMessageBox.Yes, QMessageBox.No )

            if reply != QMessageBox.Yes:
                return False
        return True
        # Attempt to use a special checkbox widget in the cell.
        # Couldn't figure out how to disable it with "enabled" colors
        #w = QWidget()
        #cb = QCheckBox(w)
        #cb.setCheckState(Qt.Checked)
        #layout = QHBoxLayout()
        #layout.addWidget( cb )
        #layout.setAlignment(Qt.AlignHCenter)
        #layout.setMargin(2)
        #w.setLayout(layout)
        #self.twList.setCellWidget( row, 2, w )

        if rar_added_ro:
            self.rar_ro_message()
        self.twList.setSortingEnabled(True)

        # Adjust column sizes
        self.twList.resizeColumnsToContents()
        self.twList.setColumnWidth(FileSelectionList.MDFlagColNum, 35)
        self.twList.setColumnWidth(FileSelectionList.readonlyColNum, 35)
        self.twList.setColumnWidth(FileSelectionList.typeColNum, 45)
        if self.twList.columnWidth(FileSelectionList.fileColNum) > 250:
            self.twList.setColumnWidth(FileSelectionList.fileColNum, 250)
        if self.twList.columnWidth(FileSelectionList.folderColNum) > 200:
            self.twList.setColumnWidth(FileSelectionList.folderColNum, 200)
    def rar_ro_message(self) -> None:
        if not self.rar_ro_shown:
            if platform.system() == "Windows":
                rar_help = windowsRarHelp
            elif platform.system() == "Darwin":
                rar_help = macRarHelp
            else:
                rar_help = linuxRarHelp

            OptionalMessageDialog.msg_no_checkbox(
                self,
                "RAR Files are Read-Only",
                "It looks like you have opened a RAR/CBR archive,\n"
                "however ComicTagger cannot write to them without the rar program, so they are marked read-only!\n\n"
                f"{rar_help}",
            )
            self.rar_ro_shown = True
    def get_current_list_row(self, path: str) -> tuple[int, ComicArchive]:
        pl = pathlib.Path(path)
        if pl not in self.loaded_paths:
            return -1, None  # type: ignore[return-value]

        for r in range(self.twList.rowCount()):
            ca = cast(ComicArchive, self.get_archive_by_row(r))
            if ca.path == pl:
                return r, ca

        return -1, None  # type: ignore[return-value]
    def add_path_item(self, path: str) -> tuple[int, ComicArchive]:
        path = str(path)
        path = os.path.abspath(path)

        current_row, ca = self.get_current_list_row(path)
        if current_row >= 0:
            return current_row, ca

        ca = ComicArchive(
            path, str(graphics_path / "nocover.png"), hash_archive=self.config.Runtime_Options__preferred_hash
        )

        if ca.seems_to_be_a_comic_archive():
            self.loaded_paths.add(ca.path)
            row: int = self.twList.rowCount()
            self.twList.insertRow(row)

            filename_item = QtWidgets.QTableWidgetItem()
            folder_item = QtWidgets.QTableWidgetItem()
            md_item = QtWidgets.QTableWidgetItem()
            readonly_item = QtWidgets.QTableWidgetItem()
            type_item = QtWidgets.QTableWidgetItem()

            item_text = os.path.split(ca.path)[1]

            filename_item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
            filename_item.setData(QtCore.Qt.ItemDataRole.UserRole, ca)
            filename_item.setText(item_text)
            filename_item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
            self.twList.setItem(row, FileSelectionList.fileColNum, filename_item)

            item_text = os.path.split(ca.path)[0]

            folder_item.setText(item_text)
            folder_item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
            folder_item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
            self.twList.setItem(row, FileSelectionList.folderColNum, folder_item)

            type_item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
            self.twList.setItem(row, FileSelectionList.typeColNum, type_item)

            md_item.setText(", ".join(x for x in ca.get_supported_tags() if ca.has_tags(x)))
            md_item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
            md_item.setTextAlignment(QtCore.Qt.AlignmentFlag.AlignHCenter)
            self.twList.setItem(row, FileSelectionList.MDFlagColNum, md_item)

            if not ca.is_writable():
                readonly_item.setCheckState(QtCore.Qt.CheckState.Checked)
                readonly_item.setData(QtCore.Qt.ItemDataRole.UserRole, True)
                readonly_item.setText(" ")
            else:
                readonly_item.setData(QtCore.Qt.ItemDataRole.UserRole, False)
                readonly_item.setCheckState(QtCore.Qt.CheckState.Unchecked)
                # This is an NBSP; it sorts after a plain space ' '
                readonly_item.setText("\xa0")
            readonly_item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
            readonly_item.setTextAlignment(QtCore.Qt.AlignmentFlag.AlignHCenter)
            self.twList.setItem(row, FileSelectionList.readonlyColNum, readonly_item)

            return row, ca
        return -1, None  # type: ignore[return-value]
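The `setText(" ")` / `setText("\xa0")` pair above is a sorting trick: a no-break space (U+00A0) compares greater than a plain space (U+0020), so checked (read-only) rows sort ahead of unchecked ones even though both cells look blank. A quick demonstration:

```python
# U+0020 (space) sorts before U+00A0 (no-break space)
print(" " < "\xa0")            # True
print(sorted(["\xa0", " "]))   # [' ', '\xa0']
```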
    def update_row(self, row: int) -> None:
        if row >= 0:
            ca: ComicArchive = self.twList.item(row, FileSelectionList.dataColNum).data(QtCore.Qt.ItemDataRole.UserRole)

            filename_item = self.twList.item(row, FileSelectionList.fileColNum)
            folder_item = self.twList.item(row, FileSelectionList.folderColNum)
            md_item = self.twList.item(row, FileSelectionList.MDFlagColNum)
            type_item = self.twList.item(row, FileSelectionList.typeColNum)
            readonly_item = self.twList.item(row, FileSelectionList.readonlyColNum)

            item_text = os.path.split(ca.path)[1]
            filename_item.setText(item_text)
            filename_item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)

            item_text = os.path.split(ca.path)[0]
            folder_item.setText(item_text)
            folder_item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)

            item_text = ca.archiver.name()
            type_item.setText(item_text)
            type_item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)

            md_item.setText(", ".join(x for x in ca.get_supported_tags() if ca.has_tags(x)))

            if not ca.is_writable():
                readonly_item.setCheckState(QtCore.Qt.CheckState.Checked)
                readonly_item.setData(QtCore.Qt.ItemDataRole.UserRole, True)
                readonly_item.setText(" ")
            else:
                readonly_item.setData(QtCore.Qt.ItemDataRole.UserRole, False)
                readonly_item.setCheckState(QtCore.Qt.CheckState.Unchecked)
                # This is an NBSP; it sorts after a plain space ' '
                readonly_item.setText("\xa0")
    def get_selected_archive_list(self) -> list[ComicArchive]:
        ca_list: list[ComicArchive] = []
        for r in range(self.twList.rowCount()):
            item = self.twList.item(r, FileSelectionList.dataColNum)
            if item.isSelected():
                ca: ComicArchive = item.data(QtCore.Qt.ItemDataRole.UserRole)
                ca_list.append(ca)

        return ca_list

    def update_current_row(self) -> None:
        self.update_row(self.twList.currentRow())

    def update_selected_rows(self) -> None:
        self.twList.setSortingEnabled(False)
        for r in range(self.twList.rowCount()):
            item = self.twList.item(r, FileSelectionList.dataColNum)
            if item.isSelected():
                self.update_row(r)
        self.twList.setSortingEnabled(True)
    def current_item_changed_cb(self, curr: QtCore.QModelIndex | None, prev: QtCore.QModelIndex | None) -> None:
        if curr is not None:
            new_idx = curr.row()
            old_idx = -1
            if prev is not None:
                old_idx = prev.row()

            if old_idx == new_idx:
                return

            # don't allow change if modified
            if prev is not None and new_idx != old_idx:
                if not self.dirty_flag_verification(
                    "Change Archive", "If you change archives now, data in the form will be lost. Are you sure?"
                ):
                    self.twList.currentItemChanged.disconnect(self.current_item_changed_cb)
                    self.twList.setCurrentItem(prev)
                    self.twList.currentItemChanged.connect(self.current_item_changed_cb)
                    # Need to defer this revert selection, for some reason
                    QtCore.QTimer.singleShot(1, self.revert_selection)
                    return

            fi = self.twList.item(new_idx, FileSelectionList.dataColNum).data(QtCore.Qt.ItemDataRole.UserRole)
            self.selectionChanged.emit(QtCore.QVariant(fi))

    def revert_selection(self) -> None:
        self.twList.selectRow(self.twList.currentRow())
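The `QTimer.singleShot(1, self.revert_selection)` call deserves a note: changing the selection from inside the `currentItemChanged` handler competes with Qt's own in-flight selection update, so the revert is queued to run after the signal dispatch finishes. A stdlib-only sketch of that defer-until-after-the-handler idea, with `sched` standing in for the Qt event loop:

```python
import sched
import time

def revert_selection():
    print("2: revert runs after the handler has returned")

# sched stands in for the Qt event loop; enter(0.001, ...) plays the
# role of QtCore.QTimer.singleShot(1, revert_selection).
loop = sched.scheduler(time.monotonic, time.sleep)
loop.enter(0.001, 1, revert_selection)
print("1: handler returns first")
loop.run()
```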
@@ -1,316 +0,0 @@
"""
A Python class for internal metadata storage

The goal of this class is to handle ALL the data that might come from various
tagging schemes and databases, such as ComicVine or GCD. This makes conversion
possible, however lossy it might be.
"""

"""
Copyright 2012-2014 Anthony Beville

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import utils

# These page info classes are exactly the same as in the CIX scheme, since it's unique
class PageType:
    FrontCover = "FrontCover"
    InnerCover = "InnerCover"
    Roundup = "Roundup"
    Story = "Story"
    Advertisement = "Advertisement"
    Editorial = "Editorial"
    Letters = "Letters"
    Preview = "Preview"
    BackCover = "BackCover"
    Other = "Other"
    Deleted = "Deleted"
"""
class PageInfo:
    Image = 0
    Type = PageType.Story
    DoublePage = False
    ImageSize = 0
    Key = ""
    ImageWidth = 0
    ImageHeight = 0
"""
class GenericMetadata:

    def __init__(self):

        self.isEmpty = True
        self.tagOrigin = None

        self.series = None
        self.issue = None
        self.title = None
        self.publisher = None
        self.month = None
        self.year = None
        self.day = None
        self.issueCount = None
        self.volume = None
        self.genre = None
        self.language = None  # 2-letter ISO code
        self.comments = None  # used the same way as Summary in CIX

        self.volumeCount = None
        self.criticalRating = None
        self.country = None

        self.alternateSeries = None
        self.alternateNumber = None
        self.alternateCount = None
        self.imprint = None
        self.notes = None
        self.webLink = None
        self.format = None
        self.manga = None
        self.blackAndWhite = None
        self.pageCount = None
        self.maturityRating = None

        self.storyArc = None
        self.seriesGroup = None
        self.scanInfo = None

        self.characters = None
        self.teams = None
        self.locations = None

        self.credits = list()
        self.tags = list()
        self.pages = list()

        # Some CoMet-only items
        self.price = None
        self.isVersionOf = None
        self.rights = None
        self.identifier = None
        self.lastMark = None
        self.coverImage = None
    def overlay( self, new_md ):
        # Overlay a metadata object on this one; that is, when the new object
        # has non-None values, over-write this one's values with them

        def assign( cur, new ):
            if new is not None:
                if type(new) == str and len(new) == 0:
                    setattr(self, cur, None)
                else:
                    setattr(self, cur, new)

        if not new_md.isEmpty:
            self.isEmpty = False

        assign( "series", new_md.series )
        assign( "issue", new_md.issue )
        assign( "issueCount", new_md.issueCount )
        assign( "title", new_md.title )
        assign( "publisher", new_md.publisher )
        assign( "day", new_md.day )
        assign( "month", new_md.month )
        assign( "year", new_md.year )
        assign( "volume", new_md.volume )
        assign( "volumeCount", new_md.volumeCount )
        assign( "genre", new_md.genre )
        assign( "language", new_md.language )
        assign( "country", new_md.country )
        assign( "criticalRating", new_md.criticalRating )
        assign( "alternateSeries", new_md.alternateSeries )
        assign( "alternateNumber", new_md.alternateNumber )
        assign( "alternateCount", new_md.alternateCount )
        assign( "imprint", new_md.imprint )
        assign( "webLink", new_md.webLink )
        assign( "format", new_md.format )
        assign( "manga", new_md.manga )
        assign( "blackAndWhite", new_md.blackAndWhite )
        assign( "maturityRating", new_md.maturityRating )
        assign( "storyArc", new_md.storyArc )
        assign( "seriesGroup", new_md.seriesGroup )
        assign( "scanInfo", new_md.scanInfo )
        assign( "characters", new_md.characters )
        assign( "teams", new_md.teams )
        assign( "locations", new_md.locations )
        assign( "comments", new_md.comments )
        assign( "notes", new_md.notes )

        assign( "price", new_md.price )
        assign( "isVersionOf", new_md.isVersionOf )
        assign( "rights", new_md.rights )
        assign( "identifier", new_md.identifier )
        assign( "lastMark", new_md.lastMark )

        self.overlayCredits( new_md.credits )

        # TODO: not sure if the tags and pages should be broken down,
        # or treated as whole lists...

        # For now, go the easy route, where any overlay value wipes out the whole list
        if len(new_md.tags) > 0:
            assign( "tags", new_md.tags )

        if len(new_md.pages) > 0:
            assign( "pages", new_md.pages )
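A hedged usage sketch of `overlay()`'s semantics, assuming the `GenericMetadata` class above is importable: a non-None value in the new object wins, an explicit empty string clears the field, and None leaves the old value alone. Field values here are made up.

```python
base = GenericMetadata()
base.series = "Example Series"
base.issue = "1"
base.title = "Old Title"

newer = GenericMetadata()
newer.isEmpty = False
newer.issue = "2"   # non-None: overrides base.issue
newer.title = ""    # empty string: clears base.title to None
                    # newer.series is None: base.series is untouched

base.overlay(newer)
print(base.series, base.issue, base.title)  # Example Series 2 None
```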
    def overlayCredits( self, new_credits ):
        for c in new_credits:
            if c.has_key('primary') and c['primary']:
                primary = True
            else:
                primary = False

            # Remove credit role if person is blank
            if c['person'] == "":
                for r in reversed(self.credits):
                    if r['role'].lower() == c['role'].lower():
                        self.credits.remove(r)
            # otherwise, add it!
            else:
                self.addCredit( c['person'], c['role'], primary )
    def setDefaultPageList( self, count ):
        # generate a default page list, with the first page marked as the cover
        for i in range(count):
            page_dict = dict()
            page_dict['Image'] = str(i)
            if i == 0:
                page_dict['Type'] = PageType.FrontCover
            self.pages.append( page_dict )
    def getArchivePageIndex( self, pagenum ):
        # convert the displayed page number to the page index of the file in the archive
        if pagenum < len( self.pages ):
            return int( self.pages[pagenum]['Image'] )
        else:
            return 0
    def getCoverPageIndexList( self ):
        # return a list of archive page indices of cover pages
        coverlist = []
        for p in self.pages:
            if 'Type' in p and p['Type'] == PageType.FrontCover:
                coverlist.append( int(p['Image']) )

        if len(coverlist) == 0:
            coverlist.append( 0 )

        return coverlist
||||
|
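The page dicts store archive indices as strings under 'Image', so cover lookup converts back to int and falls back to page 0 when nothing is tagged. A condensed sketch of `getCoverPageIndexList`'s logic (assumes the `PageType` constants above; the page data is made up):

```python
pages = [
    {"Image": "0", "Type": PageType.FrontCover},
    {"Image": "1"},
]
covers = [int(p["Image"]) for p in pages if p.get("Type") == PageType.FrontCover] or [0]
print(covers)  # [0] -- and still [0] via the fallback if no page is tagged FrontCover
```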
    def addCredit( self, person, role, primary = False ):

        credit = dict()
        credit['person'] = person
        credit['role'] = role
        if primary:
            credit['primary'] = primary

        # look to see if it's not already there...
        found = False
        for c in self.credits:
            if ( c['person'].lower() == person.lower() and
                 c['role'].lower() == role.lower() ):
                # no need to add it; just adjust the "primary" flag as needed
                c['primary'] = primary
                found = True
                break

        if not found:
            self.credits.append(credit)
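A hedged usage sketch of `addCredit()`: the person/role match is case-insensitive, so re-adding an existing credit only updates its 'primary' flag instead of creating a duplicate (again assuming the class above is importable; the names are made up).

```python
md = GenericMetadata()
md.addCredit("Jane Doe", "Artist")
md.addCredit("jane doe", "artist", True)  # matches case-insensitively: no duplicate

print(len(md.credits))  # 1
print(md.credits[0])    # {'person': 'Jane Doe', 'role': 'Artist', 'primary': True}
```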
    def __str__( self ):
        vals = []
        if self.isEmpty:
            return "No metadata"

        def add_string( tag, val ):
            if val is not None and u"{0}".format(val) != "":
                vals.append( (tag, val) )

        def add_attr_string( tag ):
            add_string( tag, getattr(self, tag) )

        add_attr_string( "series" )
        add_attr_string( "issue" )
        add_attr_string( "issueCount" )
        add_attr_string( "title" )
        add_attr_string( "publisher" )
        add_attr_string( "year" )
        add_attr_string( "month" )
        add_attr_string( "day" )
        add_attr_string( "volume" )
        add_attr_string( "volumeCount" )
        add_attr_string( "genre" )
        add_attr_string( "language" )
        add_attr_string( "country" )
        add_attr_string( "criticalRating" )
        add_attr_string( "alternateSeries" )
        add_attr_string( "alternateNumber" )
        add_attr_string( "alternateCount" )
        add_attr_string( "imprint" )
        add_attr_string( "webLink" )
        add_attr_string( "format" )
        add_attr_string( "manga" )

        add_attr_string( "price" )
        add_attr_string( "isVersionOf" )
        add_attr_string( "rights" )
        add_attr_string( "identifier" )
        add_attr_string( "lastMark" )

        if self.blackAndWhite:
            add_attr_string( "blackAndWhite" )
        add_attr_string( "maturityRating" )
        add_attr_string( "storyArc" )
        add_attr_string( "seriesGroup" )
        add_attr_string( "scanInfo" )
        add_attr_string( "characters" )
        add_attr_string( "teams" )
        add_attr_string( "locations" )
        add_attr_string( "comments" )
        add_attr_string( "notes" )

        add_string( "tags", utils.listToString( self.tags ) )

        for c in self.credits:
            primary = ""
            if c.has_key('primary') and c['primary']:
                primary = " [P]"
            add_string( "credit", c['role'] + ": " + c['person'] + primary )

        # find the longest field name
        flen = 0
        for i in vals:
            flen = max( flen, len(i[0]) )
        flen += 1

        # format the data nicely
        outstr = ""
        fmt_str = u"{0: <" + str(flen) + "} {1}\n"
        for i in vals:
            outstr += fmt_str.format( i[0] + ":", i[1] )

        return outstr
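The alignment logic in `__str__` pads each field name to the longest name plus one, then emits "tag: value" rows. A standalone sketch of just that formatting (field values made up):

```python
vals = [("series", "Example Series"), ("issueCount", "54")]

flen = max(len(tag) for tag, _ in vals) + 1
fmt_str = "{0: <" + str(flen) + "} {1}"
for tag, val in vals:
    print(fmt_str.format(tag + ":", val))
# series:     Example Series
# issueCount: 54
```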
comictaggerlib/graphics/__init__.py (new file, 5 lines)
@@ -0,0 +1,5 @@
from __future__ import annotations

import importlib.resources

graphics_path = importlib.resources.files(__package__)
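With the new `graphics_path` in place, bundled images resolve through `importlib.resources` whether the package lives on disk or inside a zip/wheel. A hedged usage sketch (assumes an installed comictaggerlib; `nocover.png` is the file referenced by `add_path_item` above):

```python
from comictaggerlib.graphics import graphics_path

nocover = graphics_path / "nocover.png"  # a Traversable, not necessarily a Path
data = nocover.read_bytes()              # works from a zip/wheel install too
print(len(data), "bytes")
```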
Binary file not shown. (Before: 15 KiB, After: 13 KiB)

comictaggerlib/graphics/down.png (new binary file, 1.1 KiB; not shown)
Some files were not shown because too many files have changed in this diff.