Compare commits

1.2.0+1...22d92e1ded (772 commits)
.github/workflows/build.yaml (vendored, new file, 97 lines)
@@ -0,0 +1,97 @@
name: CI

env:
  PKG_CONFIG_PATH: /usr/local/opt/icu4c/lib/pkgconfig
  LC_COLLATE: en_US.UTF-8
on:
  pull_request:
  push:
    branches:
      - '**'
    tags-ignore:
      - '**'
jobs:
  lint:
    permissions:
      checks: write
      contents: read
      pull-requests: write
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        python-version: [3.9]
        os: [ubuntu-latest]

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v3
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install build dependencies
        run: |
          python -m pip install flake8

      - uses: reviewdog/action-setup@v1
        with:
          reviewdog_version: nightly
      - run: flake8 | reviewdog -f=flake8 -reporter=github-pr-review -tee -level=error -fail-on-error
        env:
          REVIEWDOG_GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }}

  build-and-test:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        python-version: [3.9]
        os: [ubuntu-latest, macos-11, windows-latest]

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v3
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install tox
        run: |
          python -m pip install --upgrade --upgrade-strategy eager tox

      - name: Install macos dependencies
        run: |
          brew install icu4c pkg-config
          # export PKG_CONFIG_PATH="/usr/local/opt/icu4c/lib/pkgconfig";
          # export PATH="/usr/local/opt/icu4c/bin:/usr/local/opt/icu4c/sbin:$PATH"
        if: runner.os == 'macOS'

      - name: Install linux dependencies
        run: |
          sudo apt-get update && sudo apt-get upgrade && sudo apt-get install pkg-config libicu-dev libqt5gui5 libfuse2
          # export PKG_CONFIG_PATH="/usr/local/opt/icu4c/lib/pkgconfig";
          # export PATH="/usr/local/opt/icu4c/bin:/usr/local/opt/icu4c/sbin:$PATH"
        if: runner.os == 'Linux'

      - name: Build and install PyPi packages
        run: |
          python -m tox r -m build

      - name: Archive production artifacts
        uses: actions/upload-artifact@v3
        with:
          name: "${{ format('ComicTagger-{0}', runner.os) }}"
          path: |
            dist/*.zip
            dist/*.tar.gz
            dist/*.dmg
            dist/*.AppImage

      - name: PyTest
        run: |
          python -m tox r
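The build-and-test job above can be approximated locally. The sketch below assumes an Ubuntu machine and that the repository's tox configuration (not shown in this diff) defines the default test environments and the `build` label that the workflow invokes.

```bash
# Rough local equivalent of the build-and-test job above (Ubuntu).
# Assumes the repo's tox config provides the environments/labels used in CI.
sudo apt-get update && sudo apt-get install -y pkg-config libicu-dev libqt5gui5 libfuse2
python -m pip install --upgrade --upgrade-strategy eager tox
python -m tox r              # test run (the "PyTest" step)
python -m tox r -m build     # build artifacts into dist/ (the "Build and install PyPi packages" step)
```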
.github/workflows/contributions.yaml (vendored, new file, 43 lines)
@@ -0,0 +1,43 @@
name: Contributions
on:
  push:
    branches:
      - 'develop'
    tags-ignore:
      - '**'

jobs:
  contrib-readme-job:
    permissions:
      contents: write
    runs-on: ubuntu-latest
    env:
      CI_COMMIT_AUTHOR: github-actions[bot]
      CI_COMMIT_EMAIL: <41898282+github-actions[bot]@users.noreply.github.com>
      CI_COMMIT_MESSAGE: Update AUTHORS
    name: A job to automate contrib in readme
    steps:
      - name: Contribute List
        uses: akhilmhdh/contributors-readme-action@v2.3.6
        with:
          use_username: true
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Update AUTHORS
        run: |
          git config --global log.mailmap true
          git log --reverse '--format=%aN <%aE>' | cat -n | sort -uk2 | sort -n | cut -f2- >AUTHORS

      - name: Commit and push AUTHORS
        run: |
          if ! git diff --exit-code; then
            git pull
            git config --global user.name "${{ env.CI_COMMIT_AUTHOR }}"
            git config --global user.email "${{ env.CI_COMMIT_EMAIL }}"
            git commit -a -m "${{ env.CI_COMMIT_MESSAGE }}"
            git push
          fi
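The AUTHORS file this workflow maintains can also be regenerated by hand with the same commands the "Update AUTHORS" step runs. The sketch below assumes the `.mailmap` file added later in this change is present in the working tree.

```bash
# Rebuild AUTHORS locally, mirroring the "Update AUTHORS" step above.
git config --global log.mailmap true   # honor .mailmap when formatting author names
git log --reverse '--format=%aN <%aE>' | cat -n | sort -uk2 | sort -n | cut -f2- > AUTHORS
git diff --exit-code AUTHORS || echo "AUTHORS changed"   # non-zero exit means a commit is needed
```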
.github/workflows/package.yaml (vendored, new file, 76 lines)
@@ -0,0 +1,76 @@
name: Package

env:
  PKG_CONFIG_PATH: /usr/local/opt/icu4c/lib/pkgconfig
  LC_COLLATE: en_US.UTF-8
on:
  push:
    tags:
      - "[0-9]+.[0-9]+.[0-9]+*"
jobs:
  package:
    permissions:
      contents: write
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        python-version: [3.9]
        os: [ubuntu-latest, macos-11, windows-latest]

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v3
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install tox
        run: |
          python -m pip install --upgrade --upgrade-strategy eager tox

      - name: Install macos dependencies
        run: |
          brew install icu4c pkg-config
          # export PKG_CONFIG_PATH="/usr/local/opt/icu4c/lib/pkgconfig";
          # export PATH="/usr/local/opt/icu4c/bin:/usr/local/opt/icu4c/sbin:$PATH"
        if: runner.os == 'macOS'

      - name: Install linux dependencies
        run: |
          sudo apt-get update && sudo apt-get upgrade && sudo apt-get install pkg-config libicu-dev libqt5gui5 libfuse2
          # export PKG_CONFIG_PATH="/usr/local/opt/icu4c/lib/pkgconfig";
          # export PATH="/usr/local/opt/icu4c/bin:/usr/local/opt/icu4c/sbin:$PATH"
        if: runner.os == 'Linux'

      - name: Build, Install and Test PyPi packages
        run: |
          python -m tox r
          python -m tox r -m release
        env:
          TWINE_USERNAME: __token__
          TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}

      - name: Get release name
        if: startsWith(github.ref, 'refs/tags/')
        shell: bash
        run: |
          git fetch --depth=1 origin +refs/tags/*:refs/tags/* # github is dumb
          echo "release_name=$(git tag -l --format "%(refname:strip=2): %(contents:lines=1)" ${{ github.ref_name }})" >> $GITHUB_ENV

      - name: Release
        uses: softprops/action-gh-release@v1
        if: startsWith(github.ref, 'refs/tags/')
        with:
          name: "${{ env.release_name }}"
          prerelease: "${{ contains(github.ref, '-') }}" # alpha-releases should be 1.3.0-alpha.x full releases should be 1.3.0
          draft: false
          # upload the single application zip file for each OS and include the wheel built on linux
          files: |
            dist/*.zip
            dist/*.tar.gz
            dist/*.dmg
            dist/*${{ fromJSON('["never", ""]')[runner.os == 'Linux'] }}.whl
            dist/*.AppImage
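For reference, this workflow only fires on tags matching `[0-9]+.[0-9]+.[0-9]+*`, and the `prerelease` flag is derived from a hyphen in the tag name. The tag names below are illustrative examples only, not tags that exist in the repository.

```bash
# Hypothetical tags that would trigger the Package workflow above.
git tag 1.3.0            # matches the pattern; published as a full release
git tag 1.3.0-alpha.1    # matches too; the '-' marks it as a prerelease
git push origin 1.3.0 1.3.0-alpha.1
```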
.gitignore (vendored, 166 changed lines)
@@ -1,6 +1,160 @@
/.idea/
/nbproject/
/dist
*.pyc
/.vscode
venv
# generated by setuptools_scm
ctversion.py

# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion

*.iml

## Directory-based project format:
.idea/

### Other editors
.*.swp
nbproject/
.vscode

comictaggerlib/_version.py
*.exe
*.zip

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# for testing
temp/
.mailmap (new file, 9 lines)
@@ -0,0 +1,9 @@
Andrew W. Buchanan <buchanan@difference.com>
Davide Romanini <d.romanini@cineca.it> <davide.romanini@gmail.com>
Davide Romanini <d.romanini@cineca.it> <user159033@92-63-141-211.rdns.melbourne.co.uk>
Michael Fitzurka <MichaelFitzurka@users.noreply.github.com> <MichaelFitzurka@github.com>
Timmy Welch <timmy@narnian.us>
beville <beville@users.noreply.github.com> <(no author)@6c5673fe-1810-88d6-992b-cd32ca31540c>
beville <beville@users.noreply.github.com> <beville@6c5673fe-1810-88d6-992b-cd32ca31540c>
beville <beville@users.noreply.github.com> <beville@gmail.com@6c5673fe-1810-88d6-992b-cd32ca31540c>
beville <beville@users.noreply.github.com> <beville@users.noreply.github.com>
.pre-commit-config.yaml (new file, 46 lines)
@@ -0,0 +1,46 @@
exclude: ^scripts
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.5.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-yaml
      - id: debug-statements
      - id: name-tests-test
      - id: requirements-txt-fixer
  - repo: https://github.com/asottile/setup-cfg-fmt
    rev: v2.5.0
    hooks:
      - id: setup-cfg-fmt
  - repo: https://github.com/asottile/pyupgrade
    rev: v3.15.1
    hooks:
      - id: pyupgrade
        args: [--py39-plus]
  - repo: https://github.com/PyCQA/autoflake
    rev: v2.3.0
    hooks:
      - id: autoflake
        args: [-i, --remove-all-unused-imports, --ignore-init-module-imports]
  - repo: https://github.com/PyCQA/isort
    rev: 5.13.2
    hooks:
      - id: isort
        args: [--af,--add-import, 'from __future__ import annotations']
  - repo: https://github.com/psf/black
    rev: 24.2.0
    hooks:
      - id: black
  - repo: https://github.com/PyCQA/flake8
    rev: 7.0.0
    hooks:
      - id: flake8
        additional_dependencies: [flake8-encodings, flake8-builtins, flake8-length, flake8-print, flake8-no-nested-comprehensions]
  - repo: https://github.com/pre-commit/mirrors-mypy
    rev: v1.8.0
    hooks:
      - id: mypy
        additional_dependencies: [types-setuptools, types-requests, settngs>=0.9.1]
ci:
  skip: [mypy]
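The diff itself does not show how this configuration is invoked; the sketch below is the standard pre-commit workflow for a config like the one above, not a command taken from this change.

```bash
# Typical pre-commit usage for the configuration above (not part of this diff).
pip install pre-commit
pre-commit install            # run the hooks automatically on every git commit
pre-commit run --all-files    # or run every hook against the whole tree once
```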
.travis.yml (deleted, 21 lines)
@@ -1,21 +0,0 @@
language: generic
os: osx
osx_image: xcode8.3
install:
  - brew upgrade python3
  - pip3 install --upgrade setuptools
  - pip3 install -r requirements.txt
script:
  - make dist
  - ls -l dist
deploy:
  provider: releases
  skip_cleanup: true
  api_key:
    secure: RgohcOJOfLhXXT12bMWaLwOqhe+ClSCYXjYuUJuWK4/E1fdd1xu1ebdQU+MI/R8cZ0Efz3sr2n3NkO/Aa8gN68xEfuF7RVRMm64P9oPrfZgGdsD6H43rU/6kN8bgaDRmCYpLTfXaJ+/gq0x1QDkhWJuceF2BYEGGvL0BvS/TUsLyjVxs8ujTplLyguXHNEv4/7Yz7SBNZZmUHjBuq/y+l8ds3ra9rSgAVAN1tMXoFKJPv+SNNkpTo5WUNMPzBnN041F1rzqHwYDLog2V7Krp9JkXzheRFdAr51/tJBYzEd8AtYVdYvaIvoO6A4PiTZ7MpsmcZZPAWqLQU00UTm/PhT/LVR+7+f8lOBG07RgNNHB+edjDRz3TAuqyuZl9wURWTZKTPuO49TkZMz7Wm0DRNZHvBm1IXLeSG7Tll2YL1+WpZNZg+Dhro2J1QD3vxDXafhMdTCB4z0q5aKpG93IT0p6oXOO0oEGOPZYbA2c5R3SXWSyqd1E1gdhbVjIZr59h++TEf1zz07tvWHqPuAF/Ly/j+dIcY2wj0EzRWaSASWgUpTnMljAkHtWhqDw4GXGDRkRUWRJl1d0/JyVqCeIdRzDQNl8/q7BcO3F1zqr1PgnYdz0lfwWxL1/ekw2vHOJE/GOdkyvX0aJrnaOV338mjJbfGHYv4ESc9ow1kdtIbiU=
  file_glob: true
  file: dist/ComicTagger*.zip
  draft: true
  on:
    branch: master
    tags: true
AUTHORS (new file, 18 lines)
@@ -0,0 +1,18 @@
beville <beville@users.noreply.github.com>
Davide Romanini <d.romanini@cineca.it>
fcanc <f.canc@icloud.com>
Alban Seurat <alkpone@alkpone.com>
tlc <tlc@users.noreply.github.com>
Marek Pawlak <francuz14@gmail.com>
Timmy Welch <timmy@narnian.us>
J.P. Cranford <philipcranford4@gmail.com>
thFrgttn <39759781+thFrgttn@users.noreply.github.com>
Andrew W. Buchanan <buchanan@difference.com>
Michael Fitzurka <MichaelFitzurka@users.noreply.github.com>
Richard Haussmann <richard.haussmann@gmail.com>
Mizaki <jinxybob@hotmail.com>
Xavier Jouvenot <x.jouvenot@gmail.com>
github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Ben Longman <deck@steamdeck.lan>
Sven Hesse <drmccoy@drmccoy.de>
pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
CONTRIBUTING.md (new file, 98 lines)
@@ -0,0 +1,98 @@
# How to contribute

If you're not sure what you can do, need to ask a question, or just want to talk about ComicTagger, head over to the [discussions tab](https://github.com/comictagger/comictagger/discussions/categories/general) and start a discussion.

## Tests

We have tests written using pytest! Some of them even pass! If you are contributing code, any tests you can write are appreciated.

A great place to start is extending the tests that are already made.

For example, the file tests/filenames.py has lists of filenames to be parsed, in the format:
```py
pytest.param(
    "Star Wars - War of the Bounty Hunters - IG-88 (2021) (Digital) (Kileko-Empire).cbz",
    "number ends series, no-issue",
    {
        "issue": "",
        "series": "Star Wars - War of the Bounty Hunters - IG-88",
        "volume": "",
        "year": "2021",
        "remainder": "(Digital) (Kileko-Empire)",
        "issue_count": "",
    },
    marks=pytest.mark.xfail,
)
```

A test consists of 3-4 parts:
1. The filename to be parsed
2. The reason it might fail
3. What the result of parsing the filename should be
4. `marks=pytest.mark.xfail`: this marks the test as expected to fail

If you are not comfortable creating a pull request, you can [open an issue](https://github.com/comictagger/comictagger/issues/new/choose) or [start a discussion](https://github.com/comictagger/comictagger/discussions/new).

## Submitting changes

Please open a [GitHub Pull Request](https://github.com/comictagger/comictagger/pull/new/develop) with a clear list of what you've done (read more about [pull requests](http://help.github.com/pull-requests/)). When you send a pull request, we will love you forever if you include tests. We can always use more test coverage. Please run the code tools below and make sure all of your commits are atomic (one feature per commit).

## Contributing Code

Currently only Python 3.9 is supported; however, 3.10 will probably work if you try it.

Those on Linux should install `Pillow` from the system package manager if possible, and if using the GUI, `pyqt5` should also be installed from the system package manager.

Those on macOS will need to ensure that they are using python3 in x86 mode, either by installing an x86-only version of Python or by using the universal installer and running `python3-intel64` instead of `python3`.

1. Clone the repository
```
git clone https://github.com/comictagger/comictagger.git
```

2. It is preferred to use a virtual env for running from source:

```
python3 -m venv venv
```

3. Activate the virtual env:
```
. venv/bin/activate
```
or, if on Windows PowerShell:
```
. venv/bin/activate.ps1
```

4. Install tox:
```bash
pip install tox
```

5. If you are on an M1 Mac you will need to export two environment variables for tests to pass.
```
export tox_python=python3.9-intel64
export tox_env=m1env
```

6. Install ComicTagger:
```
tox run -e venv
```

7. Make your changes
8. Build to ensure that your changes work: this will produce a binary build in the dist folder
```bash
tox run -m build
```

The build runs these formatters and linters automatically:

- setup-cfg-fmt: formats the setup.cfg file
- autoflake: removes unused imports
- isort: sorts imports so that you can always find where an import is located
- black: formats all of the code consistently so there are no surprises
- flake8: checks for code quality and style (warns for unused imports and similar issues)
- mypy: checks the types of variables and functions to catch errors
- pytest: runs tests for ComicTagger functionality
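Taken together, the numbered steps above boil down to the following sequence. This is a condensed sketch for a POSIX shell (Linux/macOS); the M1-specific exports from step 5 are shown commented out.

```bash
# Condensed development setup from the steps above.
git clone https://github.com/comictagger/comictagger.git
cd comictagger
python3 -m venv venv
. venv/bin/activate
pip install tox
# On an M1 Mac only (see step 5):
# export tox_python=python3.9-intel64
# export tox_env=m1env
tox run -e venv     # install ComicTagger into the virtual env
tox run -m build    # build; binaries end up in the dist folder
```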
LICENSE (new file, 202 lines)
@@ -0,0 +1,202 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
@@ -1,8 +0,0 @@
include README.md
include release_notes.txt
include requirements.txt
include unrar/*
recursive-include scripts *.py *.txt
recursive-include desktop-integration *
include windows/app.ico
include mac/app.icns
Makefile (deleted, 57 lines)
@@ -1,57 +0,0 @@
VERSION_STR := $(shell python -c 'import comictaggerlib.ctversion; print( comictaggerlib.ctversion.version)')

ifeq ($(OS),Windows_NT)
	OS_VERSION=win-$(PROCESSOR_ARCHITECTURE)
	APP_NAME=comictagger.exe
	FINAL_NAME=ComicTagger-$(VERSION_STR)-$(OS_VERSION).exe
else ifeq ($(shell uname -s),Darwin)
	OS_VERSION=osx-$(shell defaults read loginwindow SystemVersionStampAsString)-$(shell uname -m)
	APP_NAME=ComicTagger.app
	FINAL_NAME=ComicTagger-$(VERSION_STR)-$(OS_VERSION).app
else
	APP_NAME=comictagger
	FINAL_NAME=ComicTagger-$(VERSION_STR)
endif

.PHONY: all clean pydist upload unrar dist

all: clean dist

clean:
	rm -rf *~ *.pyc *.pyo
	rm -rf scripts/*.pyc
	cd comictaggerlib; rm -f *~ *.pyc *.pyo
	rm -rf dist MANIFEST
	rm -rf *.deb
	rm -rf logdict*.log
	$(MAKE) -C mac clean
	rm -rf build
	$(MAKE) -C unrar clean
	rm -f unrar/libunrar.so unrar/libunrar.a unrar/unrar
	rm -f comictaggerlib/libunrar.so
	rm -rf comictaggerlib/ui/__pycache__

pydist:
	make clean
	mkdir -p piprelease
	rm -f comictagger-$(VERSION_STR).zip
	python setup.py sdist --formats=zip #,gztar
	mv dist/comictagger-$(VERSION_STR).zip piprelease
	rm -rf comictagger.egg-info dist

upload:
	python setup.py register
	python setup.py sdist --formats=zip upload

unrar:
ifeq ($(OS),Windows_NT)
	# statically compile mingw dependencies
	# https://stackoverflow.com/questions/18138635/mingw-exe-requires-a-few-gcc-dlls-regardless-of-the-code
	$(MAKE) -C unrar LDFLAGS='-Wl,-Bstatic,--whole-archive -lwinpthread -Wl,--no-whole-archive -pthread -static-libgcc -static-libstdc++' lib
else
	$(MAKE) -C unrar lib
endif

dist: unrar
	pyinstaller -y comictagger.spec
	cd dist && zip -r $(FINAL_NAME).zip $(APP_NAME)
README.md (214 changed lines)
@@ -1,29 +1,185 @@
ComicTagger is a multi-platform app for writing metadata to digital comics, written in Python and PyQt.

Features:

* Runs on Mac OSX, Microsoft Windows, and Linux systems
* Communicates with an online database (Comic Vine) for acquiring metadata
* Uses image processing to automatically match a given archive with the correct issue data
* Batch processing in the GUI for tagging hundreds or more comics at a time
* Reads and writes multiple tagging schemes (ComicBookLover and ComicRack).
* Reads and writes RAR and Zip archives (external tools needed for writing RAR)
* Can run without PyQt5 installed

Recent changes:
- Ported to Python 3
- Ported to PyQt5
- Added more application and GUI awareness of the unrar library, and removed references to the old scheme that used the unrar executable.
- Got setup.py working again to build sdist packages, suitable (I think) for PyPI. An install from the package will attempt to build the unrar library. It should work on most Linux distros, and was tested on a Mac OSX system with dev tools from homebrew. If the library doesn't build, the GUI has instructions on where to download the library.
- Removed/changed obsolete links to the old Google Code website.
- Set an environment variable to scale the GUI on 4k displays

Notes:
- I did some testing with the pyinstaller build, and it worked on both platforms. I did encounter two problems:
  - Mac build showed the wrong widget set. I found a solution here that seemed to work: https://stackoverflow.com/questions/48626999/packaging-with-pyinstaller-pyqt5-setstyle-ignored
  - Windows build had problems grabbing images from ComicVine using SSL. I think that some libraries are missing from the monolithic exe, but I couldn't figure out how to fix the problem.
- In setup.py you can also find the remains of an attempt to do some desktop integration from a pip install. It does work, but can cause problems with wheel installs, and I don't know if it's worth the bother. I kept the commented-out code in place, just in case.

With Python 3, it's much easier to get the app working from scratch on a new distro, as all of the dependencies are available as wheels, including PyQt5, so just a simple "pip install comictagger.zip" is all that's needed.

[](https://github.com/comictagger/comictagger/actions/workflows/build.yaml)
[](https://github.com/comictagger/comictagger/releases/latest)
[](https://pypi.org/project/comictagger/)
[](https://pypistats.org/packages/comictagger)
[](https://community.chocolatey.org/packages/comictagger)
[](https://opensource.org/licenses/Apache-2.0)

[](https://github.com/comictagger/comictagger/discussions)
[](https://gitter.im/comictagger/community)
[](https://groups.google.com/forum/#!forum/comictagger)
[](https://twitter.com/comictagger)
[](https://www.facebook.com/ComicTagger-139615369550787/)

# ComicTagger

ComicTagger is a **multi-platform** app for **writing metadata to digital comics**, written in Python and PyQt.

## Features

* Runs on macOS, Microsoft Windows, and Linux systems
* Get comic information from [Comic Vine](https://comicvine.gamespot.com/)
* **Automatic issue matching** using advanced image processing techniques
* **Batch processing** in the GUI for tagging hundreds or more comics at a time
* Support for **ComicRack** and **ComicBookLover** tagging formats
* Native full support for **CBZ** digital comics
* Native read-only support for **CBR** digital comics: full support enabled by installing additional [rar tools](https://www.rarlab.com/download.htm)
* Command line interface (CLI) enabling **custom scripting** and **batch operations on large collections**

For details, screen-shots, and more, visit [the Wiki](https://github.com/comictagger/comictagger/wiki)


## Installation

### Binaries

Windows, Linux and macOS binaries are provided on the [Releases Page](https://github.com/comictagger/comictagger/releases).

Just unzip the archive in any folder and run; no additional installation steps are required.

### PIP installation

A pip package is provided; you can install it with:

```
$ pip3 install comictagger[GUI]
```

There are optional dependencies. You can install them by specifying one or more of them in brackets, e.g. `comictagger[CBR,GUI]`.

Optional dependencies:
1. `ICU`: Ensures that comic pages are sorted correctly. This should always be installed. *Currently only exists in the latest alpha release*
1. `CBR`: Provides support for CBR/RAR files.
1. `GUI`: Installs the GUI.
1. `7Z`: Provides support for CB7/7Z files.
1. `all`: Installs all of the above optional dependencies.

### Chocolatey installation (Windows only)

A [Chocolatey package](https://community.chocolatey.org/packages/comictagger), maintained by @Xav83, is provided; you can install it with:
```powershell
choco install comictagger
```
### From source

1. Ensure you have Python 3.9 installed
2. Clone this repository `git clone https://github.com/comictagger/comictagger.git`
3. `pip3 install .[ICU]` or `pip3 install .[GUI,ICU]`


## Contributors

<!-- readme: beville,davide-romanini,collaborators,contributors -start -->
- [beville](https://github.com/beville)
- [davide-romanini](https://github.com/davide-romanini)
- [fcanc](https://github.com/fcanc)
- [lordwelch](https://github.com/lordwelch)
- [mizaki](https://github.com/mizaki)
- [MichaelFitzurka](https://github.com/MichaelFitzurka)
- [abuchanan920](https://github.com/abuchanan920)
- [AlbanSeurat](https://github.com/AlbanSeurat)
- [rhaussmann](https://github.com/rhaussmann)
- [jpcranford](https://github.com/jpcranford)
- [PawlakMarek](https://github.com/PawlakMarek)
- [DrMcCoy](https://github.com/DrMcCoy)
- [Xav83](https://github.com/Xav83)
- [thFrgttn](https://github.com/thFrgttn)
- [tlc](https://github.com/tlc)
<!-- readme: beville,davide-romanini,collaborators,contributors -end -->
appveyor.yml (deleted, 18 lines)
@@ -1,18 +0,0 @@
|
||||
version: 1.0.{build}
|
||||
build_script:
|
||||
- cmd: powershell -exec bypass -File windows\fullbuild.ps1
|
||||
artifacts:
|
||||
- path: dist\*.zip
|
||||
name: ComicTagger
|
||||
deploy:
|
||||
description: New Windows Release
|
||||
provider: GitHub
|
||||
auth_token:
|
||||
secure: GftVXNVTfnfWxlCIWNsufQjgrLqGPbrhK7bddzZ+6onYbbx6vEKWku0QMZjcxq/D
|
||||
draft: true
|
||||
prerelease: false
|
||||
artifact: ComicTagger
|
||||
on:
|
||||
branch: master
|
||||
APPVEYOR_REPO_TAG: true
|
||||
|
||||
@@ -3,7 +3,7 @@ Encoding=UTF-8
|
||||
Name=ComicTagger
|
||||
GenericName=Comic Metadata Editor
|
||||
Comment=A cross-platform GUI/CLI app for writing metadata to comic archives
|
||||
Exec=%%CTSCRIPT%% %F
|
||||
Exec=comictagger %F
|
||||
Icon=/usr/local/share/comictagger/app.png
|
||||
Terminal=false
|
||||
Type=Application
|
||||
build-tools/comictagger.spec (new file, 241 lines)
@@ -0,0 +1,241 @@
|
||||
# -*- mode: python ; coding: utf-8 -*-
|
||||
|
||||
import platform
|
||||
|
||||
from comictaggerlib import ctversion
|
||||
|
||||
enable_console = False
|
||||
block_cipher = None
|
||||
|
||||
|
||||
a = Analysis(
|
||||
["../comictaggerlib/__main__.py"],
|
||||
pathex=[],
|
||||
binaries=[],
|
||||
datas=[],
|
||||
hiddenimports=[],
|
||||
hookspath=[],
|
||||
hooksconfig={},
|
||||
runtime_hooks=[],
|
||||
excludes=[],
|
||||
win_no_prefer_redirects=False,
|
||||
win_private_assemblies=False,
|
||||
cipher=block_cipher,
|
||||
noarchive=False,
|
||||
)
|
||||
|
||||
exe_binaries = []
|
||||
exe_zipfiles = []
|
||||
exe_datas = []
|
||||
exe_exclude_binaries = True
|
||||
|
||||
coll_binaries = a.binaries
|
||||
coll_zipfiles = a.zipfiles
|
||||
coll_datas = a.datas
|
||||
|
||||
if platform.system() in ["Windows"]:
|
||||
enable_console = True
|
||||
exe_binaries = a.binaries
|
||||
exe_zipfiles = a.zipfiles
|
||||
exe_datas = a.datas
|
||||
exe_exclude_binaries = False
|
||||
|
||||
coll_binaries = []
|
||||
coll_zipfiles = []
|
||||
coll_datas = []
|
||||
|
||||
|
||||
pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)
|
||||
|
||||
exe = EXE(
|
||||
pyz,
|
||||
a.scripts,
|
||||
exe_binaries,
|
||||
exe_zipfiles,
|
||||
exe_datas,
|
||||
[],
|
||||
exclude_binaries=exe_exclude_binaries,
|
||||
name="comictagger",
|
||||
debug=False,
|
||||
bootloader_ignore_signals=False,
|
||||
strip=False,
|
||||
upx=True,
|
||||
upx_exclude=[],
|
||||
runtime_tmpdir=None,
|
||||
console=enable_console,
|
||||
disable_windowed_traceback=False,
|
||||
argv_emulation=False,
|
||||
target_arch=None,
|
||||
codesign_identity=None,
|
||||
entitlements_file=None,
|
||||
icon="windows/app.ico",
|
||||
)
|
||||
if platform.system() not in ["Windows"]:
|
||||
coll = COLLECT(
|
||||
exe,
|
||||
coll_binaries,
|
||||
coll_zipfiles,
|
||||
coll_datas,
|
||||
strip=False,
|
||||
upx=True,
|
||||
upx_exclude=[],
|
||||
name="comictagger",
|
||||
)
|
||||
app = BUNDLE(
|
||||
coll,
|
||||
name="ComicTagger.app",
|
||||
icon="mac/app.icns",
|
||||
info_plist={
|
||||
"NSHighResolutionCapable": "True",
|
||||
"NSPrincipalClass": "NSApplication",
|
||||
"NSRequiresAquaSystemAppearance": "False",
|
||||
"CFBundleDisplayName": "ComicTagger",
|
||||
"CFBundleShortVersionString": ctversion.version,
|
||||
"CFBundleVersion": ctversion.version,
|
||||
"CFBundleDocumentTypes": [
|
||||
{
|
||||
"CFBundleTypeRole": "Editor",
|
||||
"LSHandlerRank": "Default",
|
||||
"LSItemContentTypes": [
|
||||
"public.folder",
|
||||
],
|
||||
"CFBundleTypeName": "Folder",
|
||||
},
|
||||
{
|
||||
"CFBundleTypeExtensions": [
|
||||
"cbz",
|
||||
],
|
||||
"LSTypeIsPackage": False,
|
||||
"NSPersistentStoreTypeKey": "Binary",
|
||||
"CFBundleTypeIconSystemGenerated": True,
|
||||
"CFBundleTypeName": "ZIP Comic Archive",
|
||||
"LSItemContentTypes": [
|
||||
"public.zip-comic-archive",
|
||||
"com.simplecomic.cbz-archive",
|
||||
"com.macitbetter.cbz-archive",
|
||||
"public.cbz-archive",
|
||||
"cx.c3.cbz-archive",
|
||||
"com.yacreader.yacreader.cbz",
|
||||
"com.milke.cbz-archive",
|
||||
"com.bitcartel.comicbooklover.cbz",
|
||||
"public.archive.cbz",
|
||||
"public.zip-archive",
|
||||
],
|
||||
"CFBundleTypeRole": "Editor",
|
||||
"LSHandlerRank": "Default",
|
||||
},
|
||||
{
|
||||
"CFBundleTypeExtensions": [
|
||||
"cb7",
|
||||
],
|
||||
"LSTypeIsPackage": False,
|
||||
"NSPersistentStoreTypeKey": "Binary",
|
||||
"CFBundleTypeIconSystemGenerated": True,
|
||||
"CFBundleTypeName": "7-Zip Comic Archive",
|
||||
"LSItemContentTypes": [
|
||||
"org.7-zip.7-zip-archive",
|
||||
"com.simplecomic.cb7-archive",
|
||||
"public.cb7-archive",
|
||||
"com.macitbetter.cb7-archive",
|
||||
"cx.c3.cb7-archive",
|
||||
"org.7-zip.7-zip-comic-archive",
|
||||
],
|
||||
"CFBundleTypeRole": "Editor",
|
||||
"LSHandlerRank": "Default",
|
||||
},
|
||||
{
|
||||
"CFBundleTypeExtensions": [
|
||||
"cbr",
|
||||
],
|
||||
"LSTypeIsPackage": False,
|
||||
"NSPersistentStoreTypeKey": "Binary",
|
||||
"CFBundleTypeIconSystemGenerated": True,
|
||||
"CFBundleTypeName": "RAR Comic Archive",
|
||||
"LSItemContentTypes": [
|
||||
"com.rarlab.rar-archive",
|
||||
"com.rarlab.rar-comic-archive",
|
||||
"com.simplecomic.cbr-archive",
|
||||
"com.macitbetter.cbr-archive",
|
||||
"public.cbr-archive",
|
||||
"cx.c3.cbr-archive",
|
||||
"com.bitcartel.comicbooklover.cbr",
|
||||
"com.milke.cbr-archive",
|
||||
"public.archive.cbr",
|
||||
"com.yacreader.yacreader.cbr",
|
||||
],
|
||||
"CFBundleTypeRole": "Editor",
|
||||
"LSHandlerRank": "Default",
|
||||
},
|
||||
],
|
||||
"UTImportedTypeDeclarations": [
|
||||
{
|
||||
"UTTypeIdentifier": "com.rarlab.rar-archive",
|
||||
"UTTypeDescription": "RAR Archive",
|
||||
"UTTypeConformsTo": [
|
||||
"public.data",
|
||||
"public.archive",
|
||||
],
|
||||
"UTTypeTagSpecification": {
|
||||
"public.mime-type": [
|
||||
"application/x-rar",
|
||||
"application/x-rar-compressed",
|
||||
],
|
||||
"public.filename-extension": [
|
||||
"rar",
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
"UTTypeConformsTo": [
|
||||
"public.data",
|
||||
"public.archive",
|
||||
"com.rarlab.rar-archive",
|
||||
],
|
||||
"UTTypeIdentifier": "com.rarlab.rar-comic-archive",
|
||||
"UTTypeDescription": "RAR Comic Archive",
|
||||
"UTTypeTagSpecification": {
|
||||
"public.mime-type": [
|
||||
"application/vnd.comicbook-rar",
|
||||
"application/x-cbr",
|
||||
],
|
||||
"public.filename-extension": [
|
||||
"cbr",
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
"UTTypeConformsTo": [
|
||||
"public.data",
|
||||
"public.archive",
|
||||
"public.zip-archive",
|
||||
],
|
||||
"UTTypeIdentifier": "public.zip-comic-archive",
|
||||
"UTTypeDescription": "ZIP Comic Archive",
|
||||
"UTTypeTagSpecification": {
|
||||
"public.filename-extension": [
|
||||
"cbz",
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
"UTTypeConformsTo": [
|
||||
"public.data",
|
||||
"public.archive",
|
||||
"org.7-zip.7-zip-archive",
|
||||
],
|
||||
"UTTypeIdentifier": "org.7-zip.7-zip-comic-archive",
|
||||
"UTTypeDescription": "7-Zip Comic Archive",
|
||||
"UTTypeTagSpecification": {
|
||||
"public.mime-type": [
|
||||
"application/vnd.comicbook+7-zip",
|
||||
"application/x-cb7-compressed",
|
||||
],
|
||||
"public.filename-extension": [
|
||||
"cb7",
|
||||
],
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
bundle_identifier="com.comictagger",
|
||||
)
|
||||
build-tools/dmgbuild.conf (new file, 24 lines)
@@ -0,0 +1,24 @@
|
||||
import pathlib
|
||||
import platform
|
||||
from comictaggerlib.ctversion import __version__
|
||||
|
||||
app = "ComicTagger"
|
||||
exe = app.casefold()
|
||||
ver = platform.mac_ver()
|
||||
os_version = f"osx-{ver[0]}-{ver[2]}"
|
||||
app_name = f"{app}.app"
|
||||
final_name = f"{app}-{__version__}-{os_version}"
|
||||
path = pathlib.Path(f"dist/{app_name}")
|
||||
zip_file = pathlib.Path(f"dist/{final_name}.zip")
|
||||
|
||||
format = 'ULMO'
|
||||
files = (str(path),)
|
||||
|
||||
symlinks = {'Applications': '/Applications'}
|
||||
|
||||
icon = pathlib.Path().cwd() / 'build-tools' / 'mac' / 'volume.icns'
|
||||
|
||||
icon_locations = {
|
||||
app_name: (100, 100),
|
||||
'Applications': (300, 100)
|
||||
}
|
||||
build-tools/generate_settngs.py (new file, 20 lines)
@@ -0,0 +1,20 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import pathlib
|
||||
|
||||
import settngs
|
||||
|
||||
import comictaggerlib.main
|
||||
|
||||
|
||||
def generate() -> str:
|
||||
app = comictaggerlib.main.App()
|
||||
app.load_plugins(app.initial_arg_parser.parse_known_args()[0])
|
||||
app.register_settings()
|
||||
return settngs.generate_ns(app.manager.definitions)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
src = generate()
|
||||
pathlib.Path("./comictaggerlib/ctsettings/settngs_namespace.py").write_text(src)
|
||||
print(src, end="")
|
||||
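As a usage sketch (assuming the repository root as the working directory and the `settngs` package installed), the generated namespace can be refreshed by running the script directly; it writes `comictaggerlib/ctsettings/settngs_namespace.py` and echoes the generated source:

```
$ python build-tools/generate_settngs.py
```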
build-tools/get_appimage.py (new file, 33 lines)
@@ -0,0 +1,33 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import pathlib
|
||||
import stat
|
||||
|
||||
import requests
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("APPIMAGETOOL", default="build/appimagetool-x86_64.AppImage", type=pathlib.Path, nargs="?")
|
||||
|
||||
opts = parser.parse_args()
|
||||
opts.APPIMAGETOOL = opts.APPIMAGETOOL.absolute()
|
||||
|
||||
|
||||
def urlretrieve(url: str, dest: pathlib.Path) -> None:
|
||||
resp = requests.get(url)
|
||||
if resp.status_code == 200:
|
||||
dest.parent.mkdir(parents=True, exist_ok=True)
|
||||
dest.write_bytes(resp.content)
|
||||
|
||||
|
||||
if opts.APPIMAGETOOL.exists():
|
||||
raise SystemExit(0)
|
||||
|
||||
urlretrieve(
|
||||
"https://github.com/AppImage/AppImageKit/releases/latest/download/appimagetool-x86_64.AppImage", opts.APPIMAGETOOL
|
||||
)
|
||||
os.chmod(opts.APPIMAGETOOL, stat.S_IRWXU)
|
||||
|
||||
if not opts.APPIMAGETOOL.exists():
|
||||
raise SystemExit(1)
|
||||
@@ -1,11 +1,9 @@
|
||||
#PYINSTALLER_CMD := VERSIONER_PYTHON_PREFER_32_BIT=yes arch -i386 python $(HOME)/pyinstaller-2.0/pyinstaller.py
|
||||
#PYINSTALLER_CMD := python $(HOME)/pyinstaller-2.0/pyinstaller.py
|
||||
PYINSTALLER_CMD := pyinstaller
|
||||
TAGGER_BASE ?= ../
|
||||
TAGGER_SRC := $(TAGGER_BASE)/comictaggerlib
|
||||
|
||||
APP_NAME := ComicTagger
|
||||
VERSION_STR := $(shell grep version $(TAGGER_SRC)/ctversion.py| cut -d= -f2 | sed 's/\"//g')
|
||||
VERSION_STR := $(shell cd .. && python setup.py --version)
|
||||
|
||||
MAC_BASE := $(TAGGER_BASE)/mac
|
||||
DIST_DIR := $(MAC_BASE)/dist
|
||||
@@ -17,22 +15,12 @@ DMG_FILE := $(VOLUME_NAME).dmg
|
||||
all: clean dist diskimage
|
||||
|
||||
dist:
|
||||
#$(PYINSTALLER_CMD) $(TAGGER_BASE)/comictagger.py -o $(MAC_BASE) -w -n $(APP_NAME) -s
|
||||
$(PYINSTALLER_CMD) $(TAGGER_BASE)/comictagger.py -w -n $(APP_NAME) -s
|
||||
$(PYINSTALLER_CMD) $(TAGGER_BASE)/comictagger.py -w -n $(APP_NAME) -s
|
||||
cp -a $(TAGGER_SRC)/ui $(APP_BUNDLE)/Contents/MacOS
|
||||
cp -a $(TAGGER_SRC)/graphics $(APP_BUNDLE)/Contents/MacOS
|
||||
cp $(MAC_BASE)/libunrar.so $(APP_BUNDLE)/Contents/MacOS
|
||||
cp $(MAC_BASE)/app.icns $(APP_BUNDLE)/Contents/Resources/icon-windowed.icns
|
||||
# fix the version string in the Info.plist
|
||||
sed -i -e 's/0\.0\.0/$(VERSION_STR)/' $(MAC_BASE)/dist/ComicTagger.app/Contents/Info.plist
|
||||
# strip out PPC/x64
|
||||
#./make_thin.sh dist/ComicTagger.app/Contents/MacOS
|
||||
#./make_thin.sh dist/ComicTagger.app/Contents/MacOS/qt4_plugins/accessible
|
||||
#./make_thin.sh dist/ComicTagger.app/Contents/MacOS/qt4_plugins/bearer
|
||||
#./make_thin.sh dist/ComicTagger.app/Contents/MacOS/qt4_plugins/codecs
|
||||
#./make_thin.sh dist/ComicTagger.app/Contents/MacOS/qt4_plugins/graphicssystems
|
||||
#./make_thin.sh dist/ComicTagger.app/Contents/MacOS/qt4_plugins/iconengines
|
||||
#./make_thin.sh dist/ComicTagger.app/Contents/MacOS/qt4_plugins/imageformats
|
||||
|
||||
clean:
|
||||
rm -rf $(DIST_DIR) $(MAC_BASE)/build
|
||||
@@ -42,7 +30,7 @@ clean:
|
||||
rm -f raw*.dmg
|
||||
echo $(VERSION_STR)
|
||||
diskimage:
|
||||
#Set up disk image staging folder
|
||||
# Set up disk image staging folder
|
||||
rm -rf $(STAGING)
|
||||
mkdir $(STAGING)
|
||||
cp $(TAGGER_BASE)/release_notes.txt $(STAGING)
|
||||
@@ -51,28 +39,27 @@ diskimage:
|
||||
cp $(MAC_BASE)/volume.icns $(STAGING)/.VolumeIcon.icns
|
||||
SetFile -c icnC $(STAGING)/.VolumeIcon.icns
|
||||
|
||||
##generate raw disk image
|
||||
# generate raw disk image
|
||||
rm -f $(DMG_FILE)
|
||||
hdiutil create -srcfolder $(STAGING) -volname $(VOLUME_NAME) -format UDRW -ov raw-$(DMG_FILE)
|
||||
hdiutil create -srcfolder $(STAGING) -volname $(VOLUME_NAME) -format UDRW -ov raw-$(DMG_FILE)
|
||||
|
||||
#remove working files and folders
|
||||
# remove working files and folders
|
||||
rm -rf $(STAGING)
|
||||
|
||||
|
||||
# we now have a raw DMG file.
|
||||
|
||||
|
||||
# remount it so we can set the volume icon properly
|
||||
mkdir -p $(STAGING)
|
||||
hdiutil attach raw-$(DMG_FILE) -mountpoint $(STAGING)
|
||||
SetFile -a C $(STAGING)
|
||||
hdiutil detach $(STAGING)
|
||||
rm -rf $(STAGING)
|
||||
|
||||
|
||||
# convert the raw image
|
||||
rm -f $(DMG_FILE)
|
||||
hdiutil convert raw-$(DMG_FILE) -format UDZO -o $(DMG_FILE)
|
||||
rm -f raw-$(DMG_FILE)
|
||||
|
||||
#move finished product to release folder
|
||||
|
||||
# move finished product to release folder
|
||||
mkdir -p $(TAGGER_BASE)/release
|
||||
mv $(DMG_FILE) $(TAGGER_BASE)/release
|
||||
|
||||
@@ -8,12 +8,12 @@ do
|
||||
then
|
||||
echo "Fat Binary: $FILE"
|
||||
mkdir -p thin
|
||||
lipo -thin i386 -output thin/$FILE $BINFOLDER/$FILE
|
||||
lipo -thin i386 -output thin/$FILE $BINFOLDER/$FILE
|
||||
fi
|
||||
done
|
||||
|
||||
if [ -d thin ]
|
||||
then
|
||||
then
|
||||
mv thin/* $BINFOLDER
|
||||
else
|
||||
echo No files to lipo
|
||||
|
Before Width: | Height: | Size: 62 KiB After Width: | Height: | Size: 62 KiB |
build-tools/zip_artifacts.py (new file, 88 lines)
@@ -0,0 +1,88 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import pathlib
|
||||
import platform
|
||||
import sys
|
||||
import tarfile
|
||||
import zipfile
|
||||
|
||||
from comictaggerlib.ctversion import __version__
|
||||
|
||||
|
||||
def addToZip(zf: zipfile.ZipFile, path: str, zippath: str) -> None:
|
||||
if os.path.isfile(path):
|
||||
zf.write(path, zippath)
|
||||
elif os.path.isdir(path):
|
||||
if zippath:
|
||||
zf.write(path, zippath)
|
||||
for nm in sorted(os.listdir(path)):
|
||||
addToZip(zf, os.path.join(path, nm), os.path.join(zippath, nm))
|
||||
|
||||
|
||||
def Zip(zip_file: pathlib.Path, path: pathlib.Path) -> None:
|
||||
zip_file.unlink(missing_ok=True)
|
||||
with zipfile.ZipFile(f"{zip_file}.zip", "w", compression=zipfile.ZIP_DEFLATED, compresslevel=8) as zf:
|
||||
zippath = os.path.basename(path)
|
||||
if not zippath:
|
||||
zippath = os.path.basename(os.path.dirname(path))
|
||||
if zippath in ("", os.curdir, os.pardir):
|
||||
zippath = ""
|
||||
addToZip(zf, str(path), zippath)
|
||||
|
||||
|
||||
def addToTar(tf: tarfile.TarFile, path: str, zippath: str) -> None:
|
||||
if os.path.isfile(path):
|
||||
tf.add(path, zippath)
|
||||
elif os.path.isdir(path):
|
||||
if zippath:
|
||||
tf.add(path, zippath, recursive=False)
|
||||
for nm in sorted(os.listdir(path)):
|
||||
addToTar(tf, os.path.join(path, nm), os.path.join(zippath, nm))
|
||||
|
||||
|
||||
def Tar(tar_file: pathlib.Path, path: pathlib.Path) -> None:
|
||||
tar_file.unlink(missing_ok=True)
|
||||
with tarfile.open(f"{tar_file}.tar.gz", "w:gz") as tf:
|
||||
zippath = os.path.basename(path)
|
||||
if not zippath:
|
||||
zippath = os.path.basename(os.path.dirname(path))
|
||||
if zippath in ("", os.curdir, os.pardir):
|
||||
zippath = ""
|
||||
addToTar(tf, str(path), zippath)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
app = "ComicTagger"
|
||||
exe = app.casefold()
|
||||
if platform.system() == "Windows":
|
||||
os_version = f"win-{platform.machine()}"
|
||||
app_name = f"{exe}.exe"
|
||||
final_name = f"{app}-{__version__}-{os_version}.exe"
|
||||
elif platform.system() == "Darwin":
|
||||
ver = platform.mac_ver()
|
||||
os_version = f"osx-{ver[0]}-{ver[2]}"
|
||||
app_name = f"{app}.app"
|
||||
final_name = f"{app}-{__version__}-{os_version}"
|
||||
else:
|
||||
app_name = exe
|
||||
final_name = f"ComicTagger-{__version__}-{platform.system()}"
|
||||
|
||||
path = pathlib.Path(f"dist/{app_name}")
|
||||
zip_file = pathlib.Path(f"dist/{final_name}")
|
||||
|
||||
if platform.system() == "Darwin":
|
||||
from dmgbuild.__main__ import main as dmg_main
|
||||
|
||||
sys.argv = [
|
||||
"zip_artifacts",
|
||||
"-s",
|
||||
str(pathlib.Path(__file__).parent / "dmgbuild.conf"),
|
||||
f"{app} {__version__}",
|
||||
f"dist/{final_name}.dmg",
|
||||
]
|
||||
dmg_main()
|
||||
elif platform.system() == "Windows":
|
||||
Zip(zip_file, path)
|
||||
else:
|
||||
Tar(zip_file, path)
|
||||
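A brief usage sketch, assuming the PyInstaller output already exists under `dist/`: running the script packages the build for the current platform, producing a `.zip` on Windows, a `.dmg` on macOS, and a `.tar.gz` elsewhere:

```
$ python build-tools/zip_artifacts.py
```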
@@ -1 +1,3 @@
|
||||
__author__ = 'dromanin'
|
||||
from __future__ import annotations
|
||||
|
||||
__author__ = "dromanin"
|
||||
|
||||
comicapi/__pyinstaller/__init__.py (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
|
||||
|
||||
def get_hook_dirs() -> list[str]:
|
||||
return [os.path.dirname(__file__)]
|
||||
comicapi/__pyinstaller/hook-comicapi.py (new file, 10 lines)
@@ -0,0 +1,10 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from PyInstaller.utils.hooks import collect_data_files, collect_entry_point
|
||||
|
||||
datas, hiddenimports = collect_entry_point("comicapi.archiver")
|
||||
mdatas, mhiddenimports = collect_entry_point("comicapi.metadata")
|
||||
|
||||
hiddenimports += mhiddenimports
|
||||
datas += mdatas
|
||||
datas += collect_data_files("comicapi.data")
|
||||
comicapi/archivers/__init__.py (new file, 13 lines)
@@ -0,0 +1,13 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from comicapi.archivers.archiver import Archiver
|
||||
from comicapi.archivers.folder import FolderArchiver
|
||||
from comicapi.archivers.zip import ZipArchiver
|
||||
|
||||
|
||||
class UnknownArchiver(Archiver):
|
||||
def name(self) -> str:
|
||||
return "Unknown"
|
||||
|
||||
|
||||
__all__ = ["Archiver", "UnknownArchiver", "FolderArchiver", "ZipArchiver"]
|
||||
comicapi/archivers/archiver.py (new file, 137 lines)
@@ -0,0 +1,137 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import pathlib
|
||||
from typing import Protocol, runtime_checkable
|
||||
|
||||
|
||||
@runtime_checkable
|
||||
class Archiver(Protocol):
|
||||
"""Archiver Protocol"""
|
||||
|
||||
"""The path to the archive"""
|
||||
path: pathlib.Path
|
||||
|
||||
"""
|
||||
The name of the executable used for this archiver. This should be the base name of the executable.
|
||||
For example if 'rar.exe' is needed this should be "rar".
|
||||
If an executable is not used this should be the empty string.
|
||||
"""
|
||||
exe: str = ""
|
||||
|
||||
"""
|
||||
Whether or not this archiver is enabled.
|
||||
If external imports are required and are not available this should be false. See rar.py and sevenzip.py.
|
||||
"""
|
||||
enabled: bool = True
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.path = pathlib.Path()
|
||||
|
||||
def get_comment(self) -> str:
|
||||
"""
|
||||
Returns the comment from the current archive as a string.
|
||||
Should always return a string. If comments are not supported in the archive the empty string should be returned.
|
||||
"""
|
||||
return ""
|
||||
|
||||
def set_comment(self, comment: str) -> bool:
|
||||
"""
|
||||
Returns True if the comment was successfully set on the current archive.
|
||||
Should always return a boolean. If comments are not supported in the archive False should be returned.
|
||||
"""
|
||||
return False
|
||||
|
||||
def supports_comment(self) -> bool:
|
||||
"""
|
||||
Returns True if the current archive supports comments.
|
||||
Should always return a boolean. If comments are not supported in the archive False should be returned.
|
||||
"""
|
||||
return False
|
||||
|
||||
def read_file(self, archive_file: str) -> bytes:
|
||||
"""
|
||||
Reads the named file from the current archive.
|
||||
archive_file should always come from the output of get_filename_list.
|
||||
Should always return a bytes object. Exceptions should be of the type OSError.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def remove_file(self, archive_file: str) -> bool:
|
||||
"""
|
||||
Removes the named file from the current archive.
|
||||
archive_file should always come from the output of get_filename_list.
|
||||
Should always return a boolean. Failures should return False.
|
||||
|
||||
Rebuilding the archive without the named file is a standard way to remove a file.
|
||||
"""
|
||||
return False
|
||||
|
||||
def write_file(self, archive_file: str, data: bytes) -> bool:
|
||||
"""
|
||||
Writes the named file to the current archive.
|
||||
Should always return a boolean. Failures should return False.
|
||||
"""
|
||||
return False
|
||||
|
||||
def get_filename_list(self) -> list[str]:
|
||||
"""
|
||||
Returns a list of filenames in the current archive.
|
||||
Should always return a list of string. Failures should return an empty list.
|
||||
"""
|
||||
return []
|
||||
|
||||
def supports_files(self) -> bool:
|
||||
"""
|
||||
Returns True if the current archive supports arbitrary non-picture files.
|
||||
Should always return a boolean.
|
||||
If arbitrary non-picture files are not supported in the archive False should be returned.
|
||||
"""
|
||||
return False
|
||||
|
||||
def copy_from_archive(self, other_archive: Archiver) -> bool:
|
||||
"""
|
||||
Copies the contents of another archive to the current archive.
|
||||
Should always return a boolean. Failures should return False.
|
||||
"""
|
||||
return False
|
||||
|
||||
def is_writable(self) -> bool:
|
||||
"""
|
||||
Returns True if the current archive is writable.
|
||||
Should always return a boolean. Failures should return False.
|
||||
"""
|
||||
return False
|
||||
|
||||
def extension(self) -> str:
|
||||
"""
|
||||
Returns the extension that this archiver should use eg ".cbz".
|
||||
Should always return a string. Failures should return the empty string.
|
||||
"""
|
||||
return ""
|
||||
|
||||
def name(self) -> str:
|
||||
"""
|
||||
Returns the name of this archiver for display purposes eg "CBZ".
|
||||
Should always return a string. Failures should return the empty string.
|
||||
"""
|
||||
return ""
|
||||
|
||||
@classmethod
|
||||
def is_valid(cls, path: pathlib.Path) -> bool:
|
||||
"""
|
||||
Returns True if the given path can be opened by this archiver.
|
||||
Should always return a boolean. Failures should return False.
|
||||
"""
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
def open(cls, path: pathlib.Path) -> Archiver:
|
||||
"""
|
||||
Opens the given archive.
|
||||
Should always return an Archiver.
|
||||
Should never raise an exception; no file operations should take place in this method,
|
||||
is_valid will always be called before open.
|
||||
"""
|
||||
archiver = cls()
|
||||
archiver.path = path
|
||||
return archiver
|
||||
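To illustrate the protocol above, here is a minimal, hypothetical archiver that exposes a single text file as a one-entry, read-only archive. It is only a sketch; a real plugin would also need to be registered under the `comicapi.archiver` entry point that the PyInstaller hook above collects.

```python
from __future__ import annotations

import pathlib

from comicapi.archivers import Archiver


class TextFileArchiver(Archiver):
    """Hypothetical archiver: a single .txt file acts as a one-entry, read-only archive."""

    enabled = True
    exe = ""  # no external executable required

    def get_filename_list(self) -> list[str]:
        # The archive contains exactly one entry: the file itself.
        return [self.path.name] if self.path.is_file() else []

    def read_file(self, archive_file: str) -> bytes:
        if archive_file != self.path.name:
            raise OSError(f"unknown entry: {archive_file}")
        return self.path.read_bytes()

    def is_writable(self) -> bool:
        return False

    def extension(self) -> str:
        return ".txt"

    def name(self) -> str:
        return "Text"

    @classmethod
    def is_valid(cls, path: pathlib.Path) -> bool:
        return path.suffix.lower() == ".txt"
```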
comicapi/archivers/folder.py (new file, 104 lines)
@@ -0,0 +1,104 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import os
|
||||
import pathlib
|
||||
|
||||
from comicapi.archivers import Archiver
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class FolderArchiver(Archiver):
|
||||
"""Folder implementation"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
self.comment_file_name = "ComicTaggerFolderComment.txt"
|
||||
|
||||
def get_comment(self) -> str:
|
||||
try:
|
||||
return (self.path / self.comment_file_name).read_text()
|
||||
except OSError:
|
||||
return ""
|
||||
|
||||
def set_comment(self, comment: str) -> bool:
|
||||
if (self.path / self.comment_file_name).exists() or comment:
|
||||
return self.write_file(self.comment_file_name, comment.encode("utf-8"))
|
||||
return True
|
||||
|
||||
def supports_comment(self) -> bool:
|
||||
return True
|
||||
|
||||
def read_file(self, archive_file: str) -> bytes:
|
||||
try:
|
||||
data = (self.path / archive_file).read_bytes()
|
||||
except OSError as e:
|
||||
logger.error("Error reading folder archive [%s]: %s :: %s", e, self.path, archive_file)
|
||||
raise
|
||||
|
||||
return data
|
||||
|
||||
def remove_file(self, archive_file: str) -> bool:
|
||||
try:
|
||||
(self.path / archive_file).unlink(missing_ok=True)
|
||||
except OSError as e:
|
||||
logger.error("Error removing file for folder archive [%s]: %s :: %s", e, self.path, archive_file)
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
def write_file(self, archive_file: str, data: bytes) -> bool:
|
||||
try:
|
||||
file_path = self.path / archive_file
|
||||
file_path.parent.mkdir(exist_ok=True, parents=True)
|
||||
with open(self.path / archive_file, mode="wb") as f:
|
||||
f.write(data)
|
||||
except OSError as e:
|
||||
logger.error("Error writing folder archive [%s]: %s :: %s", e, self.path, archive_file)
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
def get_filename_list(self) -> list[str]:
|
||||
filenames = []
|
||||
try:
|
||||
for root, _dirs, files in os.walk(self.path):
|
||||
for f in files:
|
||||
filenames.append(os.path.relpath(os.path.join(root, f), self.path).replace(os.path.sep, "/"))
|
||||
return filenames
|
||||
except OSError as e:
|
||||
logger.error("Error listing files in folder archive [%s]: %s", e, self.path)
|
||||
return []
|
||||
|
||||
def supports_files(self) -> bool:
|
||||
return True
|
||||
|
||||
def copy_from_archive(self, other_archive: Archiver) -> bool:
|
||||
"""Replace the current zip with one copied from another archive"""
|
||||
try:
|
||||
for filename in other_archive.get_filename_list():
|
||||
data = other_archive.read_file(filename)
|
||||
if data is not None:
|
||||
self.write_file(filename, data)
|
||||
|
||||
# preserve the old comment
|
||||
comment = other_archive.get_comment()
|
||||
if comment is not None:
|
||||
if not self.set_comment(comment):
|
||||
return False
|
||||
except Exception:
|
||||
logger.exception("Error while copying archive from %s to %s", other_archive.path, self.path)
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
def is_writable(self) -> bool:
|
||||
return True
|
||||
|
||||
def name(self) -> str:
|
||||
return "Folder"
|
||||
|
||||
@classmethod
|
||||
def is_valid(cls, path: pathlib.Path) -> bool:
|
||||
return path.is_dir()
|
||||
comicapi/archivers/rar.py (new file, 312 lines)
@@ -0,0 +1,312 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import os
|
||||
import pathlib
|
||||
import platform
|
||||
import shutil
|
||||
import subprocess
|
||||
import tempfile
|
||||
import time
|
||||
|
||||
from comicapi.archivers import Archiver
|
||||
|
||||
try:
|
||||
import rarfile
|
||||
|
||||
rar_support = True
|
||||
except ImportError:
|
||||
rar_support = False
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
if not rar_support:
|
||||
logger.error("rar unavailable")
|
||||
|
||||
|
||||
class RarArchiver(Archiver):
|
||||
"""RAR implementation"""
|
||||
|
||||
enabled = rar_support
|
||||
exe = "rar"
|
||||
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
|
||||
# windows only, keeps the cmd.exe from popping up
|
||||
if platform.system() == "Windows":
|
||||
self.startupinfo = subprocess.STARTUPINFO() # type: ignore
|
||||
self.startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW # type: ignore
|
||||
else:
|
||||
self.startupinfo = None
|
||||
|
||||
def get_comment(self) -> str:
|
||||
rarc = self.get_rar_obj()
|
||||
return (rarc.comment if rarc else "") or ""
|
||||
|
||||
def set_comment(self, comment: str) -> bool:
|
||||
if rar_support and self.exe:
|
||||
try:
|
||||
# write comment to temp file
|
||||
with tempfile.TemporaryDirectory() as tmp_dir:
|
||||
tmp_file = pathlib.Path(tmp_dir) / "rar_comment.txt"
|
||||
tmp_file.write_text(comment, encoding="utf-8")
|
||||
|
||||
working_dir = os.path.dirname(os.path.abspath(self.path))
|
||||
|
||||
# use external program to write comment to Rar archive
|
||||
proc_args = [
|
||||
self.exe,
|
||||
"c",
|
||||
f"-w{working_dir}",
|
||||
"-c-",
|
||||
f"-z{tmp_file}",
|
||||
str(self.path),
|
||||
]
|
||||
result = subprocess.run(
|
||||
proc_args,
|
||||
startupinfo=self.startupinfo,
|
||||
stdin=subprocess.DEVNULL,
|
||||
capture_output=True,
|
||||
encoding="utf-8",
|
||||
cwd=tmp_dir,
|
||||
)
|
||||
if result.returncode != 0:
|
||||
logger.error(
|
||||
"Error writing comment to rar archive [exitcode: %d]: %s :: %s",
|
||||
result.returncode,
|
||||
self.path,
|
||||
result.stderr,
|
||||
)
|
||||
return False
|
||||
|
||||
if platform.system() == "Darwin":
|
||||
time.sleep(1)
|
||||
except OSError as e:
|
||||
logger.exception("Error writing comment to rar archive [%s]: %s", e, self.path)
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def supports_comment(self) -> bool:
|
||||
return True
|
||||
|
||||
def read_file(self, archive_file: str) -> bytes:
|
||||
rarc = self.get_rar_obj()
|
||||
if rarc is None:
|
||||
return b""
|
||||
|
||||
tries = 0
|
||||
while tries < 7:
|
||||
try:
|
||||
tries = tries + 1
|
||||
data: bytes = rarc.open(archive_file).read()
|
||||
entries = [(rarc.getinfo(archive_file), data)]
|
||||
|
||||
if entries[0][0].file_size != len(entries[0][1]):
|
||||
logger.info(
|
||||
"Error reading rar archive [file is not expected size: %d vs %d] %s :: %s :: tries #%d",
|
||||
entries[0][0].file_size,
|
||||
len(entries[0][1]),
|
||||
self.path,
|
||||
archive_file,
|
||||
tries,
|
||||
)
|
||||
continue
|
||||
|
||||
except OSError as e:
|
||||
logger.error("Error reading rar archive [%s]: %s :: %s :: tries #%d", e, self.path, archive_file, tries)
|
||||
time.sleep(1)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Unexpected exception reading rar archive [%s]: %s :: %s :: tries #%d",
|
||||
e,
|
||||
self.path,
|
||||
archive_file,
|
||||
tries,
|
||||
)
|
||||
break
|
||||
|
||||
else:
|
||||
# Success. Entries is a list of of tuples: ( rarinfo, filedata)
|
||||
if len(entries) == 1:
|
||||
return entries[0][1]
|
||||
|
||||
raise OSError
|
||||
|
||||
raise OSError
|
||||
|
||||
def remove_file(self, archive_file: str) -> bool:
|
||||
if self.exe:
|
||||
# use external program to remove file from Rar archive
|
||||
result = subprocess.run(
|
||||
[self.exe, "d", "-c-", self.path, archive_file],
|
||||
startupinfo=self.startupinfo,
|
||||
stdin=subprocess.DEVNULL,
|
||||
capture_output=True,
|
||||
encoding="utf-8",
|
||||
cwd=self.path.absolute().parent,
|
||||
)
|
||||
|
||||
if platform.system() == "Darwin":
|
||||
time.sleep(1)
|
||||
if result.returncode != 0:
|
||||
logger.error(
|
||||
"Error removing file from rar archive [exitcode: %d]: %s :: %s",
|
||||
result.returncode,
|
||||
self.path,
|
||||
archive_file,
|
||||
)
|
||||
return False
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def write_file(self, archive_file: str, data: bytes) -> bool:
|
||||
if self.exe:
|
||||
archive_path = pathlib.PurePosixPath(archive_file)
|
||||
archive_name = archive_path.name
|
||||
archive_parent = str(archive_path.parent).lstrip("./")
|
||||
|
||||
# use external program to write file to Rar archive
|
||||
result = subprocess.run(
|
||||
[self.exe, "a", f"-si{archive_name}", f"-ap{archive_parent}", "-c-", "-ep", self.path],
|
||||
input=data,
|
||||
startupinfo=self.startupinfo,
|
||||
capture_output=True,
|
||||
cwd=self.path.absolute().parent,
|
||||
)
|
||||
|
||||
if platform.system() == "Darwin":
|
||||
time.sleep(1)
|
||||
if result.returncode != 0:
|
||||
logger.error(
|
||||
"Error writing rar archive [exitcode: %d]: %s :: %s :: %s",
|
||||
result.returncode,
|
||||
self.path,
|
||||
archive_file,
|
||||
result.stderr,
|
||||
)
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def get_filename_list(self) -> list[str]:
|
||||
rarc = self.get_rar_obj()
|
||||
tries = 0
|
||||
if rar_support and rarc:
|
||||
while tries < 7:
|
||||
try:
|
||||
tries = tries + 1
|
||||
namelist = []
|
||||
for item in rarc.infolist():
|
||||
if item.file_size != 0:
|
||||
namelist.append(item.filename)
|
||||
|
||||
except OSError as e:
|
||||
logger.error("Error listing files in rar archive [%s]: %s :: attempt #%d", e, self.path, tries)
|
||||
time.sleep(1)
|
||||
|
||||
else:
|
||||
return namelist
|
||||
return []
|
||||
|
||||
def supports_files(self) -> bool:
|
||||
return True
|
||||
|
||||
def copy_from_archive(self, other_archive: Archiver) -> bool:
|
||||
"""Replace the current archive with one copied from another archive"""
|
||||
try:
|
||||
with tempfile.TemporaryDirectory() as tmp_dir:
|
||||
tmp_path = pathlib.Path(tmp_dir)
|
||||
rar_cwd = tmp_path / "rar"
|
||||
rar_cwd.mkdir(exist_ok=True)
|
||||
rar_path = (tmp_path / self.path.name).with_suffix(".rar")
|
||||
|
||||
for filename in other_archive.get_filename_list():
|
||||
(rar_cwd / filename).parent.mkdir(exist_ok=True, parents=True)
|
||||
data = other_archive.read_file(filename)
|
||||
if data is not None:
|
||||
with open(rar_cwd / filename, mode="w+b") as tmp_file:
|
||||
tmp_file.write(data)
|
||||
result = subprocess.run(
|
||||
[self.exe, "a", "-r", "-c-", str(rar_path.absolute()), "."],
|
||||
cwd=rar_cwd.absolute(),
|
||||
startupinfo=self.startupinfo,
|
||||
stdin=subprocess.DEVNULL,
|
||||
capture_output=True,
|
||||
encoding="utf-8",
|
||||
)
|
||||
if result.returncode != 0:
|
||||
logger.error(
|
||||
"Error while copying to rar archive [exitcode: %d]: %s: %s",
|
||||
result.returncode,
|
||||
self.path,
|
||||
result.stderr,
|
||||
)
|
||||
return False
|
||||
|
||||
self.path.unlink(missing_ok=True)
|
||||
shutil.move(rar_path, self.path)
|
||||
except Exception as e:
|
||||
logger.exception("Error while copying to rar archive [%s]: from %s to %s", e, other_archive.path, self.path)
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
def is_writable(self) -> bool:
|
||||
try:
|
||||
if bool(self.exe and (os.path.exists(self.exe) or shutil.which(self.exe))):
|
||||
return (
|
||||
subprocess.run(
|
||||
(self.exe,),
|
||||
startupinfo=self.startupinfo,
|
||||
capture_output=True,
|
||||
cwd=self.path.absolute().parent,
|
||||
)
|
||||
.stdout.strip()
|
||||
.startswith(b"RAR")
|
||||
)
|
||||
except OSError:
|
||||
...
|
||||
return False
|
||||
|
||||
def extension(self) -> str:
|
||||
return ".cbr"
|
||||
|
||||
def name(self) -> str:
|
||||
return "RAR"
|
||||
|
||||
@classmethod
|
||||
def is_valid(cls, path: pathlib.Path) -> bool:
|
||||
if rar_support:
|
||||
# Try using exe
|
||||
orig = rarfile.UNRAR_TOOL
|
||||
rarfile.UNRAR_TOOL = cls.exe
|
||||
try:
|
||||
return rarfile.is_rarfile(str(path)) and rarfile.tool_setup(sevenzip=False, sevenzip2=False, force=True)
|
||||
except rarfile.RarCannotExec:
|
||||
rarfile.UNRAR_TOOL = orig
|
||||
|
||||
# Fallback to standard
|
||||
try:
|
||||
return rarfile.is_rarfile(str(path)) and rarfile.tool_setup(force=True)
|
||||
except rarfile.RarCannotExec as e:
|
||||
logger.info(e)
|
||||
return False
|
||||
|
||||
def get_rar_obj(self) -> rarfile.RarFile | None:
|
||||
if rar_support:
|
||||
try:
|
||||
rarc = rarfile.RarFile(str(self.path))
|
||||
except (OSError, rarfile.RarFileError) as e:
|
||||
logger.error("Unable to get rar object [%s]: %s", e, self.path)
|
||||
else:
|
||||
return rarc
|
||||
|
||||
return None
|
||||
comicapi/archivers/sevenzip.py (new file, 134 lines)
@@ -0,0 +1,134 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import os
|
||||
import pathlib
|
||||
import shutil
|
||||
import tempfile
|
||||
|
||||
from comicapi.archivers import Archiver
|
||||
|
||||
try:
|
||||
import py7zr
|
||||
|
||||
z7_support = True
|
||||
except ImportError:
|
||||
z7_support = False
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SevenZipArchiver(Archiver):
|
||||
"""7Z implementation"""
|
||||
|
||||
enabled = z7_support
|
||||
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
|
||||
# @todo: Implement Comment?
|
||||
def get_comment(self) -> str:
|
||||
return ""
|
||||
|
||||
def set_comment(self, comment: str) -> bool:
|
||||
return False
|
||||
|
||||
def read_file(self, archive_file: str) -> bytes:
|
||||
data = b""
|
||||
try:
|
||||
with py7zr.SevenZipFile(self.path, "r") as zf:
|
||||
data = zf.read(archive_file)[archive_file].read()
|
||||
except (py7zr.Bad7zFile, OSError) as e:
|
||||
logger.error("Error reading 7zip archive [%s]: %s :: %s", e, self.path, archive_file)
|
||||
raise
|
||||
|
||||
return data
|
||||
|
||||
def remove_file(self, archive_file: str) -> bool:
|
||||
return self.rebuild([archive_file])
|
||||
|
||||
def write_file(self, archive_file: str, data: bytes) -> bool:
|
||||
# At the moment, no other option but to rebuild the whole
|
||||
# archive w/o the indicated file. Very sucky, but maybe
|
||||
# another solution can be found
|
||||
files = self.get_filename_list()
|
||||
if archive_file in files:
|
||||
if not self.rebuild([archive_file]):
|
||||
return False
|
||||
|
||||
try:
|
||||
# now just add the archive file as a new one
|
||||
with py7zr.SevenZipFile(self.path, "a") as zf:
|
||||
zf.writestr(data, archive_file)
|
||||
return True
|
||||
except (py7zr.Bad7zFile, OSError) as e:
|
||||
logger.error("Error writing 7zip archive [%s]: %s :: %s", e, self.path, archive_file)
|
||||
return False
|
||||
|
||||
def get_filename_list(self) -> list[str]:
|
||||
try:
|
||||
with py7zr.SevenZipFile(self.path, "r") as zf:
|
||||
namelist: list[str] = [file.filename for file in zf.list() if not file.is_directory]
|
||||
|
||||
return namelist
|
||||
except (py7zr.Bad7zFile, OSError) as e:
|
||||
logger.error("Error listing files in 7zip archive [%s]: %s", e, self.path)
|
||||
return []
|
||||
|
||||
def supports_files(self) -> bool:
|
||||
return True
|
||||
|
||||
def rebuild(self, exclude_list: list[str]) -> bool:
|
||||
"""Zip helper func
|
||||
|
||||
This recompresses the zip archive, without the files in the exclude_list
|
||||
"""
|
||||
|
||||
try:
|
||||
# py7zr treats all archives as if they used solid compression
|
||||
# so we need to get the filename list first to read all the files at once
|
||||
with py7zr.SevenZipFile(self.path, mode="r") as zin:
|
||||
targets = [f for f in zin.getnames() if f not in exclude_list]
|
||||
with tempfile.NamedTemporaryFile(dir=os.path.dirname(self.path), delete=False) as tmp_file:
|
||||
with py7zr.SevenZipFile(tmp_file.file, mode="w") as zout:
|
||||
with py7zr.SevenZipFile(self.path, mode="r") as zin:
|
||||
for filename, buffer in zin.read(targets).items():
|
||||
zout.writef(buffer, filename)
|
||||
|
||||
self.path.unlink(missing_ok=True)
|
||||
tmp_file.close() # Required on windows
|
||||
|
||||
shutil.move(tmp_file.name, self.path)
|
||||
except (py7zr.Bad7zFile, OSError) as e:
|
||||
logger.error("Error rebuilding 7zip file [%s]: %s", e, self.path)
|
||||
return False
|
||||
return True
|
||||
|
||||
def copy_from_archive(self, other_archive: Archiver) -> bool:
|
||||
"""Replace the current zip with one copied from another archive"""
|
||||
try:
|
||||
with py7zr.SevenZipFile(self.path, "w") as zout:
|
||||
for filename in other_archive.get_filename_list():
|
||||
data = other_archive.read_file(
|
||||
filename
|
||||
) # This will be very inefficient if other_archive is a 7z file
|
||||
if data is not None:
|
||||
zout.writestr(data, filename)
|
||||
except Exception as e:
|
||||
logger.error("Error while copying to 7zip archive [%s]: from %s to %s", e, other_archive.path, self.path)
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
def is_writable(self) -> bool:
|
||||
return True
|
||||
|
||||
def extension(self) -> str:
|
||||
return ".cb7"
|
||||
|
||||
def name(self) -> str:
|
||||
return "Seven Zip"
|
||||
|
||||
@classmethod
|
||||
def is_valid(cls, path: pathlib.Path) -> bool:
|
||||
return py7zr.is_7zfile(path)
|
||||
comicapi/archivers/zip.py (new file, 204 lines)
@@ -0,0 +1,204 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import os
|
||||
import pathlib
|
||||
import shutil
|
||||
import struct
|
||||
import tempfile
|
||||
import zipfile
|
||||
from typing import cast
|
||||
|
||||
import chardet
|
||||
|
||||
from comicapi.archivers import Archiver
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ZipArchiver(Archiver):
|
||||
"""ZIP implementation"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
|
||||
def supports_comment(self) -> bool:
|
||||
return True
|
||||
|
||||
def get_comment(self) -> str:
|
||||
with zipfile.ZipFile(self.path, "r") as zf:
|
||||
encoding = chardet.detect(zf.comment, True)
|
||||
if encoding["confidence"] > 60:
|
||||
try:
|
||||
comment = zf.comment.decode(encoding["encoding"])
|
||||
except UnicodeDecodeError:
|
||||
comment = zf.comment.decode("utf-8", errors="replace")
|
||||
else:
|
||||
comment = zf.comment.decode("utf-8", errors="replace")
|
||||
return comment
|
||||
|
||||
def set_comment(self, comment: str) -> bool:
|
||||
with zipfile.ZipFile(self.path, mode="a") as zf:
|
||||
zf.comment = bytes(comment, "utf-8")
|
||||
return True
|
||||
|
||||
def read_file(self, archive_file: str) -> bytes:
|
||||
with zipfile.ZipFile(self.path, mode="r") as zf:
|
||||
try:
|
||||
data = zf.read(archive_file)
|
||||
except (zipfile.BadZipfile, OSError) as e:
|
||||
logger.error("Error reading zip archive [%s]: %s :: %s", e, self.path, archive_file)
|
||||
raise
|
||||
return data
|
||||
|
||||
def remove_file(self, archive_file: str) -> bool:
|
||||
return self.rebuild([archive_file])
|
||||
|
||||
def write_file(self, archive_file: str, data: bytes) -> bool:
|
||||
# At the moment, no other option but to rebuild the whole
|
||||
# zip archive w/o the indicated file. Very sucky, but maybe
|
||||
# another solution can be found
|
||||
files = self.get_filename_list()
|
||||
if archive_file in files:
|
||||
if not self.rebuild([archive_file]):
|
||||
return False
|
||||
|
||||
try:
|
||||
# now just add the archive file as a new one
|
||||
with zipfile.ZipFile(self.path, mode="a", allowZip64=True, compression=zipfile.ZIP_DEFLATED) as zf:
|
||||
zf.writestr(archive_file, data)
|
||||
return True
|
||||
except (zipfile.BadZipfile, OSError) as e:
|
||||
logger.error("Error writing zip archive [%s]: %s :: %s", e, self.path, archive_file)
|
||||
return False
|
||||
|
||||
def get_filename_list(self) -> list[str]:
|
||||
try:
|
||||
with zipfile.ZipFile(self.path, mode="r") as zf:
|
||||
namelist = [file.filename for file in zf.infolist() if not file.is_dir()]
|
||||
return namelist
|
||||
except (zipfile.BadZipfile, OSError) as e:
|
||||
logger.error("Error listing files in zip archive [%s]: %s", e, self.path)
|
||||
return []
|
||||
|
||||
def supports_files(self) -> bool:
|
||||
return True
|
||||
|
||||
def rebuild(self, exclude_list: list[str]) -> bool:
|
||||
"""Zip helper func
|
||||
|
||||
This recompresses the zip archive, without the files in the exclude_list
|
||||
"""
|
||||
try:
|
||||
with zipfile.ZipFile(
|
||||
tempfile.NamedTemporaryFile(dir=os.path.dirname(self.path), delete=False), "w", allowZip64=True
|
||||
) as zout:
|
||||
with zipfile.ZipFile(self.path, mode="r") as zin:
|
||||
for item in zin.infolist():
|
||||
buffer = zin.read(item.filename)
|
||||
if item.filename not in exclude_list:
|
||||
zout.writestr(item, buffer)
|
||||
|
||||
# preserve the old comment
|
||||
zout.comment = zin.comment
|
||||
|
||||
# replace with the new file
|
||||
self.path.unlink(missing_ok=True)
|
||||
zout.close() # Required on windows
|
||||
|
||||
shutil.move(cast(str, zout.filename), self.path)
|
||||
|
||||
except (zipfile.BadZipfile, OSError) as e:
|
||||
logger.error("Error rebuilding zip file [%s]: %s", e, self.path)
|
||||
return False
|
||||
return True
|
||||
|
||||
def copy_from_archive(self, other_archive: Archiver) -> bool:
|
||||
"""Replace the current zip with one copied from another archive"""
|
||||
try:
|
||||
with zipfile.ZipFile(self.path, mode="w", allowZip64=True) as zout:
|
||||
for filename in other_archive.get_filename_list():
|
||||
data = other_archive.read_file(filename)
|
||||
if data is not None:
|
||||
zout.writestr(filename, data)
|
||||
|
||||
# preserve the old comment
|
||||
comment = other_archive.get_comment()
|
||||
if comment is not None:
|
||||
if not self.write_zip_comment(self.path, comment):
|
||||
return False
|
||||
except Exception as e:
|
||||
logger.error("Error while copying to zip archive [%s]: from %s to %s", e, other_archive.path, self.path)
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
def is_writable(self) -> bool:
|
||||
return True
|
||||
|
||||
def extension(self) -> str:
|
||||
return ".cbz"
|
||||
|
||||
def name(self) -> str:
|
||||
return "ZIP"
|
||||
|
||||
@classmethod
|
||||
def is_valid(cls, path: pathlib.Path) -> bool:
|
||||
return zipfile.is_zipfile(path)
|
||||
|
||||
def write_zip_comment(self, filename: pathlib.Path | str, comment: str) -> bool:
|
||||
"""
|
||||
This is a custom function for writing a comment to a zip file,
|
||||
since the built-in one doesn't seem to work on Windows and Mac OS/X
|
||||
|
||||
Fortunately, the zip comment is at the end of the file, and it's
|
||||
easy to manipulate. For more info, see:
http://en.wikipedia.org/wiki/Zip_(file_format)#Structure
|
||||
"""
|
||||
|
||||
# get file size
|
||||
statinfo = os.stat(filename)
|
||||
file_length = statinfo.st_size
|
||||
|
||||
try:
|
||||
with open(filename, mode="r+b") as file:
|
||||
# the starting position, relative to EOF
|
||||
pos = -4
|
||||
found = False
|
||||
|
||||
# walk backwards to find the "End of Central Directory" record
|
||||
while (not found) and (-pos != file_length):
|
||||
# seek, relative to EOF
|
||||
file.seek(pos, 2)
|
||||
value = file.read(4)
|
||||
|
||||
# look for the end of central directory signature
|
||||
if bytearray(value) == bytearray([0x50, 0x4B, 0x05, 0x06]):
|
||||
found = True
|
||||
else:
|
||||
# not found, step back another byte
|
||||
pos = pos - 1
|
||||
|
||||
if found:
|
||||
# now skip forward 20 bytes to the comment length word
|
||||
pos += 20
|
||||
file.seek(pos, 2)
|
||||
|
||||
# Pack the length of the comment string
|
||||
fmt = "H" # one 2-byte integer
|
||||
comment_length = struct.pack(fmt, len(comment)) # pack integer in a binary string
|
||||
|
||||
# write out the length
|
||||
file.write(comment_length)
|
||||
file.seek(pos + 2, 2)
|
||||
|
||||
# write out the comment itself
|
||||
file.write(comment.encode("utf-8"))
|
||||
file.truncate()
|
||||
else:
|
||||
raise Exception("Could not find the End of Central Directory record!")
|
||||
except Exception as e:
|
||||
logger.error("Error writing comment to zip archive [%s]: %s", e, self.path)
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
@@ -1,276 +0,0 @@
|
||||
"""A class to encapsulate CoMet data"""
|
||||
|
||||
# Copyright 2012-2014 Anthony Beville
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import xml.etree.ElementTree as ET
|
||||
#from datetime import datetime
|
||||
#from pprint import pprint
|
||||
#import zipfile
|
||||
|
||||
from .genericmetadata import GenericMetadata
|
||||
from . import utils
|
||||
|
||||
|
||||
class CoMet:
|
||||
|
||||
writer_synonyms = ['writer', 'plotter', 'scripter']
|
||||
penciller_synonyms = ['artist', 'penciller', 'penciler', 'breakdowns']
|
||||
inker_synonyms = ['inker', 'artist', 'finishes']
|
||||
colorist_synonyms = ['colorist', 'colourist', 'colorer', 'colourer']
|
||||
letterer_synonyms = ['letterer']
|
||||
cover_synonyms = ['cover', 'covers', 'coverartist', 'cover artist']
|
||||
editor_synonyms = ['editor']
|
||||
|
||||
def metadataFromString(self, string):
|
||||
|
||||
tree = ET.ElementTree(ET.fromstring(string))
|
||||
return self.convertXMLToMetadata(tree)
|
||||
|
||||
def stringFromMetadata(self, metadata):
|
||||
|
||||
header = '<?xml version="1.0" encoding="UTF-8"?>\n'
|
||||
|
||||
tree = self.convertMetadataToXML(self, metadata)
|
||||
return header + ET.tostring(tree.getroot())
|
||||
|
||||
def indent(self, elem, level=0):
|
||||
# for making the XML output readable
|
||||
i = "\n" + level * " "
|
||||
if len(elem):
|
||||
if not elem.text or not elem.text.strip():
|
||||
elem.text = i + " "
|
||||
if not elem.tail or not elem.tail.strip():
|
||||
elem.tail = i
|
||||
for elem in elem:
|
||||
self.indent(elem, level + 1)
|
||||
if not elem.tail or not elem.tail.strip():
|
||||
elem.tail = i
|
||||
else:
|
||||
if level and (not elem.tail or not elem.tail.strip()):
|
||||
elem.tail = i
|
||||
|
||||
def convertMetadataToXML(self, filename, metadata):
|
||||
|
||||
# shorthand for the metadata
|
||||
md = metadata
|
||||
|
||||
# build a tree structure
|
||||
root = ET.Element("comet")
|
||||
root.attrib['xmlns:comet'] = "http://www.denvog.com/comet/"
|
||||
root.attrib['xmlns:xsi'] = "http://www.w3.org/2001/XMLSchema-instance"
|
||||
root.attrib[
|
||||
'xsi:schemaLocation'] = "http://www.denvog.com http://www.denvog.com/comet/comet.xsd"
|
||||
|
||||
# helper func
|
||||
def assign(comet_entry, md_entry):
|
||||
if md_entry is not None:
|
||||
ET.SubElement(root, comet_entry).text = "{0}".format(md_entry)
|
||||
|
||||
# title is manditory
|
||||
if md.title is None:
|
||||
md.title = ""
|
||||
assign('title', md.title)
|
||||
assign('series', md.series)
|
||||
assign('issue', md.issue) # must be int??
|
||||
assign('volume', md.volume)
|
||||
assign('description', md.comments)
|
||||
assign('publisher', md.publisher)
|
||||
assign('pages', md.pageCount)
|
||||
assign('format', md.format)
|
||||
assign('language', md.language)
|
||||
assign('rating', md.maturityRating)
|
||||
assign('price', md.price)
|
||||
assign('isVersionOf', md.isVersionOf)
|
||||
assign('rights', md.rights)
|
||||
assign('identifier', md.identifier)
|
||||
assign('lastMark', md.lastMark)
|
||||
assign('genre', md.genre) # TODO repeatable
|
||||
|
||||
if md.characters is not None:
|
||||
char_list = [c.strip() for c in md.characters.split(',')]
|
||||
for c in char_list:
|
||||
assign('character', c)
|
||||
|
||||
if md.manga is not None and md.manga == "YesAndRightToLeft":
|
||||
assign('readingDirection', "rtl")
|
||||
|
||||
date_str = ""
|
||||
if md.year is not None:
|
||||
date_str = str(md.year).zfill(4)
|
||||
if md.month is not None:
|
||||
date_str += "-" + str(md.month).zfill(2)
|
||||
assign('date', date_str)
|
||||
|
||||
assign('coverImage', md.coverImage)
|
||||
|
||||
# need to specially process the credits, since they are structured
|
||||
# differently than CIX
|
||||
credit_writer_list = list()
|
||||
credit_penciller_list = list()
|
||||
credit_inker_list = list()
|
||||
credit_colorist_list = list()
|
||||
credit_letterer_list = list()
|
||||
credit_cover_list = list()
|
||||
credit_editor_list = list()
|
||||
|
||||
# loop thru credits, and build a list for each role that CoMet supports
|
||||
for credit in metadata.credits:
|
||||
|
||||
if credit['role'].lower() in set(self.writer_synonyms):
|
||||
ET.SubElement(
|
||||
root,
|
||||
'writer').text = "{0}".format(
|
||||
credit['person'])
|
||||
|
||||
if credit['role'].lower() in set(self.penciller_synonyms):
|
||||
ET.SubElement(
|
||||
root,
|
||||
'penciller').text = "{0}".format(
|
||||
credit['person'])
|
||||
|
||||
if credit['role'].lower() in set(self.inker_synonyms):
|
||||
ET.SubElement(
|
||||
root,
|
||||
'inker').text = "{0}".format(
|
||||
credit['person'])
|
||||
|
||||
if credit['role'].lower() in set(self.colorist_synonyms):
|
||||
ET.SubElement(
|
||||
root,
|
||||
'colorist').text = "{0}".format(
|
||||
credit['person'])
|
||||
|
||||
if credit['role'].lower() in set(self.letterer_synonyms):
|
||||
ET.SubElement(
|
||||
root,
|
||||
'letterer').text = "{0}".format(
|
||||
credit['person'])
|
||||
|
||||
if credit['role'].lower() in set(self.cover_synonyms):
|
||||
ET.SubElement(
|
||||
root,
|
||||
'coverDesigner').text = "{0}".format(
|
||||
credit['person'])
|
||||
|
||||
if credit['role'].lower() in set(self.editor_synonyms):
|
||||
ET.SubElement(
|
||||
root,
|
||||
'editor').text = "{0}".format(
|
||||
credit['person'])
|
||||
|
||||
# self pretty-print
|
||||
self.indent(root)
|
||||
|
||||
# wrap it in an ElementTree instance, and save as XML
|
||||
tree = ET.ElementTree(root)
|
||||
return tree
|
||||
|
||||
def convertXMLToMetadata(self, tree):
|
||||
|
||||
root = tree.getroot()
|
||||
|
||||
if root.tag != 'comet':
|
||||
raise 1
|
||||
return None
|
||||
|
||||
metadata = GenericMetadata()
|
||||
md = metadata
|
||||
|
||||
# Helper function
|
||||
def xlate(tag):
|
||||
node = root.find(tag)
|
||||
if node is not None:
|
||||
return node.text
|
||||
else:
|
||||
return None
|
||||
|
||||
md.series = xlate('series')
|
||||
md.title = xlate('title')
|
||||
md.issue = xlate('issue')
|
||||
md.volume = xlate('volume')
|
||||
md.comments = xlate('description')
|
||||
md.publisher = xlate('publisher')
|
||||
md.language = xlate('language')
|
||||
md.format = xlate('format')
|
||||
md.pageCount = xlate('pages')
|
||||
md.maturityRating = xlate('rating')
|
||||
md.price = xlate('price')
|
||||
md.isVersionOf = xlate('isVersionOf')
|
||||
md.rights = xlate('rights')
|
||||
md.identifier = xlate('identifier')
|
||||
md.lastMark = xlate('lastMark')
|
||||
md.genre = xlate('genre') # TODO - repeatable field
|
||||
|
||||
date = xlate('date')
|
||||
if date is not None:
|
||||
parts = date.split('-')
|
||||
if len(parts) > 0:
|
||||
md.year = parts[0]
|
||||
if len(parts) > 1:
|
||||
md.month = parts[1]
|
||||
|
||||
md.coverImage = xlate('coverImage')
|
||||
|
||||
readingDirection = xlate('readingDirection')
|
||||
if readingDirection is not None and readingDirection == "rtl":
|
||||
md.manga = "YesAndRightToLeft"
|
||||
|
||||
# loop for character tags
|
||||
char_list = []
|
||||
for n in root:
|
||||
if n.tag == 'character':
|
||||
char_list.append(n.text.strip())
|
||||
md.characters = utils.listToString(char_list)
|
||||
|
||||
# Now extract the credit info
|
||||
for n in root:
|
||||
if (n.tag == 'writer' or
|
||||
n.tag == 'penciller' or
|
||||
n.tag == 'inker' or
|
||||
n.tag == 'colorist' or
|
||||
n.tag == 'letterer' or
|
||||
n.tag == 'editor'
|
||||
):
|
||||
metadata.addCredit(n.text.strip(), n.tag.title())
|
||||
|
||||
if n.tag == 'coverDesigner':
|
||||
metadata.addCredit(n.text.strip(), "Cover")
|
||||
|
||||
metadata.isEmpty = False
|
||||
|
||||
return metadata
|
||||
|
||||
    # verify that the string actually contains CoMet data in XML format
    def validateString(self, string):
        try:
            tree = ET.ElementTree(ET.fromstring(string))
            root = tree.getroot()
            if root.tag != 'comet':
                raise Exception
        except:
            return False

        return True

    def writeToExternalFile(self, filename, metadata):

        tree = self.convertMetadataToXML(self, metadata)
        # ET.dump(tree)
        tree.write(filename, encoding='utf-8')

    def readFromExternalFile(self, filename):

        tree = ET.parse(filename)
        return self.convertXMLToMetadata(tree)
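A minimal usage sketch of the class above (assuming, as its 'comet' root tag suggests, that it is the CoMet class and that GenericMetadata is the one defined in this package; the file name and sample values are hypothetical):

    # Illustrative only: write metadata out as CoMet XML and read it back.
    md = GenericMetadata()
    md.series = "Example Series"   # hypothetical sample values
    md.issue = "1"
    md.year = 2007
    md.month = 10

    comet = CoMet()
    comet.writeToExternalFile("example.comet.xml", md)
    round_tripped = comet.readFromExternalFile("example.comet.xml")
    print(round_tripped.series, round_tripped.issue, round_tripped.year)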
|
||||
File diff suppressed because it is too large
@@ -1,144 +0,0 @@
|
||||
"""A class to encapsulate the ComicBookInfo data"""
|
||||
|
||||
# Copyright 2012-2014 Anthony Beville
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import json
|
||||
from datetime import datetime
|
||||
#import zipfile
|
||||
|
||||
from .genericmetadata import GenericMetadata
|
||||
from . import utils
|
||||
#import ctversion
|
||||
|
||||
|
||||
class ComicBookInfo:
|
||||
|
||||
def metadataFromString(self, string):
|
||||
|
||||
cbi_container = json.loads(str(string, 'utf-8'))
|
||||
|
||||
metadata = GenericMetadata()
|
||||
|
||||
cbi = cbi_container['ComicBookInfo/1.0']
|
||||
|
||||
# helper func
|
||||
# If item is not in CBI, return None
|
||||
def xlate(cbi_entry):
|
||||
if cbi_entry in cbi:
|
||||
return cbi[cbi_entry]
|
||||
else:
|
||||
return None
|
||||
|
||||
metadata.series = xlate('series')
|
||||
metadata.title = xlate('title')
|
||||
metadata.issue = xlate('issue')
|
||||
metadata.publisher = xlate('publisher')
|
||||
metadata.month = xlate('publicationMonth')
|
||||
metadata.year = xlate('publicationYear')
|
||||
metadata.issueCount = xlate('numberOfIssues')
|
||||
metadata.comments = xlate('comments')
|
||||
metadata.credits = xlate('credits')
|
||||
metadata.genre = xlate('genre')
|
||||
metadata.volume = xlate('volume')
|
||||
metadata.volumeCount = xlate('numberOfVolumes')
|
||||
metadata.language = xlate('language')
|
||||
metadata.country = xlate('country')
|
||||
metadata.criticalRating = xlate('rating')
|
||||
metadata.tags = xlate('tags')
|
||||
|
||||
# make sure credits and tags are at least empty lists and not None
|
||||
if metadata.credits is None:
|
||||
metadata.credits = []
|
||||
if metadata.tags is None:
|
||||
metadata.tags = []
|
||||
|
||||
# need to massage the language string to be ISO
|
||||
if metadata.language is not None:
|
||||
# reverse look-up
|
||||
pattern = metadata.language
|
||||
metadata.language = None
|
||||
for key in utils.getLanguageDict():
|
||||
if utils.getLanguageDict()[key] == pattern.encode('utf-8'):
|
||||
metadata.language = key
|
||||
break
|
||||
|
||||
metadata.isEmpty = False
|
||||
|
||||
return metadata
|
||||
|
||||
def stringFromMetadata(self, metadata):
|
||||
|
||||
cbi_container = self.createJSONDictionary(metadata)
|
||||
return json.dumps(cbi_container)
|
||||
|
||||
def validateString(self, string):
|
||||
"""Verify that the string actually contains CBI data in JSON format"""
|
||||
|
||||
try:
|
||||
cbi_container = json.loads(string)
|
||||
except:
|
||||
return False
|
||||
|
||||
return ('ComicBookInfo/1.0' in cbi_container)
|
||||
|
||||
def createJSONDictionary(self, metadata):
|
||||
"""Create the dictionary that we will convert to JSON text"""
|
||||
|
||||
cbi = dict()
|
||||
cbi_container = {'appID': 'ComicTagger/' + '1.0.0', # ctversion.version,
|
||||
'lastModified': str(datetime.now()),
|
||||
'ComicBookInfo/1.0': cbi}
|
||||
|
||||
# helper func
|
||||
def assign(cbi_entry, md_entry):
|
||||
if md_entry is not None:
|
||||
cbi[cbi_entry] = md_entry
|
||||
|
||||
# helper func
|
||||
def toInt(s):
|
||||
i = None
|
||||
if isinstance(s, (str, int)):
|
||||
try:
|
||||
i = int(s)
|
||||
except ValueError:
|
||||
pass
|
||||
return i
|
||||
|
||||
assign('series', metadata.series)
|
||||
assign('title', metadata.title)
|
||||
assign('issue', metadata.issue)
|
||||
assign('publisher', metadata.publisher)
|
||||
assign('publicationMonth', toInt(metadata.month))
|
||||
assign('publicationYear', toInt(metadata.year))
|
||||
assign('numberOfIssues', toInt(metadata.issueCount))
|
||||
assign('comments', metadata.comments)
|
||||
assign('genre', metadata.genre)
|
||||
assign('volume', toInt(metadata.volume))
|
||||
assign('numberOfVolumes', toInt(metadata.volumeCount))
|
||||
assign('language', utils.getLanguageFromISO(metadata.language))
|
||||
assign('country', metadata.country)
|
||||
assign('rating', metadata.criticalRating)
|
||||
assign('credits', metadata.credits)
|
||||
assign('tags', metadata.tags)
|
||||
|
||||
return cbi_container
|
||||
|
||||
def writeToExternalFile(self, filename, metadata):
|
||||
|
||||
cbi_container = self.createJSONDictionary(metadata)
|
||||
|
||||
f = open(filename, 'w')
|
||||
f.write(json.dumps(cbi_container, indent=4))
|
||||
f.close()
|
||||
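A minimal usage sketch of the ComicBookInfo class above (sample values are hypothetical):

    # Illustrative only: build CBI JSON for a metadata object and parse it back.
    md = GenericMetadata()
    md.series = "Example Series"   # hypothetical sample values
    md.issue = "1"
    md.year = "2007"               # toInt() above turns numeric strings into ints

    cbi = ComicBookInfo()
    json_string = cbi.stringFromMetadata(md)
    assert cbi.validateString(json_string)
    round_tripped = cbi.metadataFromString(json_string.encode("utf-8"))
    print(round_tripped.series, round_tripped.year)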
@@ -1,291 +0,0 @@
|
||||
"""A class to encapsulate ComicRack's ComicInfo.xml data"""
|
||||
|
||||
# Copyright 2012-2014 Anthony Beville
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import xml.etree.ElementTree as ET
|
||||
#from datetime import datetime
|
||||
#from pprint import pprint
|
||||
#import zipfile
|
||||
|
||||
from .genericmetadata import GenericMetadata
|
||||
from . import utils
|
||||
|
||||
|
||||
class ComicInfoXml:
|
||||
|
||||
writer_synonyms = ['writer', 'plotter', 'scripter']
|
||||
penciller_synonyms = ['artist', 'penciller', 'penciler', 'breakdowns']
|
||||
inker_synonyms = ['inker', 'artist', 'finishes']
|
||||
colorist_synonyms = ['colorist', 'colourist', 'colorer', 'colourer']
|
||||
letterer_synonyms = ['letterer']
|
||||
cover_synonyms = ['cover', 'covers', 'coverartist', 'cover artist']
|
||||
editor_synonyms = ['editor']
|
||||
|
||||
def getParseableCredits(self):
|
||||
parsable_credits = []
|
||||
parsable_credits.extend(self.writer_synonyms)
|
||||
parsable_credits.extend(self.penciller_synonyms)
|
||||
parsable_credits.extend(self.inker_synonyms)
|
||||
parsable_credits.extend(self.colorist_synonyms)
|
||||
parsable_credits.extend(self.letterer_synonyms)
|
||||
parsable_credits.extend(self.cover_synonyms)
|
||||
parsable_credits.extend(self.editor_synonyms)
|
||||
return parsable_credits
|
||||
|
||||
def metadataFromString(self, string):
|
||||
|
||||
tree = ET.ElementTree(ET.fromstring(string))
|
||||
return self.convertXMLToMetadata(tree)
|
||||
|
||||
def stringFromMetadata(self, metadata):
|
||||
|
||||
header = '<?xml version="1.0"?>\n'
|
||||
|
||||
tree = self.convertMetadataToXML(self, metadata)
|
||||
tree_str = ET.tostring(tree.getroot()).decode()
|
||||
return header + tree_str
|
||||
|
||||
def indent(self, elem, level=0):
|
||||
# for making the XML output readable
|
||||
i = "\n" + level * " "
|
||||
if len(elem):
|
||||
if not elem.text or not elem.text.strip():
|
||||
elem.text = i + " "
|
||||
if not elem.tail or not elem.tail.strip():
|
||||
elem.tail = i
|
||||
for elem in elem:
|
||||
self.indent(elem, level + 1)
|
||||
if not elem.tail or not elem.tail.strip():
|
||||
elem.tail = i
|
||||
else:
|
||||
if level and (not elem.tail or not elem.tail.strip()):
|
||||
elem.tail = i
|
||||
|
||||
def convertMetadataToXML(self, filename, metadata):
|
||||
|
||||
# shorthand for the metadata
|
||||
md = metadata
|
||||
|
||||
# build a tree structure
|
||||
root = ET.Element("ComicInfo")
|
||||
root.attrib['xmlns:xsi'] = "http://www.w3.org/2001/XMLSchema-instance"
|
||||
root.attrib['xmlns:xsd'] = "http://www.w3.org/2001/XMLSchema"
|
||||
# helper func
|
||||
|
||||
def assign(cix_entry, md_entry):
|
||||
if md_entry is not None:
|
||||
ET.SubElement(root, cix_entry).text = "{0}".format(md_entry)
|
||||
|
||||
assign('Title', md.title)
|
||||
assign('Series', md.series)
|
||||
assign('Number', md.issue)
|
||||
assign('Count', md.issueCount)
|
||||
assign('Volume', md.volume)
|
||||
assign('AlternateSeries', md.alternateSeries)
|
||||
assign('AlternateNumber', md.alternateNumber)
|
||||
assign('StoryArc', md.storyArc)
|
||||
assign('SeriesGroup', md.seriesGroup)
|
||||
assign('AlternateCount', md.alternateCount)
|
||||
assign('Summary', md.comments)
|
||||
assign('Notes', md.notes)
|
||||
assign('Year', md.year)
|
||||
assign('Month', md.month)
|
||||
assign('Day', md.day)
|
||||
|
||||
# need to specially process the credits, since they are structured
|
||||
# differently than CIX
|
||||
credit_writer_list = list()
|
||||
credit_penciller_list = list()
|
||||
credit_inker_list = list()
|
||||
credit_colorist_list = list()
|
||||
credit_letterer_list = list()
|
||||
credit_cover_list = list()
|
||||
credit_editor_list = list()
|
||||
|
||||
# first, loop thru credits, and build a list for each role that CIX
|
||||
# supports
|
||||
for credit in metadata.credits:
|
||||
|
||||
if credit['role'].lower() in set(self.writer_synonyms):
|
||||
credit_writer_list.append(credit['person'].replace(",", ""))
|
||||
|
||||
if credit['role'].lower() in set(self.penciller_synonyms):
|
||||
credit_penciller_list.append(credit['person'].replace(",", ""))
|
||||
|
||||
if credit['role'].lower() in set(self.inker_synonyms):
|
||||
credit_inker_list.append(credit['person'].replace(",", ""))
|
||||
|
||||
if credit['role'].lower() in set(self.colorist_synonyms):
|
||||
credit_colorist_list.append(credit['person'].replace(",", ""))
|
||||
|
||||
if credit['role'].lower() in set(self.letterer_synonyms):
|
||||
credit_letterer_list.append(credit['person'].replace(",", ""))
|
||||
|
||||
if credit['role'].lower() in set(self.cover_synonyms):
|
||||
credit_cover_list.append(credit['person'].replace(",", ""))
|
||||
|
||||
if credit['role'].lower() in set(self.editor_synonyms):
|
||||
credit_editor_list.append(credit['person'].replace(",", ""))
|
||||
|
||||
# second, convert each list to string, and add to XML struct
|
||||
if len(credit_writer_list) > 0:
|
||||
node = ET.SubElement(root, 'Writer')
|
||||
node.text = utils.listToString(credit_writer_list)
|
||||
|
||||
if len(credit_penciller_list) > 0:
|
||||
node = ET.SubElement(root, 'Penciller')
|
||||
node.text = utils.listToString(credit_penciller_list)
|
||||
|
||||
if len(credit_inker_list) > 0:
|
||||
node = ET.SubElement(root, 'Inker')
|
||||
node.text = utils.listToString(credit_inker_list)
|
||||
|
||||
if len(credit_colorist_list) > 0:
|
||||
node = ET.SubElement(root, 'Colorist')
|
||||
node.text = utils.listToString(credit_colorist_list)
|
||||
|
||||
if len(credit_letterer_list) > 0:
|
||||
node = ET.SubElement(root, 'Letterer')
|
||||
node.text = utils.listToString(credit_letterer_list)
|
||||
|
||||
if len(credit_cover_list) > 0:
|
||||
node = ET.SubElement(root, 'CoverArtist')
|
||||
node.text = utils.listToString(credit_cover_list)
|
||||
|
||||
if len(credit_editor_list) > 0:
|
||||
node = ET.SubElement(root, 'Editor')
|
||||
node.text = utils.listToString(credit_editor_list)
|
||||
|
||||
assign('Publisher', md.publisher)
|
||||
assign('Imprint', md.imprint)
|
||||
assign('Genre', md.genre)
|
||||
assign('Web', md.webLink)
|
||||
assign('PageCount', md.pageCount)
|
||||
assign('LanguageISO', md.language)
|
||||
assign('Format', md.format)
|
||||
assign('AgeRating', md.maturityRating)
|
||||
if md.blackAndWhite is not None and md.blackAndWhite:
|
||||
ET.SubElement(root, 'BlackAndWhite').text = "Yes"
|
||||
assign('Manga', md.manga)
|
||||
assign('Characters', md.characters)
|
||||
assign('Teams', md.teams)
|
||||
assign('Locations', md.locations)
|
||||
assign('ScanInformation', md.scanInfo)
|
||||
|
||||
# loop and add the page entries under pages node
|
||||
if len(md.pages) > 0:
|
||||
pages_node = ET.SubElement(root, 'Pages')
|
||||
for page_dict in md.pages:
|
||||
page_node = ET.SubElement(pages_node, 'Page')
|
||||
page_node.attrib = page_dict
|
||||
|
||||
# self pretty-print
|
||||
self.indent(root)
|
||||
|
||||
# wrap it in an ElementTree instance, and save as XML
|
||||
tree = ET.ElementTree(root)
|
||||
return tree
|
||||
|
||||
def convertXMLToMetadata(self, tree):
|
||||
|
||||
root = tree.getroot()
|
||||
|
||||
if root.tag != 'ComicInfo':
|
||||
return None
|
||||
|
||||
metadata = GenericMetadata()
|
||||
md = metadata
|
||||
|
||||
# Helper function
|
||||
def xlate(tag):
|
||||
node = root.find(tag)
|
||||
if node is not None:
|
||||
return node.text
|
||||
else:
|
||||
return None
|
||||
|
||||
md.series = xlate('Series')
|
||||
md.title = xlate('Title')
|
||||
md.issue = xlate('Number')
|
||||
md.issueCount = xlate('Count')
|
||||
md.volume = xlate('Volume')
|
||||
md.alternateSeries = xlate('AlternateSeries')
|
||||
md.alternateNumber = xlate('AlternateNumber')
|
||||
md.alternateCount = xlate('AlternateCount')
|
||||
md.comments = xlate('Summary')
|
||||
md.notes = xlate('Notes')
|
||||
md.year = xlate('Year')
|
||||
md.month = xlate('Month')
|
||||
md.day = xlate('Day')
|
||||
md.publisher = xlate('Publisher')
|
||||
md.imprint = xlate('Imprint')
|
||||
md.genre = xlate('Genre')
|
||||
md.webLink = xlate('Web')
|
||||
md.language = xlate('LanguageISO')
|
||||
md.format = xlate('Format')
|
||||
md.manga = xlate('Manga')
|
||||
md.characters = xlate('Characters')
|
||||
md.teams = xlate('Teams')
|
||||
md.locations = xlate('Locations')
|
||||
md.pageCount = xlate('PageCount')
|
||||
md.scanInfo = xlate('ScanInformation')
|
||||
md.storyArc = xlate('StoryArc')
|
||||
md.seriesGroup = xlate('SeriesGroup')
|
||||
md.maturityRating = xlate('AgeRating')
|
||||
|
||||
tmp = xlate('BlackAndWhite')
|
||||
md.blackAndWhite = False
|
||||
if tmp is not None and tmp.lower() in ["yes", "true", "1"]:
|
||||
md.blackAndWhite = True
|
||||
# Now extract the credit info
|
||||
for n in root:
|
||||
if (n.tag == 'Writer' or
|
||||
n.tag == 'Penciller' or
|
||||
n.tag == 'Inker' or
|
||||
n.tag == 'Colorist' or
|
||||
n.tag == 'Letterer' or
|
||||
n.tag == 'Editor'
|
||||
):
|
||||
if n.text is not None:
|
||||
for name in n.text.split(','):
|
||||
metadata.addCredit(name.strip(), n.tag)
|
||||
|
||||
if n.tag == 'CoverArtist':
|
||||
if n.text is not None:
|
||||
for name in n.text.split(','):
|
||||
metadata.addCredit(name.strip(), "Cover")
|
||||
|
||||
# parse page data now
|
||||
pages_node = root.find("Pages")
|
||||
if pages_node is not None:
|
||||
for page in pages_node:
|
||||
metadata.pages.append(page.attrib)
|
||||
# print page.attrib
|
||||
|
||||
metadata.isEmpty = False
|
||||
|
||||
return metadata
|
||||
|
||||
def writeToExternalFile(self, filename, metadata):
|
||||
|
||||
tree = self.convertMetadataToXML(self, metadata)
|
||||
# ET.dump(tree)
|
||||
tree.write(filename, encoding='utf-8')
|
||||
|
||||
def readFromExternalFile(self, filename):
|
||||
|
||||
tree = ET.parse(filename)
|
||||
return self.convertXMLToMetadata(tree)
|
||||
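A minimal usage sketch of the ComicInfoXml class above (sample values are hypothetical):

    # Illustrative only: serialize to ComicRack's ComicInfo.xml format and parse it back.
    md = GenericMetadata()
    md.series = "Example Series"   # hypothetical sample values
    md.issue = "1"
    md.addCredit("Jane Doe", "Writer")

    cix = ComicInfoXml()
    xml_string = cix.stringFromMetadata(md)
    round_tripped = cix.metadataFromString(xml_string)
    print(round_tripped.series, round_tripped.credits)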
5
comicapi/data/__init__.py
Normal file
@@ -0,0 +1,5 @@
from __future__ import annotations

import importlib.resources

data_path = importlib.resources.files(__package__)
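A sketch of how data_path might be consumed elsewhere in the package; reading publishers.json here is an assumption for illustration, not a call made in this file:

    # Illustrative only: load the bundled publishers.json via data_path.
    import json

    from comicapi.data import data_path

    publishers = json.loads((data_path / "publishers.json").read_text(encoding="utf-8"))
    print(sorted(publishers))  # top-level keys are canonical publisher names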
130
comicapi/data/publishers.json
Normal file
@@ -0,0 +1,130 @@
|
||||
{
|
||||
"Marvel":{
|
||||
"marvel comics": "",
|
||||
"aircel comics": "Aircel Comics",
|
||||
"aircel": "Aircel Comics",
|
||||
"atlas comics": "Atlas Comics",
|
||||
"atlas": "Atlas Comics",
|
||||
"crossgen comics": "CrossGen comics",
|
||||
"crossgen": "CrossGen comics",
|
||||
"curtis magazines": "Curtis Magazines",
|
||||
"disney books group": "Disney Books Group",
|
||||
"disney books": "Disney Books Group",
|
||||
"disney kingdoms": "Disney Kingdoms",
|
||||
"epic comics group": "Epic Comics",
|
||||
"epic comics": "Epic Comics",
|
||||
"epic": "Epic Comics",
|
||||
"eternity comics": "Eternity Comics",
|
||||
"humorama": "Humorama",
|
||||
"icon comics": "Icon Comics",
|
||||
"infinite comics": "Infinite Comics",
|
||||
"malibu comics": "Malibu Comics",
|
||||
"malibu": "Malibu Comics",
|
||||
"marvel 2099": "Marvel 2099",
|
||||
"marvel absurd": "Marvel Absurd",
|
||||
"marvel adventures": "Marvel Adventures",
|
||||
"marvel age": "Marvel Age",
|
||||
"marvel books": "Marvel Books",
|
||||
"marvel comics 2": "Marvel Comics 2",
|
||||
"marvel digital comics unlimited": "Marvel Unlimited",
|
||||
"marvel edge": "Marvel Edge",
|
||||
"marvel frontier": "Marvel Frontier",
|
||||
"marvel illustrated": "Marvel Illustrated",
|
||||
"marvel knights": "Marvel Knights",
|
||||
"marvel magazine group": "Marvel Magazine Group",
|
||||
"marvel mangaverse": "Marvel Mangaverse",
|
||||
"marvel monsters group": "Marvel Monsters Group",
|
||||
"marvel music": "Marvel Music",
|
||||
"marvel next": "Marvel Next",
|
||||
"marvel noir": "Marvel Noir",
|
||||
"marvel press": "Marvel Press",
|
||||
"marvel uk": "Marvel UK",
|
||||
"marvel unlimited": "Marvel Unlimited",
|
||||
"max": "MAX",
|
||||
"mc2": "Marvel Comics 2",
|
||||
"new universe": "New Universe",
|
||||
"non-pareil publishing corp.": "Non-Pareil Publishing Corp.",
|
||||
"paramount comics": "Paramount Comics",
|
||||
"power comics": "Power Comics",
|
||||
"razorline": "Razorline",
|
||||
"star comics": "Star Comics",
|
||||
"timely comics": "Timely Comics",
|
||||
"timely": "Timely Comics",
|
||||
"tsunami": "Tsunami",
|
||||
"ultimate comics": "Ultimate Comics",
|
||||
"ultimate marvel": "Ultimate Marvel",
|
||||
"vital publications, inc.": "Vital Publications, Inc."
|
||||
},
|
||||
|
||||
"DC Comics":{
|
||||
"dc_comics": "",
|
||||
"dc": "",
|
||||
"dccomics": "",
|
||||
"!mpact comics": "Impact Comics",
|
||||
"all star dc": "All-Star",
|
||||
"all star": "All-Star",
|
||||
"all-star dc": "All-Star",
|
||||
"all-star": "All-Star",
|
||||
"america's best comics": "America's Best Comics",
|
||||
"black label": "DC Black Label",
|
||||
"cliffhanger": "Cliffhanger",
|
||||
"cmx manga": "CMX Manga",
|
||||
"dc black label": "DC Black Label",
|
||||
"dc focus": "DC Focus",
|
||||
"dc ink": "DC Ink",
|
||||
"dc zoom": "DC Zoom",
|
||||
"earth m": "Earth M",
|
||||
"earth one": "Earth One",
|
||||
"earth-m": "Earth M",
|
||||
"elseworlds": "Elseworlds",
|
||||
"eo": "Earth One",
|
||||
"first wave": "First Wave",
|
||||
"focus": "DC Focus",
|
||||
"helix": "Helix",
|
||||
"homage comics": "Homage Comics",
|
||||
"impact comics": "Impact Comics",
|
||||
"impact! comics": "Impact Comics",
|
||||
"johnny dc": "Johnny DC",
|
||||
"mad": "Mad",
|
||||
"minx": "Minx",
|
||||
"paradox press": "Paradox Press",
|
||||
"piranha press": "Piranha Press",
|
||||
"sandman universe": "Sandman Universe",
|
||||
"tangent comics": "Tangent Comics",
|
||||
"tsr": "TSR",
|
||||
"vertigo": "Vertigo",
|
||||
"wildstorm productions": "WildStorm Productions",
|
||||
"wildstorm signature": "WildStorm Productions",
|
||||
"wildstorm": "WildStorm Productions",
|
||||
"wonder comics": "Wonder Comics",
|
||||
"young animal": "Young Animal",
|
||||
"zuda comics": "Zuda Comics",
|
||||
"zuda": "Zuda Comics"
|
||||
},
|
||||
|
||||
"Dark Horse Comics":{
|
||||
"berger books": "Berger Books",
|
||||
"comics' greatest world": "Dark Horse Heroes",
|
||||
"dark horse digital": "Dark Horse Digital",
|
||||
"dark horse heroes": "Dark Horse Heroes",
|
||||
"dark horse manga": "Dark Horse Manga",
|
||||
"dh deluxe": "DH Deluxe",
|
||||
"dh press": "DH Press",
|
||||
"kitchen sink books": "Kitchen Sink Books",
|
||||
"legend": "Legend",
|
||||
"m press": "M Press",
|
||||
"maverick": "Maverick"
|
||||
},
|
||||
|
||||
"Archie Comics":{
|
||||
"archie action": "Archie Action",
|
||||
"archie adventure Series": "Archie Adventure Series",
|
||||
"archie horror": "Archie Horror",
|
||||
"dark circle Comics": "Dark Circle Comics",
|
||||
"dark circle": "Dark Circle Comics",
|
||||
"mighty comics Group": "Mighty Comics Group",
|
||||
"radio comics": "Mighty Comics Group",
|
||||
"red circle Comics": "Dark Circle Comics",
|
||||
"red circle": "Dark Circle Comics"
|
||||
}
|
||||
}
|
||||
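In the table above, each canonical publisher maps lower-cased imprint aliases to a display name, with an empty string meaning the alias is the publisher itself rather than an imprint. A hedged sketch of how such a table can be used to split a raw publisher string into (imprint, publisher); the real lookup in this repo is utils.get_publisher(), and the helper below is a simplified, hypothetical stand-in:

    # Illustrative only: resolve a raw publisher/imprint string against the table.
    def resolve_publisher(raw, table):
        needle = raw.casefold().strip()
        for publisher, imprints in table.items():
            if needle in imprints:
                # empty string means the alias names the publisher itself
                return imprints[needle], publisher
        return "", raw  # unknown: no imprint, keep the original name

    # resolve_publisher("vertigo", publishers)       -> ("Vertigo", "DC Comics")
    # resolve_publisher("marvel comics", publishers) -> ("", "Marvel")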
420
comicapi/filenamelexer.py
Normal file
@@ -0,0 +1,420 @@
|
||||
# Extracted and mutilated from https://github.com/lordwelch/wsfmt
|
||||
# Which was extracted and mutilated from https://github.com/golang/go/tree/master/src/text/template/parse
|
||||
from __future__ import annotations
|
||||
|
||||
import calendar
|
||||
import os
|
||||
import unicodedata
|
||||
from enum import Enum, auto
|
||||
from typing import Any, Callable, Protocol
|
||||
|
||||
|
||||
class ItemType(Enum):
|
||||
Error = auto() # Error occurred; value is text of error
|
||||
EOF = auto()
|
||||
Text = auto() # Text
|
||||
LeftParen = auto()
|
||||
Number = auto() # Simple number
|
||||
IssueNumber = auto() # Preceded by a # Symbol
|
||||
RightParen = auto()
|
||||
Space = auto() # Run of spaces separating arguments
|
||||
Dot = auto()
|
||||
LeftBrace = auto()
|
||||
RightBrace = auto()
|
||||
LeftSBrace = auto()
|
||||
RightSBrace = auto()
|
||||
Symbol = auto()
|
||||
Skip = auto() # __ or -- no title, issue or series information beyond
|
||||
Operator = auto()
|
||||
Calendar = auto()
|
||||
InfoSpecifier = auto() # Specifies type of info e.g. v1 for 'volume': 1
|
||||
ArchiveType = auto()
|
||||
Honorific = auto()
|
||||
Publisher = auto()
|
||||
Keywords = auto()
|
||||
FCBD = auto()
|
||||
ComicType = auto()
|
||||
C2C = auto()
|
||||
|
||||
|
||||
braces = [
|
||||
ItemType.LeftBrace,
|
||||
ItemType.LeftParen,
|
||||
ItemType.LeftSBrace,
|
||||
ItemType.RightBrace,
|
||||
ItemType.RightParen,
|
||||
ItemType.RightSBrace,
|
||||
]
|
||||
|
||||
eof = chr(0)
|
||||
|
||||
key = {
|
||||
"fcbd": ItemType.FCBD,
|
||||
"freecomicbookday": ItemType.FCBD,
|
||||
"cbr": ItemType.ArchiveType,
|
||||
"cbz": ItemType.ArchiveType,
|
||||
"cbt": ItemType.ArchiveType,
|
||||
"cb7": ItemType.ArchiveType,
|
||||
"rar": ItemType.ArchiveType,
|
||||
"zip": ItemType.ArchiveType,
|
||||
"tar": ItemType.ArchiveType,
|
||||
"7z": ItemType.ArchiveType,
|
||||
"annual": ItemType.ComicType,
|
||||
"volume": ItemType.InfoSpecifier,
|
||||
"vol.": ItemType.InfoSpecifier,
|
||||
"vol": ItemType.InfoSpecifier,
|
||||
"v": ItemType.InfoSpecifier,
|
||||
"of": ItemType.InfoSpecifier,
|
||||
"dc": ItemType.Publisher,
|
||||
"marvel": ItemType.Publisher,
|
||||
"covers": ItemType.InfoSpecifier,
|
||||
"c2c": ItemType.C2C,
|
||||
"mr": ItemType.Honorific,
|
||||
"ms": ItemType.Honorific,
|
||||
"mrs": ItemType.Honorific,
|
||||
"dr": ItemType.Honorific,
|
||||
}
|
||||
|
||||
|
||||
class Item:
|
||||
def __init__(self, typ: ItemType, pos: int, val: str) -> None:
|
||||
self.typ: ItemType = typ
|
||||
self.pos: int = pos
|
||||
self.val: str = val
|
||||
self.no_space = False
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"{self.val}: index: {self.pos}: {self.typ}"
|
||||
|
||||
|
||||
class LexerFunc(Protocol):
|
||||
def __call__(self, __origin: Lexer) -> LexerFunc | None: ...
|
||||
|
||||
|
||||
class Lexer:
|
||||
def __init__(self, string: str, allow_issue_start_with_letter: bool = False) -> None:
|
||||
self.input: str = string # The string being scanned
|
||||
# The next lexing function to enter
|
||||
self.state: LexerFunc | None = None
|
||||
self.pos: int = -1 # Current position in the input
|
||||
self.start: int = 0 # Start position of this item
|
||||
self.lastPos: int = 0 # Position of most recent item returned by nextItem
|
||||
self.paren_depth: int = 0 # Nesting depth of ( ) exprs
|
||||
self.brace_depth: int = 0 # Nesting depth of { }
|
||||
self.sbrace_depth: int = 0 # Nesting depth of [ ]
|
||||
self.items: list[Item] = []
|
||||
self.allow_issue_start_with_letter = allow_issue_start_with_letter
|
||||
|
||||
# Next returns the next rune in the input.
|
||||
def get(self) -> str:
|
||||
if int(self.pos) >= len(self.input) - 1:
|
||||
self.pos += 1
|
||||
return eof
|
||||
|
||||
self.pos += 1
|
||||
return self.input[self.pos]
|
||||
|
||||
# Peek returns but does not consume the next rune in the input.
|
||||
def peek(self) -> str:
|
||||
if int(self.pos) >= len(self.input) - 1:
|
||||
return eof
|
||||
|
||||
return self.input[self.pos + 1]
|
||||
|
||||
def backup(self) -> None:
|
||||
self.pos -= 1
|
||||
|
||||
# Emit passes an item back to the client.
|
||||
def emit(self, t: ItemType) -> None:
|
||||
self.items.append(Item(t, self.start, self.input[self.start : self.pos + 1]))
|
||||
self.start = self.pos + 1
|
||||
|
||||
# Ignore skips over the pending input before this point.
|
||||
def ignore(self) -> None:
|
||||
self.start = self.pos
|
||||
|
||||
# Accept consumes the next rune if it's from the valid set.
|
||||
def accept(self, valid: str | Callable[[str], bool]) -> bool:
|
||||
if isinstance(valid, str):
|
||||
if self.get() in valid:
|
||||
return True
|
||||
else:
|
||||
if valid(self.get()):
|
||||
return True
|
||||
|
||||
self.backup()
|
||||
return False
|
||||
|
||||
# AcceptRun consumes a run of runes from the valid set.
|
||||
def accept_run(self, valid: str | Callable[[str], bool]) -> None:
|
||||
if isinstance(valid, str):
|
||||
while self.get() in valid:
|
||||
continue
|
||||
else:
|
||||
while valid(self.get()):
|
||||
continue
|
||||
|
||||
self.backup()
|
||||
|
||||
def scan_number(self) -> bool:
|
||||
digits = "0123456789.,"
|
||||
|
||||
self.accept_run(digits)
|
||||
if self.input[self.pos] == ".":
|
||||
self.backup()
|
||||
self.accept_run(str.isalpha)
|
||||
|
||||
return True
|
||||
|
||||
# Runs the state machine for the lexer.
|
||||
def run(self) -> None:
|
||||
self.state = lex_filename
|
||||
while self.state is not None:
|
||||
self.state = self.state(self)
|
||||
|
||||
|
||||
# Errorf returns an error token and terminates the scan by passing
|
||||
# Back a nil pointer that will be the next state, terminating self.nextItem.
|
||||
def errorf(lex: Lexer, message: str) -> Any:
|
||||
lex.items.append(Item(ItemType.Error, lex.start, message))
|
||||
return None
|
||||
|
||||
|
||||
# Scans the elements inside action delimiters.
|
||||
def lex_filename(lex: Lexer) -> LexerFunc | None:
|
||||
r = lex.get()
|
||||
if r == eof:
|
||||
if lex.paren_depth != 0:
|
||||
errorf(lex, "unclosed left paren")
|
||||
return None
|
||||
|
||||
if lex.brace_depth != 0:
|
||||
errorf(lex, "unclosed left paren")
|
||||
return None
|
||||
lex.emit(ItemType.EOF)
|
||||
return None
|
||||
elif is_space(r):
|
||||
if r == "_" and lex.peek() == "_":
|
||||
lex.get()
|
||||
lex.emit(ItemType.Skip)
|
||||
else:
|
||||
return lex_space
|
||||
elif r == ".":
|
||||
r = lex.peek()
|
||||
if r.isnumeric() and lex.pos > 0 and is_space(lex.input[lex.pos - 1]):
|
||||
return lex_number
|
||||
lex.emit(ItemType.Dot)
|
||||
return lex_filename
|
||||
elif r == "'":
|
||||
r = lex.peek()
|
||||
if r.isdigit():
|
||||
return lex_number
|
||||
lex.accept_run(is_symbol)
|
||||
lex.emit(ItemType.Symbol)
|
||||
elif r.isnumeric():
|
||||
lex.backup()
|
||||
return lex_number
|
||||
elif r == "#":
|
||||
if lex.allow_issue_start_with_letter and is_alpha_numeric(lex.peek()):
|
||||
return lex_issue_number
|
||||
elif lex.peek().isdigit() or lex.peek() in "-+.":
|
||||
return lex_issue_number
|
||||
lex.emit(ItemType.Symbol)
|
||||
elif is_operator(r):
|
||||
if r == "-" and lex.peek() == "-":
|
||||
lex.get()
|
||||
lex.emit(ItemType.Skip)
|
||||
else:
|
||||
return lex_operator
|
||||
elif is_alpha_numeric(r):
|
||||
lex.backup()
|
||||
return lex_text
|
||||
elif r == "(":
|
||||
lex.emit(ItemType.LeftParen)
|
||||
lex.paren_depth += 1
|
||||
elif r == ")":
|
||||
lex.emit(ItemType.RightParen)
|
||||
lex.paren_depth -= 1
|
||||
if lex.paren_depth < 0:
|
||||
errorf(lex, "unexpected right paren " + r)
|
||||
return None
|
||||
|
||||
elif r == "{":
|
||||
lex.emit(ItemType.LeftBrace)
|
||||
lex.brace_depth += 1
|
||||
elif r == "}":
|
||||
lex.emit(ItemType.RightBrace)
|
||||
lex.brace_depth -= 1
|
||||
if lex.brace_depth < 0:
|
||||
errorf(lex, "unexpected right brace " + r)
|
||||
return None
|
||||
|
||||
elif r == "[":
|
||||
lex.emit(ItemType.LeftSBrace)
|
||||
lex.sbrace_depth += 1
|
||||
elif r == "]":
|
||||
lex.emit(ItemType.RightSBrace)
|
||||
lex.sbrace_depth -= 1
|
||||
if lex.sbrace_depth < 0:
|
||||
errorf(lex, "unexpected right brace " + r)
|
||||
return None
|
||||
elif is_symbol(r):
|
||||
if unicodedata.category(r) == "Sc":
|
||||
return lex_currency
|
||||
lex.accept_run(is_symbol)
|
||||
lex.emit(ItemType.Symbol)
|
||||
else:
|
||||
errorf(lex, "unrecognized character in action: " + repr(r))
|
||||
return None
|
||||
|
||||
return lex_filename
|
||||
|
||||
|
||||
def lex_currency(lex: Lexer) -> LexerFunc:
|
||||
orig = lex.pos
|
||||
lex.accept_run(is_space)
|
||||
if lex.peek().isnumeric():
|
||||
return lex_number
|
||||
else:
|
||||
lex.pos = orig
|
||||
# We don't have a number with this currency symbol. Don't treat it specially
|
||||
lex.emit(ItemType.Symbol)
|
||||
return lex_filename
|
||||
|
||||
|
||||
def lex_operator(lex: Lexer) -> LexerFunc:
|
||||
lex.accept_run("-|:;")
|
||||
lex.emit(ItemType.Operator)
|
||||
return lex_filename
|
||||
|
||||
|
||||
# LexSpace scans a run of space characters.
|
||||
# One space has already been seen.
|
||||
def lex_space(lex: Lexer) -> LexerFunc:
|
||||
lex.accept_run(is_space)
|
||||
|
||||
lex.emit(ItemType.Space)
|
||||
return lex_filename
|
||||
|
||||
|
||||
# Lex_text scans an alphanumeric.
|
||||
def lex_text(lex: Lexer) -> LexerFunc:
|
||||
while True:
|
||||
r = lex.get()
|
||||
if is_alpha_numeric(r):
|
||||
if r.isnumeric(): # E.g. v1
|
||||
word = lex.input[lex.start : lex.pos]
|
||||
if word.casefold() in key and key[word.casefold()] == ItemType.InfoSpecifier:
|
||||
lex.backup()
|
||||
lex.emit(key[word.casefold()])
|
||||
return lex_filename
|
||||
else:
|
||||
if r == "'" and lex.peek() == "s":
|
||||
lex.get()
|
||||
else:
|
||||
lex.backup()
|
||||
word = lex.input[lex.start : lex.pos + 1]
|
||||
if word.casefold() == "vol" and lex.peek() == ".":
|
||||
lex.get()
|
||||
word = lex.input[lex.start : lex.pos + 1]
|
||||
|
||||
if word.casefold() in key:
|
||||
lex.emit(key[word.casefold()])
|
||||
elif cal(word):
|
||||
lex.emit(ItemType.Calendar)
|
||||
else:
|
||||
lex.emit(ItemType.Text)
|
||||
break
|
||||
|
||||
return lex_filename
|
||||
|
||||
|
||||
def cal(value: str) -> set[Any]:
|
||||
month_abbr = [i for i, x in enumerate(calendar.month_abbr) if x == value.title()]
|
||||
month_name = [i for i, x in enumerate(calendar.month_name) if x == value.title()]
|
||||
day_abbr = [i for i, x in enumerate(calendar.day_abbr) if x == value.title()]
|
||||
day_name = [i for i, x in enumerate(calendar.day_name) if x == value.title()]
|
||||
return set(month_abbr + month_name + day_abbr + day_name)
|
||||
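For reference, cal() returns the set of calendar indices a word matches (using the standard library's numbering), so an empty set is falsy when lex_text above uses it as a guard:

    # cal("Mar")    -> {3}      month abbreviation
    # cal("March")  -> {3}      full month name
    # cal("Monday") -> {0}      day name (calendar numbers weekdays from 0)
    # cal("Comic")  -> set()    not a calendar word, so lex_text emits plain Text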
|
||||
|
||||
def lex_number(lex: Lexer) -> LexerFunc | None:
|
||||
if not lex.scan_number():
|
||||
return errorf(lex, "bad number syntax: " + lex.input[lex.start : lex.pos])
|
||||
# Complex number logic removed. Messes with math operations without space
|
||||
|
||||
if lex.input[lex.start] == "#":
|
||||
lex.emit(ItemType.IssueNumber)
|
||||
elif not lex.input[lex.pos].isdigit():
|
||||
# Assume that 80th is just text and not a number
|
||||
lex.emit(ItemType.Text)
|
||||
else:
|
||||
# Used to check for a '$'
|
||||
endNumber = lex.pos
|
||||
|
||||
# Consume any spaces
|
||||
lex.accept_run(is_space)
|
||||
|
||||
# This number starts with a '$' emit it as Text instead of a Number
|
||||
if "Sc" == unicodedata.category(lex.input[lex.start]):
|
||||
lex.pos = endNumber
|
||||
lex.emit(ItemType.Text)
|
||||
|
||||
# This number ends in a '$' if there is a number on the other side we assume it belongs to the following number
|
||||
elif "Sc" == unicodedata.category(lex.get()):
|
||||
# Store the end of the number '$'. We still need to check to see if there is a number coming up
|
||||
endCurrency = lex.pos
|
||||
# Consume any spaces
|
||||
lex.accept_run(is_space)
|
||||
|
||||
# This is a number
|
||||
if lex.peek().isnumeric():
|
||||
# We go back to the original number before the '$' and emit a number
|
||||
lex.pos = endNumber
|
||||
lex.emit(ItemType.Number)
|
||||
else:
|
||||
# There was no following number, reset to the '$' and emit a number
|
||||
lex.pos = endCurrency
|
||||
lex.emit(ItemType.Text)
|
||||
else:
|
||||
# We go back to the original number there is no '$'
|
||||
lex.pos = endNumber
|
||||
lex.emit(ItemType.Number)
|
||||
|
||||
return lex_filename
|
||||
|
||||
|
||||
def lex_issue_number(lex: Lexer) -> Callable[[Lexer], Callable | None] | None: # type: ignore[type-arg]
|
||||
# Only called when lex.input[lex.start] == "#"
|
||||
original_start = lex.pos
|
||||
lex.accept_run(str.isalpha)
|
||||
|
||||
if lex.peek().isnumeric():
|
||||
return lex_number
|
||||
else:
|
||||
lex.pos = original_start
|
||||
lex.emit(ItemType.Symbol)
|
||||
|
||||
return lex_filename
|
||||
|
||||
|
||||
def is_space(character: str) -> bool:
|
||||
return character in "_ \t"
|
||||
|
||||
|
||||
# IsAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
|
||||
def is_alpha_numeric(character: str) -> bool:
|
||||
return character.isalpha() or character.isnumeric()
|
||||
|
||||
|
||||
def is_operator(character: str) -> bool:
|
||||
return character in "-|:;/\\"
|
||||
|
||||
|
||||
def is_symbol(character: str) -> bool:
|
||||
return unicodedata.category(character)[0] in "PS"
|
||||
|
||||
|
||||
def Lex(filename: str, allow_issue_start_with_letter: bool = False) -> Lexer:
|
||||
lex = Lexer(os.path.basename(filename), allow_issue_start_with_letter)
|
||||
lex.run()
|
||||
return lex
|
||||
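A minimal usage sketch of the lexer above (the filename is hypothetical):

    # Illustrative only: tokenize a comic archive filename.
    lex = Lex("Example Series v2 #003 (2007).cbz")
    for item in lex.items:
        print(item)  # each Item repr shows its value, position, and ItemType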
File diff suppressed because it is too large
@@ -6,25 +6,35 @@ possible, however lossy it might be
|
||||
|
||||
"""
|
||||
|
||||
# Copyright 2012-2014 Anthony Beville
|
||||
|
||||
# Copyright 2012-2014 ComicTagger Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
from . import utils
|
||||
import copy
|
||||
import dataclasses
|
||||
import logging
|
||||
from collections.abc import Sequence
|
||||
from typing import Any, TypedDict
|
||||
|
||||
from typing_extensions import NamedTuple, Required
|
||||
|
||||
from comicapi import utils
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class PageType:
|
||||
|
||||
"""
|
||||
These page info classes are exactly the same as the CIX scheme, since
|
||||
it's unique
|
||||
@@ -42,269 +52,378 @@ class PageType:
|
||||
Other = "Other"
|
||||
Deleted = "Deleted"
|
||||
|
||||
"""
|
||||
class PageInfo:
|
||||
Image = 0
|
||||
Type = PageType.Story
|
||||
DoublePage = False
|
||||
ImageSize = 0
|
||||
Key = ""
|
||||
ImageWidth = 0
|
||||
ImageHeight = 0
|
||||
"""
|
||||
|
||||
class ImageMetadata(TypedDict, total=False):
|
||||
filename: str
|
||||
type: str
|
||||
bookmark: str
|
||||
double_page: bool
|
||||
image_index: Required[int]
|
||||
size: str
|
||||
height: str
|
||||
width: str
|
||||
|
||||
|
||||
class Credit(TypedDict):
|
||||
person: str
|
||||
role: str
|
||||
primary: bool
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class ComicSeries:
|
||||
id: str
|
||||
name: str
|
||||
aliases: set[str]
|
||||
count_of_issues: int | None
|
||||
count_of_volumes: int | None
|
||||
description: str
|
||||
image_url: str
|
||||
publisher: str
|
||||
start_year: int | None
|
||||
format: str | None
|
||||
|
||||
def copy(self) -> ComicSeries:
|
||||
return copy.deepcopy(self)
|
||||
|
||||
|
||||
class TagOrigin(NamedTuple):
|
||||
id: str
|
||||
name: str
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class GenericMetadata:
|
||||
writer_synonyms = ("writer", "plotter", "scripter", "script")
|
||||
penciller_synonyms = ("artist", "penciller", "penciler", "breakdowns", "pencils", "painting")
|
||||
inker_synonyms = ("inker", "artist", "finishes", "inks", "painting")
|
||||
colorist_synonyms = ("colorist", "colourist", "colorer", "colourer", "colors", "painting")
|
||||
letterer_synonyms = ("letterer", "letters")
|
||||
cover_synonyms = ("cover", "covers", "coverartist", "cover artist")
|
||||
editor_synonyms = ("editor", "edits", "editing")
|
||||
translator_synonyms = ("translator", "translation")
|
||||
|
||||
def __init__(self):
|
||||
is_empty: bool = True
|
||||
tag_origin: TagOrigin | None = None
|
||||
issue_id: str | None = None
|
||||
series_id: str | None = None
|
||||
|
||||
self.isEmpty = True
|
||||
self.tagOrigin = None
|
||||
series: str | None = None
|
||||
series_aliases: set[str] = dataclasses.field(default_factory=set)
|
||||
issue: str | None = None
|
||||
issue_count: int | None = None
|
||||
title: str | None = None
|
||||
title_aliases: set[str] = dataclasses.field(default_factory=set)
|
||||
volume: int | None = None
|
||||
volume_count: int | None = None
|
||||
genres: set[str] = dataclasses.field(default_factory=set)
|
||||
description: str | None = None # use same way as Summary in CIX
|
||||
notes: str | None = None
|
||||
|
||||
self.series = None
|
||||
self.issue = None
|
||||
self.title = None
|
||||
self.publisher = None
|
||||
self.month = None
|
||||
self.year = None
|
||||
self.day = None
|
||||
self.issueCount = None
|
||||
self.volume = None
|
||||
self.genre = None
|
||||
self.language = None # 2 letter iso code
|
||||
self.comments = None # use same way as Summary in CIX
|
||||
alternate_series: str | None = None
|
||||
alternate_number: str | None = None
|
||||
alternate_count: int | None = None
|
||||
story_arcs: list[str] = dataclasses.field(default_factory=list)
|
||||
series_groups: list[str] = dataclasses.field(default_factory=list)
|
||||
|
||||
self.volumeCount = None
|
||||
self.criticalRating = None
|
||||
self.country = None
|
||||
publisher: str | None = None
|
||||
imprint: str | None = None
|
||||
day: int | None = None
|
||||
month: int | None = None
|
||||
year: int | None = None
|
||||
language: str | None = None # 2 letter iso code
|
||||
country: str | None = None
|
||||
web_link: str | None = None
|
||||
format: str | None = None
|
||||
manga: str | None = None
|
||||
black_and_white: bool | None = None
|
||||
maturity_rating: str | None = None
|
||||
critical_rating: float | None = None # rating in CBL; CommunityRating in CIX
|
||||
scan_info: str | None = None
|
||||
|
||||
self.alternateSeries = None
|
||||
self.alternateNumber = None
|
||||
self.alternateCount = None
|
||||
self.imprint = None
|
||||
self.notes = None
|
||||
self.webLink = None
|
||||
self.format = None
|
||||
self.manga = None
|
||||
self.blackAndWhite = None
|
||||
self.pageCount = None
|
||||
self.maturityRating = None
|
||||
tags: set[str] = dataclasses.field(default_factory=set)
|
||||
pages: list[ImageMetadata] = dataclasses.field(default_factory=list)
|
||||
page_count: int | None = None
|
||||
|
||||
self.storyArc = None
|
||||
self.seriesGroup = None
|
||||
self.scanInfo = None
|
||||
characters: set[str] = dataclasses.field(default_factory=set)
|
||||
teams: set[str] = dataclasses.field(default_factory=set)
|
||||
locations: set[str] = dataclasses.field(default_factory=set)
|
||||
credits: list[Credit] = dataclasses.field(default_factory=list)
|
||||
|
||||
self.characters = None
|
||||
self.teams = None
|
||||
self.locations = None
|
||||
# Some CoMet-only items
|
||||
price: float | None = None
|
||||
is_version_of: str | None = None
|
||||
rights: str | None = None
|
||||
identifier: str | None = None
|
||||
last_mark: str | None = None
|
||||
|
||||
self.credits = list()
|
||||
self.tags = list()
|
||||
self.pages = list()
|
||||
# urls to cover image, not generally part of the metadata
|
||||
_cover_image: str | None = None
|
||||
_alternate_images: list[str] = dataclasses.field(default_factory=list)
|
||||
|
||||
# Some CoMet-only items
|
||||
self.price = None
|
||||
self.isVersionOf = None
|
||||
self.rights = None
|
||||
self.identifier = None
|
||||
self.lastMark = None
|
||||
self.coverImage = None
|
||||
def __post_init__(self) -> None:
|
||||
for key, value in self.__dict__.items():
|
||||
if value and key != "is_empty":
|
||||
self.is_empty = False
|
||||
break
|
||||
|
||||
def overlay(self, new_md):
|
||||
def copy(self) -> GenericMetadata:
|
||||
return copy.deepcopy(self)
|
||||
|
||||
def replace(self, /, **kwargs: Any) -> GenericMetadata:
|
||||
tmp = self.copy()
|
||||
tmp.__dict__.update(kwargs)
|
||||
return tmp
|
||||
|
||||
def get_clean_metadata(self, *attributes: str) -> GenericMetadata:
|
||||
new_md = GenericMetadata()
|
||||
for attr in sorted(attributes):
|
||||
if "." in attr:
|
||||
lst, _, name = attr.partition(".")
|
||||
old_value = getattr(self, lst)
|
||||
new_value = getattr(new_md, lst)
|
||||
if old_value:
|
||||
if not new_value:
|
||||
for x in old_value:
|
||||
new_value.append(x.__class__())
|
||||
for i, x in enumerate(old_value):
|
||||
if isinstance(x, dict):
|
||||
if name in x:
|
||||
new_value[i][name] = x[name]
|
||||
else:
|
||||
setattr(new_value[i], name, getattr(x, name))
|
||||
|
||||
else:
|
||||
old_value = getattr(self, attr)
|
||||
if isinstance(old_value, list):
|
||||
continue
|
||||
setattr(new_md, attr, old_value)
|
||||
|
||||
new_md.__post_init__()
|
||||
return new_md
|
||||
|
||||
def overlay(self, new_md: GenericMetadata) -> None:
|
||||
"""Overlay a metadata object on this one
|
||||
|
||||
That is, when the new object has non-None values, over-write them
|
||||
to this one.
|
||||
"""
|
||||
|
||||
def assign(cur, new):
|
||||
def assign(cur: str, new: Any) -> None:
|
||||
if new is not None:
|
||||
if isinstance(new, str) and len(new) == 0:
|
||||
setattr(self, cur, None)
|
||||
if isinstance(getattr(self, cur), (list, set)):
|
||||
getattr(self, cur).clear()
|
||||
else:
|
||||
setattr(self, cur, None)
|
||||
elif isinstance(new, (list, set)) and len(new) == 0:
|
||||
pass
|
||||
else:
|
||||
setattr(self, cur, new)
|
||||
|
||||
if not new_md.isEmpty:
|
||||
self.isEmpty = False
|
||||
if not new_md.is_empty:
|
||||
self.is_empty = False
|
||||
|
||||
assign('series', new_md.series)
|
||||
assign("tag_origin", new_md.tag_origin)
|
||||
assign("issue_id", new_md.issue_id)
|
||||
assign("series_id", new_md.series_id)
|
||||
|
||||
assign("series", new_md.series)
|
||||
assign("series_aliases", new_md.series_aliases)
|
||||
assign("issue", new_md.issue)
|
||||
assign("issueCount", new_md.issueCount)
|
||||
assign("issue_count", new_md.issue_count)
|
||||
assign("title", new_md.title)
|
||||
assign("title_aliases", new_md.title_aliases)
|
||||
assign("volume", new_md.volume)
|
||||
assign("volume_count", new_md.volume_count)
|
||||
assign("genres", new_md.genres)
|
||||
assign("description", new_md.description)
|
||||
assign("notes", new_md.notes)
|
||||
|
||||
assign("alternate_series", new_md.alternate_series)
|
||||
assign("alternate_number", new_md.alternate_number)
|
||||
assign("alternate_count", new_md.alternate_count)
|
||||
assign("story_arcs", new_md.story_arcs)
|
||||
assign("series_groups", new_md.series_groups)
|
||||
|
||||
assign("publisher", new_md.publisher)
|
||||
assign("imprint", new_md.imprint)
|
||||
assign("day", new_md.day)
|
||||
assign("month", new_md.month)
|
||||
assign("year", new_md.year)
|
||||
assign("volume", new_md.volume)
|
||||
assign("volumeCount", new_md.volumeCount)
|
||||
assign("genre", new_md.genre)
|
||||
assign("language", new_md.language)
|
||||
assign("country", new_md.country)
|
||||
assign("criticalRating", new_md.criticalRating)
|
||||
assign("alternateSeries", new_md.alternateSeries)
|
||||
assign("alternateNumber", new_md.alternateNumber)
|
||||
assign("alternateCount", new_md.alternateCount)
|
||||
assign("imprint", new_md.imprint)
|
||||
assign("webLink", new_md.webLink)
|
||||
assign("web_link", new_md.web_link)
|
||||
assign("format", new_md.format)
|
||||
assign("manga", new_md.manga)
|
||||
assign("blackAndWhite", new_md.blackAndWhite)
|
||||
assign("maturityRating", new_md.maturityRating)
|
||||
assign("storyArc", new_md.storyArc)
|
||||
assign("seriesGroup", new_md.seriesGroup)
|
||||
assign("scanInfo", new_md.scanInfo)
|
||||
assign("black_and_white", new_md.black_and_white)
|
||||
assign("maturity_rating", new_md.maturity_rating)
|
||||
assign("critical_rating", new_md.critical_rating)
|
||||
assign("scan_info", new_md.scan_info)
|
||||
|
||||
assign("tags", new_md.tags)
|
||||
assign("pages", new_md.pages)
|
||||
assign("page_count", new_md.page_count)
|
||||
|
||||
assign("characters", new_md.characters)
|
||||
assign("teams", new_md.teams)
|
||||
assign("locations", new_md.locations)
|
||||
assign("comments", new_md.comments)
|
||||
assign("notes", new_md.notes)
|
||||
self.overlay_credits(new_md.credits)
|
||||
|
||||
assign("price", new_md.price)
|
||||
assign("isVersionOf", new_md.isVersionOf)
|
||||
assign("is_version_of", new_md.is_version_of)
|
||||
assign("rights", new_md.rights)
|
||||
assign("identifier", new_md.identifier)
|
||||
assign("lastMark", new_md.lastMark)
|
||||
assign("last_mark", new_md.last_mark)
|
||||
assign("_cover_image", new_md._cover_image)
|
||||
assign("_alternate_images", new_md._alternate_images)
|
||||
|
||||
self.overlayCredits(new_md.credits)
|
||||
# TODO
|
||||
|
||||
# not sure if the tags and pages should broken down, or treated
|
||||
# as whole lists....
|
||||
|
||||
# For now, go the easy route, where any overlay
|
||||
# value wipes out the whole list
|
||||
if len(new_md.tags) > 0:
|
||||
assign("tags", new_md.tags)
|
||||
|
||||
if len(new_md.pages) > 0:
|
||||
assign("pages", new_md.pages)
|
||||
|
||||
def overlayCredits(self, new_credits):
|
||||
def overlay_credits(self, new_credits: list[Credit]) -> None:
|
||||
if isinstance(new_credits, str) and len(new_credits) == 0:
|
||||
self.credits = []
|
||||
for c in new_credits:
|
||||
if 'primary' in c and c['primary']:
|
||||
primary = True
|
||||
else:
|
||||
primary = False
|
||||
primary = bool("primary" in c and c["primary"])
|
||||
|
||||
# Remove credit role if person is blank
|
||||
if c['person'] == "":
|
||||
if c["person"] == "":
|
||||
for r in reversed(self.credits):
|
||||
if r['role'].lower() == c['role'].lower():
|
||||
if r["role"].casefold() == c["role"].casefold():
|
||||
self.credits.remove(r)
|
||||
# otherwise, add it!
|
||||
else:
|
||||
self.addCredit(c['person'], c['role'], primary)
|
||||
self.add_credit(c["person"], c["role"], primary)
|
||||
|
||||
def setDefaultPageList(self, count):
|
||||
def apply_default_page_list(self, page_list: Sequence[str]) -> None:
|
||||
# generate a default page list, with the first page marked as the cover
|
||||
for i in range(count):
|
||||
page_dict = dict()
|
||||
page_dict['Image'] = str(i)
|
||||
if i == 0:
|
||||
page_dict['Type'] = PageType.FrontCover
|
||||
self.pages.append(page_dict)
|
||||
|
||||
def getArchivePageIndex(self, pagenum):
|
||||
# convert the displayed page number to the page index of the file in
|
||||
# the archive
|
||||
# Create a dictionary of all pages in the metadata
|
||||
pages = {p["image_index"]: p for p in self.pages}
|
||||
cover_set = False
|
||||
# Go through each page in the archive
|
||||
# The indexes should always match up
|
||||
# It might be a good idea to validate that each page in `pages` is found
|
||||
for i, filename in enumerate(page_list):
|
||||
if i not in pages:
|
||||
pages[i] = ImageMetadata(image_index=i, filename=filename)
|
||||
else:
|
||||
pages[i]["filename"] = filename
|
||||
|
||||
# Check if we know what the cover is
|
||||
cover_set = pages[i].get("type", None) == PageType.FrontCover or cover_set
|
||||
|
||||
self.pages = [p[1] for p in sorted(pages.items())]
|
||||
|
||||
# Set the cover to the first image if we don't know what the cover is
|
||||
if not cover_set:
|
||||
self.pages[0]["type"] = PageType.FrontCover
|
||||
|
||||
def get_archive_page_index(self, pagenum: int) -> int:
|
||||
# convert the displayed page number to the page index of the file in the archive
|
||||
if pagenum < len(self.pages):
|
||||
return int(self.pages[pagenum]['Image'])
|
||||
else:
|
||||
return 0
|
||||
return int(self.pages[pagenum]["image_index"])
|
||||
|
||||
def getCoverPageIndexList(self):
|
||||
return 0
|
||||
|
||||
def get_cover_page_index_list(self) -> list[int]:
|
||||
# return a list of archive page indices of cover pages
|
||||
coverlist = []
|
||||
for p in self.pages:
|
||||
if 'Type' in p and p['Type'] == PageType.FrontCover:
|
||||
coverlist.append(int(p['Image']))
|
||||
if "type" in p and p["type"] == PageType.FrontCover:
|
||||
coverlist.append(int(p["image_index"]))
|
||||
|
||||
if len(coverlist) == 0:
|
||||
coverlist.append(0)
|
||||
|
||||
return coverlist
|
||||
|
||||
def addCredit(self, person, role, primary=False):
|
||||
|
||||
credit = dict()
|
||||
credit['person'] = person
|
||||
credit['role'] = role
|
||||
if primary:
|
||||
credit['primary'] = primary
|
||||
def add_credit(self, person: str, role: str, primary: bool = False) -> None:
|
||||
credit = Credit(person=person, role=role, primary=primary)
|
||||
|
||||
# look to see if it's not already there...
|
||||
found = False
|
||||
for c in self.credits:
|
||||
if (c['person'].lower() == person.lower() and
|
||||
c['role'].lower() == role.lower()):
|
||||
if c["person"].casefold() == person.casefold() and c["role"].casefold() == role.casefold():
|
||||
# no need to add it. just adjust the "primary" flag as needed
|
||||
c['primary'] = primary
|
||||
c["primary"] = primary
|
||||
found = True
|
||||
break
|
||||
|
||||
if not found:
|
||||
self.credits.append(credit)
|
||||
|
||||
def __str__(self):
|
||||
vals = []
|
||||
if self.isEmpty:
|
||||
def get_primary_credit(self, role: str) -> str:
|
||||
primary = ""
|
||||
for credit in self.credits:
|
||||
if "role" not in credit or "person" not in credit:
|
||||
continue
|
||||
if (primary == "" and credit["role"].casefold() == role.casefold()) or (
|
||||
credit["role"].casefold() == role.casefold() and "primary" in credit and credit["primary"]
|
||||
):
|
||||
primary = credit["person"]
|
||||
return primary
|
||||
|
||||
def __str__(self) -> str:
|
||||
vals: list[tuple[str, Any]] = []
|
||||
if self.is_empty:
|
||||
return "No metadata"
|
||||
|
||||
def add_string(tag, val):
|
||||
if val is not None and "{0}".format(val) != "":
|
||||
def add_string(tag: str, val: Any) -> None:
|
||||
if isinstance(val, Sequence):
|
||||
if val:
|
||||
vals.append((tag, val))
|
||||
elif val is not None:
|
||||
vals.append((tag, val))
|
||||
|
||||
def add_attr_string(tag):
|
||||
val = getattr(self, tag)
|
||||
def add_attr_string(tag: str) -> None:
|
||||
add_string(tag, getattr(self, tag))
|
||||
|
||||
add_attr_string("series")
|
||||
add_attr_string("issue")
|
||||
add_attr_string("issueCount")
|
||||
add_attr_string("issue_count")
|
||||
add_attr_string("title")
|
||||
add_attr_string("publisher")
|
||||
add_attr_string("year")
|
||||
add_attr_string("month")
|
||||
add_attr_string("day")
|
||||
add_attr_string("volume")
|
||||
add_attr_string("volumeCount")
|
||||
add_attr_string("genre")
|
||||
add_attr_string("volume_count")
|
||||
add_string("genres", ", ".join(self.genres))
|
||||
add_attr_string("language")
|
||||
add_attr_string("country")
|
||||
add_attr_string("criticalRating")
|
||||
add_attr_string("alternateSeries")
|
||||
add_attr_string("alternateNumber")
|
||||
add_attr_string("alternateCount")
|
||||
add_attr_string("critical_rating")
|
||||
add_attr_string("alternate_series")
|
||||
add_attr_string("alternate_number")
|
||||
add_attr_string("alternate_count")
|
||||
add_attr_string("imprint")
|
||||
add_attr_string("webLink")
|
||||
add_attr_string("web_link")
|
||||
add_attr_string("format")
|
||||
add_attr_string("manga")
|
||||
|
||||
add_attr_string("price")
|
||||
add_attr_string("isVersionOf")
|
||||
add_attr_string("is_version_of")
|
||||
add_attr_string("rights")
|
||||
add_attr_string("identifier")
|
||||
add_attr_string("lastMark")
|
||||
add_attr_string("last_mark")
|
||||
|
||||
if self.blackAndWhite:
|
||||
add_attr_string("blackAndWhite")
|
||||
add_attr_string("maturityRating")
|
||||
add_attr_string("storyArc")
|
||||
add_attr_string("seriesGroup")
|
||||
add_attr_string("scanInfo")
|
||||
add_attr_string("characters")
|
||||
add_attr_string("teams")
|
||||
add_attr_string("locations")
|
||||
add_attr_string("comments")
|
||||
if self.black_and_white:
|
||||
add_attr_string("black_and_white")
|
||||
add_attr_string("maturity_rating")
|
||||
add_attr_string("story_arcs")
|
||||
add_attr_string("series_groups")
|
||||
add_attr_string("scan_info")
|
||||
add_string("characters", ", ".join(self.characters))
|
||||
add_string("teams", ", ".join(self.teams))
|
||||
add_string("locations", ", ".join(self.locations))
|
||||
add_attr_string("description")
|
||||
add_attr_string("notes")
|
||||
|
||||
add_string("tags", utils.listToString(self.tags))
|
||||
add_string("tags", ", ".join(self.tags))
|
||||
|
||||
for c in self.credits:
|
||||
primary = ""
|
||||
if 'primary' in c and c['primary']:
|
||||
if "primary" in c and c["primary"]:
|
||||
primary = " [P]"
|
||||
add_string("credit", c['role'] + ": " + c['person'] + primary)
|
||||
add_string("credit", c["role"] + ": " + c["person"] + primary)
|
||||
|
||||
# find the longest field name
|
||||
flen = 0
|
||||
@@ -319,3 +438,116 @@ class GenericMetadata:
|
||||
outstr += fmt_str.format(i[0] + ":", i[1])
|
||||
|
||||
return outstr
|
||||
|
||||
def fix_publisher(self) -> None:
|
||||
if self.publisher is None:
|
||||
return
|
||||
if self.imprint is None:
|
||||
self.imprint = ""
|
||||
|
||||
imprint, publisher = utils.get_publisher(self.publisher)
|
||||
|
||||
self.publisher = publisher
|
||||
|
||||
if self.imprint.casefold() in publisher.casefold():
|
||||
self.imprint = None
|
||||
|
||||
if self.imprint is None or self.imprint == "":
|
||||
self.imprint = imprint
|
||||
elif self.imprint.casefold() in imprint.casefold():
|
||||
self.imprint = imprint
|
||||
|
||||
|
||||
md_test: GenericMetadata = GenericMetadata(
|
||||
is_empty=False,
|
||||
tag_origin=TagOrigin("comicvine", "Comic Vine"),
|
||||
series="Cory Doctorow's Futuristic Tales of the Here and Now",
|
||||
series_id="23437",
|
||||
issue="1",
|
||||
issue_id="140529",
|
||||
title="Anda's Game",
|
||||
publisher="IDW Publishing",
|
||||
month=10,
|
||||
year=2007,
|
||||
day=1,
|
||||
issue_count=6,
|
||||
volume=1,
|
||||
genres={"Sci-Fi"},
|
||||
language="en",
|
||||
description=(
|
||||
"For 12-year-old Anda, getting paid real money to kill the characters of players who were cheating"
|
||||
" in her favorite online computer game was a win-win situation. Until she found out who was paying her,"
|
||||
" and what those characters meant to the livelihood of children around the world."
|
||||
),
|
||||
volume_count=None,
|
||||
critical_rating=3.0,
|
||||
country=None,
|
||||
alternate_series="Tales",
|
||||
alternate_number="2",
|
||||
alternate_count=7,
|
||||
imprint="craphound.com",
|
||||
notes="Tagged with ComicTagger 1.3.2a5 using info from Comic Vine on 2022-04-16 15:52:26. [Issue ID 140529]",
|
||||
web_link="https://comicvine.gamespot.com/cory-doctorows-futuristic-tales-of-the-here-and-no/4000-140529/",
|
||||
format="Series",
|
||||
manga="No",
|
||||
black_and_white=None,
|
||||
page_count=24,
|
||||
maturity_rating="Everyone 10+",
|
||||
story_arcs=["Here and Now"],
|
||||
series_groups=["Futuristic Tales"],
|
||||
scan_info="(CC BY-NC-SA 3.0)",
|
||||
characters={"Anda"},
|
||||
teams={"Fahrenheit"},
|
||||
locations=set(utils.split("lonely cottage ", ",")),
|
||||
credits=[
|
||||
Credit(primary=False, person="Dara Naraghi", role="Writer"),
|
||||
Credit(primary=False, person="Esteve Polls", role="Penciller"),
|
||||
Credit(primary=False, person="Esteve Polls", role="Inker"),
|
||||
Credit(primary=False, person="Neil Uyetake", role="Letterer"),
|
||||
Credit(primary=False, person="Sam Kieth", role="Cover"),
|
||||
Credit(primary=False, person="Ted Adams", role="Editor"),
|
||||
],
|
||||
tags=set(),
|
||||
pages=[
|
||||
ImageMetadata(
|
||||
image_index=0, height="1280", size="195977", width="800", type=PageType.FrontCover, filename="!cover.jpg"
|
||||
),
|
||||
ImageMetadata(image_index=1, height="2039", size="611993", width="1327", filename="01.jpg"),
|
||||
ImageMetadata(image_index=2, height="2039", size="783726", width="1327", filename="02.jpg"),
|
||||
ImageMetadata(image_index=3, height="2039", size="679584", width="1327", filename="03.jpg"),
|
||||
ImageMetadata(image_index=4, height="2039", size="788179", width="1327", filename="04.jpg"),
|
||||
ImageMetadata(image_index=5, height="2039", size="864433", width="1327", filename="05.jpg"),
|
||||
ImageMetadata(image_index=6, height="2039", size="765606", width="1327", filename="06.jpg"),
|
||||
ImageMetadata(image_index=7, height="2039", size="876427", width="1327", filename="07.jpg"),
|
||||
ImageMetadata(image_index=8, height="2039", size="852622", width="1327", filename="08.jpg"),
|
||||
ImageMetadata(image_index=9, height="2039", size="800205", width="1327", filename="09.jpg"),
|
||||
ImageMetadata(image_index=10, height="2039", size="746243", width="1326", filename="10.jpg"),
|
||||
ImageMetadata(image_index=11, height="2039", size="718062", width="1327", filename="11.jpg"),
|
||||
ImageMetadata(image_index=12, height="2039", size="532179", width="1326", filename="12.jpg"),
|
||||
ImageMetadata(image_index=13, height="2039", size="686708", width="1327", filename="13.jpg"),
|
||||
ImageMetadata(image_index=14, height="2039", size="641907", width="1327", filename="14.jpg"),
|
||||
ImageMetadata(image_index=15, height="2039", size="805388", width="1327", filename="15.jpg"),
|
||||
ImageMetadata(image_index=16, height="2039", size="668927", width="1326", filename="16.jpg"),
|
||||
ImageMetadata(image_index=17, height="2039", size="710605", width="1327", filename="17.jpg"),
|
||||
ImageMetadata(image_index=18, height="2039", size="761398", width="1326", filename="18.jpg"),
|
||||
ImageMetadata(image_index=19, height="2039", size="743807", width="1327", filename="19.jpg"),
|
||||
ImageMetadata(image_index=20, height="2039", size="552911", width="1326", filename="20.jpg"),
|
||||
ImageMetadata(image_index=21, height="2039", size="556827", width="1327", filename="21.jpg"),
|
||||
ImageMetadata(image_index=22, height="2039", size="675078", width="1326", filename="22.jpg"),
|
||||
ImageMetadata(
|
||||
bookmark="Interview",
|
||||
image_index=23,
|
||||
height="2032",
|
||||
size="800965",
|
||||
width="1338",
|
||||
type=PageType.Letters,
|
||||
filename="23.jpg",
|
||||
),
|
||||
],
|
||||
price=None,
|
||||
is_version_of=None,
|
||||
rights=None,
|
||||
identifier=None,
|
||||
last_mark=None,
|
||||
_cover_image=None,
|
||||
)
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
# coding=utf-8
|
||||
"""Support for mixed digit/string type Issue field
|
||||
|
||||
Class for handling the odd permutations of an 'issue number' that the
|
||||
@@ -6,59 +5,63 @@ comics industry throws at us.
|
||||
e.g.: "12", "12.1", "0", "-1", "5AU", "100-2"
|
||||
"""
|
||||
|
||||
# Copyright 2012-2014 Anthony Beville
|
||||
|
||||
# Copyright 2012-2014 ComicTagger Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
#import utils
|
||||
#import math
|
||||
#import re
|
||||
import logging
|
||||
import unicodedata
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class IssueString:
|
||||
|
||||
def __init__(self, text):
|
||||
|
||||
def __init__(self, text: str | None) -> None:
|
||||
# break up the issue number string into 2 parts: the numeric and suffix string.
|
||||
# (assumes that the numeric portion is always first)
|
||||
|
||||
self.num = None
|
||||
self.suffix = ""
|
||||
self.prefix = ""
|
||||
|
||||
if text is None:
|
||||
return
|
||||
|
||||
if isinstance(text, int):
|
||||
text = str(text)
|
||||
text = str(text)
|
||||
|
||||
if len(text) == 0:
|
||||
return
|
||||
|
||||
text = str(text)
|
||||
for idx, r in enumerate(text):
|
||||
if not r.isalpha():
|
||||
break
|
||||
self.prefix = text[:idx]
|
||||
self.num, self.suffix = self.get_number(text[idx:])
|
||||
|
||||
def get_number(self, text: str) -> tuple[float | None, str]:
|
||||
num, suffix = None, ""
|
||||
start = 0
|
||||
# skip the minus sign if it's first
|
||||
if text[0] == '-':
|
||||
if text[0] in ("-", "+"):
|
||||
start = 1
|
||||
else:
|
||||
start = 0
|
||||
|
||||
# if it's still not numeric at start skip it
|
||||
if text[start].isdigit() or text[start] == ".":
|
||||
# walk through the string, look for split point (the first
|
||||
# non-numeric)
|
||||
# walk through the string, look for split point (the first non-numeric)
|
||||
decimal_count = 0
|
||||
for idx in range(start, len(text)):
|
||||
if text[idx] not in "0123456789.":
|
||||
if not (text[idx].isdigit() or text[idx] in "."):
|
||||
break
|
||||
# special case: also split on second "."
|
||||
if text[idx] == ".":
|
||||
@@ -73,61 +76,55 @@ class IssueString:
|
||||
if text[idx - 1] == "." and len(text) != idx:
|
||||
idx = idx - 1
|
||||
|
||||
# if there is no numeric after the minus, make the minus part of
|
||||
# the suffix
|
||||
# if there is no numeric after the minus, make the minus part of the suffix
|
||||
if idx == 1 and start == 1:
|
||||
idx = 0
|
||||
|
||||
part1 = text[0:idx]
|
||||
part2 = text[idx:len(text)]
|
||||
|
||||
if part1 != "":
|
||||
self.num = float(part1)
|
||||
self.suffix = part2
|
||||
if text[0:idx]:
|
||||
num = float(text[0:idx])
|
||||
suffix = text[idx : len(text)]
|
||||
else:
|
||||
self.suffix = text
|
||||
suffix = text
|
||||
return num, suffix
|
||||
|
||||
# print "num: {0} suf: {1}".format(self.num, self.suffix)
|
||||
def as_string(self, pad: int = 0) -> str:
|
||||
"""return the number, left side zero-padded, with suffix attached"""
|
||||
|
||||
def asString(self, pad=0):
|
||||
# return the float, left side zero-padded, with suffix attached
|
||||
# if there is no number return the text
|
||||
if self.num is None:
|
||||
return self.suffix
|
||||
return self.prefix + self.suffix
|
||||
|
||||
# negative is added back in last
|
||||
negative = self.num < 0
|
||||
|
||||
num_f = abs(self.num)
|
||||
|
||||
# used for padding
|
||||
num_int = int(num_f)
|
||||
num_s = str(num_int)
|
||||
if float(num_int) != num_f:
|
||||
num_s = str(num_f)
|
||||
|
||||
num_s += self.suffix
|
||||
if num_f.is_integer():
|
||||
num_s = str(num_int)
|
||||
else:
|
||||
num_s = str(num_f)
|
||||
|
||||
# create padding
|
||||
padding = ""
|
||||
l = len(str(num_int))
|
||||
if l < pad:
|
||||
padding = "0" * (pad - l)
|
||||
# we only pad the whole number part, we don't care about the decimal
|
||||
length = len(str(num_int))
|
||||
if length < pad:
|
||||
padding = "0" * (pad - length)
|
||||
|
||||
# add the padding to the front
|
||||
num_s = padding + num_s
|
||||
|
||||
# finally add the negative back in
|
||||
if negative:
|
||||
num_s = "-" + num_s
|
||||
|
||||
return num_s
|
||||
# return the prefix + formatted number + suffix
|
||||
return self.prefix + num_s + self.suffix
|
||||
|
||||
def asFloat(self):
|
||||
def as_float(self) -> float | None:
|
||||
# return the float, with no suffix
|
||||
if self.suffix == "½":
|
||||
if self.num is not None:
|
||||
return self.num + .5
|
||||
else:
|
||||
return .5
|
||||
if len(self.suffix) == 1 and self.suffix.isnumeric():
|
||||
return (self.num or 0) + unicodedata.numeric(self.suffix)
|
||||
return self.num
|
||||
|
||||
def asInt(self):
|
||||
# return the int version of the float
|
||||
if self.num is None:
|
||||
return None
|
||||
return int(self.num)
|
||||
|
||||
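A short usage sketch (not part of the diff) of the refactored IssueString API, assuming the module path comicapi.issuestring:

# Hypothetical example of the renamed methods shown above.
from comicapi.issuestring import IssueString

print(IssueString("-12.5AU").as_string(pad=3))  # "-012.5AU": whole part zero-padded, suffix kept
print(IssueString("5AU").as_float())            # 5.0: suffix dropped
print(IssueString("½").as_float())              # 0.5 via unicodedata.numeric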
comicapi/metadata/__init__.py (5 lines, new file)
@@ -0,0 +1,5 @@
from __future__ import annotations

from comicapi.metadata.metadata import Metadata

__all__ = ["Metadata"]
comicapi/metadata/comet.py (315 lines, new file)
@@ -0,0 +1,315 @@
|
||||
"""A class to encapsulate CoMet data"""
|
||||
|
||||
#
|
||||
# Copyright 2012-2014 ComicTagger Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import os
|
||||
import xml.etree.ElementTree as ET
|
||||
from typing import Any
|
||||
|
||||
from comicapi import utils
|
||||
from comicapi.archivers import Archiver
|
||||
from comicapi.comicarchive import ComicArchive
|
||||
from comicapi.genericmetadata import GenericMetadata, ImageMetadata, PageType
|
||||
from comicapi.metadata import Metadata
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class CoMet(Metadata):
|
||||
enabled = True
|
||||
|
||||
short_name = "comet"
|
||||
|
||||
def __init__(self, version: str) -> None:
|
||||
super().__init__(version)
|
||||
|
||||
self.comet_filename = "CoMet.xml"
|
||||
self.file = "CoMet.xml"
|
||||
self.supported_attributes = {
|
||||
"series",
|
||||
"issue",
|
||||
"title",
|
||||
"volume",
|
||||
"genres",
|
||||
"description",
|
||||
"publisher",
|
||||
"language",
|
||||
"format",
|
||||
"maturity_rating",
|
||||
"month",
|
||||
"year",
|
||||
"page_count",
|
||||
"characters",
|
||||
"credits",
|
||||
"credits.person",
|
||||
"credits.primary",
|
||||
"credits.role",
|
||||
"price",
|
||||
"is_version_of",
|
||||
"rights",
|
||||
"identifier",
|
||||
"last_mark",
|
||||
"pages.type", # This is required for setting the cover image none of the other types will be saved
|
||||
"pages",
|
||||
}
|
||||
|
||||
def supports_credit_role(self, role: str) -> bool:
|
||||
return role.casefold() in self._get_parseable_credits()
|
||||
|
||||
def supports_metadata(self, archive: Archiver) -> bool:
|
||||
return archive.supports_files()
|
||||
|
||||
def has_metadata(self, archive: Archiver) -> bool:
|
||||
if not self.supports_metadata(archive):
|
||||
return False
|
||||
has_metadata = False
|
||||
# look at all xml files in root, and search for CoMet data, get first
|
||||
for n in archive.get_filename_list():
|
||||
if os.path.dirname(n) == "" and os.path.splitext(n)[1].casefold() == ".xml":
|
||||
# read in XML file, and validate it
|
||||
data = b""
|
||||
try:
|
||||
data = archive.read_file(n)
|
||||
except Exception as e:
|
||||
logger.warning("Error reading in Comet XML for validation! from %s: %s", archive.path, e)
|
||||
if self._validate_bytes(data):
|
||||
# since we found it, save it!
|
||||
self.file = n
|
||||
has_metadata = True
|
||||
break
|
||||
return has_metadata
|
||||
|
||||
def remove_metadata(self, archive: Archiver) -> bool:
|
||||
return self.has_metadata(archive) and archive.remove_file(self.file)
|
||||
|
||||
def get_metadata(self, archive: Archiver) -> GenericMetadata:
|
||||
if self.has_metadata(archive):
|
||||
metadata = archive.read_file(self.file) or b""
|
||||
if self._validate_bytes(metadata):
|
||||
return self._metadata_from_bytes(metadata, archive)
|
||||
return GenericMetadata()
|
||||
|
||||
def get_metadata_string(self, archive: Archiver) -> str:
|
||||
if self.has_metadata(archive):
|
||||
return ET.tostring(ET.fromstring(archive.read_file(self.file)), encoding="unicode", xml_declaration=True)
|
||||
return ""
|
||||
|
||||
def set_metadata(self, metadata: GenericMetadata, archive: Archiver) -> bool:
|
||||
if self.supports_metadata(archive):
|
||||
success = True
|
||||
xml = b""
|
||||
if self.has_metadata(archive):
|
||||
xml = archive.read_file(self.file)
|
||||
if self.file != self.comet_filename:
|
||||
success = self.remove_metadata(archive)
|
||||
|
||||
return success and archive.write_file(self.comet_filename, self._bytes_from_metadata(metadata, xml))
|
||||
else:
|
||||
logger.warning(f"Archive ({archive.name()}) does not support {self.name()} metadata")
|
||||
return False
|
||||
|
||||
def name(self) -> str:
|
||||
return "Comic Metadata (CoMet)"
|
||||
|
||||
@classmethod
|
||||
def _get_parseable_credits(cls) -> list[str]:
|
||||
parsable_credits: list[str] = []
|
||||
parsable_credits.extend(GenericMetadata.writer_synonyms)
|
||||
parsable_credits.extend(GenericMetadata.penciller_synonyms)
|
||||
parsable_credits.extend(GenericMetadata.inker_synonyms)
|
||||
parsable_credits.extend(GenericMetadata.colorist_synonyms)
|
||||
parsable_credits.extend(GenericMetadata.letterer_synonyms)
|
||||
parsable_credits.extend(GenericMetadata.cover_synonyms)
|
||||
parsable_credits.extend(GenericMetadata.editor_synonyms)
|
||||
return parsable_credits
|
||||
|
||||
def _metadata_from_bytes(self, string: bytes, archive: Archiver) -> GenericMetadata:
|
||||
tree = ET.ElementTree(ET.fromstring(string))
|
||||
return self._convert_xml_to_metadata(tree, archive)
|
||||
|
||||
def _bytes_from_metadata(self, metadata: GenericMetadata, xml: bytes = b"") -> bytes:
|
||||
tree = self._convert_metadata_to_xml(metadata, xml)
|
||||
return ET.tostring(tree.getroot(), encoding="utf-8", xml_declaration=True)
|
||||
|
||||
def _convert_metadata_to_xml(self, metadata: GenericMetadata, xml: bytes = b"") -> ET.ElementTree:
|
||||
# shorthand for the metadata
|
||||
md = metadata
|
||||
|
||||
if xml:
|
||||
root = ET.fromstring(xml)
|
||||
else:
|
||||
# build a tree structure
|
||||
root = ET.Element("comet")
|
||||
root.attrib["xmlns:comet"] = "http://www.denvog.com/comet/"
|
||||
root.attrib["xmlns:xsi"] = "http://www.w3.org/2001/XMLSchema-instance"
|
||||
root.attrib["xsi:schemaLocation"] = "http://www.denvog.com http://www.denvog.com/comet/comet.xsd"
|
||||
|
||||
# helper func
|
||||
def assign(comet_entry: str, md_entry: Any) -> None:
|
||||
if md_entry is not None:
|
||||
ET.SubElement(root, comet_entry).text = str(md_entry)
|
||||
|
||||
# title is mandatory
|
||||
assign("title", md.title or "")
|
||||
assign("series", md.series)
|
||||
assign("issue", md.issue) # must be int??
|
||||
assign("volume", md.volume)
|
||||
assign("description", md.description)
|
||||
assign("publisher", md.publisher)
|
||||
assign("pages", md.page_count)
|
||||
assign("format", md.format)
|
||||
assign("language", md.language)
|
||||
assign("rating", md.maturity_rating)
|
||||
assign("price", md.price)
|
||||
assign("isVersionOf", md.is_version_of)
|
||||
assign("rights", md.rights)
|
||||
assign("identifier", md.identifier)
|
||||
assign("lastMark", md.last_mark)
|
||||
assign("genre", ",".join(md.genres)) # TODO repeatable
|
||||
|
||||
for c in md.characters:
|
||||
assign("character", c.strip())
|
||||
|
||||
if md.manga is not None and md.manga == "YesAndRightToLeft":
|
||||
assign("readingDirection", "rtl")
|
||||
|
||||
if md.year is not None:
|
||||
date_str = f"{md.year:04}"
|
||||
if md.month is not None:
|
||||
date_str += f"-{md.month:02}"
|
||||
assign("date", date_str)
|
||||
|
||||
page = md.get_cover_page_index_list()[0]
|
||||
assign("coverImage", md.pages[page]["filename"])
|
||||
|
||||
# loop thru credits, and build a list for each role that CoMet supports
|
||||
for credit in metadata.credits:
|
||||
if credit["role"].casefold() in set(GenericMetadata.writer_synonyms):
|
||||
ET.SubElement(root, "writer").text = str(credit["person"])
|
||||
|
||||
if credit["role"].casefold() in set(GenericMetadata.penciller_synonyms):
|
||||
ET.SubElement(root, "penciller").text = str(credit["person"])
|
||||
|
||||
if credit["role"].casefold() in set(GenericMetadata.inker_synonyms):
|
||||
ET.SubElement(root, "inker").text = str(credit["person"])
|
||||
|
||||
if credit["role"].casefold() in set(GenericMetadata.colorist_synonyms):
|
||||
ET.SubElement(root, "colorist").text = str(credit["person"])
|
||||
|
||||
if credit["role"].casefold() in set(GenericMetadata.letterer_synonyms):
|
||||
ET.SubElement(root, "letterer").text = str(credit["person"])
|
||||
|
||||
if credit["role"].casefold() in set(GenericMetadata.cover_synonyms):
|
||||
ET.SubElement(root, "coverDesigner").text = str(credit["person"])
|
||||
|
||||
if credit["role"].casefold() in set(GenericMetadata.editor_synonyms):
|
||||
ET.SubElement(root, "editor").text = str(credit["person"])
|
||||
|
||||
ET.indent(root)
|
||||
|
||||
# wrap it in an ElementTree instance, and save as XML
|
||||
tree = ET.ElementTree(root)
|
||||
return tree
|
||||
|
||||
def _convert_xml_to_metadata(self, tree: ET.ElementTree, archive: Archiver) -> GenericMetadata:
|
||||
root = tree.getroot()
|
||||
|
||||
if root.tag != "comet":
|
||||
raise Exception("Not a CoMet file")
|
||||
|
||||
metadata = GenericMetadata()
|
||||
md = metadata
|
||||
|
||||
# Helper function
|
||||
def get(tag: str) -> Any:
|
||||
node = root.find(tag)
|
||||
if node is not None:
|
||||
return node.text
|
||||
return None
|
||||
|
||||
md.series = utils.xlate(get("series"))
|
||||
md.title = utils.xlate(get("title"))
|
||||
md.issue = utils.xlate(get("issue"))
|
||||
md.volume = utils.xlate_int(get("volume"))
|
||||
md.description = utils.xlate(get("description"))
|
||||
md.publisher = utils.xlate(get("publisher"))
|
||||
md.language = utils.xlate(get("language"))
|
||||
md.format = utils.xlate(get("format"))
|
||||
md.page_count = utils.xlate_int(get("pages"))
|
||||
md.maturity_rating = utils.xlate(get("rating"))
|
||||
md.price = utils.xlate_float(get("price"))
|
||||
md.is_version_of = utils.xlate(get("isVersionOf"))
|
||||
md.rights = utils.xlate(get("rights"))
|
||||
md.identifier = utils.xlate(get("identifier"))
|
||||
md.last_mark = utils.xlate(get("lastMark"))
|
||||
|
||||
_, md.month, md.year = utils.parse_date_str(utils.xlate(get("date")))
|
||||
|
||||
ca = ComicArchive(archive)
|
||||
cover_filename = utils.xlate(get("coverImage"))
|
||||
page_list = ca.get_page_name_list()
|
||||
if cover_filename in page_list:
|
||||
cover_index = page_list.index(cover_filename)
|
||||
md.pages = [ImageMetadata(image_index=cover_index, filename=cover_filename, type=PageType.FrontCover)]
|
||||
|
||||
reading_direction = utils.xlate(get("readingDirection"))
|
||||
if reading_direction is not None and reading_direction == "rtl":
|
||||
md.manga = "YesAndRightToLeft"
|
||||
|
||||
# loop for genre tags
|
||||
for n in root:
|
||||
if n.tag == "genre":
|
||||
md.genres.add((n.text or "").strip())
|
||||
|
||||
# loop for character tags
|
||||
for n in root:
|
||||
if n.tag == "character":
|
||||
md.characters.add((n.text or "").strip())
|
||||
|
||||
# Now extract the credit info
|
||||
for n in root:
|
||||
if any(
|
||||
[
|
||||
n.tag == "writer",
|
||||
n.tag == "penciller",
|
||||
n.tag == "inker",
|
||||
n.tag == "colorist",
|
||||
n.tag == "letterer",
|
||||
n.tag == "editor",
|
||||
]
|
||||
):
|
||||
metadata.add_credit((n.text or "").strip(), n.tag.title())
|
||||
|
||||
if n.tag == "coverDesigner":
|
||||
metadata.add_credit((n.text or "").strip(), "Cover")
|
||||
|
||||
metadata.is_empty = False
|
||||
|
||||
return metadata
|
||||
|
||||
# verify that the string actually contains CoMet data in XML format
|
||||
def _validate_bytes(self, string: bytes) -> bool:
|
||||
try:
|
||||
tree = ET.ElementTree(ET.fromstring(string))
|
||||
root = tree.getroot()
|
||||
if root.tag != "comet":
|
||||
return False
|
||||
except ET.ParseError:
|
||||
return False
|
||||
|
||||
return True
|
||||
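A brief usage sketch (not part of the diff) of the CoMet style above; `archive` stands for any Archiver instance (for example the one backing a ComicArchive) and its construction is outside this file.

# Hypothetical example: read, tweak and rewrite CoMet.xml in an archive.
from comicapi.metadata.comet import CoMet

comet = CoMet(version="1.0")          # the version string is only informational here
if comet.has_metadata(archive):       # `archive` is an Archiver provided elsewhere
    md = comet.get_metadata(archive)
    md.title = md.title or "Untitled"
    comet.set_metadata(md, archive)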
comicapi/metadata/comicbookinfo.py (223 lines, new file)
@@ -0,0 +1,223 @@
|
||||
"""A class to encapsulate the ComicBookInfo data"""
|
||||
|
||||
# Copyright 2012-2014 ComicTagger Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from typing import Any, Literal, TypedDict
|
||||
|
||||
from comicapi import utils
|
||||
from comicapi.archivers import Archiver
|
||||
from comicapi.genericmetadata import Credit, GenericMetadata
|
||||
from comicapi.metadata import Metadata
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
_CBILiteralType = Literal[
|
||||
"series",
|
||||
"title",
|
||||
"issue",
|
||||
"publisher",
|
||||
"publicationMonth",
|
||||
"publicationYear",
|
||||
"numberOfIssues",
|
||||
"comments",
|
||||
"genre",
|
||||
"volume",
|
||||
"numberOfVolumes",
|
||||
"language",
|
||||
"country",
|
||||
"rating",
|
||||
"credits",
|
||||
"tags",
|
||||
]
|
||||
|
||||
|
||||
class _ComicBookInfoJson(TypedDict, total=False):
|
||||
series: str
|
||||
title: str
|
||||
publisher: str
|
||||
publicationMonth: int
|
||||
publicationYear: int
|
||||
issue: int
|
||||
numberOfIssues: int
|
||||
volume: int
|
||||
numberOfVolumes: int
|
||||
rating: int
|
||||
genre: str
|
||||
language: str
|
||||
country: str
|
||||
credits: list[Credit]
|
||||
tags: list[str]
|
||||
comments: str
|
||||
|
||||
|
||||
_CBIContainer = TypedDict("_CBIContainer", {"appID": str, "lastModified": str, "ComicBookInfo/1.0": _ComicBookInfoJson})
|
||||
|
||||
|
||||
class ComicBookInfo(Metadata):
|
||||
enabled = True
|
||||
|
||||
short_name = "cbi"
|
||||
|
||||
def __init__(self, version: str) -> None:
|
||||
super().__init__(version)
|
||||
|
||||
self.supported_attributes = {
|
||||
"series",
|
||||
"issue",
|
||||
"issue_count",
|
||||
"title",
|
||||
"volume",
|
||||
"volume_count",
|
||||
"genres",
|
||||
"description",
|
||||
"publisher",
|
||||
"month",
|
||||
"year",
|
||||
"language",
|
||||
"country",
|
||||
"critical_rating",
|
||||
"tags",
|
||||
"credits",
|
||||
"credits.person",
|
||||
"credits.primary",
|
||||
"credits.role",
|
||||
}
|
||||
|
||||
def supports_credit_role(self, role: str) -> bool:
|
||||
return True
|
||||
|
||||
def supports_metadata(self, archive: Archiver) -> bool:
|
||||
return archive.supports_comment()
|
||||
|
||||
def has_metadata(self, archive: Archiver) -> bool:
|
||||
return self.supports_metadata(archive) and self._validate_string(archive.get_comment())
|
||||
|
||||
def remove_metadata(self, archive: Archiver) -> bool:
|
||||
return archive.set_comment("")
|
||||
|
||||
def get_metadata(self, archive: Archiver) -> GenericMetadata:
|
||||
if self.has_metadata(archive):
|
||||
comment = archive.get_comment()
|
||||
if self._validate_string(comment):
|
||||
return self._metadata_from_string(comment)
|
||||
return GenericMetadata()
|
||||
|
||||
def get_metadata_string(self, archive: Archiver) -> str:
|
||||
if self.has_metadata(archive):
|
||||
return json.dumps(json.loads(archive.get_comment()), indent=2)
|
||||
return ""
|
||||
|
||||
def set_metadata(self, metadata: GenericMetadata, archive: Archiver) -> bool:
|
||||
if self.supports_metadata(archive):
|
||||
return archive.set_comment(self._string_from_metadata(metadata))
|
||||
else:
|
||||
logger.warning(f"Archive ({archive.name()}) does not support {self.name()} metadata")
|
||||
return False
|
||||
|
||||
def name(self) -> str:
|
||||
return "ComicBookInfo"
|
||||
|
||||
def _metadata_from_string(self, string: str) -> GenericMetadata:
|
||||
cbi_container: _CBIContainer = json.loads(string)
|
||||
|
||||
metadata = GenericMetadata()
|
||||
|
||||
cbi = cbi_container["ComicBookInfo/1.0"]
|
||||
|
||||
metadata.series = utils.xlate(cbi.get("series"))
|
||||
metadata.title = utils.xlate(cbi.get("title"))
|
||||
metadata.issue = utils.xlate(cbi.get("issue"))
|
||||
metadata.publisher = utils.xlate(cbi.get("publisher"))
|
||||
metadata.month = utils.xlate_int(cbi.get("publicationMonth"))
|
||||
metadata.year = utils.xlate_int(cbi.get("publicationYear"))
|
||||
metadata.issue_count = utils.xlate_int(cbi.get("numberOfIssues"))
|
||||
metadata.description = utils.xlate(cbi.get("comments"))
|
||||
metadata.genres = set(utils.split(cbi.get("genre"), ","))
|
||||
metadata.volume = utils.xlate_int(cbi.get("volume"))
|
||||
metadata.volume_count = utils.xlate_int(cbi.get("numberOfVolumes"))
|
||||
metadata.language = utils.xlate(cbi.get("language"))
|
||||
metadata.country = utils.xlate(cbi.get("country"))
|
||||
metadata.critical_rating = utils.xlate_int(cbi.get("rating"))
|
||||
|
||||
metadata.credits = [
|
||||
Credit(
|
||||
person=x["person"] if "person" in x else "",
|
||||
role=x["role"] if "role" in x else "",
|
||||
primary=x["primary"] if "primary" in x else False,
|
||||
)
|
||||
for x in cbi.get("credits", [])
|
||||
]
|
||||
metadata.tags.update(cbi.get("tags", set()))
|
||||
|
||||
# need the language string to be ISO
|
||||
if metadata.language:
|
||||
metadata.language = utils.get_language_iso(metadata.language)
|
||||
|
||||
metadata.is_empty = False
|
||||
|
||||
return metadata
|
||||
|
||||
def _string_from_metadata(self, metadata: GenericMetadata) -> str:
|
||||
cbi_container = self._create_json_dictionary(metadata)
|
||||
return json.dumps(cbi_container)
|
||||
|
||||
def _validate_string(self, string: bytes | str) -> bool:
|
||||
"""Verify that the string actually contains CBI data in JSON format"""
|
||||
|
||||
try:
|
||||
cbi_container = json.loads(string)
|
||||
except json.JSONDecodeError:
|
||||
return False
|
||||
|
||||
return "ComicBookInfo/1.0" in cbi_container
|
||||
|
||||
def _create_json_dictionary(self, metadata: GenericMetadata) -> _CBIContainer:
|
||||
"""Create the dictionary that we will convert to JSON text"""
|
||||
|
||||
cbi_container = _CBIContainer(
|
||||
{
|
||||
"appID": "ComicTagger/1.0.0",
|
||||
"lastModified": str(datetime.now()),
|
||||
"ComicBookInfo/1.0": {},
|
||||
}
|
||||
) # TODO: ctversion.version,
|
||||
|
||||
# helper func
|
||||
def assign(cbi_entry: _CBILiteralType, md_entry: Any) -> None:
|
||||
if md_entry is not None or isinstance(md_entry, str) and md_entry != "":
|
||||
cbi_container["ComicBookInfo/1.0"][cbi_entry] = md_entry
|
||||
|
||||
assign("series", utils.xlate(metadata.series))
|
||||
assign("title", utils.xlate(metadata.title))
|
||||
assign("issue", utils.xlate(metadata.issue))
|
||||
assign("publisher", utils.xlate(metadata.publisher))
|
||||
assign("publicationMonth", utils.xlate_int(metadata.month))
|
||||
assign("publicationYear", utils.xlate_int(metadata.year))
|
||||
assign("numberOfIssues", utils.xlate_int(metadata.issue_count))
|
||||
assign("comments", utils.xlate(metadata.description))
|
||||
assign("genre", utils.xlate(",".join(metadata.genres)))
|
||||
assign("volume", utils.xlate_int(metadata.volume))
|
||||
assign("numberOfVolumes", utils.xlate_int(metadata.volume_count))
|
||||
assign("language", utils.xlate(utils.get_language_from_iso(metadata.language)))
|
||||
assign("country", utils.xlate(metadata.country))
|
||||
assign("rating", utils.xlate_int(metadata.critical_rating))
|
||||
assign("credits", metadata.credits)
|
||||
assign("tags", list(metadata.tags))
|
||||
|
||||
return cbi_container
|
||||
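A small sketch (not part of the diff) of the JSON shape ComicBookInfo expects in the archive comment; the internal helpers are called directly here purely for illustration.

# Hypothetical example: validate and parse a CBI comment blob.
import json

from comicapi.metadata.comicbookinfo import ComicBookInfo

cbi = ComicBookInfo(version="1.0")
comment = json.dumps(
    {
        "appID": "ComicTagger/1.0.0",
        "lastModified": "2022-04-16 15:52:26",
        "ComicBookInfo/1.0": {"series": "Example Series", "issue": 1, "publicationYear": 2007},
    }
)
assert cbi._validate_string(comment)
md = cbi._metadata_from_string(comment)
print(md.series, md.issue, md.year)  # Example Series 1 2007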
comicapi/metadata/comicrack.py (389 lines, new file)
@@ -0,0 +1,389 @@
|
||||
"""A class to encapsulate ComicRack's ComicInfo.xml data"""
|
||||
|
||||
# Copyright 2012-2014 ComicTagger Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import xml.etree.ElementTree as ET
|
||||
from collections import OrderedDict
|
||||
from typing import Any
|
||||
|
||||
from comicapi import utils
|
||||
from comicapi.archivers import Archiver
|
||||
from comicapi.genericmetadata import GenericMetadata, ImageMetadata
|
||||
from comicapi.metadata import Metadata
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ComicRack(Metadata):
|
||||
enabled = True
|
||||
|
||||
short_name = "cr"
|
||||
|
||||
def __init__(self, version: str) -> None:
|
||||
super().__init__(version)
|
||||
|
||||
self.file = "ComicInfo.xml"
|
||||
self.supported_attributes = {
|
||||
"series",
|
||||
"issue",
|
||||
"issue_count",
|
||||
"title",
|
||||
"volume",
|
||||
"genres",
|
||||
"description",
|
||||
"notes",
|
||||
"alternate_series",
|
||||
"alternate_number",
|
||||
"alternate_count",
|
||||
"story_arcs",
|
||||
"series_groups",
|
||||
"publisher",
|
||||
"imprint",
|
||||
"day",
|
||||
"month",
|
||||
"year",
|
||||
"language",
|
||||
"web_link",
|
||||
"format",
|
||||
"manga",
|
||||
"black_and_white",
|
||||
"maturity_rating",
|
||||
"critical_rating",
|
||||
"scan_info",
|
||||
"pages",
|
||||
"pages.bookmark",
|
||||
"pages.double_page",
|
||||
"pages.height",
|
||||
"pages.image_index",
|
||||
"pages.size",
|
||||
"pages.type",
|
||||
"pages.width",
|
||||
"page_count",
|
||||
"characters",
|
||||
"teams",
|
||||
"locations",
|
||||
"credits",
|
||||
"credits.person",
|
||||
"credits.role",
|
||||
}
|
||||
|
||||
def supports_credit_role(self, role: str) -> bool:
|
||||
return role.casefold() in self._get_parseable_credits()
|
||||
|
||||
def supports_metadata(self, archive: Archiver) -> bool:
|
||||
return archive.supports_files()
|
||||
|
||||
def has_metadata(self, archive: Archiver) -> bool:
|
||||
return (
|
||||
self.supports_metadata(archive)
|
||||
and self.file in archive.get_filename_list()
|
||||
and self._validate_bytes(archive.read_file(self.file))
|
||||
)
|
||||
|
||||
def remove_metadata(self, archive: Archiver) -> bool:
|
||||
return self.has_metadata(archive) and archive.remove_file(self.file)
|
||||
|
||||
def get_metadata(self, archive: Archiver) -> GenericMetadata:
|
||||
if self.has_metadata(archive):
|
||||
metadata = archive.read_file(self.file) or b""
|
||||
if self._validate_bytes(metadata):
|
||||
return self._metadata_from_bytes(metadata)
|
||||
return GenericMetadata()
|
||||
|
||||
def get_metadata_string(self, archive: Archiver) -> str:
|
||||
if self.has_metadata(archive):
|
||||
return ET.tostring(ET.fromstring(archive.read_file(self.file)), encoding="unicode", xml_declaration=True)
|
||||
return ""
|
||||
|
||||
def set_metadata(self, metadata: GenericMetadata, archive: Archiver) -> bool:
|
||||
if self.supports_metadata(archive):
|
||||
xml = b""
|
||||
if self.has_metadata(archive):
|
||||
xml = archive.read_file(self.file)
|
||||
return archive.write_file(self.file, self._bytes_from_metadata(metadata, xml))
|
||||
else:
|
||||
logger.warning(f"Archive ({archive.name()}) does not support {self.name()} metadata")
|
||||
return False
|
||||
|
||||
def name(self) -> str:
|
||||
return "Comic Rack"
|
||||
|
||||
@classmethod
|
||||
def _get_parseable_credits(cls) -> list[str]:
|
||||
parsable_credits: list[str] = []
|
||||
parsable_credits.extend(GenericMetadata.writer_synonyms)
|
||||
parsable_credits.extend(GenericMetadata.penciller_synonyms)
|
||||
parsable_credits.extend(GenericMetadata.inker_synonyms)
|
||||
parsable_credits.extend(GenericMetadata.colorist_synonyms)
|
||||
parsable_credits.extend(GenericMetadata.letterer_synonyms)
|
||||
parsable_credits.extend(GenericMetadata.cover_synonyms)
|
||||
parsable_credits.extend(GenericMetadata.editor_synonyms)
|
||||
return parsable_credits
|
||||
|
||||
def _metadata_from_bytes(self, string: bytes) -> GenericMetadata:
|
||||
root = ET.fromstring(string)
|
||||
return self._convert_xml_to_metadata(root)
|
||||
|
||||
def _bytes_from_metadata(self, metadata: GenericMetadata, xml: bytes = b"") -> bytes:
|
||||
root = self._convert_metadata_to_xml(metadata, xml)
|
||||
return ET.tostring(root, encoding="utf-8", xml_declaration=True)
|
||||
|
||||
def _convert_metadata_to_xml(self, metadata: GenericMetadata, xml: bytes = b"") -> ET.Element:
|
||||
# shorthand for the metadata
|
||||
md = metadata
|
||||
|
||||
if xml:
|
||||
root = ET.fromstring(xml)
|
||||
else:
|
||||
# build a tree structure
|
||||
root = ET.Element("ComicInfo")
|
||||
root.attrib["xmlns:xsi"] = "http://www.w3.org/2001/XMLSchema-instance"
|
||||
root.attrib["xmlns:xsd"] = "http://www.w3.org/2001/XMLSchema"
|
||||
# helper func
|
||||
|
||||
def assign(cr_entry: str, md_entry: Any) -> None:
|
||||
if md_entry:
|
||||
text = ""
|
||||
if isinstance(md_entry, str):
|
||||
text = md_entry
|
||||
elif isinstance(md_entry, (list, set)):
|
||||
text = ",".join(md_entry)
|
||||
else:
|
||||
text = str(md_entry)
|
||||
et_entry = root.find(cr_entry)
|
||||
if et_entry is not None:
|
||||
et_entry.text = text
|
||||
else:
|
||||
ET.SubElement(root, cr_entry).text = text
|
||||
else:
|
||||
et_entry = root.find(cr_entry)
|
||||
if et_entry is not None:
|
||||
root.remove(et_entry)
|
||||
|
||||
# need to specially process the credits, since they are structured
|
||||
# differently than CIX
|
||||
credit_writer_list = []
|
||||
credit_penciller_list = []
|
||||
credit_inker_list = []
|
||||
credit_colorist_list = []
|
||||
credit_letterer_list = []
|
||||
credit_cover_list = []
|
||||
credit_editor_list = []
|
||||
|
||||
# first, loop thru credits, and build a list for each role that CIX
|
||||
# supports
|
||||
for credit in metadata.credits:
|
||||
if credit["role"].casefold() in set(GenericMetadata.writer_synonyms):
|
||||
credit_writer_list.append(credit["person"].replace(",", ""))
|
||||
|
||||
if credit["role"].casefold() in set(GenericMetadata.penciller_synonyms):
|
||||
credit_penciller_list.append(credit["person"].replace(",", ""))
|
||||
|
||||
if credit["role"].casefold() in set(GenericMetadata.inker_synonyms):
|
||||
credit_inker_list.append(credit["person"].replace(",", ""))
|
||||
|
||||
if credit["role"].casefold() in set(GenericMetadata.colorist_synonyms):
|
||||
credit_colorist_list.append(credit["person"].replace(",", ""))
|
||||
|
||||
if credit["role"].casefold() in set(GenericMetadata.letterer_synonyms):
|
||||
credit_letterer_list.append(credit["person"].replace(",", ""))
|
||||
|
||||
if credit["role"].casefold() in set(GenericMetadata.cover_synonyms):
|
||||
credit_cover_list.append(credit["person"].replace(",", ""))
|
||||
|
||||
if credit["role"].casefold() in set(GenericMetadata.editor_synonyms):
|
||||
credit_editor_list.append(credit["person"].replace(",", ""))
|
||||
|
||||
assign("Series", md.series)
|
||||
assign("Number", md.issue)
|
||||
assign("Count", md.issue_count)
|
||||
assign("Title", md.title)
|
||||
assign("Volume", md.volume)
|
||||
assign("Genre", md.genres)
|
||||
assign("Summary", md.description)
|
||||
assign("Notes", md.notes)
|
||||
|
||||
assign("AlternateSeries", md.alternate_series)
|
||||
assign("AlternateNumber", md.alternate_number)
|
||||
assign("AlternateCount", md.alternate_count)
|
||||
assign("StoryArc", md.story_arcs)
|
||||
assign("SeriesGroup", md.series_groups)
|
||||
|
||||
assign("Publisher", md.publisher)
|
||||
assign("Imprint", md.imprint)
|
||||
assign("Day", md.day)
|
||||
assign("Month", md.month)
|
||||
assign("Year", md.year)
|
||||
assign("LanguageISO", md.language)
|
||||
assign("Web", md.web_link)
|
||||
assign("Format", md.format)
|
||||
assign("Manga", md.manga)
|
||||
assign("BlackAndWhite", "Yes" if md.black_and_white else None)
|
||||
assign("AgeRating", md.maturity_rating)
|
||||
assign("CommunityRating", md.critical_rating)
|
||||
assign("ScanInformation", md.scan_info)
|
||||
|
||||
assign("PageCount", md.page_count)
|
||||
|
||||
assign("Characters", md.characters)
|
||||
assign("Teams", md.teams)
|
||||
assign("Locations", md.locations)
|
||||
assign("Writer", ", ".join(credit_writer_list))
|
||||
assign("Penciller", ", ".join(credit_penciller_list))
|
||||
assign("Inker", ", ".join(credit_inker_list))
|
||||
assign("Colorist", ", ".join(credit_colorist_list))
|
||||
assign("Letterer", ", ".join(credit_letterer_list))
|
||||
assign("CoverArtist", ", ".join(credit_cover_list))
|
||||
assign("Editor", ", ".join(credit_editor_list))
|
||||
|
||||
# loop and add the page entries under pages node
|
||||
pages_node = root.find("Pages")
|
||||
if pages_node is not None:
|
||||
pages_node.clear()
|
||||
else:
|
||||
pages_node = ET.SubElement(root, "Pages")
|
||||
|
||||
for page_dict in md.pages:
|
||||
page_node = ET.SubElement(pages_node, "Page")
|
||||
page_node.attrib = {}
|
||||
if "bookmark" in page_dict:
|
||||
page_node.attrib["Bookmark"] = str(page_dict["bookmark"])
|
||||
if "double_page" in page_dict:
|
||||
page_node.attrib["DoublePage"] = str(page_dict["double_page"])
|
||||
if "image_index" in page_dict:
|
||||
page_node.attrib["Image"] = str(page_dict["image_index"])
|
||||
if "height" in page_dict:
|
||||
page_node.attrib["ImageHeight"] = str(page_dict["height"])
|
||||
if "size" in page_dict:
|
||||
page_node.attrib["ImageSize"] = str(page_dict["size"])
|
||||
if "width" in page_dict:
|
||||
page_node.attrib["ImageWidth"] = str(page_dict["width"])
|
||||
if "type" in page_dict:
|
||||
page_node.attrib["Type"] = str(page_dict["type"])
|
||||
page_node.attrib = OrderedDict(sorted(page_node.attrib.items()))
|
||||
|
||||
ET.indent(root)
|
||||
|
||||
return root
|
||||
|
||||
def _convert_xml_to_metadata(self, root: ET.Element) -> GenericMetadata:
|
||||
if root.tag != "ComicInfo":
|
||||
raise Exception("Not a ComicInfo file")
|
||||
|
||||
def get(name: str) -> str | None:
|
||||
tag = root.find(name)
|
||||
if tag is None:
|
||||
return None
|
||||
return tag.text
|
||||
|
||||
md = GenericMetadata()
|
||||
|
||||
md.series = utils.xlate(get("Series"))
|
||||
md.issue = utils.xlate(get("Number"))
|
||||
md.issue_count = utils.xlate_int(get("Count"))
|
||||
md.title = utils.xlate(get("Title"))
|
||||
md.volume = utils.xlate_int(get("Volume"))
|
||||
md.genres = set(utils.split(get("Genre"), ","))
|
||||
md.description = utils.xlate(get("Summary"))
|
||||
md.notes = utils.xlate(get("Notes"))
|
||||
|
||||
md.alternate_series = utils.xlate(get("AlternateSeries"))
|
||||
md.alternate_number = utils.xlate(get("AlternateNumber"))
|
||||
md.alternate_count = utils.xlate_int(get("AlternateCount"))
|
||||
md.story_arcs = utils.split(get("StoryArc"), ",")
|
||||
md.series_groups = utils.split(get("SeriesGroup"), ",")
|
||||
|
||||
md.publisher = utils.xlate(get("Publisher"))
|
||||
md.imprint = utils.xlate(get("Imprint"))
|
||||
md.day = utils.xlate_int(get("Day"))
|
||||
md.month = utils.xlate_int(get("Month"))
|
||||
md.year = utils.xlate_int(get("Year"))
|
||||
md.language = utils.xlate(get("LanguageISO"))
|
||||
md.web_link = utils.xlate(get("Web"))
|
||||
md.format = utils.xlate(get("Format"))
|
||||
md.manga = utils.xlate(get("Manga"))
|
||||
md.maturity_rating = utils.xlate(get("AgeRating"))
|
||||
md.critical_rating = utils.xlate_float(get("CommunityRating"))
|
||||
md.scan_info = utils.xlate(get("ScanInformation"))
|
||||
|
||||
md.page_count = utils.xlate_int(get("PageCount"))
|
||||
|
||||
md.characters = set(utils.split(get("Characters"), ","))
|
||||
md.teams = set(utils.split(get("Teams"), ","))
|
||||
md.locations = set(utils.split(get("Locations"), ","))
|
||||
|
||||
tmp = utils.xlate(get("BlackAndWhite"))
|
||||
if tmp is not None:
|
||||
md.black_and_white = tmp.casefold() in ["yes", "true", "1"]
|
||||
|
||||
# Now extract the credit info
|
||||
for n in root:
|
||||
if any(
|
||||
[
|
||||
n.tag == "Writer",
|
||||
n.tag == "Penciller",
|
||||
n.tag == "Inker",
|
||||
n.tag == "Colorist",
|
||||
n.tag == "Letterer",
|
||||
n.tag == "Editor",
|
||||
]
|
||||
):
|
||||
if n.text is not None:
|
||||
for name in utils.split(n.text, ","):
|
||||
md.add_credit(name.strip(), n.tag)
|
||||
|
||||
if n.tag == "CoverArtist":
|
||||
if n.text is not None:
|
||||
for name in utils.split(n.text, ","):
|
||||
md.add_credit(name.strip(), "Cover")
|
||||
|
||||
# parse page data now
|
||||
pages_node = root.find("Pages")
|
||||
if pages_node is not None:
|
||||
for i, page in enumerate(pages_node):
|
||||
p: dict[str, Any] = page.attrib
|
||||
md_page = ImageMetadata(image_index=int(p.get("Image", i)))
|
||||
|
||||
if "Bookmark" in p:
|
||||
md_page["bookmark"] = p["Bookmark"]
|
||||
if "DoublePage" in p:
|
||||
md_page["double_page"] = True if p["DoublePage"].casefold() in ("yes", "true", "1") else False
|
||||
if "ImageHeight" in p:
|
||||
md_page["height"] = p["ImageHeight"]
|
||||
if "ImageSize" in p:
|
||||
md_page["size"] = p["ImageSize"]
|
||||
if "ImageWidth" in p:
|
||||
md_page["width"] = p["ImageWidth"]
|
||||
if "Type" in p:
|
||||
md_page["type"] = p["Type"]
|
||||
|
||||
md.pages.append(md_page)
|
||||
|
||||
md.is_empty = False
|
||||
|
||||
return md
|
||||
|
||||
def _validate_bytes(self, string: bytes) -> bool:
|
||||
"""verify that the string actually contains CIX data in XML format"""
|
||||
try:
|
||||
root = ET.fromstring(string)
|
||||
if root.tag != "ComicInfo":
|
||||
return False
|
||||
except ET.ParseError:
|
||||
return False
|
||||
|
||||
return True
|
||||
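A round-trip sketch (not part of the diff) of the ComicRack style above, using the internal byte converters shown in this file for illustration only.

# Hypothetical example: round-trip a GenericMetadata object through ComicInfo.xml.
from comicapi.genericmetadata import GenericMetadata
from comicapi.metadata.comicrack import ComicRack

cr = ComicRack(version="1.0")
md = GenericMetadata()
md.series, md.issue, md.title = "Example Series", "1", "Example Title"
md.add_credit("Jane Doe", "Writer")

xml_bytes = cr._bytes_from_metadata(md)   # ComicInfo.xml as UTF-8 bytes
parsed = cr._metadata_from_bytes(xml_bytes)
assert parsed.series == "Example Series"
assert parsed.credits[0]["person"] == "Jane Doe"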
comicapi/metadata/metadata.py (123 lines, new file)
@@ -0,0 +1,123 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from comicapi.archivers import Archiver
|
||||
from comicapi.genericmetadata import GenericMetadata
|
||||
|
||||
|
||||
class Metadata:
|
||||
enabled: bool = False
|
||||
short_name: str = ""
|
||||
|
||||
def __init__(self, version: str) -> None:
|
||||
self.version: str = version
|
||||
self.supported_attributes = {
|
||||
"tag_origin",
|
||||
"issue_id",
|
||||
"series_id",
|
||||
"series",
|
||||
"series_aliases",
|
||||
"issue",
|
||||
"issue_count",
|
||||
"title",
|
||||
"title_aliases",
|
||||
"volume",
|
||||
"volume_count",
|
||||
"genres",
|
||||
"description",
|
||||
"notes",
|
||||
"alternate_series",
|
||||
"alternate_number",
|
||||
"alternate_count",
|
||||
"story_arcs",
|
||||
"series_groups",
|
||||
"publisher",
|
||||
"imprint",
|
||||
"day",
|
||||
"month",
|
||||
"year",
|
||||
"language",
|
||||
"country",
|
||||
"web_link",
|
||||
"format",
|
||||
"manga",
|
||||
"black_and_white",
|
||||
"maturity_rating",
|
||||
"critical_rating",
|
||||
"scan_info",
|
||||
"tags",
|
||||
"pages",
|
||||
"pages.type",
|
||||
"pages.bookmark",
|
||||
"pages.double_page",
|
||||
"pages.image_index",
|
||||
"pages.size",
|
||||
"pages.height",
|
||||
"pages.width",
|
||||
"page_count",
|
||||
"characters",
|
||||
"teams",
|
||||
"locations",
|
||||
"credits",
|
||||
"credits.person",
|
||||
"credits.role",
|
||||
"credits.primary",
|
||||
"price",
|
||||
"is_version_of",
|
||||
"rights",
|
||||
"identifier",
|
||||
"last_mark",
|
||||
}
|
||||
|
||||
def supports_credit_role(self, role: str) -> bool:
|
||||
return False
|
||||
|
||||
def supports_metadata(self, archive: Archiver) -> bool:
|
||||
"""
|
||||
Checks the given archive for the ability to save this metadata style.
|
||||
Should always return a bool. Failures should return False.
|
||||
Typically consists of a call to either `archive.supports_comment` or `archive.supports_file`
|
||||
"""
|
||||
return False
|
||||
|
||||
def has_metadata(self, archive: Archiver) -> bool:
|
||||
"""
|
||||
Checks the given archive for metadata.
|
||||
Should always return a bool. Failures should return False.
|
||||
"""
|
||||
return False
|
||||
|
||||
def remove_metadata(self, archive: Archiver) -> bool:
|
||||
"""
|
||||
Removes the metadata from the given archive.
|
||||
Should always return a bool. Failures should return False.
|
||||
"""
|
||||
return False
|
||||
|
||||
def get_metadata(self, archive: Archiver) -> GenericMetadata:
|
||||
"""
|
||||
Returns a GenericMetadata representing the data saved in the given archive.
|
||||
Should always return a GenericMetadata. Failures should return an empty metadata object.
|
||||
"""
|
||||
return GenericMetadata()
|
||||
|
||||
def get_metadata_string(self, archive: Archiver) -> str:
|
||||
"""
|
||||
Returns the raw metadata as a string.
|
||||
If the metadata is a binary format a roughly similar text format should be used.
|
||||
Should always return a string. Failures should return the empty string.
|
||||
"""
|
||||
return ""
|
||||
|
||||
def set_metadata(self, metadata: GenericMetadata, archive: Archiver) -> bool:
|
||||
"""
|
||||
Saves the given metadata to the given archive.
|
||||
Should always return a bool. Failures should return False.
|
||||
"""
|
||||
return False
|
||||
|
||||
def name(self) -> str:
|
||||
"""
|
||||
Returns the name of this metadata for display purposes eg "Comic Rack".
|
||||
Should always return a string. Failures should return the empty string.
|
||||
"""
|
||||
return ""
|
||||
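The base class above defines the hooks a metadata style implements. The following hypothetical minimal subclass (not part of the diff) sketches that shape, using only Archiver calls that appear elsewhere in this changeset (supports_files, get_filename_list, read_file).

# Hypothetical sketch of a new metadata style built on the Metadata base class.
from comicapi.archivers import Archiver
from comicapi.genericmetadata import GenericMetadata
from comicapi.metadata import Metadata


class PlainText(Metadata):
    enabled = True
    short_name = "txt"  # hypothetical style name

    def __init__(self, version: str) -> None:
        super().__init__(version)
        self.file = "metadata.txt"
        self.supported_attributes = {"series", "issue", "title"}

    def supports_metadata(self, archive: Archiver) -> bool:
        return archive.supports_files()

    def has_metadata(self, archive: Archiver) -> bool:
        return self.supports_metadata(archive) and self.file in archive.get_filename_list()

    def get_metadata(self, archive: Archiver) -> GenericMetadata:
        md = GenericMetadata()
        if self.has_metadata(archive):
            # the whole text file is treated as the title, just to illustrate the hook
            md.title = (archive.read_file(self.file) or b"").decode("utf-8").strip()
            md.is_empty = False
        return md

    def name(self) -> str:
        return "Plain Text (example)"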
(File diff suppressed because it is too large)
@@ -1,5 +0,0 @@
#!/usr/bin/env python3
from comictaggerlib.main import ctmain

if __name__ == '__main__':
    ctmain()
@@ -1,50 +0,0 @@
# -*- mode: python -*-

import platform

block_cipher = None

binaries = [
    ('./unrar/libunrar.so', './'),
]

if platform.system() == "Windows":
    # add ssl qt libraries not discovered automatically
    binaries.extend([
        ('./venv/Lib/site-packages/PyQt5/Qt/bin/libeay32.dll', './PyQt5/Qt/bin'),
        ('./venv/Lib/site-packages/PyQt5/Qt/bin/ssleay32.dll', './PyQt5/Qt/bin')
    ])

a = Analysis(['comictagger.py'],
             binaries=binaries,
             datas=[('comictaggerlib/ui/*.ui', 'ui'), ('comictaggerlib/graphics', 'graphics')],
             hiddenimports=['PIL'],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher)
pyz = PYZ(a.pure, a.zipped_data,
          cipher=block_cipher)
exe = EXE(pyz,
          a.scripts,
          a.binaries,
          a.zipfiles,
          a.datas,
          # single file setup
          exclude_binaries=False,
          name='comictagger',
          debug=False,
          strip=False,
          upx=True,
          console=False,
          icon="windows/app.ico" )

app = BUNDLE(exe,
             name='ComicTagger.app',
             icon='mac/app.icns',
             info_plist={
                 'NSHighResolutionCapable': 'True'
             },
             bundle_identifier=None)
@@ -0,0 +1 @@
from __future__ import annotations
comictaggerlib/__main__.py (5 lines, new file)
@@ -0,0 +1,5 @@
from __future__ import annotations

from comictaggerlib.main import main

main()
comictaggerlib/__pyinstaller/__init__.py (11 lines, new file)
@@ -0,0 +1,11 @@
from __future__ import annotations

import os

import comicapi.__pyinstaller


def get_hook_dirs() -> list[str]:
    hooks = [os.path.dirname(__file__)]
    hooks.extend(comicapi.__pyinstaller.get_hook_dirs())
    return hooks
comictaggerlib/__pyinstaller/hook-comictaggerlib.py (8 lines, new file)
@@ -0,0 +1,8 @@
from __future__ import annotations

from PyInstaller.utils.hooks import collect_data_files, collect_entry_point, collect_submodules

datas, hiddenimports = collect_entry_point("comictagger.talker")
hiddenimports += collect_submodules("comictaggerlib")
datas += collect_data_files("comictaggerlib.ui")
datas += collect_data_files("comictaggerlib.graphics")
comictaggerlib/__pyinstaller/hook-wordninja.py (7 lines, new file)
@@ -0,0 +1,7 @@
from __future__ import annotations

import os

from PyInstaller.utils.hooks import get_module_file_attribute

datas = [(os.path.join(os.path.dirname(get_module_file_attribute("wordninja")), "wordninja"), "wordninja")]
comictaggerlib/applicationlogwindow.py (57 lines, new file)
@@ -0,0 +1,57 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import pathlib
|
||||
|
||||
from PyQt5 import QtCore, QtGui, QtWidgets, uic
|
||||
|
||||
from comictaggerlib.ui import ui_path
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class QTextEditLogger(QtCore.QObject, logging.Handler):
|
||||
qlog = QtCore.pyqtSignal(str)
|
||||
|
||||
def __init__(self, formatter: logging.Formatter, level: int) -> None:
|
||||
super().__init__()
|
||||
self.setFormatter(formatter)
|
||||
self.setLevel(level)
|
||||
|
||||
def emit(self, record: logging.LogRecord) -> None:
|
||||
msg = self.format(record)
|
||||
self.qlog.emit(msg.strip())
|
||||
|
||||
|
||||
class ApplicationLogWindow(QtWidgets.QDialog):
|
||||
def __init__(
|
||||
self, log_folder: pathlib.Path, log_handler: QTextEditLogger, parent: QtCore.QObject | None = None
|
||||
) -> None:
|
||||
super().__init__(parent)
|
||||
with (ui_path / "applicationlogwindow.ui").open(encoding="utf-8") as uifile:
|
||||
uic.loadUi(uifile, self)
|
||||
|
||||
self.log_handler = log_handler
|
||||
self.log_handler.qlog.connect(self.textEdit.append)
|
||||
|
||||
f = QtGui.QFont("menlo")
|
||||
f.setStyleHint(QtGui.QFont.Monospace)
|
||||
self.setFont(f)
|
||||
self._button = QtWidgets.QPushButton(self)
|
||||
self._button.setText("Test Me")
|
||||
|
||||
self.log_folder = log_folder
|
||||
self.lblLogLocation.setText(f'Log Location: <a href="file://{log_folder}">{log_folder}</a>')
|
||||
|
||||
layout = self.layout()
|
||||
layout.addWidget(self._button)
|
||||
|
||||
# Connect signal to slot
|
||||
self._button.clicked.connect(self.test)
|
||||
self.textEdit.setTabStopDistance(self.textEdit.tabStopDistance() * 2)
|
||||
|
||||
def test(self) -> None:
|
||||
logger.debug("damn, a bug")
|
||||
logger.info("something to remember")
|
||||
logger.warning("that's not right")
|
||||
logger.error("foobar")
|
||||
@@ -1,245 +1,261 @@
|
||||
"""A PyQT4 dialog to select from automated issue matches"""
|
||||
|
||||
# Copyright 2012-2014 Anthony Beville
|
||||
|
||||
#
|
||||
# Copyright 2012-2014 ComicTagger Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import os
|
||||
#import sys
|
||||
from typing import Callable
|
||||
|
||||
from PyQt5 import QtCore, QtGui, QtWidgets, uic
|
||||
#from PyQt5.QtCore import QUrl, pyqtSignal, QByteArray
|
||||
|
||||
from .settings import ComicTaggerSettings
|
||||
from .comicarchive import MetaDataStyle
|
||||
from .coverimagewidget import CoverImageWidget
|
||||
from comictaggerlib.ui.qtutils import reduceWidgetFontSize
|
||||
#from imagefetcher import ImageFetcher
|
||||
#from comicvinetalker import ComicVineTalker
|
||||
#import utils
|
||||
from comicapi.comicarchive import ComicArchive, metadata_styles
|
||||
from comicapi.genericmetadata import GenericMetadata
|
||||
from comictaggerlib.coverimagewidget import CoverImageWidget
|
||||
from comictaggerlib.ctsettings import ct_ns
|
||||
from comictaggerlib.resulttypes import IssueResult, Result
|
||||
from comictaggerlib.ui import ui_path
|
||||
from comictaggerlib.ui.qtutils import reduce_widget_font_size
|
||||
from comictalker.comictalker import ComicTalker
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AutoTagMatchWindow(QtWidgets.QDialog):
|
||||
def __init__(
|
||||
self,
|
||||
parent: QtWidgets.QWidget,
|
||||
match_set_list: list[Result],
|
||||
styles: list[str],
|
||||
fetch_func: Callable[[IssueResult], GenericMetadata],
|
||||
config: ct_ns,
|
||||
talker: ComicTalker,
|
||||
) -> None:
|
||||
super().__init__(parent)
|
||||
|
||||
volume_id = 0
|
||||
with (ui_path / "matchselectionwindow.ui").open(encoding="utf-8") as uifile:
|
||||
uic.loadUi(uifile, self)
|
||||
|
||||
def __init__(self, parent, match_set_list, style, fetch_func):
|
||||
super(AutoTagMatchWindow, self).__init__(parent)
|
||||
self.config = config
|
||||
|
||||
uic.loadUi(
|
||||
ComicTaggerSettings.getUIFile('matchselectionwindow.ui'), self)
|
||||
self.current_match_set: Result = match_set_list[0]
|
||||
|
||||
self.altCoverWidget = CoverImageWidget(
|
||||
self.altCoverContainer, CoverImageWidget.AltCoverMode)
|
||||
self.altCoverContainer, CoverImageWidget.AltCoverMode, config.Runtime_Options__config.user_cache_dir, talker
|
||||
)
|
||||
gridlayout = QtWidgets.QGridLayout(self.altCoverContainer)
|
||||
gridlayout.addWidget(self.altCoverWidget)
|
||||
gridlayout.setContentsMargins(0, 0, 0, 0)
|
||||
|
||||
self.archiveCoverWidget = CoverImageWidget(
|
||||
self.archiveCoverContainer, CoverImageWidget.ArchiveMode)
|
||||
self.archiveCoverWidget = CoverImageWidget(self.archiveCoverContainer, CoverImageWidget.ArchiveMode, None, None)
|
||||
gridlayout = QtWidgets.QGridLayout(self.archiveCoverContainer)
|
||||
gridlayout.addWidget(self.archiveCoverWidget)
|
||||
gridlayout.setContentsMargins(0, 0, 0, 0)
|
||||
|
||||
reduceWidgetFontSize(self.twList)
|
||||
reduceWidgetFontSize(self.teDescription, 1)
|
||||
reduce_widget_font_size(self.twList)
|
||||
reduce_widget_font_size(self.teDescription, 1)
|
||||
|
||||
self.setWindowFlags(self.windowFlags() |
|
||||
QtCore.Qt.WindowSystemMenuHint |
|
||||
QtCore.Qt.WindowMaximizeButtonHint)
|
||||
self.setWindowFlags(
|
||||
QtCore.Qt.WindowType(
|
||||
self.windowFlags()
|
||||
| QtCore.Qt.WindowType.WindowSystemMenuHint
|
||||
| QtCore.Qt.WindowType.WindowMaximizeButtonHint
|
||||
)
|
||||
)
|
||||
|
||||
self.skipButton = QtWidgets.QPushButton(self.tr("Skip to Next"))
|
||||
self.buttonBox.addButton(
|
||||
self.skipButton, QtWidgets.QDialogButtonBox.ActionRole)
|
||||
self.buttonBox.button(QtWidgets.QDialogButtonBox.Ok).setText(
|
||||
"Accept and Write Tags")
|
||||
self.skipButton = QtWidgets.QPushButton("Skip to Next")
|
||||
self.buttonBox.addButton(self.skipButton, QtWidgets.QDialogButtonBox.ButtonRole.ActionRole)
|
||||
self.buttonBox.button(QtWidgets.QDialogButtonBox.StandardButton.Ok).setText("Accept and Write Tags")
|
||||
|
||||
self.match_set_list = match_set_list
|
||||
self.style = style
|
||||
self._styles = styles
|
||||
self.fetch_func = fetch_func
|
||||
|
||||
self.current_match_set_idx = 0
|
||||
|
||||
self.twList.currentItemChanged.connect(self.currentItemChanged)
|
||||
self.twList.cellDoubleClicked.connect(self.cellDoubleClicked)
|
||||
self.skipButton.clicked.connect(self.skipToNext)
|
||||
self.twList.currentItemChanged.connect(self.current_item_changed)
|
||||
self.twList.cellDoubleClicked.connect(self.cell_double_clicked)
|
||||
self.skipButton.clicked.connect(self.skip_to_next)
|
||||
|
||||
self.updateData()
|
||||
self.update_data()
|
||||
|
||||
def updateData(self):
|
||||
|
||||
self.current_match_set = self.match_set_list[
|
||||
self.current_match_set_idx]
|
||||
def update_data(self) -> None:
|
||||
self.current_match_set = self.match_set_list[self.current_match_set_idx]
|
||||
|
||||
if self.current_match_set_idx + 1 == len(self.match_set_list):
|
||||
self.buttonBox.button(
|
||||
QtWidgets.QDialogButtonBox.Cancel).setDisabled(True)
|
||||
# self.buttonBox.button(QtWidgets.QDialogButtonBox.Ok).setText("Accept")
|
||||
self.skipButton.setText(self.tr("Skip"))
|
||||
self.buttonBox.button(QtWidgets.QDialogButtonBox.StandardButton.Cancel).setDisabled(True)
|
||||
self.skipButton.setText("Skip")
|
||||
|
||||
self.setCoverImage()
|
||||
self.populateTable()
|
||||
self.set_cover_image()
|
||||
self.populate_table()
|
||||
self.twList.resizeColumnsToContents()
|
||||
self.twList.selectRow(0)
|
||||
|
||||
path = self.current_match_set.ca.path
|
||||
path = self.current_match_set.original_path
|
||||
self.setWindowTitle(
|
||||
"Select correct match or skip ({0} of {1}): {2}".format(
|
||||
"Select correct match or skip ({} of {}): {}".format(
|
||||
self.current_match_set_idx + 1,
|
||||
len(self.match_set_list),
|
||||
os.path.split(path)[1])
|
||||
os.path.split(path)[1],
|
||||
)
|
||||
)
|
||||
|
||||
def populateTable(self):
|
||||
def populate_table(self) -> None:
|
||||
if not self.current_match_set:
|
||||
return
|
||||
|
||||
while self.twList.rowCount() > 0:
|
||||
self.twList.removeRow(0)
|
||||
self.twList.setRowCount(0)
|
||||
|
||||
self.twList.setSortingEnabled(False)
|
||||
|
||||
row = 0
|
||||
for match in self.current_match_set.matches:
|
||||
for row, match in enumerate(self.current_match_set.online_results):
|
||||
self.twList.insertRow(row)
|
||||
|
||||
item_text = match['series']
|
||||
item_text = match.series
|
||||
item = QtWidgets.QTableWidgetItem(item_text)
|
||||
item.setData(QtCore.Qt.ToolTipRole, item_text)
|
||||
item.setData(QtCore.Qt.UserRole, (match,))
|
||||
item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
|
||||
item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
|
||||
item.setData(QtCore.Qt.ItemDataRole.UserRole, (match,))
|
||||
item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
|
||||
self.twList.setItem(row, 0, item)
|
||||
|
||||
if match['publisher'] is not None:
|
||||
item_text = "{0}".format(match['publisher'])
|
||||
if match.publisher is not None:
|
||||
item_text = str(match.publisher)
|
||||
else:
|
||||
item_text = "Unknown"
|
||||
item = QtWidgets.QTableWidgetItem(item_text)
|
||||
item.setData(QtCore.Qt.ToolTipRole, item_text)
|
||||
item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
|
||||
item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
|
||||
item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
|
||||
self.twList.setItem(row, 1, item)
|
||||
|
||||
month_str = ""
|
||||
year_str = "????"
|
||||
if match['month'] is not None:
|
||||
month_str = "-{0:02d}".format(int(match['month']))
|
||||
if match['year'] is not None:
|
||||
year_str = "{0}".format(match['year'])
|
||||
if match.month is not None:
|
||||
month_str = f"-{int(match.month):02d}"
|
||||
if match.year is not None:
|
||||
year_str = str(match.year)
|
||||
|
||||
item_text = year_str + month_str
|
||||
item = QtWidgets.QTableWidgetItem(item_text)
|
||||
item.setData(QtCore.Qt.ToolTipRole, item_text)
|
||||
item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
|
||||
item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
|
||||
item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
|
||||
self.twList.setItem(row, 2, item)
|
||||
|
||||
item_text = match['issue_title']
|
||||
item_text = match.issue_title
|
||||
if item_text is None:
|
||||
item_text = ""
|
||||
item = QtWidgets.QTableWidgetItem(item_text)
|
||||
item.setData(QtCore.Qt.ToolTipRole, item_text)
|
||||
item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
|
||||
item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
|
||||
item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
|
||||
self.twList.setItem(row, 3, item)
|
||||
|
||||
row += 1
|
||||
|
||||
self.twList.resizeColumnsToContents()
|
||||
self.twList.setSortingEnabled(True)
|
||||
self.twList.sortItems(2, QtCore.Qt.AscendingOrder)
|
||||
self.twList.sortItems(2, QtCore.Qt.SortOrder.AscendingOrder)
|
||||
self.twList.selectRow(0)
|
||||
self.twList.resizeColumnsToContents()
|
||||
self.twList.horizontalHeader().setStretchLastSection(True)
|
||||
|
||||
def cellDoubleClicked(self, r, c):
|
||||
def cell_double_clicked(self, r: int, c: int) -> None:
|
||||
self.accept()
|
||||
|
||||
def currentItemChanged(self, curr, prev):
|
||||
|
||||
def current_item_changed(self, curr: QtCore.QModelIndex, prev: QtCore.QModelIndex) -> None:
|
||||
if curr is None:
|
||||
return
|
||||
return None
|
||||
if prev is not None and prev.row() == curr.row():
|
||||
return
|
||||
return None
|
||||
|
||||
self.altCoverWidget.setIssueID(self.currentMatch()['issue_id'])
|
||||
if self.currentMatch()['description'] is None:
|
||||
match = self.current_match()
|
||||
self.altCoverWidget.set_issue_details(match.issue_id, [match.image_url, *match.alt_image_urls])
|
||||
if match.description is None:
|
||||
self.teDescription.setText("")
|
||||
else:
|
||||
self.teDescription.setText(self.currentMatch()['description'])
|
||||
self.teDescription.setText(match.description)
|
||||
|
||||
def setCoverImage(self):
|
||||
ca = self.current_match_set.ca
|
||||
self.archiveCoverWidget.setArchive(ca)
|
||||
def set_cover_image(self) -> None:
|
||||
ca = ComicArchive(self.current_match_set.original_path)
|
||||
self.archiveCoverWidget.set_archive(ca)
|
||||
|
||||
def currentMatch(self):
|
||||
def current_match(self) -> IssueResult:
|
||||
row = self.twList.currentRow()
|
||||
match = self.twList.item(row, 0).data(
|
||||
QtCore.Qt.UserRole)[0]
|
||||
match: IssueResult = self.twList.item(row, 0).data(QtCore.Qt.ItemDataRole.UserRole)[0]
|
||||
return match
|
||||
|
||||
def accept(self):
|
||||
|
||||
self.saveMatch()
|
||||
def accept(self) -> None:
|
||||
self.save_match()
|
||||
self.current_match_set_idx += 1
|
||||
|
||||
if self.current_match_set_idx == len(self.match_set_list):
|
||||
# no more items
|
||||
QtWidgets.QDialog.accept(self)
|
||||
else:
|
||||
self.updateData()
|
||||
self.update_data()
|
||||
|
||||
def skipToNext(self):
|
||||
def skip_to_next(self) -> None:
|
||||
self.current_match_set_idx += 1
|
||||
|
||||
if self.current_match_set_idx == len(self.match_set_list):
|
||||
# no more items
|
||||
QtWidgets.QDialog.reject(self)
|
||||
else:
|
||||
self.updateData()
|
||||
self.update_data()
|
||||
|
||||
def reject(self):
|
||||
def reject(self) -> None:
|
||||
reply = QtWidgets.QMessageBox.question(
|
||||
self,
|
||||
self.tr("Cancel Matching"),
|
||||
self.tr("Are you sure you wish to cancel the matching process?"),
|
||||
QtWidgets.QMessageBox.Yes,
|
||||
QtWidgets.QMessageBox.No)
|
||||
"Cancel Matching",
|
||||
"Are you sure you wish to cancel the matching process?",
|
||||
QtWidgets.QMessageBox.StandardButton.Yes,
|
||||
QtWidgets.QMessageBox.StandardButton.No,
|
||||
)
|
||||
|
||||
if reply == QtWidgets.QMessageBox.No:
|
||||
if reply == QtWidgets.QMessageBox.StandardButton.No:
|
||||
return
|
||||
|
||||
QtWidgets.QDialog.reject(self)
|
||||
|
||||
def saveMatch(self):
|
||||
def save_match(self) -> None:
|
||||
match = self.current_match()
|
||||
ca = ComicArchive(self.current_match_set.original_path)
|
||||
|
||||
match = self.currentMatch()
|
||||
ca = self.current_match_set.ca
|
||||
|
||||
md = ca.readMetadata(self.style)
|
||||
if md.isEmpty:
|
||||
md = ca.metadataFromFilename()
|
||||
md = ca.read_metadata(self.config.internal__load_data_style)
|
||||
if md.is_empty:
|
||||
md = ca.metadata_from_filename(
|
||||
self.config.Filename_Parsing__complicated_parser,
|
||||
self.config.Filename_Parsing__remove_c2c,
|
||||
self.config.Filename_Parsing__remove_fcbd,
|
||||
self.config.Filename_Parsing__remove_publisher,
|
||||
)
|
||||
|
||||
# now get the particular issue data
|
||||
cv_md = self.fetch_func(match)
|
||||
if cv_md is None:
|
||||
QtWidgets.QMessageBox.critical(self, self.tr("Network Issue"), self.tr(
|
||||
"Could not connect to Comic Vine to get issue details!"))
|
||||
self.current_match_set.md = ct_md = self.fetch_func(match)
|
||||
if ct_md is None:
|
||||
QtWidgets.QMessageBox.critical(self, "Network Issue", "Could not retrieve issue details!")
|
||||
return
|
||||
|
||||
QtWidgets.QApplication.setOverrideCursor(
|
||||
QtGui.QCursor(QtCore.Qt.WaitCursor))
|
||||
md.overlay(cv_md)
|
||||
success = ca.writeMetadata(md, self.style)
|
||||
ca.loadCache([MetaDataStyle.CBI, MetaDataStyle.CIX])
|
||||
QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.CursorShape.WaitCursor))
|
||||
md.overlay(ct_md)
|
||||
for style in self._styles:
|
||||
success = ca.write_metadata(md, style)
|
||||
QtWidgets.QApplication.restoreOverrideCursor()
|
||||
if not success:
|
||||
QtWidgets.QMessageBox.warning(
|
||||
self,
|
||||
"Write Error",
|
||||
f"Saving {metadata_styles[style].name()} the tags to the archive seemed to fail!",
|
||||
)
|
||||
break
|
||||
|
||||
QtWidgets.QApplication.restoreOverrideCursor()
|
||||
|
||||
if not success:
|
||||
QtWidgets.QMessageBox.warning(self, self.tr("Write Error"), self.tr(
|
||||
"Saving the tags to the archive seemed to fail!"))
|
||||
ca.load_cache(list(metadata_styles))
|
||||
|
||||
@@ -1,69 +1,77 @@
|
||||
"""A PyQT4 dialog to show ID log and progress"""
|
||||
|
||||
# Copyright 2012-2014 Anthony Beville
|
||||
|
||||
#
|
||||
# Copyright 2012-2014 ComicTagger Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
#import sys
|
||||
#import os
|
||||
import logging
|
||||
|
||||
from PyQt5 import QtCore, QtGui, QtWidgets, uic
|
||||
from PyQt5 import QtCore, QtWidgets, uic
|
||||
|
||||
from .settings import ComicTaggerSettings
|
||||
from .coverimagewidget import CoverImageWidget
|
||||
from comictaggerlib.ui.qtutils import reduceWidgetFontSize
|
||||
#import utils
|
||||
from comictaggerlib.coverimagewidget import CoverImageWidget
|
||||
from comictaggerlib.ui import ui_path
|
||||
from comictaggerlib.ui.qtutils import reduce_widget_font_size
|
||||
from comictalker.comictalker import ComicTalker
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AutoTagProgressWindow(QtWidgets.QDialog):
|
||||
def __init__(self, parent: QtWidgets.QWidget, talker: ComicTalker) -> None:
|
||||
super().__init__(parent)
|
||||
|
||||
def __init__(self, parent):
|
||||
super(AutoTagProgressWindow, self).__init__(parent)
|
||||
with (ui_path / "autotagprogresswindow.ui").open(encoding="utf-8") as uifile:
|
||||
uic.loadUi(uifile, self)
|
||||
|
||||
uic.loadUi(
|
||||
ComicTaggerSettings.getUIFile('autotagprogresswindow.ui'), self)
|
||||
self.lblSourceName.setText(talker.attribution)
|
||||
|
||||
self.archiveCoverWidget = CoverImageWidget(
|
||||
self.archiveCoverContainer, CoverImageWidget.DataMode, False)
|
||||
self.archiveCoverContainer, CoverImageWidget.DataMode, None, None, False
|
||||
)
|
||||
gridlayout = QtWidgets.QGridLayout(self.archiveCoverContainer)
|
||||
gridlayout.addWidget(self.archiveCoverWidget)
|
||||
gridlayout.setContentsMargins(0, 0, 0, 0)
|
||||
|
||||
self.testCoverWidget = CoverImageWidget(
|
||||
self.testCoverContainer, CoverImageWidget.DataMode, False)
|
||||
self.testCoverWidget = CoverImageWidget(self.testCoverContainer, CoverImageWidget.DataMode, None, None, False)
|
||||
gridlayout = QtWidgets.QGridLayout(self.testCoverContainer)
|
||||
gridlayout.addWidget(self.testCoverWidget)
|
||||
gridlayout.setContentsMargins(0, 0, 0, 0)
|
||||
|
||||
self.isdone = False
|
||||
|
||||
self.setWindowFlags(self.windowFlags() |
|
||||
QtCore.Qt.WindowSystemMenuHint |
|
||||
QtCore.Qt.WindowMaximizeButtonHint)
|
||||
self.setWindowFlags(
|
||||
QtCore.Qt.WindowType(
|
||||
self.windowFlags()
|
||||
| QtCore.Qt.WindowType.WindowSystemMenuHint
|
||||
| QtCore.Qt.WindowType.WindowMaximizeButtonHint
|
||||
)
|
||||
)
|
||||
|
||||
reduceWidgetFontSize(self.textEdit)
|
||||
reduce_widget_font_size(self.textEdit)
|
||||
|
||||
def setArchiveImage(self, img_data):
|
||||
self.setCoverImage(img_data, self.archiveCoverWidget)
|
||||
def set_archive_image(self, img_data: bytes) -> None:
|
||||
self.set_cover_image(img_data, self.archiveCoverWidget)
|
||||
|
||||
def setTestImage(self, img_data):
|
||||
self.setCoverImage(img_data, self.testCoverWidget)
|
||||
def set_test_image(self, img_data: bytes) -> None:
|
||||
self.set_cover_image(img_data, self.testCoverWidget)
|
||||
|
||||
def setCoverImage(self, img_data, widget):
|
||||
widget.setImageData(img_data)
|
||||
def set_cover_image(self, img_data: bytes, widget: CoverImageWidget) -> None:
|
||||
widget.set_image_data(img_data)
|
||||
QtCore.QCoreApplication.processEvents()
|
||||
QtCore.QCoreApplication.processEvents()
|
||||
|
||||
def reject(self):
|
||||
def reject(self) -> None:
|
||||
QtWidgets.QDialog.reject(self)
|
||||
self.isdone = True
|
||||
|
||||
@@ -1,127 +1,104 @@
|
||||
"""A PyQT4 dialog to confirm and set options for auto-tag"""
|
||||
|
||||
# Copyright 2012-2014 Anthony Beville
|
||||
"""A PyQT4 dialog to confirm and set config for auto-tag"""
|
||||
|
||||
#
|
||||
# Copyright 2012-2014 ComicTagger Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
#import os
|
||||
import logging
|
||||
|
||||
from PyQt5 import QtCore, QtGui, QtWidgets, uic
|
||||
from PyQt5 import QtCore, QtWidgets, uic
|
||||
|
||||
from .settings import ComicTaggerSettings
|
||||
#from settingswindow import SettingsWindow
|
||||
#from filerenamer import FileRenamer
|
||||
#import utils
|
||||
from comictaggerlib.ctsettings import ct_ns
|
||||
from comictaggerlib.ui import ui_path
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AutoTagStartWindow(QtWidgets.QDialog):
|
||||
def __init__(self, parent: QtWidgets.QWidget, config: ct_ns, msg: str) -> None:
|
||||
super().__init__(parent)
|
||||
|
||||
def __init__(self, parent, settings, msg):
|
||||
super(AutoTagStartWindow, self).__init__(parent)
|
||||
|
||||
uic.loadUi(
|
||||
ComicTaggerSettings.getUIFile('autotagstartwindow.ui'), self)
|
||||
with (ui_path / "autotagstartwindow.ui").open(encoding="utf-8") as uifile:
|
||||
uic.loadUi(uifile, self)
|
||||
self.label.setText(msg)
|
||||
|
||||
self.setWindowFlags(self.windowFlags() &
|
||||
~QtCore.Qt.WindowContextHelpButtonHint)
|
||||
self.setWindowFlags(
|
||||
QtCore.Qt.WindowType(self.windowFlags() & ~QtCore.Qt.WindowType.WindowContextHelpButtonHint)
|
||||
)
|
||||
|
||||
self.settings = settings
|
||||
self.config = config
|
||||
|
||||
self.cbxSaveOnLowConfidence.setCheckState(QtCore.Qt.Unchecked)
|
||||
self.cbxDontUseYear.setCheckState(QtCore.Qt.Unchecked)
|
||||
self.cbxAssumeIssueOne.setCheckState(QtCore.Qt.Unchecked)
|
||||
self.cbxIgnoreLeadingDigitsInFilename.setCheckState(
|
||||
QtCore.Qt.Unchecked)
|
||||
self.cbxRemoveAfterSuccess.setCheckState(QtCore.Qt.Unchecked)
|
||||
self.cbxSpecifySearchString.setCheckState(QtCore.Qt.Unchecked)
|
||||
self.leNameLengthMatchTolerance.setText(
|
||||
str(self.settings.id_length_delta_thresh))
|
||||
self.cbxSpecifySearchString.setChecked(False)
|
||||
self.cbxSplitWords.setChecked(False)
|
||||
self.sbNameMatchSearchThresh.setValue(self.config.Issue_Identifier__series_match_identify_thresh)
|
||||
self.leSearchString.setEnabled(False)
|
||||
|
||||
if self.settings.save_on_low_confidence:
|
||||
self.cbxSaveOnLowConfidence.setCheckState(QtCore.Qt.Checked)
|
||||
if self.settings.dont_use_year_when_identifying:
|
||||
self.cbxDontUseYear.setCheckState(QtCore.Qt.Checked)
|
||||
if self.settings.assume_1_if_no_issue_num:
|
||||
self.cbxAssumeIssueOne.setCheckState(QtCore.Qt.Checked)
|
||||
if self.settings.ignore_leading_numbers_in_filename:
|
||||
self.cbxIgnoreLeadingDigitsInFilename.setCheckState(
|
||||
QtCore.Qt.Checked)
|
||||
if self.settings.remove_archive_after_successful_match:
|
||||
self.cbxRemoveAfterSuccess.setCheckState(QtCore.Qt.Checked)
|
||||
if self.settings.wait_and_retry_on_rate_limit:
|
||||
self.cbxWaitForRateLimit.setCheckState(QtCore.Qt.Checked)
|
||||
self.cbxSaveOnLowConfidence.setChecked(self.config.Auto_Tag__save_on_low_confidence)
|
||||
self.cbxDontUseYear.setChecked(self.config.Auto_Tag__dont_use_year_when_identifying)
|
||||
self.cbxAssumeIssueOne.setChecked(self.config.Auto_Tag__assume_issue_one)
|
||||
self.cbxIgnoreLeadingDigitsInFilename.setChecked(self.config.Auto_Tag__ignore_leading_numbers_in_filename)
|
||||
self.cbxRemoveAfterSuccess.setChecked(self.config.Auto_Tag__remove_archive_after_successful_match)
|
||||
self.cbxAutoImprint.setChecked(self.config.Issue_Identifier__auto_imprint)
|
||||
|
||||
nlmtTip = (
|
||||
""" <html>The <b>Name Length Match Tolerance</b> is for eliminating automatic
|
||||
search matches that are too long compared to your series name search. The higher
|
||||
nlmt_tip = """<html>The <b>Name Match Ratio Threshold: Auto-Identify</b> is for eliminating automatic
|
||||
search matches that are too long compared to your series name search. The lower
|
||||
it is, the more likely to have a good match, but each search will take longer and
|
||||
use more bandwidth. Too low, and only the very closest lexical matches will be
|
||||
explored.</html>""")
|
||||
use more bandwidth. Too high, and only the very closest matches will be explored.</html>"""
|
||||
|
||||
self.leNameLengthMatchTolerance.setToolTip(nlmtTip)
|
||||
self.sbNameMatchSearchThresh.setToolTip(nlmt_tip)
|
||||
|
||||
ssTip = (
|
||||
"""<html>
|
||||
ss_tip = """<html>
|
||||
The <b>series search string</b> specifies the search string to be used for all selected archives.
|
||||
Use this when trying to match archives with hard-to-parse or incorrect filenames. All archives selected
|
||||
should be from the same series.
|
||||
</html>"""
|
||||
)
|
||||
self.leSearchString.setToolTip(ssTip)
|
||||
self.cbxSpecifySearchString.setToolTip(ssTip)
|
||||
self.leSearchString.setToolTip(ss_tip)
|
||||
self.cbxSpecifySearchString.setToolTip(ss_tip)
|
||||
|
||||
validator = QtGui.QIntValidator(0, 99, self)
|
||||
self.leNameLengthMatchTolerance.setValidator(validator)
|
||||
self.cbxSpecifySearchString.stateChanged.connect(self.search_string_toggle)
|
||||
|
||||
self.cbxSpecifySearchString.stateChanged.connect(
|
||||
self.searchStringToggle)
|
||||
self.auto_save_on_low = False
|
||||
self.dont_use_year = False
|
||||
self.assume_issue_one = False
|
||||
self.ignore_leading_digits_in_filename = False
|
||||
self.remove_after_success = False
|
||||
self.search_string = ""
|
||||
self.name_length_match_tolerance = self.config.Issue_Identifier__series_match_search_thresh
|
||||
self.split_words = self.cbxSplitWords.isChecked()
|
||||
|
||||
self.autoSaveOnLow = False
|
||||
self.dontUseYear = False
|
||||
self.assumeIssueOne = False
|
||||
self.ignoreLeadingDigitsInFilename = False
|
||||
self.removeAfterSuccess = False
|
||||
self.waitAndRetryOnRateLimit = False
|
||||
self.searchString = None
|
||||
self.nameLengthMatchTolerance = self.settings.id_length_delta_thresh
|
||||
|
||||
def searchStringToggle(self):
|
||||
def search_string_toggle(self) -> None:
|
||||
enable = self.cbxSpecifySearchString.isChecked()
|
||||
self.leSearchString.setEnabled(enable)
|
||||
|
||||
def accept(self):
|
||||
def accept(self) -> None:
|
||||
QtWidgets.QDialog.accept(self)
|
||||
|
||||
self.autoSaveOnLow = self.cbxSaveOnLowConfidence.isChecked()
|
||||
self.dontUseYear = self.cbxDontUseYear.isChecked()
|
||||
self.assumeIssueOne = self.cbxAssumeIssueOne.isChecked()
|
||||
self.ignoreLeadingDigitsInFilename = self.cbxIgnoreLeadingDigitsInFilename.isChecked()
|
||||
self.removeAfterSuccess = self.cbxRemoveAfterSuccess.isChecked()
|
||||
self.nameLengthMatchTolerance = int(
|
||||
self.leNameLengthMatchTolerance.text())
|
||||
self.waitAndRetryOnRateLimit = self.cbxWaitForRateLimit.isChecked()
|
||||
self.auto_save_on_low = self.cbxSaveOnLowConfidence.isChecked()
|
||||
self.dont_use_year = self.cbxDontUseYear.isChecked()
|
||||
self.assume_issue_one = self.cbxAssumeIssueOne.isChecked()
|
||||
self.ignore_leading_digits_in_filename = self.cbxIgnoreLeadingDigitsInFilename.isChecked()
|
||||
self.remove_after_success = self.cbxRemoveAfterSuccess.isChecked()
|
||||
self.name_length_match_tolerance = self.sbNameMatchSearchThresh.value()
|
||||
self.split_words = self.cbxSplitWords.isChecked()
|
||||
|
||||
# persist some settings
|
||||
self.settings.save_on_low_confidence = self.autoSaveOnLow
|
||||
self.settings.dont_use_year_when_identifying = self.dontUseYear
|
||||
self.settings.assume_1_if_no_issue_num = self.assumeIssueOne
|
||||
self.settings.ignore_leading_numbers_in_filename = self.ignoreLeadingDigitsInFilename
|
||||
self.settings.remove_archive_after_successful_match = self.removeAfterSuccess
|
||||
self.settings.wait_and_retry_on_rate_limit = self.waitAndRetryOnRateLimit
|
||||
self.config.Auto_Tag__save_on_low_confidence = self.auto_save_on_low
|
||||
self.config.Auto_Tag__dont_use_year_when_identifying = self.dont_use_year
|
||||
self.config.Auto_Tag__assume_issue_one = self.assume_issue_one
|
||||
self.config.Auto_Tag__ignore_leading_numbers_in_filename = self.ignore_leading_digits_in_filename
|
||||
self.config.Auto_Tag__remove_archive_after_successful_match = self.remove_after_success
|
||||
|
||||
if self.cbxSpecifySearchString.isChecked():
|
||||
self.searchString = str(self.leSearchString.text())
|
||||
if len(self.searchString) == 0:
|
||||
self.searchString = None
|
||||
self.search_string = self.leSearchString.text()
|
||||
|
||||
@@ -1,97 +1,89 @@
|
||||
"""A class to manage modifying metadata specifically for CBL/CBI"""
|
||||
|
||||
# Copyright 2012-2014 Anthony Beville
|
||||
|
||||
#
|
||||
# Copyright 2012-2014 ComicTagger Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
#import os
|
||||
import logging
|
||||
|
||||
#import utils
|
||||
from comicapi.genericmetadata import Credit, GenericMetadata
|
||||
from comictaggerlib.ctsettings import ct_ns
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class CBLTransformer:
|
||||
|
||||
def __init__(self, metadata, settings):
|
||||
def __init__(self, metadata: GenericMetadata, config: ct_ns) -> None:
|
||||
self.metadata = metadata
|
||||
self.settings = settings
|
||||
|
||||
def apply(self):
|
||||
# helper funcs
|
||||
def append_to_tags_if_unique(item):
|
||||
if item.lower() not in (tag.lower() for tag in self.metadata.tags):
|
||||
self.metadata.tags.append(item)
|
||||
|
||||
def add_string_list_to_tags(str_list):
|
||||
if str_list is not None and str_list != "":
|
||||
items = [s.strip() for s in str_list.split(',')]
|
||||
for item in items:
|
||||
append_to_tags_if_unique(item)
|
||||
|
||||
if self.settings.assume_lone_credit_is_primary:
|
||||
self.config = config
|
||||
|
||||
def apply(self) -> GenericMetadata:
|
||||
if self.config.Comic_Book_Lover__assume_lone_credit_is_primary:
|
||||
# helper
|
||||
def setLonePrimary(role_list):
|
||||
lone_credit = None
|
||||
def set_lone_primary(role_list: list[str]) -> tuple[Credit | None, int]:
|
||||
lone_credit: Credit | None = None
|
||||
count = 0
|
||||
for c in self.metadata.credits:
|
||||
if c['role'].lower() in role_list:
|
||||
if c["role"].casefold() in role_list:
|
||||
count += 1
|
||||
lone_credit = c
|
||||
if count > 1:
|
||||
lone_credit = None
|
||||
break
|
||||
if lone_credit is not None:
|
||||
lone_credit['primary'] = True
|
||||
lone_credit["primary"] = True
|
||||
return lone_credit, count
|
||||
|
||||
# need to loop three times, once for 'writer', 'artist', and then
|
||||
# 'penciler' if no artist
|
||||
setLonePrimary(['writer'])
|
||||
c, count = setLonePrimary(['artist'])
|
||||
set_lone_primary(["writer"])
|
||||
c, count = set_lone_primary(["artist"])
|
||||
if c is None and count == 0:
|
||||
c, count = setLonePrimary(['penciler', 'penciller'])
|
||||
c, count = set_lone_primary(["penciler", "penciller"])
|
||||
if c is not None:
|
||||
c['primary'] = False
|
||||
self.metadata.addCredit(c['person'], 'Artist', True)
|
||||
c["primary"] = False
|
||||
self.metadata.add_credit(c["person"], "Artist", True)
|
||||
|
||||
if self.settings.copy_characters_to_tags:
|
||||
add_string_list_to_tags(self.metadata.characters)
|
||||
if self.config.Comic_Book_Lover__copy_characters_to_tags:
|
||||
self.metadata.tags.update(x for x in self.metadata.characters)
|
||||
|
||||
if self.settings.copy_teams_to_tags:
|
||||
add_string_list_to_tags(self.metadata.teams)
|
||||
if self.config.Comic_Book_Lover__copy_teams_to_tags:
|
||||
self.metadata.tags.update(x for x in self.metadata.teams)
|
||||
|
||||
if self.settings.copy_locations_to_tags:
|
||||
add_string_list_to_tags(self.metadata.locations)
|
||||
if self.config.Comic_Book_Lover__copy_locations_to_tags:
|
||||
self.metadata.tags.update(x for x in self.metadata.locations)
|
||||
|
||||
if self.settings.copy_storyarcs_to_tags:
|
||||
add_string_list_to_tags(self.metadata.storyArc)
|
||||
if self.config.Comic_Book_Lover__copy_storyarcs_to_tags:
|
||||
self.metadata.tags.update(x for x in self.metadata.story_arcs)
|
||||
|
||||
if self.settings.copy_notes_to_comments:
|
||||
if self.config.Comic_Book_Lover__copy_notes_to_comments:
|
||||
if self.metadata.notes is not None:
|
||||
if self.metadata.comments is None:
|
||||
self.metadata.comments = ""
|
||||
if self.metadata.description is None:
|
||||
self.metadata.description = ""
|
||||
else:
|
||||
self.metadata.comments += "\n\n"
|
||||
if self.metadata.notes not in self.metadata.comments:
|
||||
self.metadata.comments += self.metadata.notes
|
||||
self.metadata.description += "\n\n"
|
||||
if self.metadata.notes not in self.metadata.description:
|
||||
self.metadata.description += self.metadata.notes
|
||||
|
||||
if self.settings.copy_weblink_to_comments:
|
||||
if self.metadata.webLink is not None:
|
||||
if self.metadata.comments is None:
|
||||
self.metadata.comments = ""
|
||||
if self.config.Comic_Book_Lover__copy_weblink_to_comments:
|
||||
if self.metadata.web_link is not None:
|
||||
if self.metadata.description is None:
|
||||
self.metadata.description = ""
|
||||
else:
|
||||
self.metadata.comments += "\n\n"
|
||||
if self.metadata.webLink not in self.metadata.comments:
|
||||
self.metadata.comments += self.metadata.webLink
|
||||
self.metadata.description += "\n\n"
|
||||
if self.metadata.web_link not in self.metadata.description:
|
||||
self.metadata.description += self.metadata.web_link
|
||||
|
||||
return self.metadata
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1 +0,0 @@
|
||||
from comicapi.comet import *
|
||||
@@ -1 +0,0 @@
|
||||
from comicapi.comicarchive import *
|
||||
@@ -1 +0,0 @@
|
||||
from comicapi.comicbookinfo import *
|
||||
@@ -1 +0,0 @@
|
||||
from comicapi.comicinfoxml import *
|
||||
@@ -1,469 +0,0 @@
|
||||
"""A python class to manage caching of data from Comic Vine"""
|
||||
|
||||
# Copyright 2012-2014 Anthony Beville
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import sqlite3 as lite
|
||||
import os
|
||||
import datetime
|
||||
#import sys
|
||||
#from pprint import pprint
|
||||
|
||||
from . import ctversion
|
||||
from .settings import ComicTaggerSettings
|
||||
from . import utils
|
||||
|
||||
|
||||
class ComicVineCacher:
|
||||
|
||||
def __init__(self):
|
||||
self.settings_folder = ComicTaggerSettings.getSettingsFolder()
|
||||
self.db_file = os.path.join(self.settings_folder, "cv_cache.db")
|
||||
self.version_file = os.path.join(
|
||||
self.settings_folder, "cache_version.txt")
|
||||
|
||||
# verify that cache is from same version as this one
|
||||
data = ""
|
||||
try:
|
||||
with open(self.version_file, 'rb') as f:
|
||||
data = f.read().decode("utf-8")
|
||||
f.close()
|
||||
except:
|
||||
pass
|
||||
if data != ctversion.version:
|
||||
self.clearCache()
|
||||
|
||||
if not os.path.exists(self.db_file):
|
||||
self.create_cache_db()
|
||||
|
||||
def clearCache(self):
|
||||
try:
|
||||
os.unlink(self.db_file)
|
||||
except:
|
||||
pass
|
||||
try:
|
||||
os.unlink(self.version_file)
|
||||
except:
|
||||
pass
|
||||
|
||||
def create_cache_db(self):
|
||||
|
||||
# create the version file
|
||||
with open(self.version_file, 'w') as f:
|
||||
f.write(ctversion.version)
|
||||
|
||||
# this will wipe out any existing version
|
||||
open(self.db_file, 'w').close()
|
||||
|
||||
con = lite.connect(self.db_file)
|
||||
|
||||
# create tables
|
||||
with con:
|
||||
|
||||
cur = con.cursor()
|
||||
# name,id,start_year,publisher,image,description,count_of_issues
|
||||
cur.execute(
|
||||
"CREATE TABLE VolumeSearchCache(" +
|
||||
"search_term TEXT," +
|
||||
"id INT," +
|
||||
"name TEXT," +
|
||||
"start_year INT," +
|
||||
"publisher TEXT," +
|
||||
"count_of_issues INT," +
|
||||
"image_url TEXT," +
|
||||
"description TEXT," +
|
||||
"timestamp DATE DEFAULT (datetime('now','localtime'))) ")
|
||||
|
||||
cur.execute(
|
||||
"CREATE TABLE Volumes(" +
|
||||
"id INT," +
|
||||
"name TEXT," +
|
||||
"publisher TEXT," +
|
||||
"count_of_issues INT," +
|
||||
"start_year INT," +
|
||||
"timestamp DATE DEFAULT (datetime('now','localtime')), " +
|
||||
"PRIMARY KEY (id))")
|
||||
|
||||
cur.execute(
|
||||
"CREATE TABLE AltCovers(" +
|
||||
"issue_id INT," +
|
||||
"url_list TEXT," +
|
||||
"timestamp DATE DEFAULT (datetime('now','localtime')), " +
|
||||
"PRIMARY KEY (issue_id))")
|
||||
|
||||
cur.execute(
|
||||
"CREATE TABLE Issues(" +
|
||||
"id INT," +
|
||||
"volume_id INT," +
|
||||
"name TEXT," +
|
||||
"issue_number TEXT," +
|
||||
"super_url TEXT," +
|
||||
"thumb_url TEXT," +
|
||||
"cover_date TEXT," +
|
||||
"site_detail_url TEXT," +
|
||||
"description TEXT," +
|
||||
"timestamp DATE DEFAULT (datetime('now','localtime')), " +
|
||||
"PRIMARY KEY (id))")
|
||||
|
||||
def add_search_results(self, search_term, cv_search_results):
|
||||
|
||||
con = lite.connect(self.db_file)
|
||||
|
||||
with con:
|
||||
con.text_factory = str
|
||||
cur = con.cursor()
|
||||
|
||||
# remove all previous entries with this search term
|
||||
cur.execute(
|
||||
"DELETE FROM VolumeSearchCache WHERE search_term = ?", [
|
||||
search_term.lower()])
|
||||
|
||||
# now add in new results
|
||||
for record in cv_search_results:
|
||||
timestamp = datetime.datetime.now()
|
||||
|
||||
if record['publisher'] is None:
|
||||
pub_name = ""
|
||||
else:
|
||||
pub_name = record['publisher']['name']
|
||||
|
||||
if record['image'] is None:
|
||||
url = ""
|
||||
else:
|
||||
url = record['image']['super_url']
|
||||
|
||||
cur.execute(
|
||||
"INSERT INTO VolumeSearchCache " +
|
||||
"(search_term, id, name, start_year, publisher, count_of_issues, image_url, description) " +
|
||||
"VALUES(?, ?, ?, ?, ?, ?, ?, ?)",
|
||||
(search_term.lower(),
|
||||
record['id'],
|
||||
record['name'],
|
||||
record['start_year'],
|
||||
pub_name,
|
||||
record['count_of_issues'],
|
||||
url,
|
||||
record['description']))
|
||||
|
||||
def get_search_results(self, search_term):
|
||||
|
||||
results = list()
|
||||
con = lite.connect(self.db_file)
|
||||
with con:
|
||||
con.text_factory = str
|
||||
cur = con.cursor()
|
||||
|
||||
# purge stale search results
|
||||
a_day_ago = datetime.datetime.today() - datetime.timedelta(days=1)
|
||||
cur.execute(
|
||||
"DELETE FROM VolumeSearchCache WHERE timestamp < ?", [
|
||||
str(a_day_ago)])
|
||||
|
||||
# fetch
|
||||
cur.execute(
|
||||
"SELECT * FROM VolumeSearchCache WHERE search_term=?", [search_term.lower()])
|
||||
rows = cur.fetchall()
|
||||
# now process the results
|
||||
for record in rows:
|
||||
|
||||
result = dict()
|
||||
result['id'] = record[1]
|
||||
result['name'] = record[2]
|
||||
result['start_year'] = record[3]
|
||||
result['publisher'] = dict()
|
||||
result['publisher']['name'] = record[4]
|
||||
result['count_of_issues'] = record[5]
|
||||
result['image'] = dict()
|
||||
result['image']['super_url'] = record[6]
|
||||
result['description'] = record[7]
|
||||
|
||||
results.append(result)
|
||||
|
||||
return results
|
||||
|
||||
def add_alt_covers(self, issue_id, url_list):
|
||||
|
||||
con = lite.connect(self.db_file)
|
||||
|
||||
with con:
|
||||
con.text_factory = str
|
||||
cur = con.cursor()
|
||||
|
||||
# remove all previous entries with this search term
|
||||
cur.execute("DELETE FROM AltCovers WHERE issue_id = ?", [issue_id])
|
||||
|
||||
url_list_str = utils.listToString(url_list)
|
||||
# now add in new record
|
||||
cur.execute("INSERT INTO AltCovers " +
|
||||
"(issue_id, url_list) " +
|
||||
"VALUES(?, ?)",
|
||||
(issue_id,
|
||||
url_list_str)
|
||||
)
|
||||
|
||||
def get_alt_covers(self, issue_id):
|
||||
|
||||
con = lite.connect(self.db_file)
|
||||
with con:
|
||||
cur = con.cursor()
|
||||
con.text_factory = str
|
||||
|
||||
# purge stale issue info - probably issue data won't change
|
||||
# much....
|
||||
a_month_ago = datetime.datetime.today() - \
|
||||
datetime.timedelta(days=30)
|
||||
cur.execute(
|
||||
"DELETE FROM AltCovers WHERE timestamp < ?", [
|
||||
str(a_month_ago)])
|
||||
|
||||
cur.execute(
|
||||
"SELECT url_list FROM AltCovers WHERE issue_id=?", [issue_id])
|
||||
row = cur.fetchone()
|
||||
if row is None:
|
||||
return None
|
||||
else:
|
||||
url_list_str = row[0]
|
||||
if len(url_list_str) == 0:
|
||||
return []
|
||||
raw_list = url_list_str.split(",")
|
||||
url_list = []
|
||||
for item in raw_list:
|
||||
url_list.append(str(item).strip())
|
||||
return url_list
|
||||
|
||||
def add_volume_info(self, cv_volume_record):
|
||||
|
||||
con = lite.connect(self.db_file)
|
||||
|
||||
with con:
|
||||
|
||||
cur = con.cursor()
|
||||
|
||||
timestamp = datetime.datetime.now()
|
||||
|
||||
if cv_volume_record['publisher'] is None:
|
||||
pub_name = ""
|
||||
else:
|
||||
pub_name = cv_volume_record['publisher']['name']
|
||||
|
||||
data = {
|
||||
"name": cv_volume_record['name'],
|
||||
"publisher": pub_name,
|
||||
"count_of_issues": cv_volume_record['count_of_issues'],
|
||||
"start_year": cv_volume_record['start_year'],
|
||||
"timestamp": timestamp
|
||||
}
|
||||
self.upsert(cur, "volumes", "id", cv_volume_record['id'], data)
|
||||
|
||||
def add_volume_issues_info(self, volume_id, cv_volume_issues):
|
||||
|
||||
con = lite.connect(self.db_file)
|
||||
|
||||
with con:
|
||||
|
||||
cur = con.cursor()
|
||||
|
||||
timestamp = datetime.datetime.now()
|
||||
|
||||
# add in issues
|
||||
|
||||
for issue in cv_volume_issues:
|
||||
|
||||
data = {
|
||||
"volume_id": volume_id,
|
||||
"name": issue['name'],
|
||||
"issue_number": issue['issue_number'],
|
||||
"site_detail_url": issue['site_detail_url'],
|
||||
"cover_date": issue['cover_date'],
|
||||
"super_url": issue['image']['super_url'],
|
||||
"thumb_url": issue['image']['thumb_url'],
|
||||
"description": issue['description'],
|
||||
"timestamp": timestamp
|
||||
}
|
||||
self.upsert(cur, "issues", "id", issue['id'], data)
|
||||
|
||||
def get_volume_info(self, volume_id):
|
||||
|
||||
result = None
|
||||
|
||||
con = lite.connect(self.db_file)
|
||||
with con:
|
||||
cur = con.cursor()
|
||||
con.text_factory = str
|
||||
|
||||
# purge stale volume info
|
||||
a_week_ago = datetime.datetime.today() - datetime.timedelta(days=7)
|
||||
cur.execute(
|
||||
"DELETE FROM Volumes WHERE timestamp < ?", [str(a_week_ago)])
|
||||
|
||||
# fetch
|
||||
cur.execute(
|
||||
"SELECT id,name,publisher,count_of_issues,start_year FROM Volumes WHERE id = ?",
|
||||
[volume_id])
|
||||
|
||||
row = cur.fetchone()
|
||||
|
||||
if row is None:
|
||||
return result
|
||||
|
||||
result = dict()
|
||||
|
||||
# since ID is primary key, there is only one row
|
||||
result['id'] = row[0]
|
||||
result['name'] = row[1]
|
||||
result['publisher'] = dict()
|
||||
result['publisher']['name'] = row[2]
|
||||
result['count_of_issues'] = row[3]
|
||||
result['start_year'] = row[4]
|
||||
result['issues'] = list()
|
||||
|
||||
return result
|
||||
|
||||
def get_volume_issues_info(self, volume_id):
|
||||
|
||||
result = None
|
||||
|
||||
con = lite.connect(self.db_file)
|
||||
with con:
|
||||
cur = con.cursor()
|
||||
con.text_factory = str
|
||||
|
||||
# purge stale issue info - probably issue data won't change
|
||||
# much....
|
||||
a_week_ago = datetime.datetime.today() - datetime.timedelta(days=7)
|
||||
cur.execute(
|
||||
"DELETE FROM Issues WHERE timestamp < ?", [str(a_week_ago)])
|
||||
|
||||
# fetch
|
||||
results = list()
|
||||
|
||||
cur.execute(
|
||||
"SELECT id,name,issue_number,site_detail_url,cover_date,super_url,thumb_url,description FROM Issues WHERE volume_id = ?",
|
||||
[volume_id])
|
||||
rows = cur.fetchall()
|
||||
|
||||
# now process the results
|
||||
for row in rows:
|
||||
record = dict()
|
||||
|
||||
record['id'] = row[0]
|
||||
record['name'] = row[1]
|
||||
record['issue_number'] = row[2]
|
||||
record['site_detail_url'] = row[3]
|
||||
record['cover_date'] = row[4]
|
||||
record['image'] = dict()
|
||||
record['image']['super_url'] = row[5]
|
||||
record['image']['thumb_url'] = row[6]
|
||||
record['description'] = row[7]
|
||||
|
||||
results.append(record)
|
||||
|
||||
if len(results) == 0:
|
||||
return None
|
||||
|
||||
return results
|
||||
|
||||
def add_issue_select_details(
|
||||
self,
|
||||
issue_id,
|
||||
image_url,
|
||||
thumb_image_url,
|
||||
cover_date,
|
||||
site_detail_url):
|
||||
|
||||
con = lite.connect(self.db_file)
|
||||
|
||||
with con:
|
||||
cur = con.cursor()
|
||||
con.text_factory = str
|
||||
timestamp = datetime.datetime.now()
|
||||
|
||||
data = {
|
||||
"super_url": image_url,
|
||||
"thumb_url": thumb_image_url,
|
||||
"cover_date": cover_date,
|
||||
"site_detail_url": site_detail_url,
|
||||
"timestamp": timestamp
|
||||
}
|
||||
self.upsert(cur, "issues", "id", issue_id, data)
|
||||
|
||||
def get_issue_select_details(self, issue_id):
|
||||
|
||||
con = lite.connect(self.db_file)
|
||||
with con:
|
||||
cur = con.cursor()
|
||||
con.text_factory = str
|
||||
|
||||
cur.execute(
|
||||
"SELECT super_url,thumb_url,cover_date,site_detail_url FROM Issues WHERE id=?",
|
||||
[issue_id])
|
||||
row = cur.fetchone()
|
||||
|
||||
details = dict()
|
||||
if row is None or row[0] is None:
|
||||
details['image_url'] = None
|
||||
details['thumb_image_url'] = None
|
||||
details['cover_date'] = None
|
||||
details['site_detail_url'] = None
|
||||
|
||||
else:
|
||||
details['image_url'] = row[0]
|
||||
details['thumb_image_url'] = row[1]
|
||||
details['cover_date'] = row[2]
|
||||
details['site_detail_url'] = row[3]
|
||||
|
||||
return details
|
||||
|
||||
def upsert(self, cur, tablename, pkname, pkval, data):
|
||||
"""This does an insert if the given PK doesn't exist, and an
|
||||
update it if does
|
||||
|
||||
TODO: look into checking if UPDATE is needed
|
||||
TODO: should the cursor be created here, and not up the stack?
|
||||
"""
|
||||
|
||||
ins_count = len(data) + 1
|
||||
|
||||
keys = ""
|
||||
vals = list()
|
||||
ins_slots = ""
|
||||
set_slots = ""
|
||||
|
||||
for key in data:
|
||||
|
||||
if keys != "":
|
||||
keys += ", "
|
||||
if ins_slots != "":
|
||||
ins_slots += ", "
|
||||
if set_slots != "":
|
||||
set_slots += ", "
|
||||
|
||||
keys += key
|
||||
vals.append(data[key])
|
||||
ins_slots += "?"
|
||||
set_slots += key + " = ?"
|
||||
|
||||
keys += ", " + pkname
|
||||
vals.append(pkval)
|
||||
ins_slots += ", ?"
|
||||
condition = pkname + " = ?"
|
||||
|
||||
sql_ins = ("INSERT OR IGNORE INTO " + tablename +
|
||||
" (" + keys + ") " +
|
||||
" VALUES (" + ins_slots + ")")
|
||||
cur.execute(sql_ins, vals)
|
||||
|
||||
sql_upd = ("UPDATE " + tablename +
|
||||
" SET " + set_slots + " WHERE " + condition)
|
||||
cur.execute(sql_upd, vals)
|
||||
@@ -1,848 +0,0 @@
|
||||
"""A python class to manage communication with Comic Vine's REST API"""
|
||||
|
||||
# Copyright 2012-2014 Anthony Beville
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import json
|
||||
import urllib.request, urllib.error, urllib.parse
|
||||
import urllib.request, urllib.parse, urllib.error
|
||||
import re
|
||||
import time
|
||||
import datetime
|
||||
import sys
|
||||
import ssl
|
||||
#from pprint import pprint
|
||||
#import math
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
try:
|
||||
from PyQt5.QtNetwork import QNetworkAccessManager, QNetworkRequest
|
||||
from PyQt5.QtCore import QUrl, pyqtSignal, QObject, QByteArray
|
||||
except ImportError:
|
||||
# No Qt, so define a few dummy QObjects to help us compile
|
||||
class QObject():
|
||||
|
||||
def __init__(self, *args):
|
||||
pass
|
||||
|
||||
class pyqtSignal():
|
||||
|
||||
def __init__(self, *args):
|
||||
pass
|
||||
|
||||
def emit(a, b, c):
|
||||
pass
|
||||
|
||||
from . import ctversion
|
||||
from . import utils
|
||||
from .comicvinecacher import ComicVineCacher
|
||||
from .genericmetadata import GenericMetadata
|
||||
from .issuestring import IssueString
|
||||
#from settings import ComicTaggerSettings
|
||||
|
||||
|
||||
class CVTypeID:
|
||||
Volume = "4050"
|
||||
Issue = "4000"
|
||||
|
||||
|
||||
class ComicVineTalkerException(Exception):
|
||||
Unknown = -1
|
||||
Network = -2
|
||||
InvalidKey = 100
|
||||
RateLimit = 107
|
||||
|
||||
def __init__(self, code=-1, desc=""):
|
||||
self.desc = desc
|
||||
self.code = code
|
||||
|
||||
def __str__(self):
|
||||
if (self.code == ComicVineTalkerException.Unknown or
|
||||
self.code == ComicVineTalkerException.Network):
|
||||
return self.desc
|
||||
else:
|
||||
return "CV error #{0}: [{1}]. \n".format(self.code, self.desc)
|
||||
|
||||
|
||||
class ComicVineTalker(QObject):
|
||||
|
||||
logo_url = "http://static.comicvine.com/bundles/comicvinesite/images/logo.png"
|
||||
api_key = ""
|
||||
|
||||
@staticmethod
|
||||
def getRateLimitMessage():
|
||||
if ComicVineTalker.api_key == "":
|
||||
return "Comic Vine rate limit exceeded. You should configue your own Comic Vine API key."
|
||||
else:
|
||||
return "Comic Vine rate limit exceeded. Please wait a bit."
|
||||
|
||||
def __init__(self):
|
||||
QObject.__init__(self)
|
||||
|
||||
self.api_base_url = "https://comicvine.gamespot.com/api"
|
||||
self.wait_for_rate_limit = False
|
||||
|
||||
# key that is registered to comictagger
|
||||
default_api_key = '27431e6787042105bd3e47e169a624521f89f3a4'
|
||||
|
||||
if ComicVineTalker.api_key == "":
|
||||
self.api_key = default_api_key
|
||||
else:
|
||||
self.api_key = ComicVineTalker.api_key
|
||||
|
||||
self.log_func = None
|
||||
|
||||
# always use a tls context for urlopen
|
||||
self.ssl = ssl.SSLContext(ssl.PROTOCOL_TLS)
|
||||
|
||||
def setLogFunc(self, log_func):
|
||||
self.log_func = log_func
|
||||
|
||||
def writeLog(self, text):
|
||||
if self.log_func is None:
|
||||
# sys.stdout.write(text.encode(errors='replace'))
|
||||
# sys.stdout.flush()
|
||||
print(text, file=sys.stderr)
|
||||
else:
|
||||
self.log_func(text)
|
||||
|
||||
def parseDateStr(self, date_str):
|
||||
day = None
|
||||
month = None
|
||||
year = None
|
||||
if date_str is not None:
|
||||
parts = date_str.split('-')
|
||||
year = parts[0]
|
||||
if len(parts) > 1:
|
||||
month = parts[1]
|
||||
if len(parts) > 2:
|
||||
day = parts[2]
|
||||
return day, month, year
|
||||
|
||||
def testKey(self, key):
|
||||
|
||||
try:
|
||||
test_url = self.api_base_url + "/issue/1/?api_key=" + \
|
||||
key + "&format=json&field_list=name"
|
||||
resp = urllib.request.urlopen(test_url, context=self.ssl)
|
||||
content = resp.read()
|
||||
|
||||
cv_response = json.loads(content.decode('utf-8'))
|
||||
|
||||
# Bogus request, but if the key is wrong, you get error 100: "Invalid
|
||||
# API Key"
|
||||
return cv_response['status_code'] != 100
|
||||
except:
|
||||
return False
|
||||
|
||||
"""
|
||||
Get the contect from the CV server. If we're in "wait mode" and status code is a rate limit error
|
||||
sleep for a bit and retry.
|
||||
"""
|
||||
|
||||
def getCVContent(self, url):
|
||||
total_time_waited = 0
|
||||
limit_wait_time = 1
|
||||
counter = 0
|
||||
wait_times = [1, 2, 3, 4]
|
||||
while True:
|
||||
content = self.getUrlContent(url)
|
||||
cv_response = json.loads(content.decode('utf-8'))
|
||||
if self.wait_for_rate_limit and cv_response[
|
||||
'status_code'] == ComicVineTalkerException.RateLimit:
|
||||
self.writeLog(
|
||||
"Rate limit encountered. Waiting for {0} minutes\n".format(limit_wait_time))
|
||||
time.sleep(limit_wait_time * 60)
|
||||
total_time_waited += limit_wait_time
|
||||
limit_wait_time = wait_times[counter]
|
||||
if counter < 3:
|
||||
counter += 1
|
||||
# don't wait much more than 20 minutes
|
||||
if total_time_waited < 20:
|
||||
continue
|
||||
if cv_response['status_code'] != 1:
|
||||
self.writeLog(
|
||||
"Comic Vine query failed with error #{0}: [{1}]. \n".format(
|
||||
cv_response['status_code'],
|
||||
cv_response['error']))
|
||||
raise ComicVineTalkerException(
|
||||
cv_response['status_code'], cv_response['error'])
|
||||
else:
|
||||
# it's all good
|
||||
break
|
||||
return cv_response
|
||||
|
||||
def getUrlContent(self, url):
|
||||
# connect to server:
|
||||
# if there is a 500 error, try a few more times before giving up
|
||||
# any other error, just bail
|
||||
#print("---", url)
|
||||
for tries in range(3):
|
||||
try:
|
||||
resp = urllib.request.urlopen(url, context=self.ssl)
|
||||
return resp.read()
|
||||
except urllib.error.HTTPError as e:
|
||||
if e.getcode() == 500:
|
||||
self.writeLog("Try #{0}: ".format(tries + 1))
|
||||
time.sleep(1)
|
||||
self.writeLog(str(e) + "\n")
|
||||
|
||||
if e.getcode() != 500:
|
||||
break
|
||||
|
||||
except Exception as e:
|
||||
self.writeLog(str(e) + "\n")
|
||||
raise ComicVineTalkerException(
|
||||
ComicVineTalkerException.Network, "Network Error!")
|
||||
|
||||
raise ComicVineTalkerException(
|
||||
ComicVineTalkerException.Unknown, "Error on Comic Vine server")
|
||||
|
||||
def searchForSeries(self, series_name, callback=None, refresh_cache=False):
|
||||
|
||||
# remove cruft from the search string
|
||||
series_name = utils.removearticles(series_name).lower().strip()
|
||||
|
||||
# before we search online, look in our cache, since we might have
|
||||
# done this same search recently
|
||||
cvc = ComicVineCacher()
|
||||
if not refresh_cache:
|
||||
cached_search_results = cvc.get_search_results(series_name)
|
||||
|
||||
if len(cached_search_results) > 0:
|
||||
return cached_search_results
|
||||
|
||||
original_series_name = series_name
|
||||
|
||||
# Split and rejoin to remove extra internal spaces
|
||||
query_word_list = series_name.split()
|
||||
query_string = " ".join( query_word_list ).strip()
|
||||
#print ("Query string = ", query_string)
|
||||
|
||||
query_string = urllib.parse.quote_plus(query_string.encode("utf-8"))
|
||||
|
||||
search_url = self.api_base_url + "/search/?api_key=" + self.api_key + "&format=json&resources=volume&query=" + \
|
||||
query_string + \
|
||||
"&field_list=name,id,start_year,publisher,image,description,count_of_issues"
|
||||
cv_response = self.getCVContent(search_url + "&page=1")
|
||||
|
||||
search_results = list()
|
||||
|
||||
# see http://api.comicvine.com/documentation/#handling_responses
|
||||
|
||||
limit = cv_response['limit']
|
||||
current_result_count = cv_response['number_of_page_results']
|
||||
total_result_count = cv_response['number_of_total_results']
|
||||
|
||||
# 8 Dec 2018 - Comic Vine changed query results again. Terms are now
|
||||
# ORed together, and we get thousands of results. Good news is the
|
||||
# results are sorted by relevance, so we can be smart about halting
|
||||
# the search.
|
||||
# 1. Don't fetch more than some sane amount of pages.
|
||||
max_results = 500
|
||||
# 2. Halt when not all of our search terms are present in a result
|
||||
# 3. Halt when the results contain more (plus threshold) words than
|
||||
# our search
|
||||
result_word_count_max = len(query_word_list) + 3
|
||||
|
||||
total_result_count = min(total_result_count, max_results)
|
||||
|
||||
if callback is None:
|
||||
self.writeLog(
|
||||
"Found {0} of {1} results\n".format(
|
||||
cv_response['number_of_page_results'],
|
||||
cv_response['number_of_total_results']))
|
||||
search_results.extend(cv_response['results'])
|
||||
page = 1
|
||||
|
||||
if callback is not None:
|
||||
callback(current_result_count, total_result_count)
|
||||
|
||||
# see if we need to keep asking for more pages...
|
||||
stop_searching = False
|
||||
while (current_result_count < total_result_count):
|
||||
|
||||
last_result = search_results[-1]['name']
|
||||
|
||||
# See if the last result's name has all the of the search terms.
|
||||
# if not, break out of this, loop, we're done.
|
||||
#print("Searching for {} in '{}'".format(query_word_list, last_result))
|
||||
for term in query_word_list:
|
||||
if term not in last_result.lower():
|
||||
#print("Term '{}' not in last result. Halting search result fetching".format(term))
|
||||
stop_searching = True
|
||||
break
|
||||
|
||||
# Also, stop searching when the word count of last results is too much longer
|
||||
# than our search terms list
|
||||
if len(utils.removearticles(last_result).split()) > result_word_count_max:
|
||||
#print("Last result '{}' is too long. Halting search result fetching".format(last_result))
|
||||
stop_searching = True
|
||||
|
||||
if stop_searching:
|
||||
break
|
||||
|
||||
if callback is None:
|
||||
self.writeLog(
|
||||
"getting another page of results {0} of {1}...\n".format(
|
||||
current_result_count,
|
||||
total_result_count))
|
||||
page += 1
|
||||
|
||||
cv_response = self.getCVContent(search_url + "&page=" + str(page))
|
||||
|
||||
search_results.extend(cv_response['results'])
|
||||
current_result_count += cv_response['number_of_page_results']
|
||||
|
||||
if callback is not None:
|
||||
callback(current_result_count, total_result_count)
|
||||
|
||||
# Remove any search results that don't contain all the search terms
|
||||
# (iterate backwards for easy removal)
|
||||
for i in range(len(search_results) - 1, -1, -1):
|
||||
record = search_results[i]
|
||||
for term in query_word_list:
|
||||
if term not in record['name'].lower():
|
||||
del search_results[i]
|
||||
break
|
||||
|
||||
# for record in search_results:
|
||||
#print(u"{0}: {1} ({2})".format(record['id'], record['name'] , record['start_year']))
|
||||
# print(record)
|
||||
#record['count_of_issues'] = record['count_of_isssues']
|
||||
#print(u"{0}: {1} ({2})".format(search_results['results'][0]['id'], search_results['results'][0]['name'] , search_results['results'][0]['start_year']))
|
||||
|
||||
# cache these search results
|
||||
cvc.add_search_results(original_series_name, search_results)
|
||||
|
||||
return search_results
|
||||
|
||||
def fetchVolumeData(self, series_id):
|
||||
|
||||
# before we search online, look in our cache, since we might already
|
||||
# have this info
|
||||
cvc = ComicVineCacher()
|
||||
cached_volume_result = cvc.get_volume_info(series_id)
|
||||
|
||||
if cached_volume_result is not None:
|
||||
return cached_volume_result
|
||||
|
||||
volume_url = self.api_base_url + "/volume/" + CVTypeID.Volume + "-" + \
|
||||
str(series_id) + "/?api_key=" + self.api_key + \
|
||||
"&field_list=name,id,start_year,publisher,count_of_issues&format=json"
|
||||
|
||||
cv_response = self.getCVContent(volume_url)
|
||||
|
||||
volume_results = cv_response['results']
|
||||
|
||||
cvc.add_volume_info(volume_results)
|
||||
|
||||
return volume_results
|
||||
|
||||
def fetchIssuesByVolume(self, series_id):
|
||||
|
||||
# before we search online, look in our cache, since we might already
|
||||
# have this info
|
||||
cvc = ComicVineCacher()
|
||||
cached_volume_issues_result = cvc.get_volume_issues_info(series_id)
|
||||
|
||||
if cached_volume_issues_result is not None:
|
||||
return cached_volume_issues_result
|
||||
|
||||
#---------------------------------
|
||||
issues_url = self.api_base_url + "/issues/" + "?api_key=" + self.api_key + "&filter=volume:" + \
|
||||
str(series_id) + \
|
||||
"&field_list=id,volume,issue_number,name,image,cover_date,site_detail_url,description&format=json"
|
||||
cv_response = self.getCVContent(issues_url)
|
||||
|
||||
#------------------------------------
|
||||
|
||||
limit = cv_response['limit']
|
||||
current_result_count = cv_response['number_of_page_results']
|
||||
total_result_count = cv_response['number_of_total_results']
|
||||
#print("total_result_count", total_result_count)
|
||||
|
||||
#print("Found {0} of {1} results".format(cv_response['number_of_page_results'], cv_response['number_of_total_results']))
|
||||
volume_issues_result = cv_response['results']
|
||||
page = 1
|
||||
offset = 0
|
||||
|
||||
# see if we need to keep asking for more pages...
|
||||
while (current_result_count < total_result_count):
|
||||
#print("getting another page of issue results {0} of {1}...".format(current_result_count, total_result_count))
|
||||
page += 1
|
||||
offset += cv_response['number_of_page_results']
|
||||
|
||||
# print issues_url+ "&offset="+str(offset)
|
||||
cv_response = self.getCVContent(
|
||||
issues_url + "&offset=" + str(offset))
|
||||
|
||||
volume_issues_result.extend(cv_response['results'])
|
||||
current_result_count += cv_response['number_of_page_results']
|
||||
|
||||
self.repairUrls(volume_issues_result)
|
||||
|
||||
cvc.add_volume_issues_info(series_id, volume_issues_result)
|
||||
|
||||
return volume_issues_result
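The offset-based paging above follows a common pattern for the Comic Vine API: keep adding &offset=<results fetched so far> until the page counts add up to the total. A standalone sketch of that pattern, assuming only a get_json(url) callable that returns a dict shaped like a Comic Vine response:

def fetch_all_pages(get_json, base_url):
    # get_json(url) is an assumed callable returning a dict with 'results',
    # 'number_of_page_results' and 'number_of_total_results' keys, like the
    # Comic Vine responses handled above.
    response = get_json(base_url)
    results = list(response['results'])
    fetched = response['number_of_page_results']
    total = response['number_of_total_results']
    offset = 0
    while fetched < total:
        offset += response['number_of_page_results']
        response = get_json(base_url + "&offset=" + str(offset))
        results.extend(response['results'])
        fetched += response['number_of_page_results']
    return results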
|
||||
|
||||
def fetchIssuesByVolumeIssueNumAndYear(
|
||||
self, volume_id_list, issue_number, year):
|
||||
volume_filter = "volume:"
|
||||
for vid in volume_id_list:
|
||||
volume_filter += str(vid) + "|"
|
||||
|
||||
year_filter = ""
|
||||
if year is not None and str(year).isdigit():
|
||||
year_filter = ",cover_date:{0}-1-1|{1}-1-1".format(
|
||||
year, int(year) + 1)
|
||||
|
||||
issue_number = urllib.parse.quote_plus(str(issue_number).encode("utf-8"))
|
||||
|
||||
filter = "&filter=" + volume_filter + \
|
||||
year_filter + ",issue_number:" + issue_number
|
||||
|
||||
issues_url = self.api_base_url + "/issues/" + "?api_key=" + self.api_key + filter + \
|
||||
"&field_list=id,volume,issue_number,name,image,cover_date,site_detail_url,description&format=json"
|
||||
|
||||
cv_response = self.getCVContent(issues_url)
|
||||
|
||||
#------------------------------------
|
||||
|
||||
limit = cv_response['limit']
|
||||
current_result_count = cv_response['number_of_page_results']
|
||||
total_result_count = cv_response['number_of_total_results']
|
||||
#print("total_result_count", total_result_count)
|
||||
|
||||
#print("Found {0} of {1} results\n".format(cv_response['number_of_page_results'], cv_response['number_of_total_results']))
|
||||
filtered_issues_result = cv_response['results']
|
||||
page = 1
|
||||
offset = 0
|
||||
|
||||
# see if we need to keep asking for more pages...
|
||||
while (current_result_count < total_result_count):
|
||||
#print("getting another page of issue results {0} of {1}...\n".format(current_result_count, total_result_count))
|
||||
page += 1
|
||||
offset += cv_response['number_of_page_results']
|
||||
|
||||
# print issues_url+ "&offset="+str(offset)
|
||||
cv_response = self.getCVContent(
|
||||
issues_url + "&offset=" + str(offset))
|
||||
|
||||
filtered_issues_result.extend(cv_response['results'])
|
||||
current_result_count += cv_response['number_of_page_results']
|
||||
|
||||
self.repairUrls(filtered_issues_result)
|
||||
|
||||
return filtered_issues_result
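For reference, this is how the filter string built above comes out for a couple of made-up volume IDs, an issue number and a year (all values are illustrative only):

import urllib.parse

volume_id_list, issue_number, year = [4050, 18436], "7", 1991
volume_filter = "volume:" + "".join(str(v) + "|" for v in volume_id_list)
year_filter = ",cover_date:{0}-1-1|{1}-1-1".format(year, int(year) + 1) if str(year).isdigit() else ""
issue = urllib.parse.quote_plus(str(issue_number).encode("utf-8"))
print("&filter=" + volume_filter + year_filter + ",issue_number:" + issue)
# &filter=volume:4050|18436|,cover_date:1991-1-1|1992-1-1,issue_number:7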
|
||||
|
||||
def fetchIssueData(self, series_id, issue_number, settings):
|
||||
|
||||
volume_results = self.fetchVolumeData(series_id)
|
||||
issues_list_results = self.fetchIssuesByVolume(series_id)
|
||||
|
||||
found = False
|
||||
for record in issues_list_results:
|
||||
if IssueString(issue_number).asString() is None:
|
||||
issue_number = 1
|
||||
if IssueString(record['issue_number']).asString().lower() == IssueString(
|
||||
issue_number).asString().lower():
|
||||
found = True
|
||||
break
|
||||
|
||||
if (found):
|
||||
issue_url = self.api_base_url + "/issue/" + CVTypeID.Issue + "-" + \
|
||||
str(record['id']) + "/?api_key=" + \
|
||||
self.api_key + "&format=json"
|
||||
|
||||
cv_response = self.getCVContent(issue_url)
|
||||
issue_results = cv_response['results']
|
||||
|
||||
else:
|
||||
return None
|
||||
|
||||
# Now, map the Comic Vine data to generic metadata
|
||||
return self.mapCVDataToMetadata(
|
||||
volume_results, issue_results, settings)
|
||||
|
||||
def fetchIssueDataByIssueID(self, issue_id, settings):
|
||||
|
||||
issue_url = self.api_base_url + "/issue/" + CVTypeID.Issue + "-" + \
|
||||
str(issue_id) + "/?api_key=" + self.api_key + "&format=json"
|
||||
cv_response = self.getCVContent(issue_url)
|
||||
|
||||
issue_results = cv_response['results']
|
||||
|
||||
volume_results = self.fetchVolumeData(issue_results['volume']['id'])
|
||||
|
||||
# Now, map the Comic Vine data to generic metadata
|
||||
md = self.mapCVDataToMetadata(volume_results, issue_results, settings)
|
||||
md.isEmpty = False
|
||||
return md
|
||||
|
||||
def mapCVDataToMetadata(self, volume_results, issue_results, settings):
|
||||
|
||||
# Now, map the Comic Vine data to generic metadata
|
||||
metadata = GenericMetadata()
|
||||
|
||||
metadata.series = issue_results['volume']['name']
|
||||
|
||||
num_s = IssueString(issue_results['issue_number']).asString()
|
||||
metadata.issue = num_s
|
||||
metadata.title = issue_results['name']
|
||||
|
||||
metadata.publisher = volume_results['publisher']['name']
|
||||
metadata.day, metadata.month, metadata.year = self.parseDateStr(
|
||||
issue_results['cover_date'])
|
||||
|
||||
#metadata.issueCount = volume_results['count_of_issues']
|
||||
metadata.comments = self.cleanup_html(
|
||||
issue_results['description'], settings.remove_html_tables)
|
||||
if settings.use_series_start_as_volume:
|
||||
metadata.volume = volume_results['start_year']
|
||||
|
||||
metadata.notes = "Tagged with ComicTagger {0} using info from Comic Vine on {1}. [Issue ID {2}]".format(
|
||||
ctversion.version,
|
||||
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
|
||||
issue_results['id'])
|
||||
#metadata.notes += issue_results['site_detail_url']
|
||||
|
||||
metadata.webLink = issue_results['site_detail_url']
|
||||
|
||||
person_credits = issue_results['person_credits']
|
||||
for person in person_credits:
|
||||
if 'role' in person:
|
||||
roles = person['role'].split(',')
|
||||
for role in roles:
|
||||
# can we determine 'primary' from CV??
|
||||
metadata.addCredit(
|
||||
person['name'], role.title().strip(), False)
|
||||
|
||||
character_credits = issue_results['character_credits']
|
||||
character_list = list()
|
||||
for character in character_credits:
|
||||
character_list.append(character['name'])
|
||||
metadata.characters = utils.listToString(character_list)
|
||||
|
||||
team_credits = issue_results['team_credits']
|
||||
team_list = list()
|
||||
for team in team_credits:
|
||||
team_list.append(team['name'])
|
||||
metadata.teams = utils.listToString(team_list)
|
||||
|
||||
location_credits = issue_results['location_credits']
|
||||
location_list = list()
|
||||
for location in location_credits:
|
||||
location_list.append(location['name'])
|
||||
metadata.locations = utils.listToString(location_list)
|
||||
|
||||
story_arc_credits = issue_results['story_arc_credits']
|
||||
arc_list = []
|
||||
for arc in story_arc_credits:
|
||||
arc_list.append(arc['name'])
|
||||
if len(arc_list) > 0:
|
||||
metadata.storyArc = utils.listToString(arc_list)
|
||||
|
||||
return metadata
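The credit mapping above splits Comic Vine's comma-separated role strings into one credit per role. A tiny illustration with a made-up credit record:

person = {'name': 'Jack Kirby', 'role': 'penciler, writer'}  # sample data, not a real CV payload
credits = [(person['name'], role.title().strip()) for role in person['role'].split(',')]
print(credits)  # [('Jack Kirby', 'Penciler'), ('Jack Kirby', 'Writer')]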
|
||||
|
||||
def cleanup_html(self, string, remove_html_tables):
|
||||
"""
|
||||
converter = html2text.HTML2Text()
|
||||
#converter.emphasis_mark = '*'
|
||||
#converter.ignore_links = True
|
||||
converter.body_width = 0
|
||||
|
||||
print(html2text.html2text(string))
|
||||
return string
|
||||
#return converter.handle(string)
|
||||
"""
|
||||
|
||||
if string is None:
|
||||
return ""
|
||||
# find any tables
|
||||
soup = BeautifulSoup(string, "html.parser")
|
||||
tables = soup.findAll('table')
|
||||
|
||||
# remove all newlines first
|
||||
string = string.replace("\n", "")
|
||||
|
||||
# put in our own
|
||||
string = string.replace("<br>", "\n")
|
||||
string = string.replace("</p>", "\n\n")
|
||||
string = string.replace("<h4>", "*")
|
||||
string = string.replace("</h4>", "*\n")
|
||||
|
||||
# remove the tables
|
||||
p = re.compile(r'<table[^<]*?>.*?<\/table>')
|
||||
if remove_html_tables:
|
||||
string = p.sub('', string)
|
||||
string = string.replace("*List of covers and their creators:*", "")
|
||||
else:
|
||||
string = p.sub('{}', string)
|
||||
|
||||
# now strip all other tags
|
||||
p = re.compile(r'<[^<]*?>')
|
||||
newstring = p.sub('', string)
|
||||
|
||||
newstring = newstring.replace(' ', ' ')
|
||||
newstring = newstring.replace('&', '&')
|
||||
|
||||
newstring = newstring.strip()
|
||||
|
||||
if not remove_html_tables:
|
||||
# now rebuild the tables into text from BSoup
|
||||
try:
|
||||
table_strings = []
|
||||
for table in tables:
|
||||
rows = []
|
||||
hdrs = []
|
||||
col_widths = []
|
||||
for hdr in table.findAll('th'):
|
||||
item = hdr.string.strip()
|
||||
hdrs.append(item)
|
||||
col_widths.append(len(item))
|
||||
rows.append(hdrs)
|
||||
|
||||
for row in table.findAll('tr'):
|
||||
cols = []
|
||||
col = row.findAll('td')
|
||||
i = 0
|
||||
for c in col:
|
||||
item = c.string.strip()
|
||||
cols.append(item)
|
||||
if len(item) > col_widths[i]:
|
||||
col_widths[i] = len(item)
|
||||
i += 1
|
||||
if len(cols) != 0:
|
||||
rows.append(cols)
|
||||
# now we have the data, make it into text
|
||||
fmtstr = ""
|
||||
for w in col_widths:
|
||||
fmtstr += " {{:{}}}|".format(w + 1)
|
||||
width = sum(col_widths) + len(col_widths) * 2
|
||||
print("width=", width)
|
||||
table_text = ""
|
||||
counter = 0
|
||||
for row in rows:
|
||||
table_text += fmtstr.format(*row) + "\n"
|
||||
if counter == 0 and len(hdrs) != 0:
|
||||
table_text += "-" * width + "\n"
|
||||
counter += 1
|
||||
|
||||
table_strings.append(table_text)
|
||||
|
||||
newstring = newstring.format(*table_strings)
|
||||
except Exception:
|
||||
# we caught an error rebuilding the table.
|
||||
# just bail and remove the formatting
|
||||
print("table parse error")
|
||||
newstring = newstring.replace("{}", "")
|
||||
|
||||
return newstring
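To see what the regular expressions in cleanup_html do, here is a compressed sketch run on a made-up description string; it mirrors the path where remove_html_tables is enabled:

import re

sample = "<p>An <b>origin</b> story.</p><table><tr><td>cover list</td></tr></table>"
sample = sample.replace("</p>", "\n\n")
sample = re.sub(r'<table[^<]*?>.*?<\/table>', '', sample)  # drop the table entirely
print(re.sub(r'<[^<]*?>', '', sample).strip())             # -> An origin story.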
|
||||
|
||||
def fetchIssueDate(self, issue_id):
|
||||
details = self.fetchIssueSelectDetails(issue_id)
|
||||
day, month, year = self.parseDateStr(details['cover_date'])
|
||||
return month, year
|
||||
|
||||
def fetchIssueCoverURLs(self, issue_id):
|
||||
details = self.fetchIssueSelectDetails(issue_id)
|
||||
return details['image_url'], details['thumb_image_url']
|
||||
|
||||
def fetchIssuePageURL(self, issue_id):
|
||||
details = self.fetchIssueSelectDetails(issue_id)
|
||||
return details['site_detail_url']
|
||||
|
||||
def fetchIssueSelectDetails(self, issue_id):
|
||||
|
||||
#cached_image_url,cached_thumb_url,cached_month,cached_year = self.fetchCachedIssueSelectDetails(issue_id)
|
||||
cached_details = self.fetchCachedIssueSelectDetails(issue_id)
|
||||
if cached_details['image_url'] is not None:
|
||||
return cached_details
|
||||
|
||||
issue_url = self.api_base_url + "/issue/" + CVTypeID.Issue + "-" + \
|
||||
str(issue_id) + "/?api_key=" + self.api_key + \
|
||||
"&format=json&field_list=image,cover_date,site_detail_url"
|
||||
|
||||
details = dict()
|
||||
details['image_url'] = None
|
||||
details['thumb_image_url'] = None
|
||||
details['cover_date'] = None
|
||||
details['site_detail_url'] = None
|
||||
|
||||
cv_response = self.getCVContent(issue_url)
|
||||
|
||||
details['image_url'] = cv_response['results']['image']['super_url']
|
||||
details['thumb_image_url'] = cv_response[
|
||||
'results']['image']['thumb_url']
|
||||
details['cover_date'] = cv_response['results']['cover_date']
|
||||
details['site_detail_url'] = cv_response['results']['site_detail_url']
|
||||
|
||||
if details['image_url'] is not None:
|
||||
self.cacheIssueSelectDetails(issue_id,
|
||||
details['image_url'],
|
||||
details['thumb_image_url'],
|
||||
details['cover_date'],
|
||||
details['site_detail_url'])
|
||||
# print(details['site_detail_url'])
|
||||
return details
|
||||
|
||||
def fetchCachedIssueSelectDetails(self, issue_id):
|
||||
|
||||
# before we search online, look in our cache, since we might already
|
||||
# have this info
|
||||
cvc = ComicVineCacher()
|
||||
return cvc.get_issue_select_details(issue_id)
|
||||
|
||||
def cacheIssueSelectDetails(
|
||||
self, issue_id, image_url, thumb_url, cover_date, page_url):
|
||||
cvc = ComicVineCacher()
|
||||
cvc.add_issue_select_details(
|
||||
issue_id, image_url, thumb_url, cover_date, page_url)
|
||||
|
||||
def fetchAlternateCoverURLs(self, issue_id, issue_page_url):
|
||||
url_list = self.fetchCachedAlternateCoverURLs(issue_id)
|
||||
if url_list is not None:
|
||||
return url_list
|
||||
|
||||
# scrape the CV issue page URL to get the alternate cover URLs
|
||||
resp = urllib.request.urlopen(issue_page_url, context=self.ssl)
|
||||
content = resp.read()
|
||||
alt_cover_url_list = self.parseOutAltCoverUrls(content)
|
||||
|
||||
# cache this alt cover URL list
|
||||
self.cacheAlternateCoverURLs(issue_id, alt_cover_url_list)
|
||||
|
||||
return alt_cover_url_list
|
||||
|
||||
def parseOutAltCoverUrls(self, page_html):
|
||||
soup = BeautifulSoup(page_html, "html.parser")
|
||||
|
||||
alt_cover_url_list = []
|
||||
|
||||
# Using knowledge of the layout of the Comic Vine issue page here:
|
||||
# look for the divs that are in the classes 'imgboxart' and
|
||||
# 'issue-cover'
|
||||
div_list = soup.find_all('div')
|
||||
covers_found = 0
|
||||
for d in div_list:
|
||||
if 'class' in d.attrs:
|
||||
c = d['class']
|
||||
if ('imgboxart' in c and
|
||||
'issue-cover' in c and
|
||||
d.img['src'].startswith("http")
|
||||
):
|
||||
|
||||
covers_found += 1
|
||||
if covers_found != 1:
|
||||
alt_cover_url_list.append(d.img['src'])
|
||||
|
||||
return alt_cover_url_list
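The scraping logic above can be tried against a hand-written HTML fragment; the markup below only mimics the 'imgboxart issue-cover' layout described in the comments and is not a real Comic Vine page:

from bs4 import BeautifulSoup

html = """
<div class="imgboxart issue-cover"><img src="https://example.com/primary.jpg"></div>
<div class="imgboxart issue-cover"><img src="https://example.com/alt1.jpg"></div>
"""
soup = BeautifulSoup(html, "html.parser")
urls = [d.img["src"] for d in soup.find_all("div")
        if "imgboxart" in d.get("class", []) and "issue-cover" in d.get("class", [])]
print(urls[1:])  # skip the first (primary) cover, as the method above does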
|
||||
|
||||
def fetchCachedAlternateCoverURLs(self, issue_id):
|
||||
|
||||
# before we search online, look in our cache, since we might already
|
||||
# have this info
|
||||
cvc = ComicVineCacher()
|
||||
url_list = cvc.get_alt_covers(issue_id)
|
||||
if url_list is not None:
|
||||
return url_list
|
||||
else:
|
||||
return None
|
||||
|
||||
def cacheAlternateCoverURLs(self, issue_id, url_list):
|
||||
cvc = ComicVineCacher()
|
||||
cvc.add_alt_covers(issue_id, url_list)
|
||||
|
||||
#-------------------------------------------------------------------------
|
||||
urlFetchComplete = pyqtSignal(str, str, int)
|
||||
|
||||
def asyncFetchIssueCoverURLs(self, issue_id):
|
||||
|
||||
self.issue_id = issue_id
|
||||
details = self.fetchCachedIssueSelectDetails(issue_id)
|
||||
if details['image_url'] is not None:
|
||||
self.urlFetchComplete.emit(
|
||||
details['image_url'],
|
||||
details['thumb_image_url'],
|
||||
self.issue_id)
|
||||
return
|
||||
|
||||
issue_url = self.api_base_url + "/issue/" + CVTypeID.Issue + "-" + \
|
||||
str(issue_id) + "/?api_key=" + self.api_key + \
|
||||
"&format=json&field_list=image,cover_date,site_detail_url"
|
||||
self.nam = QNetworkAccessManager()
|
||||
self.nam.finished.connect(self.asyncFetchIssueCoverURLComplete)
|
||||
self.nam.get(QNetworkRequest(QUrl(issue_url)))
|
||||
|
||||
def asyncFetchIssueCoverURLComplete(self, reply):
|
||||
|
||||
# read in the response
|
||||
data = reply.readAll()
|
||||
|
||||
try:
|
||||
cv_response = json.loads(bytes(data))
|
||||
except Exception as e:
|
||||
print("Comic Vine query failed to get JSON data", file=sys.stderr)
|
||||
print(str(data), file=sys.stderr)
|
||||
return
|
||||
|
||||
if cv_response['status_code'] != 1:
|
||||
print("Comic Vine query failed with error: [{0}]. ".format(
|
||||
cv_response['error']), file=sys.stderr)
|
||||
return
|
||||
|
||||
image_url = cv_response['results']['image']['super_url']
|
||||
thumb_url = cv_response['results']['image']['thumb_url']
|
||||
cover_date = cv_response['results']['cover_date']
|
||||
page_url = cv_response['results']['site_detail_url']
|
||||
|
||||
self.cacheIssueSelectDetails(
|
||||
self.issue_id, image_url, thumb_url, cover_date, page_url)
|
||||
|
||||
self.urlFetchComplete.emit(image_url, thumb_url, self.issue_id)
|
||||
|
||||
altUrlListFetchComplete = pyqtSignal(list, int)
|
||||
|
||||
def asyncFetchAlternateCoverURLs(self, issue_id, issue_page_url):
|
||||
# This async version requires the issue page url to be provided!
|
||||
self.issue_id = issue_id
|
||||
url_list = self.fetchCachedAlternateCoverURLs(issue_id)
|
||||
if url_list is not None:
|
||||
self.altUrlListFetchComplete.emit(url_list, int(self.issue_id))
|
||||
return
|
||||
|
||||
self.nam = QNetworkAccessManager()
|
||||
self.nam.finished.connect(self.asyncFetchAlternateCoverURLsComplete)
|
||||
self.nam.get(QNetworkRequest(QUrl(str(issue_page_url))))
|
||||
|
||||
def asyncFetchAlternateCoverURLsComplete(self, reply):
|
||||
# read in the response
|
||||
html = str(reply.readAll())
|
||||
alt_cover_url_list = self.parseOutAltCoverUrls(html)
|
||||
|
||||
# cache this alt cover URL list
|
||||
self.cacheAlternateCoverURLs(self.issue_id, alt_cover_url_list)
|
||||
|
||||
self.altUrlListFetchComplete.emit(
|
||||
alt_cover_url_list, int(self.issue_id))
|
||||
|
||||
def repairUrls(self, issue_list):
|
||||
# make sure there are URLs for the image fields
|
||||
for issue in issue_list:
|
||||
if issue['image'] is None:
|
||||
issue['image'] = dict()
|
||||
issue['image']['super_url'] = ComicVineTalker.logo_url
|
||||
issue['image']['thumb_url'] = ComicVineTalker.logo_url
|
||||
@@ -1,214 +1,213 @@
|
||||
"""A PyQt5 widget to display cover images
|
||||
|
||||
Display cover images from either a local archive, or from Comic Vine.
|
||||
Display cover images from either a local archive, or from comic source metadata.
|
||||
TODO: This should be re-factored using subclasses!
|
||||
"""
|
||||
|
||||
# Copyright 2012-2014 Anthony Beville
|
||||
|
||||
#
|
||||
# Copyright 2012-2014 ComicTagger Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
#import os
|
||||
import logging
|
||||
import pathlib
|
||||
|
||||
from PyQt5.QtCore import *
|
||||
from PyQt5.QtWidgets import *
|
||||
from PyQt5.QtGui import *
|
||||
from PyQt5 import uic
|
||||
from PyQt5 import QtCore, QtGui, QtWidgets, uic
|
||||
|
||||
from .settings import ComicTaggerSettings
|
||||
from .comicvinetalker import ComicVineTalker, ComicVineTalkerException
|
||||
from .imagefetcher import ImageFetcher
|
||||
from .pageloader import PageLoader
|
||||
from .imagepopup import ImagePopup
|
||||
from comictaggerlib.ui.qtutils import reduceWidgetFontSize, getQImageFromData
|
||||
#from genericmetadata import GenericMetadata, PageType
|
||||
#from comicarchive import MetaDataStyle
|
||||
#import utils
|
||||
from comicapi.comicarchive import ComicArchive
|
||||
from comictaggerlib.graphics import graphics_path
|
||||
from comictaggerlib.imagefetcher import ImageFetcher
|
||||
from comictaggerlib.imagepopup import ImagePopup
|
||||
from comictaggerlib.pageloader import PageLoader
|
||||
from comictaggerlib.ui import ui_path
|
||||
from comictaggerlib.ui.qtutils import get_qimage_from_data, reduce_widget_font_size
|
||||
from comictalker.comictalker import ComicTalker
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def clickable(widget):
|
||||
"""# Allow a label to be clickable"""
|
||||
def clickable(widget: QtWidgets.QWidget) -> QtCore.pyqtBoundSignal:
|
||||
"""Allow a label to be clickable"""
|
||||
|
||||
class Filter(QObject):
|
||||
|
||||
dblclicked = pyqtSignal()
|
||||
|
||||
def eventFilter(self, obj, event):
|
||||
class Filter(QtCore.QObject):
|
||||
dblclicked = QtCore.pyqtSignal()
|
||||
|
||||
def eventFilter(self, obj: QtCore.QObject, event: QtCore.QEvent) -> bool:
|
||||
if obj == widget:
|
||||
if event.type() == QEvent.MouseButtonDblClick:
|
||||
if event.type() == QtCore.QEvent.Type.MouseButtonDblClick:
|
||||
self.dblclicked.emit()
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
filter = Filter(widget)
|
||||
widget.installEventFilter(filter)
|
||||
return filter.dblclicked
|
||||
flt = Filter(widget)
|
||||
widget.installEventFilter(flt)
|
||||
return flt.dblclicked
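Typical use of the clickable() helper defined above, assuming a running PyQt5 application with that helper in scope (the label and slot here are illustrative):

import sys
from PyQt5 import QtWidgets

app = QtWidgets.QApplication(sys.argv)
label = QtWidgets.QLabel("double-click me")
# clickable() (from above) hands back a signal that fires on double-click
clickable(label).connect(lambda: print("label double-clicked"))
label.show()
sys.exit(app.exec_())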
|
||||
|
||||
|
||||
class CoverImageWidget(QWidget):
|
||||
|
||||
class CoverImageWidget(QtWidgets.QWidget):
|
||||
ArchiveMode = 0
|
||||
AltCoverMode = 1
|
||||
URLMode = 1
|
||||
DataMode = 3
|
||||
|
||||
def __init__(self, parent, mode, expand_on_click=True):
|
||||
super(CoverImageWidget, self).__init__(parent)
|
||||
image_fetch_complete = QtCore.pyqtSignal(str, QtCore.QByteArray)
|
||||
|
||||
uic.loadUi(ComicTaggerSettings.getUIFile('coverimagewidget.ui'), self)
|
||||
def __init__(
|
||||
self,
|
||||
parent: QtWidgets.QWidget,
|
||||
mode: int,
|
||||
cache_folder: pathlib.Path | None,
|
||||
talker: ComicTalker | None,
|
||||
expand_on_click: bool = True,
|
||||
) -> None:
|
||||
super().__init__(parent)
|
||||
|
||||
reduceWidgetFontSize(self.label)
|
||||
if mode not in (self.AltCoverMode, self.URLMode) or cache_folder is None:
|
||||
self.cover_fetcher = None
|
||||
self.talker = None
|
||||
else:
|
||||
self.cover_fetcher = ImageFetcher(cache_folder)
|
||||
self.talker = None
|
||||
with (ui_path / "coverimagewidget.ui").open(encoding="utf-8") as uifile:
|
||||
uic.loadUi(uifile, self)
|
||||
|
||||
self.mode = mode
|
||||
self.comicVine = ComicVineTalker()
|
||||
self.page_loader = None
|
||||
reduce_widget_font_size(self.label)
|
||||
|
||||
self.cache_folder = cache_folder
|
||||
self.mode: int = mode
|
||||
self.page_loader: PageLoader | None = None
|
||||
self.showControls = True
|
||||
|
||||
self.btnLeft.setIcon(QIcon(ComicTaggerSettings.getGraphic('left.png')))
|
||||
self.btnRight.setIcon(
|
||||
QIcon(ComicTaggerSettings.getGraphic('right.png')))
|
||||
self.current_pixmap = QtGui.QPixmap()
|
||||
|
||||
self.btnLeft.clicked.connect(self.decrementImage)
|
||||
self.btnRight.clicked.connect(self.incrementImage)
|
||||
self.resetWidget()
|
||||
self.comic_archive: ComicArchive | None = None
|
||||
self.issue_id: str = ""
|
||||
self.issue_url: str | None = None
|
||||
self.url_list: list[str] = []
|
||||
if self.page_loader is not None:
|
||||
self.page_loader.abandoned = True
|
||||
self.page_loader = None
|
||||
self.imageIndex = -1
|
||||
self.imageCount = 1
|
||||
self.imageData = b""
|
||||
|
||||
self.btnLeft.setIcon(QtGui.QIcon(str(graphics_path / "left.png")))
|
||||
self.btnRight.setIcon(QtGui.QIcon(str(graphics_path / "right.png")))
|
||||
|
||||
self.btnLeft.clicked.connect(self.decrement_image)
|
||||
self.btnRight.clicked.connect(self.increment_image)
|
||||
self.image_fetch_complete.connect(self.cover_remote_fetch_complete)
|
||||
if expand_on_click:
|
||||
clickable(self.lblImage).connect(self.showPopup)
|
||||
clickable(self.lblImage).connect(self.show_popup)
|
||||
else:
|
||||
self.lblImage.setToolTip("")
|
||||
|
||||
self.updateContent()
|
||||
self.update_content()
|
||||
|
||||
def resetWidget(self):
|
||||
def reset_widget(self) -> None:
|
||||
self.comic_archive = None
|
||||
self.issue_id = None
|
||||
self.comicVine = None
|
||||
self.cover_fetcher = None
|
||||
self.issue_id = ""
|
||||
self.issue_url = None
|
||||
self.url_list = []
|
||||
if self.page_loader is not None:
|
||||
self.page_loader.abandoned = True
|
||||
self.page_loader = None
|
||||
self.imageIndex = -1
|
||||
self.imageCount = 1
|
||||
self.imageData = None
|
||||
self.imageData = b""
|
||||
|
||||
def clear(self):
|
||||
self.resetWidget()
|
||||
self.updateContent()
|
||||
def clear(self) -> None:
|
||||
self.reset_widget()
|
||||
self.update_content()
|
||||
|
||||
def incrementImage(self):
|
||||
def increment_image(self) -> None:
|
||||
self.imageIndex += 1
|
||||
if self.imageIndex == self.imageCount:
|
||||
self.imageIndex = 0
|
||||
self.updateContent()
|
||||
self.update_content()
|
||||
|
||||
def decrementImage(self):
|
||||
def decrement_image(self) -> None:
|
||||
self.imageIndex -= 1
|
||||
if self.imageIndex == -1:
|
||||
self.imageIndex = self.imageCount - 1
|
||||
self.updateContent()
|
||||
self.update_content()
|
||||
|
||||
def setArchive(self, ca, page=0):
|
||||
def set_archive(self, ca: ComicArchive, page: int = 0) -> None:
|
||||
if self.mode == CoverImageWidget.ArchiveMode:
|
||||
self.resetWidget()
|
||||
self.reset_widget()
|
||||
self.comic_archive = ca
|
||||
self.imageIndex = page
|
||||
self.imageCount = ca.getNumberOfPages()
|
||||
self.updateContent()
|
||||
self.imageCount = ca.get_number_of_pages()
|
||||
self.update_content()
|
||||
|
||||
def setURL(self, url):
|
||||
def set_url(self, url: str) -> None:
|
||||
if self.mode == CoverImageWidget.URLMode:
|
||||
self.resetWidget()
|
||||
self.updateContent()
|
||||
self.reset_widget()
|
||||
self.update_content()
|
||||
|
||||
self.url_list = [url]
|
||||
self.imageIndex = 0
|
||||
self.imageCount = 1
|
||||
self.updateContent()
|
||||
self.update_content()
|
||||
|
||||
def setIssueID(self, issue_id):
|
||||
def set_issue_details(self, issue_id: str, url_list: list[str]) -> None:
|
||||
if self.mode == CoverImageWidget.AltCoverMode:
|
||||
self.resetWidget()
|
||||
self.updateContent()
|
||||
|
||||
self.reset_widget()
|
||||
self.update_content()
|
||||
self.issue_id = issue_id
|
||||
|
||||
self.comicVine = ComicVineTalker()
|
||||
self.comicVine.urlFetchComplete.connect(
|
||||
self.primaryUrlFetchComplete)
|
||||
self.comicVine.asyncFetchIssueCoverURLs(int(self.issue_id))
|
||||
self.set_url_list(url_list)
|
||||
|
||||
def setImageData(self, image_data):
|
||||
def set_image_data(self, image_data: bytes) -> None:
|
||||
if self.mode == CoverImageWidget.DataMode:
|
||||
self.resetWidget()
|
||||
self.reset_widget()
|
||||
|
||||
if image_data is None:
|
||||
self.imageIndex = -1
|
||||
else:
|
||||
if image_data:
|
||||
self.imageIndex = 0
|
||||
self.imageData = image_data
|
||||
else:
|
||||
self.imageIndex = -1
|
||||
|
||||
self.updateContent()
|
||||
self.update_content()
|
||||
|
||||
def primaryUrlFetchComplete(self, primary_url, thumb_url, issue_id):
|
||||
self.url_list.append(str(primary_url))
|
||||
def set_url_list(self, url_list: list[str]) -> None:
|
||||
self.url_list = url_list
|
||||
self.imageIndex = 0
|
||||
self.imageCount = len(self.url_list)
|
||||
self.updateContent()
|
||||
self.update_content()
|
||||
self.update_controls()
|
||||
|
||||
# defer the alt cover search
|
||||
QTimer.singleShot(1, self.startAltCoverSearch)
|
||||
|
||||
def startAltCoverSearch(self):
|
||||
|
||||
# now we need to get the list of alt cover URLs
|
||||
self.label.setText("Searching for alt. covers...")
|
||||
|
||||
# page URL should already be cached, so no need to defer
|
||||
self.comicVine = ComicVineTalker()
|
||||
issue_page_url = self.comicVine.fetchIssuePageURL(self.issue_id)
|
||||
self.comicVine.altUrlListFetchComplete.connect(
|
||||
self.altCoverUrlListFetchComplete)
|
||||
self.comicVine.asyncFetchAlternateCoverURLs(
|
||||
int(self.issue_id), issue_page_url)
|
||||
|
||||
def altCoverUrlListFetchComplete(self, url_list, issue_id):
|
||||
if len(url_list) > 0:
|
||||
self.url_list.extend(url_list)
|
||||
self.imageCount = len(self.url_list)
|
||||
self.updateControls()
|
||||
|
||||
def setPage(self, pagenum):
|
||||
def set_page(self, pagenum: int) -> None:
|
||||
if self.mode == CoverImageWidget.ArchiveMode:
|
||||
self.imageIndex = pagenum
|
||||
self.updateContent()
|
||||
self.update_content()
|
||||
|
||||
def updateContent(self):
|
||||
self.updateImage()
|
||||
self.updateControls()
|
||||
def update_content(self) -> None:
|
||||
self.update_image()
|
||||
self.update_controls()
|
||||
|
||||
def updateImage(self):
|
||||
def update_image(self) -> None:
|
||||
if self.imageIndex == -1:
|
||||
self.loadDefault()
|
||||
self.load_default()
|
||||
elif self.mode in [CoverImageWidget.AltCoverMode, CoverImageWidget.URLMode]:
|
||||
self.loadURL()
|
||||
self.load_url()
|
||||
elif self.mode == CoverImageWidget.DataMode:
|
||||
self.coverRemoteFetchComplete(self.imageData, 0)
|
||||
self.cover_remote_fetch_complete("", self.imageData)
|
||||
else:
|
||||
self.loadPage()
|
||||
self.load_page()
|
||||
|
||||
def updateControls(self):
|
||||
def update_controls(self) -> None:
|
||||
if not self.showControls or self.mode == CoverImageWidget.DataMode:
|
||||
self.btnLeft.hide()
|
||||
self.btnRight.hide()
|
||||
@@ -229,71 +228,51 @@ class CoverImageWidget(QWidget):
|
||||
if self.imageIndex == -1 or self.imageCount == 1:
|
||||
self.label.setText("")
|
||||
elif self.mode == CoverImageWidget.AltCoverMode:
|
||||
self.label.setText(
|
||||
"Cover {0} (of {1})".format(
|
||||
self.imageIndex + 1,
|
||||
self.imageCount))
|
||||
self.label.setText(f"Cover {self.imageIndex + 1} (of {self.imageCount})")
|
||||
else:
|
||||
self.label.setText(
|
||||
"Page {0} (of {1})".format(
|
||||
self.imageIndex + 1,
|
||||
self.imageCount))
|
||||
self.label.setText(f"Page {self.imageIndex + 1} (of {self.imageCount})")
|
||||
|
||||
def loadURL(self):
|
||||
self.loadDefault()
|
||||
self.cover_fetcher = ImageFetcher()
|
||||
self.cover_fetcher.fetchComplete.connect(self.coverRemoteFetchComplete)
|
||||
self.cover_fetcher.fetch(self.url_list[self.imageIndex])
|
||||
#print("ATB cover fetch started...")
|
||||
def load_url(self) -> None:
|
||||
assert isinstance(self.cache_folder, pathlib.Path)
|
||||
self.load_default()
|
||||
self.cover_fetcher = ImageFetcher(self.cache_folder)
|
||||
ImageFetcher.image_fetch_complete = self.image_fetch_complete.emit
|
||||
if data := self.cover_fetcher.fetch(self.url_list[self.imageIndex]):
|
||||
self.cover_remote_fetch_complete(self.url_list[self.imageIndex], data)
|
||||
|
||||
# called when the image is done loading from internet
|
||||
def coverRemoteFetchComplete(self, image_data, issue_id):
|
||||
img = getQImageFromData(image_data)
|
||||
self.current_pixmap = QPixmap(img)
|
||||
self.setDisplayPixmap(0, 0)
|
||||
#print("ATB cover fetch complete!")
|
||||
def cover_remote_fetch_complete(self, url: str, image_data: bytes) -> None:
|
||||
if url and url not in self.url_list:
|
||||
return
|
||||
img = get_qimage_from_data(image_data)
|
||||
self.current_pixmap = QtGui.QPixmap.fromImage(img)
|
||||
self.set_display_pixmap()
|
||||
|
||||
def loadPage(self):
|
||||
def load_page(self) -> None:
|
||||
if self.comic_archive is not None:
|
||||
if self.page_loader is not None:
|
||||
self.page_loader.abandoned = True
|
||||
self.page_loader = PageLoader(self.comic_archive, self.imageIndex)
|
||||
self.page_loader.loadComplete.connect(self.pageLoadComplete)
|
||||
self.page_loader.loadComplete.connect(self.page_load_complete)
|
||||
self.page_loader.start()
|
||||
|
||||
def pageLoadComplete(self, img):
|
||||
self.current_pixmap = QPixmap(img)
|
||||
self.setDisplayPixmap(0, 0)
|
||||
def page_load_complete(self, image_data: bytes) -> None:
|
||||
img = get_qimage_from_data(image_data)
|
||||
self.current_pixmap = QtGui.QPixmap.fromImage(img)
|
||||
self.set_display_pixmap()
|
||||
self.page_loader = None
|
||||
|
||||
def loadDefault(self):
|
||||
self.current_pixmap = QPixmap(
|
||||
ComicTaggerSettings.getGraphic('nocover.png'))
|
||||
#print("loadDefault called")
|
||||
self.setDisplayPixmap(0, 0)
|
||||
def load_default(self) -> None:
|
||||
self.current_pixmap = QtGui.QPixmap(str(graphics_path / "nocover.png"))
|
||||
self.set_display_pixmap()
|
||||
|
||||
def resizeEvent(self, resize_event):
|
||||
def resizeEvent(self, resize_event: QtGui.QResizeEvent) -> None:
|
||||
if self.current_pixmap is not None:
|
||||
delta_w = resize_event.size().width() - \
|
||||
resize_event.oldSize().width()
|
||||
delta_h = resize_event.size().height() - \
|
||||
resize_event.oldSize().height()
|
||||
# print "ATB resizeEvent deltas", resize_event.size().width(),
|
||||
# resize_event.size().height()
|
||||
self.setDisplayPixmap(delta_w, delta_h)
|
||||
self.set_display_pixmap()
|
||||
|
||||
def setDisplayPixmap(self, delta_w, delta_h):
|
||||
def set_display_pixmap(self) -> None:
|
||||
"""The deltas let us know what the new width and height of the label will be"""
|
||||
|
||||
#new_h = self.frame.height() + delta_h
|
||||
#new_w = self.frame.width() + delta_w
|
||||
# print "ATB setDisplayPixmap deltas", delta_w , delta_h
|
||||
# print "ATB self.frame", self.frame.width(), self.frame.height()
|
||||
# print "ATB self.", self.width(), self.height()
|
||||
|
||||
#frame_w = new_w
|
||||
#frame_h = new_h
|
||||
|
||||
new_h = self.frame.height()
|
||||
new_w = self.frame.width()
|
||||
frame_w = self.frame.width()
|
||||
@@ -302,25 +281,20 @@ class CoverImageWidget(QWidget):
|
||||
new_h -= 4
|
||||
new_w -= 4
|
||||
|
||||
if new_h < 0:
|
||||
new_h = 0
|
||||
if new_w < 0:
|
||||
new_w = 0
|
||||
|
||||
# print "ATB setDisplayPixmap deltas", delta_w , delta_h
|
||||
# print "ATB self.frame", frame_w, frame_h
|
||||
# print "ATB new size", new_w, new_h
|
||||
new_h = max(new_h, 0)
|
||||
new_w = max(new_w, 0)
|
||||
|
||||
# scale the pixmap to fit in the frame
|
||||
scaled_pixmap = self.current_pixmap.scaled(
|
||||
new_w, new_h, Qt.KeepAspectRatio)
|
||||
new_w, new_h, QtCore.Qt.AspectRatioMode.KeepAspectRatio, QtCore.Qt.SmoothTransformation
|
||||
)
|
||||
self.lblImage.setPixmap(scaled_pixmap)
|
||||
|
||||
# move and resize the label to be centered in the frame
|
||||
img_w = scaled_pixmap.width()
|
||||
img_h = scaled_pixmap.height()
|
||||
self.lblImage.resize(img_w, img_h)
|
||||
self.lblImage.move((frame_w - img_w) / 2, (frame_h - img_h) / 2)
|
||||
self.lblImage.move(int((frame_w - img_w) / 2), int((frame_h - img_h) / 2))
|
||||
|
||||
def showPopup(self):
|
||||
self.popup = ImagePopup(self, self.current_pixmap)
|
||||
def show_popup(self) -> None:
|
||||
ImagePopup(self, self.current_pixmap)
|
||||
|
||||
@@ -1,36 +1,40 @@
|
||||
"""A PyQT4 dialog to edit credits"""
|
||||
|
||||
# Copyright 2012-2014 Anthony Beville
|
||||
|
||||
#
|
||||
# Copyright 2012-2014 ComicTagger Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
#import os
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from PyQt5 import QtCore, QtGui, QtWidgets, uic
|
||||
from PyQt5 import QtWidgets, uic
|
||||
|
||||
from .settings import ComicTaggerSettings
|
||||
from comictaggerlib.ui import ui_path
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class CreditEditorWindow(QtWidgets.QDialog):
|
||||
|
||||
ModeEdit = 0
|
||||
ModeNew = 1
|
||||
|
||||
def __init__(self, parent, mode, role, name, primary):
|
||||
super(CreditEditorWindow, self).__init__(parent)
|
||||
def __init__(self, parent: QtWidgets.QWidget, mode: int, role: str, name: str, primary: bool) -> None:
|
||||
super().__init__(parent)
|
||||
|
||||
uic.loadUi(
|
||||
ComicTaggerSettings.getUIFile('crediteditorwindow.ui'), self)
|
||||
with (ui_path / "crediteditorwindow.ui").open(encoding="utf-8") as uifile:
|
||||
uic.loadUi(uifile, self)
|
||||
|
||||
self.mode = mode
|
||||
|
||||
@@ -62,35 +66,33 @@ class CreditEditorWindow(QtWidgets.QDialog):
|
||||
else:
|
||||
self.cbRole.setCurrentIndex(i)
|
||||
|
||||
if primary:
|
||||
self.cbPrimary.setCheckState(QtCore.Qt.Checked)
|
||||
self.cbPrimary.setChecked(primary)
|
||||
|
||||
self.cbRole.currentIndexChanged.connect(self.roleChanged)
|
||||
self.cbRole.editTextChanged.connect(self.roleChanged)
|
||||
self.cbRole.currentIndexChanged.connect(self.role_changed)
|
||||
self.cbRole.editTextChanged.connect(self.role_changed)
|
||||
|
||||
self.updatePrimaryButton()
|
||||
self.update_primary_button()
|
||||
|
||||
def updatePrimaryButton(self):
|
||||
enabled = self.currentRoleCanBePrimary()
|
||||
def update_primary_button(self) -> None:
|
||||
enabled = self.current_role_can_be_primary()
|
||||
self.cbPrimary.setEnabled(enabled)
|
||||
|
||||
def currentRoleCanBePrimary(self):
|
||||
def current_role_can_be_primary(self) -> bool:
|
||||
role = self.cbRole.currentText()
|
||||
if str(role).lower() == "writer" or str(role).lower() == "artist":
|
||||
if role.casefold() in ("artist", "writer"):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def roleChanged(self, s):
|
||||
self.updatePrimaryButton()
|
||||
return False
|
||||
|
||||
def getCredits(self):
|
||||
primary = self.currentRoleCanBePrimary() and self.cbPrimary.isChecked()
|
||||
def role_changed(self, s: Any) -> None:
|
||||
self.update_primary_button()
|
||||
|
||||
def get_credits(self) -> tuple[str, str, bool]:
|
||||
primary = self.current_role_can_be_primary() and self.cbPrimary.isChecked()
|
||||
return self.cbRole.currentText(), self.leName.text(), primary
|
||||
|
||||
def accept(self):
|
||||
def accept(self) -> None:
|
||||
if self.cbRole.currentText() == "" or self.leName.text() == "":
|
||||
QtWidgets.QMessageBox.warning(self, self.tr("Whoops"), self.tr(
|
||||
"You need to enter both role and name for a credit."))
|
||||
QtWidgets.QMessageBox.warning(self, "Whoops", "You need to enter both role and name for a credit.")
|
||||
else:
|
||||
QtWidgets.QDialog.accept(self)
|
||||
|
||||
112
comictaggerlib/ctsettings/__init__.py
Normal file
@@ -0,0 +1,112 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import pathlib
|
||||
from typing import Any
|
||||
|
||||
import settngs
|
||||
|
||||
from comictaggerlib.ctsettings.commandline import (
|
||||
initial_commandline_parser,
|
||||
register_commandline_settings,
|
||||
validate_commandline_settings,
|
||||
)
|
||||
from comictaggerlib.ctsettings.file import register_file_settings, validate_file_settings
|
||||
from comictaggerlib.ctsettings.plugin import group_for_plugin, register_plugin_settings, validate_plugin_settings
|
||||
from comictaggerlib.ctsettings.settngs_namespace import settngs_namespace as ct_ns
|
||||
from comictaggerlib.ctsettings.types import ComicTaggerPaths
|
||||
from comictalker import ComicTalker
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
talkers: dict[str, ComicTalker] = {}
|
||||
|
||||
__all__ = [
|
||||
"initial_commandline_parser",
|
||||
"register_commandline_settings",
|
||||
"register_file_settings",
|
||||
"register_plugin_settings",
|
||||
"validate_commandline_settings",
|
||||
"validate_file_settings",
|
||||
"validate_plugin_settings",
|
||||
"ComicTaggerPaths",
|
||||
"ct_ns",
|
||||
"group_for_plugin",
|
||||
]
|
||||
|
||||
|
||||
class SettingsEncoder(json.JSONEncoder):
|
||||
def default(self, obj: Any) -> Any:
|
||||
if isinstance(obj, pathlib.Path):
|
||||
return str(obj)
|
||||
|
||||
# Let the base class default method raise the TypeError
|
||||
return json.JSONEncoder.default(self, obj)
|
||||
|
||||
|
||||
def validate_types(config: settngs.Config[settngs.Values]) -> settngs.Config[settngs.Values]:
|
||||
# Go through each setting
|
||||
for group in config.definitions.values():
|
||||
for setting in group.v.values():
|
||||
# Get the value and if it is the default
|
||||
value, default = settngs.get_option(config.values, setting)
|
||||
if not default:
|
||||
if setting.type is not None:
|
||||
# If it is not the default and the type attribute is not None
|
||||
# use it to convert the loaded string into the expected value
|
||||
if isinstance(value, str):
|
||||
config.values[setting.group][setting.dest] = setting.type(value)
|
||||
return config
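validate_types boils down to: if a loaded value is still a string and the setting declares a type, call that type on it. The same idea in miniature, with a toy setting rather than a settngs object:

# toy stand-in for a settngs Setting: declared type int, value loaded from JSON as a string
declared_type = int
loaded_value = "42"
if isinstance(loaded_value, str) and declared_type is not None:
    loaded_value = declared_type(loaded_value)
print(loaded_value, type(loaded_value))  # 42 <class 'int'>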
|
||||
|
||||
|
||||
def parse_config(
|
||||
manager: settngs.Manager,
|
||||
config_path: pathlib.Path,
|
||||
args: list[str] | None = None,
|
||||
) -> tuple[settngs.Config[settngs.Values], bool]:
|
||||
"""
|
||||
Function to parse options from a json file and passes the resulting Config object to parse_cmdline.
|
||||
|
||||
Args:
|
||||
manager: settngs Manager object
|
||||
config_path: A `pathlib.Path` object
|
||||
args: Passed to argparse.ArgumentParser.parse_args
|
||||
"""
|
||||
file_options, success = settngs.parse_file(manager.definitions, config_path)
|
||||
file_options = validate_types(file_options)
|
||||
cmdline_options = settngs.parse_cmdline(
|
||||
manager.definitions,
|
||||
manager.description,
|
||||
manager.epilog,
|
||||
args,
|
||||
file_options,
|
||||
)
|
||||
|
||||
final_options = settngs.normalize_config(cmdline_options, file=True, cmdline=True)
|
||||
return final_options, success
|
||||
|
||||
|
||||
def save_file(
|
||||
config: settngs.Config[settngs.T],
|
||||
filename: pathlib.Path,
|
||||
) -> bool:
|
||||
"""
|
||||
Helper function to save options from a json dictionary to a file
|
||||
|
||||
Args:
|
||||
config: The options to save to a json dictionary
|
||||
filename: A pathlib.Path object to save the json dictionary to
|
||||
"""
|
||||
file_options = settngs.clean_config(config, file=True)
|
||||
try:
|
||||
if not filename.exists():
|
||||
filename.parent.mkdir(exist_ok=True, parents=True)
|
||||
filename.touch()
|
||||
|
||||
json_str = json.dumps(file_options, cls=SettingsEncoder, indent=2)
|
||||
filename.write_text(json_str + "\n", encoding="utf-8")
|
||||
except Exception:
|
||||
logger.exception("Failed to save config file: %s", filename)
|
||||
return False
|
||||
return True
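SettingsEncoder above exists so that pathlib.Path values survive json.dumps; a quick check, assuming SettingsEncoder from this module is in scope (the options dict is made up):

import json
import pathlib

options = {"Runtime Options": {"config_dir": pathlib.Path("/tmp/ComicTagger")}}
print(json.dumps(options, cls=SettingsEncoder, indent=2))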
|
||||
329
comictaggerlib/ctsettings/commandline.py
Normal file
@@ -0,0 +1,329 @@
|
||||
"""CLI settings for ComicTagger"""
|
||||
|
||||
#
|
||||
# Copyright 2012-2014 ComicTagger Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import logging
|
||||
import os
|
||||
import platform
|
||||
import shlex
|
||||
import subprocess
|
||||
|
||||
import settngs
|
||||
|
||||
from comicapi import utils
|
||||
from comicapi.comicarchive import metadata_styles
|
||||
from comicapi.genericmetadata import GenericMetadata
|
||||
from comictaggerlib import ctversion
|
||||
from comictaggerlib.ctsettings.settngs_namespace import settngs_namespace as ct_ns
|
||||
from comictaggerlib.ctsettings.types import (
|
||||
ComicTaggerPaths,
|
||||
metadata_type,
|
||||
metadata_type_single,
|
||||
parse_metadata_from_string,
|
||||
)
|
||||
from comictaggerlib.resulttypes import Action
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def initial_commandline_parser() -> argparse.ArgumentParser:
|
||||
parser = argparse.ArgumentParser(add_help=False)
|
||||
# Ensure this stays up to date with register_runtime
|
||||
parser.add_argument(
|
||||
"--config",
|
||||
help="Config directory defaults to ~/.ComicTagger\non Linux/Mac and %%APPDATA%% on Windows\n",
|
||||
type=ComicTaggerPaths,
|
||||
default=ComicTaggerPaths(),
|
||||
)
|
||||
parser.add_argument("-v", "--verbose", action="count", default=0, help="Be noisy when doing what it does.")
|
||||
return parser
|
||||
|
||||
|
||||
def register_runtime(parser: settngs.Manager) -> None:
|
||||
parser.add_setting(
|
||||
"--config",
|
||||
help="Config directory defaults to ~/.Config/ComicTagger\non Linux, ~/Library/Application Support/ComicTagger on Mac and %%APPDATA%%\\ComicTagger on Windows\n",
|
||||
type=ComicTaggerPaths,
|
||||
default=ComicTaggerPaths(),
|
||||
file=False,
|
||||
)
|
||||
parser.add_setting(
|
||||
"-v",
|
||||
"--verbose",
|
||||
action="count",
|
||||
default=0,
|
||||
help="Be noisy when doing what it does.",
|
||||
file=False,
|
||||
)
|
||||
parser.add_setting(
|
||||
"--abort-on-conflict",
|
||||
action="store_true",
|
||||
help="""Don't export to zip if intended new filename\nexists (otherwise, creates a new unique filename).\n\n""",
|
||||
file=False,
|
||||
)
|
||||
parser.add_setting(
|
||||
"--delete-original",
|
||||
action="store_true",
|
||||
help="""Delete original archive after successful\nexport to Zip. (only relevant for -e)""",
|
||||
file=False,
|
||||
)
|
||||
parser.add_setting(
|
||||
"-f",
|
||||
"--parse-filename",
|
||||
"--parsefilename",
|
||||
action="store_true",
|
||||
help="""Parse the filename to get some info,\nspecifically series name, issue number,\nvolume, and publication year.\n\n""",
|
||||
file=False,
|
||||
)
|
||||
parser.add_setting(
|
||||
"--id",
|
||||
dest="issue_id",
|
||||
type=str,
|
||||
help="""Use the issue ID when searching online.\nOverrides all other metadata.\n\n""",
|
||||
file=False,
|
||||
)
|
||||
parser.add_setting(
|
||||
"-o",
|
||||
"--online",
|
||||
action="store_true",
|
||||
help="""Search online and attempt to identify file\nusing existing metadata and images in archive.\nMay be used in conjunction with -f and -m.\n\n""",
|
||||
file=False,
|
||||
)
|
||||
parser.add_setting(
|
||||
"-m",
|
||||
"--metadata",
|
||||
default=GenericMetadata(),
|
||||
type=parse_metadata_from_string,
|
||||
help="""Explicitly define, as a list, some tags to be used. e.g.:\n"series=Plastic Man, publisher=Quality Comics"\n"series=Kickers^, Inc., issue=1, year=1986"\nName-Value pairs are comma separated. Use a\n"^" to escape an "=" or a ",", as shown in\nthe example above. Some names that can be\nused: series, issue, issue_count, year,\npublisher, title\n\n""",
|
||||
file=False,
|
||||
)
|
||||
parser.add_setting(
|
||||
"-i",
|
||||
"--interactive",
|
||||
action="store_true",
|
||||
help="""Interactively query the user when there are\nmultiple matches for an online search. Disabled json output\n\n""",
|
||||
file=False,
|
||||
)
|
||||
parser.add_setting(
|
||||
"--abort",
|
||||
dest="abort_on_low_confidence",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
default=True,
|
||||
help="""Abort save operation when online match\nis of low confidence.\n\n""",
|
||||
file=False,
|
||||
)
|
||||
parser.add_setting(
|
||||
"--summary",
|
||||
default=True,
|
||||
action=argparse.BooleanOptionalAction,
|
||||
help="Show the summary after a save operation.\n\n",
|
||||
file=False,
|
||||
)
|
||||
parser.add_setting(
|
||||
"--raw",
|
||||
action="store_true",
|
||||
help="""With -p, will print out the raw tag block(s)\nfrom the file.\n""",
|
||||
file=False,
|
||||
)
|
||||
parser.add_setting(
|
||||
"-R",
|
||||
"--recursive",
|
||||
action="store_true",
|
||||
help="Recursively include files in sub-folders.",
|
||||
file=False,
|
||||
)
|
||||
parser.add_setting(
|
||||
"-n",
|
||||
"--dryrun",
|
||||
action="store_true",
|
||||
help="Don't actually modify file (only relevant for -d, -s, or -r).\n\n",
|
||||
file=False,
|
||||
)
|
||||
parser.add_setting("--darkmode", action="store_true", help="Windows only. Force a dark pallet", file=False)
|
||||
parser.add_setting("-g", "--glob", action="store_true", help="Windows only. Enable globbing", file=False)
|
||||
parser.add_setting("--quiet", "-q", action="store_true", help="Don't say much (for print mode).", file=False)
|
||||
parser.add_setting(
|
||||
"--json", "-j", action="store_true", help="Output json on stdout. Ignored in interactive mode.", file=False
|
||||
)
|
||||
|
||||
parser.add_setting(
|
||||
"-t",
|
||||
"--type",
|
||||
metavar=f"{{{','.join(metadata_styles).upper()}}}",
|
||||
default=[],
|
||||
type=metadata_type,
|
||||
help="""Specify TYPE as either CR, CBL or COMET\n(as either ComicRack, ComicBookLover,\nor CoMet style tags, respectively).\nUse commas for multiple types.\nFor searching the metadata will use the first listed:\neg '-t cbl,cr' with no CBL tags, CR will be used if they exist\n\n""",
|
||||
file=False,
|
||||
)
|
||||
parser.add_setting(
|
||||
"--overwrite",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
default=True,
|
||||
help="""Apply metadata to already tagged archives, otherwise skips archives with existing metadata (relevant for -s or -c).""",
|
||||
file=False,
|
||||
)
|
||||
parser.add_setting("--no-gui", action="store_true", help="Do not open the GUI, force the commandline", file=False)
|
||||
parser.add_setting("files", nargs="*", file=False)
|
||||
|
||||
|
||||
def register_commands(parser: settngs.Manager) -> None:
|
||||
parser.add_setting("--version", action="store_true", help="Display version.", file=False)
|
||||
|
||||
parser.add_setting(
|
||||
"-p",
|
||||
"--print",
|
||||
dest="command",
|
||||
action="store_const",
|
||||
const=Action.print,
|
||||
help="""Print out tag info from file. Specify type\n(via -t) to get only info of that tag type.\n\n""",
|
||||
file=False,
|
||||
)
|
||||
parser.add_setting(
|
||||
"-d",
|
||||
"--delete",
|
||||
dest="command",
|
||||
action="store_const",
|
||||
const=Action.delete,
|
||||
help="Deletes the tag block of specified type (via -t).\n",
|
||||
file=False,
|
||||
)
|
||||
parser.add_setting(
|
||||
"-c",
|
||||
"--copy",
|
||||
type=metadata_type_single,
|
||||
metavar=f"{{{','.join(metadata_styles).upper()}}}",
|
||||
help="Copy the specified source tag block to\ndestination style specified via -t\n(potentially lossy operation).\n\n",
|
||||
file=False,
|
||||
)
|
||||
parser.add_setting(
|
||||
"-s",
|
||||
"--save",
|
||||
dest="command",
|
||||
action="store_const",
|
||||
const=Action.save,
|
||||
help="Save out tags as specified type (via -t).\nMust specify also at least -o, -f, or -m.\n\n",
|
||||
file=False,
|
||||
)
|
||||
parser.add_setting(
|
||||
"-r",
|
||||
"--rename",
|
||||
dest="command",
|
||||
action="store_const",
|
||||
const=Action.rename,
|
||||
help="Rename the file based on specified tag style.",
|
||||
file=False,
|
||||
)
|
||||
parser.add_setting(
|
||||
"-e",
|
||||
"--export-to-zip",
|
||||
dest="command",
|
||||
action="store_const",
|
||||
const=Action.export,
|
||||
help="Export RAR archive to Zip format.",
|
||||
file=False,
|
||||
)
|
||||
parser.add_setting(
|
||||
"--only-save-config",
|
||||
dest="command",
|
||||
action="store_const",
|
||||
const=Action.save_config,
|
||||
help="Only save the configuration (eg, Comic Vine API key) and quit.",
|
||||
file=False,
|
||||
)
|
||||
parser.add_setting(
|
||||
"--list-plugins",
|
||||
dest="command",
|
||||
action="store_const",
|
||||
const=Action.list_plugins,
|
||||
help="List the available plugins.\n\n",
|
||||
file=False,
|
||||
)
|
||||
|
||||
|
||||
def register_commandline_settings(parser: settngs.Manager) -> None:
|
||||
parser.add_group("Commands", register_commands, True)
|
||||
parser.add_persistent_group("Runtime Options", register_runtime)
|
||||
|
||||
|
||||
def validate_commandline_settings(config: settngs.Config[ct_ns], parser: settngs.Manager) -> settngs.Config[ct_ns]:
|
||||
if config[0].Commands__version:
|
||||
parser.exit(
|
||||
status=1,
|
||||
message=f"ComicTagger {ctversion.version}: Copyright (c) 2012-2022 ComicTagger Team\n"
|
||||
+ "Distributed under Apache License 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n",
|
||||
)
|
||||
|
||||
config[0].Runtime_Options__no_gui = any(
|
||||
(config[0].Commands__command, config[0].Runtime_Options__no_gui, config[0].Commands__copy)
|
||||
)
|
||||
|
||||
if platform.system() == "Windows" and config[0].Runtime_Options__glob:
|
||||
# no globbing on windows shell, so do it for them
|
||||
import glob
|
||||
|
||||
globs = config[0].Runtime_Options__files
|
||||
config[0].Runtime_Options__files = []
|
||||
for item in globs:
|
||||
config[0].Runtime_Options__files.extend(glob.glob(item))
|
||||
|
||||
if config[0].Runtime_Options__json and config[0].Runtime_Options__interactive:
|
||||
config[0].Runtime_Options__json = False
|
||||
|
||||
if (
|
||||
config[0].Commands__command not in (Action.save_config, Action.list_plugins)
|
||||
and config[0].Runtime_Options__no_gui
|
||||
and not config[0].Runtime_Options__files
|
||||
):
|
||||
parser.exit(message="Command requires at least one filename!\n", status=1)
|
||||
|
||||
if config[0].Commands__command == Action.delete and not config[0].Runtime_Options__type:
|
||||
parser.exit(message="Please specify the type to delete with -t\n", status=1)
|
||||
|
||||
if config[0].Commands__command == Action.save and not config[0].Runtime_Options__type:
|
||||
parser.exit(message="Please specify the type to save with -t\n", status=1)
|
||||
|
||||
if config[0].Commands__copy:
|
||||
config[0].Commands__command = Action.copy
|
||||
if not config[0].Runtime_Options__type:
|
||||
parser.exit(message="Please specify the type to copy to with -t\n", status=1)
|
||||
|
||||
if config[0].Runtime_Options__recursive:
|
||||
config[0].Runtime_Options__files = utils.get_recursive_filelist(config[0].Runtime_Options__files)
|
||||
|
||||
# take a crack at finding rar exe if it's not in the path
|
||||
if not utils.which("rar"):
|
||||
if platform.system() == "Windows":
|
||||
letters = ["C"]
|
||||
letters.extend({f"{d}" for d in "ABCDEFGHIJKLMNOPQRSTUVWXYZ" if os.path.exists(f"{d}:\\")} - {"C"})
|
||||
for letter in letters:
|
||||
# look in some likely places for Windows machines
|
||||
utils.add_to_path(rf"{letters}:\Program Files\WinRAR")
|
||||
utils.add_to_path(rf"{letters}:\Program Files (x86)\WinRAR")
|
||||
else:
|
||||
if platform.system() == "Darwin":
|
||||
result = subprocess.run(("/usr/libexec/path_helper", "-s"), capture_output=True)
|
||||
for path in reversed(
|
||||
shlex.split(result.stdout.decode("utf-8", errors="ignore"))[0]
|
||||
.partition("=")[2]
|
||||
.rstrip(";")
|
||||
.split(os.pathsep)
|
||||
):
|
||||
utils.add_to_path(path)
|
||||
utils.add_to_path("/opt/homebrew/bin")
|
||||
|
||||
return config
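The Windows branch near the top of this function expands glob patterns itself because cmd.exe does not. In isolation that step looks like this (the paths are made up):

import glob

patterns = ["C:/comics/*.cbz", "C:/comics/*.cbr"]
files = []
for pattern in patterns:
    files.extend(glob.glob(pattern))
print(files)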
|
||||
295
comictaggerlib/ctsettings/file.py
Normal file
@@ -0,0 +1,295 @@
from __future__ import annotations

import argparse
import uuid

import settngs

from comictaggerlib.ctsettings.settngs_namespace import settngs_namespace as ct_ns
from comictaggerlib.defaults import DEFAULT_REPLACEMENTS, Replacement, Replacements


def general(parser: settngs.Manager) -> None:
    # General Settings
    parser.add_setting("check_for_new_version", default=False, cmdline=False)
    parser.add_setting(
        "--disable-cr",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Disable the ComicRack metadata type",
    )
    parser.add_setting("use_short_metadata_names", default=False, action=argparse.BooleanOptionalAction, cmdline=False)


def internal(parser: settngs.Manager) -> None:
    # automatic settings
    parser.add_setting("install_id", default=uuid.uuid4().hex, cmdline=False)
    parser.add_setting("save_data_style", default=["cbi"], cmdline=False)
    parser.add_setting("load_data_style", default="cbi", cmdline=False)
    parser.add_setting("last_opened_folder", default="", cmdline=False)
    parser.add_setting("window_width", default=0, cmdline=False)
    parser.add_setting("window_height", default=0, cmdline=False)
    parser.add_setting("window_x", default=0, cmdline=False)
    parser.add_setting("window_y", default=0, cmdline=False)
    parser.add_setting("form_width", default=-1, cmdline=False)
    parser.add_setting("list_width", default=-1, cmdline=False)
    parser.add_setting("sort_column", default=-1, cmdline=False)
    parser.add_setting("sort_direction", default=0, cmdline=False)


def identifier(parser: settngs.Manager) -> None:
    # identifier settings
    parser.add_setting("--series-match-identify-thresh", default=91, type=int, help="")
    parser.add_setting(
        "-b",
        "--border-crop-percent",
        default=10,
        type=int,
        help="ComicTagger will automatically add an additional cover that has any black borders cropped. If the difference in height is less than %(default)s%% the cover will not be cropped.",
    )
    parser.add_setting(
        "--publisher-filter",
        default=["Panini Comics", "Abril", "Planeta DeAgostini", "Editorial Televisa", "Dino Comics"],
        action="extend",
        nargs="+",
        help="When enabled, filters the listed publishers from all search results. Ending a publisher with a '-' removes a publisher from this list",
    )
    parser.add_setting("--series-match-search-thresh", default=90, type=int)
    parser.add_setting(
        "--clear-metadata",
        default=False,
        help="Clears all existing metadata during import, default is to merge metadata.\nMay be used in conjunction with -o, -f and -m.\n\n",
        action=argparse.BooleanOptionalAction,
    )
    parser.add_setting(
        "-a",
        "--auto-imprint",
        action=argparse.BooleanOptionalAction,
        default=False,
        help="Enables the auto imprint functionality.\ne.g. if the publisher is set to 'vertigo' it\nwill be updated to 'DC Comics' and the imprint\nproperty will be set to 'Vertigo'.\n\n",
    )

    parser.add_setting(
        "--sort-series-by-year", default=True, action=argparse.BooleanOptionalAction, help="Sorts series by year"
    )
    parser.add_setting(
        "--exact-series-matches-first",
        default=True,
        action=argparse.BooleanOptionalAction,
        help="Puts series that are an exact match at the top of the list",
    )
    parser.add_setting(
        "--always-use-publisher-filter",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Enables the publisher filter",
    )


def dialog(parser: settngs.Manager) -> None:
    # Show/ask dialog flags
    parser.add_setting("show_disclaimer", default=True, cmdline=False)
    parser.add_setting("dont_notify_about_this_version", default="", cmdline=False)
    parser.add_setting("ask_about_usage_stats", default=True, cmdline=False)


def filename(parser: settngs.Manager) -> None:
    # filename parsing settings
    parser.add_setting(
        "--complicated-parser",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Enables the new parser which tries to extract more information from filenames",
    )
    parser.add_setting(
        "--remove-c2c",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Removes c2c from filenames. Requires --complicated-parser",
    )
    parser.add_setting(
        "--remove-fcbd",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Removes FCBD/free comic book day from filenames. Requires --complicated-parser",
    )
    parser.add_setting(
        "--remove-publisher",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Attempts to remove publisher names from filenames, currently limited to Marvel and DC. Requires --complicated-parser",
    )
    parser.add_setting(
        "--split-words",
        action="store_true",
        help="""Splits words before parsing the filename.\ne.g. 'judgedredd' to 'judge dredd'\n\n""",
        file=False,
    )
    parser.add_setting(
        "--protofolius-issue-number-scheme",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Use an issue number scheme devised by protofolius for encoding format information as a letter in front of an issue number. Implies --allow-issue-start-with-letter. Requires --complicated-parser",
    )
    parser.add_setting(
        "--allow-issue-start-with-letter",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Allows an issue number to start with a single letter (e.g. '#X01'). Requires --complicated-parser",
    )


def talker(parser: settngs.Manager) -> None:
    # General settings for talkers
    parser.add_setting(
        "--source",
        default="comicvine",
        help="Use a specified source by source ID (use --list-plugins to list all sources)",
    )
    parser.add_setting(
        "--remove-html-tables",
        default=False,
        action=argparse.BooleanOptionalAction,
        display_name="Remove HTML tables",
        help="Removes html tables instead of converting them to text",
    )


def cbl(parser: settngs.Manager) -> None:
    # CBL Transform settings
    parser.add_setting("--assume-lone-credit-is-primary", default=False, action=argparse.BooleanOptionalAction)
    parser.add_setting("--copy-characters-to-tags", default=False, action=argparse.BooleanOptionalAction)
    parser.add_setting("--copy-teams-to-tags", default=False, action=argparse.BooleanOptionalAction)
    parser.add_setting("--copy-locations-to-tags", default=False, action=argparse.BooleanOptionalAction)
    parser.add_setting("--copy-storyarcs-to-tags", default=False, action=argparse.BooleanOptionalAction)
    parser.add_setting("--copy-notes-to-comments", default=False, action=argparse.BooleanOptionalAction)
    parser.add_setting("--copy-weblink-to-comments", default=False, action=argparse.BooleanOptionalAction)
    parser.add_setting("--apply-transform-on-import", default=False, action=argparse.BooleanOptionalAction)
    parser.add_setting("--apply-transform-on-bulk-operation", default=False, action=argparse.BooleanOptionalAction)


def rename(parser: settngs.Manager) -> None:
    # Rename settings
    parser.add_setting("--template", default="{series} #{issue} ({year})", help="The template to use when renaming")
    parser.add_setting(
        "--issue-number-padding",
        default=3,
        type=int,
        help="The minimum number of digits to use for the issue number when renaming",
    )
    parser.add_setting(
        "--use-smart-string-cleanup",
        default=True,
        action=argparse.BooleanOptionalAction,
        help="Attempts to intelligently cleanup whitespace when renaming",
    )
    parser.add_setting(
        "--auto-extension",
        default=True,
        action=argparse.BooleanOptionalAction,
        help="Automatically sets the extension based on the archive type e.g. cbr for rar, cbz for zip",
    )
    parser.add_setting("--dir", default="", help="The directory to move renamed files to")
    parser.add_setting(
        "--move",
        dest="move_to_dir",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Enables moving renamed files to a separate directory",
    )
    parser.add_setting(
        "--strict",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Ensures that filenames are valid for all OSs",
    )
    parser.add_setting("replacements", default=DEFAULT_REPLACEMENTS, cmdline=False)


def autotag(parser: settngs.Manager) -> None:
    # Auto-tag stickies
    parser.add_setting(
        "--save-on-low-confidence",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Automatically save metadata on low-confidence matches",
    )
    parser.add_setting(
        "--dont-use-year-when-identifying",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="Ignore the year metadata attribute when identifying a comic",
    )
    parser.add_setting(
        "-1",
        "--assume-issue-one",
        action=argparse.BooleanOptionalAction,
        help="Assume issue number is 1 if not found (relevant for -s).\n\n",
        default=False,
    )
    parser.add_setting(
        "--ignore-leading-numbers-in-filename",
        default=False,
        action=argparse.BooleanOptionalAction,
        help="When searching ignore leading numbers in the filename",
    )
    parser.add_setting("remove_archive_after_successful_match", default=False, cmdline=False)


def parse_filter(config: settngs.Config[ct_ns]) -> settngs.Config[ct_ns]:
    new_filter = []
    remove = []
    for x in config[0].Issue_Identifier__publisher_filter:
        x = x.strip()
        if x:  # ignore empty arguments
            if x[-1] == "-":  # this publisher needs to be removed. We remove after all publishers have been enumerated
                remove.append(x.strip("-"))
            else:
                if x not in new_filter:
                    new_filter.append(x)
    for x in remove:  # remove publishers
        if x in new_filter:
            new_filter.remove(x)
    config[0].Issue_Identifier__publisher_filter = new_filter
    return config


def migrate_settings(config: settngs.Config[ct_ns]) -> settngs.Config[ct_ns]:
    original_types = ("cbi", "cr", "comet")
    save_style = config[0].internal__save_data_style
    if not isinstance(save_style, list):
        if isinstance(save_style, int) and save_style in (0, 1, 2):
            config[0].internal__save_data_style = [original_types[save_style]]
        elif isinstance(save_style, str):
            config[0].internal__save_data_style = [save_style]
        else:
            config[0].internal__save_data_style = ["cbi"]

    return config


def validate_file_settings(config: settngs.Config[ct_ns]) -> settngs.Config[ct_ns]:
    config = parse_filter(config)

    config = migrate_settings(config)

    if config[0].Filename_Parsing__protofolius_issue_number_scheme:
        config[0].Filename_Parsing__allow_issue_start_with_letter = True

    config[0].File_Rename__replacements = Replacements(
        [Replacement(x[0], x[1], x[2]) for x in config[0].File_Rename__replacements[0]],
        [Replacement(x[0], x[1], x[2]) for x in config[0].File_Rename__replacements[1]],
    )
    return config


def register_file_settings(parser: settngs.Manager) -> None:
    parser.add_group("internal", internal, False)
    parser.add_group("Issue Identifier", identifier, False)
    parser.add_group("Filename Parsing", filename, False)
    parser.add_group("Sources", talker, False)
    parser.add_group("Comic Book Lover", cbl, False)
    parser.add_group("File Rename", rename, False)
    parser.add_group("Auto-Tag", autotag, False)
    parser.add_group("General", general, False)
    parser.add_group("Dialog Flags", dialog, False)
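The group and setting names registered above surface later in this diff (see settngs_namespace.py) as <Group>__<setting> attributes. A small sketch of that naming convention, written as a plain helper rather than the settngs library's own sanitization (assumed, not confirmed here):

# Sketch of the naming convention visible in settngs_namespace.py below:
# "<group with spaces>" + "--some-setting" -> Group_With_Underscores__some_setting
def namespace_attribute(group: str, setting_flag: str) -> str:
    group_part = group.replace(" ", "_").replace("-", "_")
    setting_part = setting_flag.lstrip("-").replace("-", "_")
    return f"{group_part}__{setting_part}"

assert namespace_attribute("Issue Identifier", "--publisher-filter") == "Issue_Identifier__publisher_filter"
assert namespace_attribute("File Rename", "--issue-number-padding") == "File_Rename__issue_number_padding"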
107  comictaggerlib/ctsettings/plugin.py  Normal file
@@ -0,0 +1,107 @@
from __future__ import annotations

import logging
import os
from typing import cast

import settngs

import comicapi.comicarchive
import comicapi.utils
import comictaggerlib.ctsettings
from comicapi.comicarchive import Archiver
from comictaggerlib.ctsettings.settngs_namespace import settngs_namespace as ct_ns
from comictalker.comictalker import ComicTalker

logger = logging.getLogger("comictagger")


def group_for_plugin(plugin: Archiver | ComicTalker | type[Archiver]) -> str:
    if isinstance(plugin, ComicTalker):
        return f"Source {plugin.id}"
    if isinstance(plugin, Archiver) or plugin == Archiver:
        return "Archive"
    raise NotImplementedError(f"Invalid plugin received: {plugin=}")


def archiver(manager: settngs.Manager) -> None:
    for archiver in comicapi.comicarchive.archivers:
        if archiver.exe:
            # add_setting will overwrite anything with the same name.
            # So we only end up with one option even if multiple archivers use the same exe.
            manager.add_setting(
                f"--{settngs.sanitize_name(archiver.exe)}",
                default=archiver.exe,
                help="Path to the %(default)s executable\n\n",
            )


def register_talker_settings(manager: settngs.Manager, talkers: dict[str, ComicTalker]) -> None:
    for talker in talkers.values():

        def api_options(manager: settngs.Manager) -> None:
            # The default needs to be unset or None.
            # This allows this setting to be unset with the empty string, allowing the default to change
            manager.add_setting(
                f"--{talker.id}-key",
                display_name="API Key",
                help=f"API Key for {talker.name} (default: {talker.default_api_key})",
            )
            manager.add_setting(
                f"--{talker.id}-url",
                display_name="URL",
                help=f"URL for {talker.name} (default: {talker.default_api_url})",
            )

        try:
            manager.add_persistent_group(group_for_plugin(talker), api_options, False)
            if hasattr(talker, "register_settings"):
                manager.add_persistent_group(group_for_plugin(talker), talker.register_settings, False)
        except Exception:
            logger.exception("Failed to register settings for %s", talker.id)


def validate_archive_settings(config: settngs.Config[ct_ns]) -> settngs.Config[ct_ns]:
    cfg = settngs.normalize_config(config, file=True, cmdline=True, default=False)
    for archiver in comicapi.comicarchive.archivers:
        group = group_for_plugin(archiver())
        exe_name = settngs.sanitize_name(archiver.exe)
        if not exe_name:
            continue

        if exe_name in cfg[0][group] and cfg[0][group][exe_name]:
            path = cfg[0][group][exe_name]
            name = os.path.basename(path)
            # If the path is not the basename then this is a relative or absolute path.
            # Ensure it is absolute
            if path != name:
                path = os.path.abspath(path)

            archiver.exe = path

    return config


def validate_talker_settings(config: settngs.Config[ct_ns], talkers: dict[str, ComicTalker]) -> settngs.Config[ct_ns]:
    # Apply talker settings from config file
    cfg = settngs.normalize_config(config, True, True)
    for talker in list(talkers.values()):
        try:
            cfg[0][group_for_plugin(talker)] = talker.parse_settings(cfg[0][group_for_plugin(talker)])
        except Exception as e:
            # Remove talker as we failed to apply the settings
            del comictaggerlib.ctsettings.talkers[talker.id]
            logger.exception("Failed to initialize talker settings: %s", e)

    return cast(settngs.Config[ct_ns], settngs.get_namespace(cfg, file=True, cmdline=True))


def validate_plugin_settings(config: settngs.Config[ct_ns], talkers: dict[str, ComicTalker]) -> settngs.Config[ct_ns]:
    config = validate_archive_settings(config)
    config = validate_talker_settings(config, talkers)
    return config


def register_plugin_settings(manager: settngs.Manager, talkers: dict[str, ComicTalker]) -> None:
    manager.add_persistent_group("Archive", archiver, False)
    register_talker_settings(manager, talkers)
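For a talker with id comicvine (the id and name below are illustrative), the per-talker registration above yields flags such as --comicvine-key and --comicvine-url under a "Source comicvine" group, matching the Source_comicvine__* attributes later in this diff. A hedged sketch of the same flag-naming pattern using plain argparse instead of settngs:

# Hedged sketch of the per-talker flag naming, with plain argparse standing in
# for the settngs.Manager wiring handled elsewhere in this diff.
import argparse

def add_talker_options(parser: argparse.ArgumentParser, talker_id: str, talker_name: str) -> None:
    group = parser.add_argument_group(f"Source {talker_id}")
    group.add_argument(f"--{talker_id}-key", help=f"API Key for {talker_name}")
    group.add_argument(f"--{talker_id}-url", help=f"URL for {talker_name}")

parser = argparse.ArgumentParser()
add_talker_options(parser, "comicvine", "Comic Vine")  # hypothetical talker id/name
opts = parser.parse_args(["--comicvine-key", "abc123"])
print(opts.comicvine_key)  # "abc123"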
153  comictaggerlib/ctsettings/plugin_finder.py  Normal file
@@ -0,0 +1,153 @@
"""Functions related to finding and loading plugins."""

# Lifted from flake8 https://github.com/PyCQA/flake8/blob/main/src/flake8/plugins/finder.py#L127

from __future__ import annotations

import configparser
import importlib.metadata
import logging
import pathlib
import re
from collections.abc import Generator
from typing import Any, NamedTuple

logger = logging.getLogger(__name__)

NORMALIZE_PACKAGE_NAME_RE = re.compile(r"[-_.]+")
PLUGIN_GROUPS = frozenset(("comictagger.talker", "comicapi.archiver", "comicapi.metadata"))


class FailedToLoadPlugin(Exception):
    """Exception raised when a plugin fails to load."""

    FORMAT = 'ComicTagger failed to load local plugin "{name}" due to {exc}.'

    def __init__(self, plugin_name: str, exception: Exception) -> None:
        """Initialize our FailedToLoadPlugin exception."""
        self.plugin_name = plugin_name
        self.original_exception = exception
        super().__init__(plugin_name, exception)

    def __str__(self) -> str:
        """Format our exception message."""
        return self.FORMAT.format(
            name=self.plugin_name,
            exc=self.original_exception,
        )


def normalize_pypi_name(s: str) -> str:
    """Normalize a distribution name according to PEP 503."""
    return NORMALIZE_PACKAGE_NAME_RE.sub("-", s).lower()


class Plugin(NamedTuple):
    """A plugin before loading."""

    package: str
    version: str
    entry_point: importlib.metadata.EntryPoint
    path: pathlib.Path


class LoadedPlugin(NamedTuple):
    """Represents a plugin after being imported."""

    plugin: Plugin
    obj: Any

    @property
    def entry_name(self) -> str:
        """Return the name given in the packaging metadata."""
        return self.plugin.entry_point.name

    @property
    def display_name(self) -> str:
        """Return the name for use in user-facing / error messages."""
        return f"{self.plugin.package}[{self.entry_name}]"


class Plugins(NamedTuple):
    """Classified plugins."""

    archivers: list[Plugin]
    metadata: list[Plugin]
    talkers: list[Plugin]

    def all_plugins(self) -> Generator[Plugin, None, None]:
        """Return an iterator over all :class:`LoadedPlugin`s."""
        yield from self.archivers
        yield from self.metadata
        yield from self.talkers

    def versions_str(self) -> str:
        """Return a user-displayed list of plugin versions."""
        return ", ".join(sorted({f"{plugin.package}: {plugin.version}" for plugin in self.all_plugins()}))


def _find_local_plugins(plugin_path: pathlib.Path) -> Generator[Plugin, None, None]:
    cfg = configparser.ConfigParser(interpolation=None)
    cfg.read(plugin_path / "setup.cfg")

    for group in PLUGIN_GROUPS:
        for plugin_s in cfg.get("options.entry_points", group, fallback="").splitlines():
            if not plugin_s:
                continue

            name, _, entry_str = plugin_s.partition("=")
            name, entry_str = name.strip(), entry_str.strip()
            ep = importlib.metadata.EntryPoint(name, entry_str, group)
            yield Plugin(plugin_path.name, cfg.get("metadata", "version", fallback="0.0.1"), ep, plugin_path)


def _check_required_plugins(plugins: list[Plugin], expected: frozenset[str]) -> None:
    plugin_names = {normalize_pypi_name(plugin.package) for plugin in plugins}
    expected_names = {normalize_pypi_name(name) for name in expected}
    missing_plugins = expected_names - plugin_names

    if missing_plugins:
        raise Exception(
            "required plugins were not installed!\n"
            + f"- installed: {', '.join(sorted(plugin_names))}\n"
            + f"- expected: {', '.join(sorted(expected_names))}\n"
            + f"- missing: {', '.join(sorted(missing_plugins))}"
        )


def find_plugins(plugin_folder: pathlib.Path) -> Plugins:
    """Discovers all plugins (but does not load them)."""
    ret: list[Plugin] = []
    for plugin_path in plugin_folder.glob("*/setup.cfg"):
        try:
            ret.extend(_find_local_plugins(plugin_path.parent))
        except Exception as err:
            FailedToLoadPlugin(plugin_path.parent.name, err)

    # for determinism, sort the list
    ret.sort()

    return _classify_plugins(ret)


def _classify_plugins(plugins: list[Plugin]) -> Plugins:
    archivers = []
    metadata = []
    talkers = []

    for p in plugins:
        if p.entry_point.group == "comictagger.talker":
            talkers.append(p)
        elif p.entry_point.group == "comicapi.metadata":
            metadata.append(p)
        elif p.entry_point.group == "comicapi.archiver":
            archivers.append(p)
        else:
            logger.warning(NotImplementedError(f"what plugin type? {p}"))

    return Plugins(
        metadata=metadata,
        archivers=archivers,
        talkers=talkers,
    )
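A minimal sketch of the local-plugin setup.cfg layout that _find_local_plugins() above reads: entry points under one of the PLUGIN_GROUPS plus an optional [metadata] version. The plugin and module names here are invented for illustration.

# Sketch of the setup.cfg convention consumed by _find_local_plugins() above,
# using only configparser so it runs standalone.
import configparser

EXAMPLE_SETUP_CFG = """
[metadata]
version = 0.1.0

[options.entry_points]
comictagger.talker =
    example = example_plugin.talker:ExampleTalker
"""

cfg = configparser.ConfigParser(interpolation=None)
cfg.read_string(EXAMPLE_SETUP_CFG)
for line in cfg.get("options.entry_points", "comictagger.talker", fallback="").splitlines():
    if line.strip():
        name, _, entry = line.partition("=")
        print(name.strip(), "->", entry.strip())  # example -> example_plugin.talker:ExampleTalker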
110  comictaggerlib/ctsettings/settngs_namespace.py  Normal file
@@ -0,0 +1,110 @@
from __future__ import annotations

import settngs

import comicapi.genericmetadata
import comictaggerlib.ctsettings.types
import comictaggerlib.defaults
import comictaggerlib.resulttypes


class settngs_namespace(settngs.TypedNS):
    Commands__version: bool
    Commands__command: comictaggerlib.resulttypes.Action
    Commands__copy: str

    Runtime_Options__config: comictaggerlib.ctsettings.types.ComicTaggerPaths
    Runtime_Options__verbose: int
    Runtime_Options__abort_on_conflict: bool
    Runtime_Options__delete_original: bool
    Runtime_Options__parse_filename: bool
    Runtime_Options__issue_id: str
    Runtime_Options__online: bool
    Runtime_Options__metadata: comicapi.genericmetadata.GenericMetadata
    Runtime_Options__interactive: bool
    Runtime_Options__abort_on_low_confidence: bool
    Runtime_Options__summary: bool
    Runtime_Options__raw: bool
    Runtime_Options__recursive: bool
    Runtime_Options__dryrun: bool
    Runtime_Options__darkmode: bool
    Runtime_Options__glob: bool
    Runtime_Options__quiet: bool
    Runtime_Options__json: bool
    Runtime_Options__type: list[str]
    Runtime_Options__overwrite: bool
    Runtime_Options__no_gui: bool
    Runtime_Options__files: list[str]

    internal__install_id: str
    internal__save_data_style: list[str]
    internal__load_data_style: str
    internal__last_opened_folder: str
    internal__window_width: int
    internal__window_height: int
    internal__window_x: int
    internal__window_y: int
    internal__form_width: int
    internal__list_width: int
    internal__sort_column: int
    internal__sort_direction: int

    Issue_Identifier__series_match_identify_thresh: int
    Issue_Identifier__border_crop_percent: int
    Issue_Identifier__publisher_filter: list[str]
    Issue_Identifier__series_match_search_thresh: int
    Issue_Identifier__clear_metadata: bool
    Issue_Identifier__auto_imprint: bool
    Issue_Identifier__sort_series_by_year: bool
    Issue_Identifier__exact_series_matches_first: bool
    Issue_Identifier__always_use_publisher_filter: bool

    Filename_Parsing__complicated_parser: bool
    Filename_Parsing__remove_c2c: bool
    Filename_Parsing__remove_fcbd: bool
    Filename_Parsing__remove_publisher: bool
    Filename_Parsing__split_words: bool
    Filename_Parsing__protofolius_issue_number_scheme: bool
    Filename_Parsing__allow_issue_start_with_letter: bool

    Sources__source: str
    Sources__remove_html_tables: bool

    Comic_Book_Lover__assume_lone_credit_is_primary: bool
    Comic_Book_Lover__copy_characters_to_tags: bool
    Comic_Book_Lover__copy_teams_to_tags: bool
    Comic_Book_Lover__copy_locations_to_tags: bool
    Comic_Book_Lover__copy_storyarcs_to_tags: bool
    Comic_Book_Lover__copy_notes_to_comments: bool
    Comic_Book_Lover__copy_weblink_to_comments: bool
    Comic_Book_Lover__apply_transform_on_import: bool
    Comic_Book_Lover__apply_transform_on_bulk_operation: bool

    File_Rename__template: str
    File_Rename__issue_number_padding: int
    File_Rename__use_smart_string_cleanup: bool
    File_Rename__auto_extension: bool
    File_Rename__dir: str
    File_Rename__move_to_dir: bool
    File_Rename__strict: bool
    File_Rename__replacements: comictaggerlib.defaults.Replacements

    Auto_Tag__save_on_low_confidence: bool
    Auto_Tag__dont_use_year_when_identifying: bool
    Auto_Tag__assume_issue_one: bool
    Auto_Tag__ignore_leading_numbers_in_filename: bool
    Auto_Tag__remove_archive_after_successful_match: bool

    General__check_for_new_version: bool
    General__disable_cr: bool
    General__use_short_metadata_names: bool

    Dialog_Flags__show_disclaimer: bool
    Dialog_Flags__dont_notify_about_this_version: str
    Dialog_Flags__ask_about_usage_stats: bool

    Archive__rar: str

    Source_comicvine__comicvine_key: str
    Source_comicvine__comicvine_url: str
    Source_comicvine__cv_use_series_start_as_volume: bool
133  comictaggerlib/ctsettings/types.py  Normal file
@@ -0,0 +1,133 @@
from __future__ import annotations

import argparse
import pathlib

from appdirs import AppDirs

from comicapi import utils
from comicapi.comicarchive import metadata_styles
from comicapi.genericmetadata import GenericMetadata


class ComicTaggerPaths(AppDirs):
    def __init__(self, config_path: pathlib.Path | str | None = None) -> None:
        super().__init__("ComicTagger", None, None, False, False)
        self.path: pathlib.Path | None = None
        if config_path:
            self.path = pathlib.Path(config_path).absolute()

    @property
    def user_data_dir(self) -> pathlib.Path:
        if self.path:
            return self.path
        return pathlib.Path(super().user_data_dir)

    @property
    def user_config_dir(self) -> pathlib.Path:
        if self.path:
            return self.path
        return pathlib.Path(super().user_config_dir)

    @property
    def user_cache_dir(self) -> pathlib.Path:
        if self.path:
            path = self.path / "cache"
            return path
        return pathlib.Path(super().user_cache_dir)

    @property
    def user_state_dir(self) -> pathlib.Path:
        if self.path:
            return self.path
        return pathlib.Path(super().user_state_dir)

    @property
    def user_log_dir(self) -> pathlib.Path:
        if self.path:
            path = self.path / "log"
            return path
        return pathlib.Path(super().user_log_dir)

    @property
    def user_plugin_dir(self) -> pathlib.Path:
        if self.path:
            path = self.path / "plugins"
            return path
        return pathlib.Path(super().user_config_dir)

    @property
    def site_data_dir(self) -> pathlib.Path:
        return pathlib.Path(super().site_data_dir)

    @property
    def site_config_dir(self) -> pathlib.Path:
        return pathlib.Path(super().site_config_dir)


def metadata_type_single(types: str) -> str:
    result = metadata_type(types)
    if len(result) > 1:
        raise argparse.ArgumentTypeError(f"invalid choice: {result} (only one metadata style allowed)")
    return result[0]


def metadata_type(types: str) -> list[str]:
    result = []
    types = types.casefold()
    for typ in utils.split(types, ","):
        if typ not in metadata_styles:
            choices = ", ".join(metadata_styles)
            raise argparse.ArgumentTypeError(f"invalid choice: {typ} (choose from {choices.upper()})")
        result.append(metadata_styles[typ].short_name)
    return result


def parse_metadata_from_string(mdstr: str) -> GenericMetadata:
    """The metadata string is a comma separated list of name-value pairs
    The names match the attributes of the internal metadata struct (for now)
    The caret is the special "escape character", since it's not common in
    natural language text

    example = "series=Kickers^, Inc. ,issue=1, year=1986"
    """

    escaped_comma = "^,"
    escaped_equals = "^="
    replacement_token = "<_~_>"

    md = GenericMetadata()

    # First, replace escaped commas with a unique token (to be changed back later)
    mdstr = mdstr.replace(escaped_comma, replacement_token)
    tmp_list = utils.split(mdstr, ",")
    md_list = []
    for item in tmp_list:
        item = item.replace(replacement_token, ",")
        md_list.append(item)

    # Now build a nice dict from the list
    md_dict = {}
    for item in md_list:
        # Make sure to fix any escaped equal signs
        i = item.replace(escaped_equals, replacement_token)
        key, _, value = i.partition("=")
        value = value.replace(replacement_token, "=").strip()
        key = key.strip()
        if key.casefold() == "credit":
            cred_attribs = utils.split(value, ":")
            role = cred_attribs[0]
            person = cred_attribs[1] if len(cred_attribs) > 1 else ""
            primary = len(cred_attribs) > 2
            md.add_credit(person.strip(), role.strip(), primary)
        else:
            md_dict[key] = value

    # Map the dict to the metadata object
    for key, value in md_dict.items():
        if not hasattr(md, key):
            raise argparse.ArgumentTypeError(f"'{key}' is not a valid tag name")
        else:
            md.is_empty = False
            setattr(md, key, value)
    return md
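A short usage sketch for parse_metadata_from_string(), following the docstring's own example. It assumes comictaggerlib is importable; note that values are assigned to the metadata object as strings.

# Usage sketch for parse_metadata_from_string() above, using the docstring's example.
from comictaggerlib.ctsettings.types import parse_metadata_from_string

md = parse_metadata_from_string("series=Kickers^, Inc. ,issue=1, year=1986")
print(md.series)  # "Kickers, Inc."  (the ^, escape keeps the comma inside the value)
print(md.issue)   # "1"   (assigned as a string)
print(md.year)    # "1986"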
@@ -1,3 +0,0 @@
# This file should contain only these comments, and the line below.
# Used by packaging makefiles and app
version = "1.2.0+1"
29  comictaggerlib/defaults.py  Normal file
@@ -0,0 +1,29 @@
from __future__ import annotations

from typing import NamedTuple


class Replacement(NamedTuple):
    find: str
    replce: str
    strict_only: bool


class Replacements(NamedTuple):
    literal_text: list[Replacement]
    format_value: list[Replacement]


DEFAULT_REPLACEMENTS = Replacements(
    literal_text=[
        Replacement(": ", " - ", True),
        Replacement(":", "-", True),
    ],
    format_value=[
        Replacement(": ", " - ", True),
        Replacement(":", "-", True),
        Replacement("/", "-", False),
        Replacement("//", "--", False),
        Replacement("\\", "-", True),
    ],
)
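These replacement tables are consumed by MetadataFormatter.handle_replacements() further down in this diff. A minimal sketch of that substitution loop, assuming comictaggerlib is importable; "strict" here stands for the Windows/universal-safe filename mode:

# Sketch of how the replacement tables above are applied (mirrors the
# handle_replacements loop in the renamer code later in this diff).
from comictaggerlib.defaults import DEFAULT_REPLACEMENTS

def apply(text: str, strict: bool) -> str:
    for find, replace, strict_only in DEFAULT_REPLACEMENTS.format_value:
        if strict or not strict_only:
            text = text.replace(find, replace)
    return text

print(apply("Batman: Year One 1/4", strict=True))   # "Batman - Year One 1-4"
print(apply("Batman: Year One 1/4", strict=False))  # "Batman: Year One 1-4"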
@@ -1,27 +1,28 @@
"""A PyQT4 dialog to confirm and set options for export to zip"""

# Copyright 2012-2014 Anthony Beville

#
# Copyright 2012-2014 ComicTagger Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#
# http://www.apache.org/licenses/LICENSE-2.0

#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

#import os
import logging

from PyQt5 import QtCore, QtGui, QtWidgets, uic
from PyQt5 import QtCore, QtWidgets, uic

from .settings import ComicTaggerSettings
#from settingswindow import SettingsWindow
#from filerenamer import FileRenamer
#import utils
from comictaggerlib.ui import ui_path

logger = logging.getLogger(__name__)


class ExportConflictOpts:
@@ -31,27 +32,26 @@ class ExportConflictOpts:


class ExportWindow(QtWidgets.QDialog):
    def __init__(self, parent: QtWidgets.QWidget, msg: str) -> None:
        super().__init__(parent)

    def __init__(self, parent, settings, msg):
        super(ExportWindow, self).__init__(parent)

        uic.loadUi(ComicTaggerSettings.getUIFile('exportwindow.ui'), self)
        with (ui_path / "exportwindow.ui").open(encoding="utf-8") as uifile:
            uic.loadUi(uifile, self)
        self.label.setText(msg)

        self.setWindowFlags(self.windowFlags() &
                            ~QtCore.Qt.WindowContextHelpButtonHint)
        self.setWindowFlags(
            QtCore.Qt.WindowType(self.windowFlags() & ~QtCore.Qt.WindowType.WindowContextHelpButtonHint)
        )

        self.settings = settings

        self.cbxDeleteOriginal.setCheckState(QtCore.Qt.Unchecked)
        self.cbxAddToList.setCheckState(QtCore.Qt.Checked)
        self.cbxDeleteOriginal.setChecked(False)
        self.cbxAddToList.setChecked(True)
        self.radioDontCreate.setChecked(True)

        self.deleteOriginal = False
        self.addToList = True
        self.fileConflictBehavior = ExportConflictOpts.dontCreate

    def accept(self):
    def accept(self) -> None:
        QtWidgets.QDialog.accept(self)

        self.deleteOriginal = self.cbxDeleteOriginal.isChecked()
@@ -60,5 +60,3 @@ class ExportWindow(QtWidgets.QDialog):
            self.fileConflictBehavior = ExportConflictOpts.dontCreate
        elif self.radioCreateNew.isChecked():
            self.fileConflictBehavior = ExportConflictOpts.createUnique
        # else:
        #     self.fileConflictBehavior = ExportConflictOpts.overwrite
@@ -1 +0,0 @@
from comicapi.filenameparser import *
@@ -1,156 +1,244 @@
"""Functions for renaming files based on metadata"""

# Copyright 2012-2014 Anthony Beville

#
# Copyright 2012-2014 ComicTagger Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#
# http://www.apache.org/licenses/LICENSE-2.0

#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

import calendar
import logging
import os
import re
import datetime
import pathlib
import string
from collections.abc import Mapping, Sequence
from typing import Any, cast

from . import utils
from .issuestring import IssueString
from pathvalidate import Platform, normalize_platform, sanitize_filename

from comicapi.comicarchive import ComicArchive
from comicapi.genericmetadata import GenericMetadata
from comicapi.issuestring import IssueString
from comictaggerlib.defaults import DEFAULT_REPLACEMENTS, Replacement, Replacements

logger = logging.getLogger(__name__)


def get_rename_dir(ca: ComicArchive, rename_dir: str | pathlib.Path | None) -> pathlib.Path:
    folder = ca.path.parent.absolute()
    if rename_dir is not None:
        if isinstance(rename_dir, str):
            rename_dir = rename_dir.strip()
        folder = pathlib.Path(rename_dir).absolute()
    return folder


class MetadataFormatter(string.Formatter):
    def __init__(
        self, smart_cleanup: bool = False, platform: str = "auto", replacements: Replacements = DEFAULT_REPLACEMENTS
    ) -> None:
        super().__init__()
        self.smart_cleanup = smart_cleanup
        self.platform = normalize_platform(platform)
        self.replacements = replacements

    def format_field(self, value: Any, format_spec: str) -> str:
        if value is None or value == "":
            return ""
        return cast(str, super().format_field(value, format_spec))

    def convert_field(self, value: Any, conversion: str) -> str:
        if conversion == "u":
            return str(value).upper()
        if conversion == "l":
            return str(value).casefold()
        if conversion == "c":
            return str(value).capitalize()
        if conversion == "S":
            return str(value).swapcase()
        if conversion == "t":
            return str(value).title()
        if conversion == "j":
            return ", ".join(list(value))
        return cast(str, super().convert_field(value, conversion))

    def handle_replacements(self, string: str, replacements: list[Replacement]) -> str:
        for find, replace, strict_only in replacements:
            if self.is_strict() or not strict_only:
                string = string.replace(find, replace)
        return string

    def none_replacement(self, value: Any, replacement: str, r: str) -> Any:
        if r == "-" and value is None or value == "":
            return replacement
        if r == "+" and value is not None:
            return replacement
        return value

    def split_replacement(self, field_name: str) -> tuple[str, str, str]:
        if "-" in field_name:
            return field_name.rpartition("-")
        if "+" in field_name:
            return field_name.rpartition("+")
        return field_name, "", ""

    def is_strict(self) -> bool:
        return self.platform in [Platform.UNIVERSAL, Platform.WINDOWS]

    def _vformat(
        self,
        format_string: str,
        args: Sequence[Any],
        kwargs: Mapping[str, Any],
        used_args: set[Any],
        recursion_depth: int,
        auto_arg_index: int = 0,
    ) -> tuple[str, int]:
        if recursion_depth < 0:
            raise ValueError("Max string recursion exceeded")
        result = []
        lstrip = False
        for literal_text, field_name, format_spec, conversion in self.parse(format_string):
            # output the literal text
            if literal_text:
                if lstrip:
                    literal_text = literal_text.lstrip("-_)}]#")
                if self.smart_cleanup:
                    literal_text = self.handle_replacements(literal_text, self.replacements.literal_text)
                    lspace = literal_text[0].isspace() if literal_text else False
                    rspace = literal_text[-1].isspace() if literal_text else False
                    literal_text = " ".join(literal_text.split())
                    if literal_text == "":
                        literal_text = " "
                    else:
                        if lspace:
                            literal_text = " " + literal_text
                        if rspace:
                            literal_text += " "
                result.append(literal_text)

            lstrip = False
            # if there's a field, output it
            if field_name is not None and field_name != "":
                field_name, r, replacement = self.split_replacement(field_name)
                field_name = field_name.casefold()
                # this is some markup, find the object and do the formatting

                # handle arg indexing when digit field_names are given.
                if field_name.isdigit():
                    raise ValueError("cannot use a number as a field name")

                # given the field_name, find the object it references
                # and the argument it came from
                obj, arg_used = self.get_field(field_name, args, kwargs)
                used_args.add(arg_used)

                obj = self.none_replacement(obj, replacement, r)

                # do any conversion on the resulting object
                obj = self.convert_field(obj, conversion)  # type: ignore

                # expand the format spec, if needed
                format_spec, _ = self._vformat(
                    cast(str, format_spec), args, kwargs, used_args, recursion_depth - 1, auto_arg_index=False
                )

                # format the object and append to the result
                fmt_obj = self.format_field(obj, format_spec)
                if fmt_obj == "" and result and self.smart_cleanup and literal_text:
                    if self.str_contains(result[-1], "({["):
                        lstrip = True
                    if result:
                        if " " in result[-1]:
                            result[-1], _, _ = result[-1].rstrip().rpartition(" ")
                        result[-1] = result[-1].rstrip("-_({[#")
                if self.smart_cleanup:
                    # colons and slashes get special treatment
                    fmt_obj = self.handle_replacements(fmt_obj, self.replacements.format_value)
                    fmt_obj = " ".join(fmt_obj.split())
                    fmt_obj = str(sanitize_filename(fmt_obj, platform=self.platform))
                result.append(fmt_obj)

        return "".join(result), False

    def str_contains(self, chars: str, string: str) -> bool:
        for char in chars:
            if char in string:
                return True
        return False


class FileRenamer:

    def __init__(self, metadata):
        self.setMetadata(metadata)
        self.setTemplate(
            "%series% v%volume% #%issue% (of %issuecount%) (%year%)")
    def __init__(
        self,
        metadata: GenericMetadata | None,
        platform: str = "auto",
        replacements: Replacements = DEFAULT_REPLACEMENTS,
    ) -> None:
        self.template = "{publisher}/{series}/{series} v{volume} #{issue} (of {issue_count}) ({year})"
        self.smart_cleanup = True
        self.issue_zero_padding = 3
        self.metadata = metadata or GenericMetadata()
        self.move = False
        self.platform = platform
        self.replacements = replacements

    def setMetadata(self, metadata):
        self.metdata = metadata
    def set_metadata(self, metadata: GenericMetadata) -> None:
        self.metadata = metadata

    def setIssueZeroPadding(self, count):
    def set_issue_zero_padding(self, count: int) -> None:
        self.issue_zero_padding = count

    def setSmartCleanup(self, on):
    def set_smart_cleanup(self, on: bool) -> None:
        self.smart_cleanup = on

    def setTemplate(self, template):
    def set_template(self, template: str) -> None:
        self.template = template

    def replaceToken(self, text, value, token):
        # helper func
        def isToken(word):
            return (word[0] == "%" and word[-1:] == "%")
    def determine_name(self, ext: str) -> str:
        class Default(dict[str, Any]):
            def __missing__(self, key: str) -> str:
                return "{" + key + "}"

        if value is not None:
            return text.replace(token, str(value))
        md = self.metadata

        template = self.template

        new_name = ""

        fmt = MetadataFormatter(self.smart_cleanup, platform=self.platform, replacements=self.replacements)
        md_dict = vars(md)
        md_dict["issue"] = IssueString(md.issue).as_string(pad=self.issue_zero_padding)
        for role in ["writer", "penciller", "inker", "colorist", "letterer", "cover artist", "editor"]:
            md_dict[role] = md.get_primary_credit(role)

        if (isinstance(md.month, int) or isinstance(md.month, str) and md.month.isdigit()) and 0 < int(md.month) < 13:
            md_dict["month_name"] = calendar.month_name[int(md.month)]
            md_dict["month_abbr"] = calendar.month_abbr[int(md.month)]
        else:
            if self.smart_cleanup:
                # smart cleanup means we want to remove anything appended to token if it's empty
                # (e.g "#%issue%" or "v%volume%")
                # (TODO: This could fail if there is more than one token appended together, I guess)
                text_list = text.split()
            md_dict["month_name"] = ""
            md_dict["month_abbr"] = ""

                # special case for issuecount, remove preceding non-token word,
                # as in "...(of %issuecount%)..."
                if token == '%issuecount%':
                    for idx, word in enumerate(text_list):
                        if token in word and not isToken(text_list[idx - 1]):
                            text_list[idx - 1] = ""

                text_list = [x for x in text_list if token not in x]
                return " ".join(text_list)
            else:
                return text.replace(token, "")

    def determineName(self, filename, ext=None):

        md = self.metdata
        new_name = self.template
        preferred_encoding = utils.get_actual_preferred_encoding()

        # print(u"{0}".format(md))

        new_name = self.replaceToken(new_name, md.series, '%series%')
        new_name = self.replaceToken(new_name, md.volume, '%volume%')

        if md.issue is not None:
            issue_str = "{0}".format(
                IssueString(md.issue).asString(pad=self.issue_zero_padding))
        else:
            issue_str = None
        new_name = self.replaceToken(new_name, issue_str, '%issue%')

        new_name = self.replaceToken(new_name, md.issueCount, '%issuecount%')
        new_name = self.replaceToken(new_name, md.year, '%year%')
        new_name = self.replaceToken(new_name, md.publisher, '%publisher%')
        new_name = self.replaceToken(new_name, md.title, '%title%')
        new_name = self.replaceToken(new_name, md.month, '%month%')
        month_name = None
        if md.month is not None:
            if (isinstance(md.month, str) and md.month.isdigit()) or isinstance(
                    md.month, int):
                if int(md.month) in range(1, 13):
                    dt = datetime.datetime(1970, int(md.month), 1, 0, 0)
                    #month_name = dt.strftime("%B".encode(preferred_encoding)).decode(preferred_encoding)
                    month_name = dt.strftime("%B")
        new_name = self.replaceToken(new_name, month_name, '%month_name%')

        new_name = self.replaceToken(new_name, md.genre, '%genre%')
        new_name = self.replaceToken(new_name, md.language, '%language_code%')
        new_name = self.replaceToken(
            new_name, md.criticalRating, '%criticalrating%')
        new_name = self.replaceToken(
            new_name, md.alternateSeries, '%alternateseries%')
        new_name = self.replaceToken(
            new_name, md.alternateNumber, '%alternatenumber%')
        new_name = self.replaceToken(
            new_name, md.alternateCount, '%alternatecount%')
        new_name = self.replaceToken(new_name, md.imprint, '%imprint%')
        new_name = self.replaceToken(new_name, md.format, '%format%')
        new_name = self.replaceToken(
            new_name, md.maturityRating, '%maturityrating%')
        new_name = self.replaceToken(new_name, md.storyArc, '%storyarc%')
        new_name = self.replaceToken(new_name, md.seriesGroup, '%seriesgroup%')
        new_name = self.replaceToken(new_name, md.scanInfo, '%scaninfo%')

        if self.smart_cleanup:

            # remove empty braces,brackets, parentheses
            new_name = re.sub("\(\s*[-:]*\s*\)", "", new_name)
            new_name = re.sub("\[\s*[-:]*\s*\]", "", new_name)
            new_name = re.sub("\{\s*[-:]*\s*\}", "", new_name)

            # remove duplicate spaces
            new_name = " ".join(new_name.split())

            # remove remove duplicate -, _,
            new_name = re.sub("[-_]{2,}\s+", "-- ", new_name)
            new_name = re.sub("(\s--)+", " --", new_name)
            new_name = re.sub("(\s-)+", " -", new_name)

            # remove dash or double dash at end of line
            new_name = re.sub("[-]{1,2}\s*$", "", new_name)

            # remove duplicate spaces (again!)
            new_name = " ".join(new_name.split())

        if ext is None:
            ext = os.path.splitext(filename)[1]
        new_basename = ""
        for component in pathlib.PureWindowsPath(template).parts:
            new_basename = str(
                sanitize_filename(fmt.vformat(component, args=[], kwargs=Default(md_dict)), platform=self.platform)
            ).strip()
            new_name = os.path.join(new_name, new_basename)

        new_name += ext
        new_basename += ext

        # some tweaks to keep various filesystems happy
        new_name = new_name.replace("/", "-")
        new_name = new_name.replace(" :", " -")
        new_name = new_name.replace(": ", " - ")
        new_name = new_name.replace(":", "-")
        new_name = new_name.replace("?", "")

        return new_name
        if self.move:
            return new_name.strip()
        return new_basename.strip()
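A hedged usage sketch for the reworked FileRenamer above, showing the new {field} templates that replace the old %token% style. The comictaggerlib.filerenamer module path and the exact printed output are assumptions based on this diff, not confirmed here.

# Usage sketch for the brace-style rename templates handled above.
from comicapi.genericmetadata import GenericMetadata
from comictaggerlib.filerenamer import FileRenamer  # module path assumed from this diff

md = GenericMetadata()
md.series, md.issue, md.year = "Example Series", "2", 1986  # made-up values
md.is_empty = False

renamer = FileRenamer(md)
renamer.set_template("{series} #{issue} ({year})")  # default template from the rename settings above
renamer.set_issue_zero_padding(3)
print(renamer.determine_name(".cbz"))  # expected: "Example Series #002 (1986).cbz"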
@@ -1,154 +1,140 @@
|
||||
# coding=utf-8
|
||||
"""A PyQt5 widget for managing list of comic archive files"""
|
||||
|
||||
# Copyright 2012-2014 Anthony Beville
|
||||
|
||||
#
|
||||
# Copyright 2012-2014 ComicTagger Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
import platform
|
||||
import logging
|
||||
import os
|
||||
#import os
|
||||
import sys
|
||||
import platform
|
||||
from typing import Callable, cast
|
||||
|
||||
from PyQt5.QtCore import *
|
||||
from PyQt5.QtGui import *
|
||||
from PyQt5.QtWidgets import *
|
||||
from PyQt5 import uic
|
||||
from PyQt5.QtCore import pyqtSignal
|
||||
from PyQt5 import QtCore, QtWidgets, uic
|
||||
|
||||
from .settings import ComicTaggerSettings
|
||||
from .comicarchive import ComicArchive
|
||||
from .optionalmsgdialog import OptionalMessageDialog
|
||||
from comictaggerlib.ui.qtutils import reduceWidgetFontSize, centerWindowOnParent
|
||||
from . import utils
|
||||
#from comicarchive import MetaDataStyle
|
||||
#from genericmetadata import GenericMetadata, PageType
|
||||
from comicapi import utils
|
||||
from comicapi.comicarchive import ComicArchive
|
||||
from comictaggerlib.ctsettings import ct_ns
|
||||
from comictaggerlib.graphics import graphics_path
|
||||
from comictaggerlib.optionalmsgdialog import OptionalMessageDialog
|
||||
from comictaggerlib.settingswindow import linuxRarHelp, macRarHelp, windowsRarHelp
|
||||
from comictaggerlib.ui import ui_path
|
||||
from comictaggerlib.ui.qtutils import center_window_on_parent, reduce_widget_font_size
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class FileTableWidgetItem(QTableWidgetItem):
|
||||
|
||||
def __lt__(self, other):
|
||||
#return (self.data(Qt.UserRole).toBool() <
|
||||
# other.data(Qt.UserRole).toBool())
|
||||
return (self.data(Qt.UserRole) <
|
||||
other.data(Qt.UserRole))
|
||||
|
||||
|
||||
class FileInfo():
|
||||
|
||||
def __init__(self, ca):
|
||||
self.ca = ca
|
||||
|
||||
|
||||
class FileSelectionList(QWidget):
|
||||
|
||||
selectionChanged = pyqtSignal(QVariant)
|
||||
listCleared = pyqtSignal()
|
||||
class FileSelectionList(QtWidgets.QWidget):
|
||||
selectionChanged = QtCore.pyqtSignal(QtCore.QVariant)
|
||||
listCleared = QtCore.pyqtSignal()
|
||||
|
||||
fileColNum = 0
|
||||
CRFlagColNum = 1
|
||||
CBLFlagColNum = 2
|
||||
typeColNum = 3
|
||||
readonlyColNum = 4
|
||||
folderColNum = 5
|
||||
MDFlagColNum = 1
|
||||
typeColNum = 2
|
||||
readonlyColNum = 3
|
||||
folderColNum = 4
|
||||
dataColNum = fileColNum
|
||||
|
||||
def __init__(self, parent, settings):
|
||||
super(FileSelectionList, self).__init__(parent)
|
||||
def __init__(
|
||||
self, parent: QtWidgets.QWidget, config: ct_ns, dirty_flag_verification: Callable[[str, str], bool]
|
||||
) -> None:
|
||||
super().__init__(parent)
|
||||
|
||||
uic.loadUi(ComicTaggerSettings.getUIFile('fileselectionlist.ui'), self)
|
||||
with (ui_path / "fileselectionlist.ui").open(encoding="utf-8") as uifile:
|
||||
uic.loadUi(uifile, self)
|
||||
|
||||
self.settings = settings
|
||||
self.config = config
|
||||
|
||||
reduceWidgetFontSize(self.twList)
|
||||
reduce_widget_font_size(self.twList)
|
||||
|
||||
self.twList.setColumnCount(6)
|
||||
#self.twlist.setHorizontalHeaderLabels (["File", "Folder", "CR", "CBL", ""])
|
||||
# self.twList.horizontalHeader().setStretchLastSection(True)
|
||||
self.twList.currentItemChanged.connect(self.currentItemChangedCB)
|
||||
self.twList.horizontalHeader().setMinimumSectionSize(50)
|
||||
self.twList.currentItemChanged.connect(self.current_item_changed_cb)
|
||||
|
||||
self.currentItem = None
|
||||
self.setContextMenuPolicy(Qt.ActionsContextMenu)
|
||||
self.modifiedFlag = False
|
||||
self.setContextMenuPolicy(QtCore.Qt.ContextMenuPolicy.ActionsContextMenu)
|
||||
self.dirty_flag = False
|
||||
|
||||
selectAllAction = QAction("Select All", self)
|
||||
removeAction = QAction("Remove Selected Items", self)
|
||||
self.separator = QAction("", self)
|
||||
select_all_action = QtWidgets.QAction("Select All", self)
|
||||
remove_action = QtWidgets.QAction("Remove Selected Items", self)
|
||||
self.separator = QtWidgets.QAction("", self)
|
||||
self.separator.setSeparator(True)
|
||||
|
||||
selectAllAction.setShortcut('Ctrl+A')
|
||||
removeAction.setShortcut('Ctrl+X')
|
||||
select_all_action.setShortcut("Ctrl+A")
|
||||
remove_action.setShortcut("Ctrl+X")
|
||||
|
||||
selectAllAction.triggered.connect(self.selectAll)
|
||||
removeAction.triggered.connect(self.removeSelection)
|
||||
select_all_action.triggered.connect(self.select_all)
|
||||
remove_action.triggered.connect(self.remove_selection)
|
||||
|
||||
self.addAction(selectAllAction)
|
||||
self.addAction(removeAction)
|
||||
self.addAction(select_all_action)
|
||||
self.addAction(remove_action)
|
||||
self.addAction(self.separator)
|
||||
|
||||
def getSorting(self):
|
||||
self.dirty_flag_verification = dirty_flag_verification
|
||||
self.rar_ro_shown = False
|
||||
|
||||
def get_sorting(self) -> tuple[int, int]:
|
||||
col = self.twList.horizontalHeader().sortIndicatorSection()
|
||||
order = self.twList.horizontalHeader().sortIndicatorOrder()
|
||||
return col, order
|
||||
return int(col), int(order)
|
||||
|
||||
def setSorting(self, col, order):
|
||||
col = self.twList.horizontalHeader().setSortIndicator(col, order)
|
||||
def set_sorting(self, col: int, order: QtCore.Qt.SortOrder) -> None:
|
||||
self.twList.horizontalHeader().setSortIndicator(col, order)
|
||||
|
||||
def addAppAction(self, action):
|
||||
self.insertAction(None, action)
|
||||
def add_app_action(self, action: QtWidgets.QAction) -> None:
|
||||
self.insertAction(QtWidgets.QAction(), action)
|
||||
|
||||
def setModifiedFlag(self, modified):
|
||||
self.modifiedFlag = modified
|
||||
def set_modified_flag(self, modified: bool) -> None:
|
||||
self.dirty_flag = modified
|
||||
|
||||
def selectAll(self):
|
||||
self.twList.setRangeSelected(
|
||||
QTableWidgetSelectionRange(
|
||||
0,
|
||||
0,
|
||||
self.twList.rowCount() -
|
||||
1,
|
||||
5),
|
||||
True)
|
||||
def select_all(self) -> None:
|
||||
self.twList.setRangeSelected(QtWidgets.QTableWidgetSelectionRange(0, 0, self.twList.rowCount() - 1, 5), True)
|
||||
|
||||
def deselectAll(self):
|
||||
self.twList.setRangeSelected(
|
||||
QTableWidgetSelectionRange(
|
||||
0,
|
||||
0,
|
||||
self.twList.rowCount() -
|
||||
1,
|
||||
5),
|
||||
False)
|
||||
def deselect_all(self) -> None:
|
||||
self.twList.setRangeSelected(QtWidgets.QTableWidgetSelectionRange(0, 0, self.twList.rowCount() - 1, 5), False)
|
||||
|
||||
def removeArchiveList(self, ca_list):
|
||||
def remove_archive_list(self, ca_list: list[ComicArchive]) -> None:
|
||||
self.twList.setSortingEnabled(False)
|
||||
current_removed = False
|
||||
for ca in ca_list:
|
||||
for row in range(self.twList.rowCount()):
|
||||
row_ca = self.getArchiveByRow(row)
|
||||
row_ca = self.get_archive_by_row(row)
|
||||
if row_ca == ca:
|
||||
if row == self.twList.currentRow():
|
||||
current_removed = True
|
||||
self.twList.removeRow(row)
|
||||
break
|
||||
self.twList.setSortingEnabled(True)
|
||||
|
||||
def getArchiveByRow(self, row):
|
||||
fi = self.twList.item(row, FileSelectionList.dataColNum).data(
|
||||
Qt.UserRole)
|
||||
return fi.ca
|
||||
if self.twList.rowCount() > 0 and current_removed:
|
||||
# since on a removal, we select row 0, make sure callback occurs if
|
||||
# we're already there
|
||||
if self.twList.currentRow() == 0:
|
||||
self.current_item_changed_cb(self.twList.currentItem(), None)
|
||||
self.twList.selectRow(0)
|
||||
elif self.twList.rowCount() <= 0:
|
||||
self.listCleared.emit()
|
||||
|
||||
def getCurrentArchive(self):
|
||||
return self.getArchiveByRow(self.twList.currentRow())
|
||||
def get_archive_by_row(self, row: int) -> ComicArchive | None:
|
||||
if row >= 0:
|
||||
ca: ComicArchive = self.twList.item(row, FileSelectionList.dataColNum).data(QtCore.Qt.ItemDataRole.UserRole)
|
||||
return ca
|
||||
return None
|
||||
|
||||
def removeSelection(self):
|
||||
def get_current_archive(self) -> ComicArchive | None:
|
||||
return self.get_archive_by_row(self.twList.currentRow())
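Note: the rewritten list keeps the ComicArchive itself in the filename cell's UserRole slot, so get_archive_by_row() and get_current_archive() are plain data() lookups. A minimal illustration of that round trip (the dict payload here is only a stand-in for a real ComicArchive):

    # Illustration only: stash a payload on a table item and read it back,
    # the same UserRole mechanism used by get_archive_by_row().
    from PyQt5 import QtCore, QtWidgets

    item = QtWidgets.QTableWidgetItem("example.cbz")
    item.setData(QtCore.Qt.ItemDataRole.UserRole, {"path": "example.cbz"})  # stand-in payload
    payload = item.data(QtCore.Qt.ItemDataRole.UserRole)
    assert payload["path"] == "example.cbz"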
|
||||
|
||||
def remove_selection(self) -> None:
|
||||
row_list = []
|
||||
for item in self.twList.selectedItems():
|
||||
if item.column() == 0:
|
||||
@@ -158,106 +144,82 @@ class FileSelectionList(QWidget):
|
||||
return
|
||||
|
||||
if self.twList.currentRow() in row_list:
|
||||
if not self.modifiedFlagVerification(
|
||||
"Remove Archive",
|
||||
"If you close this archive, data in the form will be lost. Are you sure?"):
|
||||
if not self.dirty_flag_verification(
|
||||
"Remove Archive", "If you close this archive, data in the form will be lost. Are you sure?"
|
||||
):
|
||||
return
|
||||
|
||||
row_list.sort()
|
||||
row_list.reverse()
|
||||
|
||||
self.twList.currentItemChanged.disconnect(self.currentItemChangedCB)
|
||||
self.twList.currentItemChanged.disconnect(self.current_item_changed_cb)
|
||||
self.twList.setSortingEnabled(False)
|
||||
|
||||
for i in row_list:
|
||||
self.twList.removeRow(i)
|
||||
|
||||
self.twList.setSortingEnabled(True)
|
||||
self.twList.currentItemChanged.connect(self.currentItemChangedCB)
|
||||
self.twList.currentItemChanged.connect(self.current_item_changed_cb)
|
||||
|
||||
if self.twList.rowCount() > 0:
|
||||
# since on a removal, we select row 0, make sure callback occurs if
|
||||
# we're already there
|
||||
if self.twList.currentRow() == 0:
|
||||
self.currentItemChangedCB(self.twList.currentItem(), None)
|
||||
self.current_item_changed_cb(self.twList.currentItem(), None)
|
||||
self.twList.selectRow(0)
|
||||
else:
|
||||
self.listCleared.emit()
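Note: remove_selection() only proceeds once dirty_flag_verification() confirms that losing unsaved form data is acceptable; the callback itself is injected through __init__. A rough sketch of what such a callback can look like (the real one is supplied by the tagger window; this Yes/No dialog is an assumption modelled on the old modifiedFlagVerification shown elsewhere in this diff):

    from PyQt5 import QtWidgets

    def dirty_flag_verification(title: str, desc: str) -> bool:
        # Ask before discarding unsaved changes; return True to proceed.
        reply = QtWidgets.QMessageBox.question(
            None, title, desc, QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
        )
        return reply == QtWidgets.QMessageBox.Yes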
|
||||
|
||||
def addPathList(self, pathlist):
|
||||
|
||||
def add_path_list(self, pathlist: list[str]) -> None:
|
||||
filelist = utils.get_recursive_filelist(pathlist)
|
||||
# we now have a list of files to add
|
||||
|
||||
# Prog dialog on Linux flakes out for small range, so scale up
|
||||
progdialog = QProgressDialog("", "Cancel", 0, len(filelist), parent=self)
|
||||
progdialog = QtWidgets.QProgressDialog("", "Cancel", 0, len(filelist), parent=self)
|
||||
progdialog.setWindowTitle("Adding Files")
|
||||
progdialog.setWindowModality(Qt.ApplicationModal)
|
||||
progdialog.setWindowModality(QtCore.Qt.WindowModality.WindowModal)
|
||||
progdialog.setMinimumDuration(300)
|
||||
centerWindowOnParent(progdialog)
|
||||
#QCoreApplication.processEvents()
|
||||
#progdialog.show()
|
||||
|
||||
QCoreApplication.processEvents()
|
||||
firstAdded = None
|
||||
center_window_on_parent(progdialog)
|
||||
|
||||
QtCore.QCoreApplication.processEvents()
|
||||
first_added = None
|
||||
rar_added_ro = False
|
||||
self.twList.setSortingEnabled(False)
|
||||
for idx, f in enumerate(filelist):
|
||||
QCoreApplication.processEvents()
|
||||
QtCore.QCoreApplication.processEvents()
|
||||
if progdialog.wasCanceled():
|
||||
break
|
||||
progdialog.setValue(idx+1)
|
||||
progdialog.setValue(idx + 1)
|
||||
progdialog.setLabelText(f)
|
||||
centerWindowOnParent(progdialog)
|
||||
QCoreApplication.processEvents()
|
||||
row = self.addPathItem(f)
|
||||
if firstAdded is None and row is not None:
|
||||
firstAdded = row
|
||||
|
||||
QtCore.QCoreApplication.processEvents()
|
||||
row = self.add_path_item(f)
|
||||
if row is not None:
|
||||
ca = self.get_archive_by_row(row)
|
||||
rar_added_ro = bool(ca and ca.archiver.name() == "RAR" and not ca.archiver.is_writable())
|
||||
if first_added is None and row != -1:
|
||||
first_added = row
|
||||
|
||||
progdialog.hide()
|
||||
QCoreApplication.processEvents()
|
||||
QtCore.QCoreApplication.processEvents()
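Note: the add loop above drives a QProgressDialog by hand: bump setValue()/setLabelText() per file, call processEvents() so the dialog repaints, and stop when wasCanceled() reports a cancel. The same pattern in isolation (file names are placeholders):

    from PyQt5 import QtCore, QtWidgets

    filelist = ["a.cbz", "b.cbr"]  # placeholder paths
    progdialog = QtWidgets.QProgressDialog("", "Cancel", 0, len(filelist))
    progdialog.setWindowTitle("Adding Files")
    progdialog.setMinimumDuration(300)
    for idx, f in enumerate(filelist):
        QtCore.QCoreApplication.processEvents()  # keep the dialog responsive
        if progdialog.wasCanceled():
            break
        progdialog.setValue(idx + 1)
        progdialog.setLabelText(f)
    progdialog.hide()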
|
||||
|
||||
if (self.settings.show_no_unrar_warning and
|
||||
self.settings.unrar_lib_path == "" and
|
||||
not ComicTaggerSettings.haveOwnUnrarLib()):
|
||||
for f in filelist:
|
||||
ext = os.path.splitext(f)[1].lower()
|
||||
if ext == ".rar" or ext == ".cbr":
|
||||
checked = OptionalMessageDialog.msg(self, "No UnRAR Ability",
|
||||
"""
|
||||
It looks like you've tried to open at least one CBR or RAR file.<br><br>
|
||||
In order for ComicTagger to read this kind of file, you will have to configure
|
||||
the location of the unrar library in the settings. Until then, ComicTagger
|
||||
will not be able read these kind of files. See the "RAR Tools" tab in the
|
||||
settings/preferences for more info.
|
||||
"""
|
||||
)
|
||||
self.settings.show_no_unrar_warning = not checked
|
||||
break
|
||||
|
||||
if firstAdded is not None:
|
||||
self.twList.selectRow(firstAdded)
|
||||
if first_added is not None:
|
||||
self.twList.selectRow(first_added)
|
||||
else:
|
||||
if len(pathlist) == 1 and os.path.isfile(pathlist[0]):
|
||||
ext = os.path.splitext(pathlist[0])[1].lower()
|
||||
if ext == ".rar" or ext == ".cbr" and self.settings.unrar_lib_path == "":
|
||||
QMessageBox.information(self, self.tr("File Open"), self.tr(
|
||||
"Selected file seems to be a rar file, "
|
||||
"and can't be read until the unrar library is configured."))
|
||||
else:
|
||||
QMessageBox.information(self, self.tr("File Open"), self.tr(
|
||||
"Selected file doesn't seem to be a comic archive."))
|
||||
QtWidgets.QMessageBox.information(
|
||||
self, "File Open", "Selected file doesn't seem to be a comic archive."
|
||||
)
|
||||
else:
|
||||
QMessageBox.information(
|
||||
self,
|
||||
self.tr("File/Folder Open"),
|
||||
self.tr("No readable comic archives were found."))
|
||||
QtWidgets.QMessageBox.information(self, "File/Folder Open", "No readable comic archives were found.")
|
||||
|
||||
if rar_added_ro:
|
||||
self.rar_ro_message()
|
||||
|
||||
self.twList.setSortingEnabled(True)
|
||||
|
||||
# Adjust column size
|
||||
self.twList.resizeColumnsToContents()
|
||||
self.twList.setColumnWidth(FileSelectionList.CRFlagColNum, 35)
|
||||
self.twList.setColumnWidth(FileSelectionList.CBLFlagColNum, 35)
|
||||
self.twList.setColumnWidth(FileSelectionList.MDFlagColNum, 35)
|
||||
self.twList.setColumnWidth(FileSelectionList.readonlyColNum, 35)
|
||||
self.twList.setColumnWidth(FileSelectionList.typeColNum, 45)
|
||||
if self.twList.columnWidth(FileSelectionList.fileColNum) > 250:
|
||||
@@ -265,207 +227,159 @@ class FileSelectionList(QWidget):
|
||||
if self.twList.columnWidth(FileSelectionList.folderColNum) > 200:
|
||||
self.twList.setColumnWidth(FileSelectionList.folderColNum, 200)
|
||||
|
||||
def isListDupe(self, path):
|
||||
r = 0
|
||||
while r < self.twList.rowCount():
|
||||
ca = self.getArchiveByRow(r)
|
||||
if ca.path == path:
|
||||
return True
|
||||
r = r + 1
|
||||
def rar_ro_message(self) -> None:
|
||||
if not self.rar_ro_shown:
|
||||
if platform.system() == "Windows":
|
||||
rar_help = windowsRarHelp
|
||||
|
||||
return False
|
||||
elif platform.system() == "Darwin":
|
||||
rar_help = macRarHelp
|
||||
|
||||
def getCurrentListRow(self, path):
|
||||
r = 0
|
||||
while r < self.twList.rowCount():
|
||||
ca = self.getArchiveByRow(r)
|
||||
if ca.path == path:
|
||||
else:
|
||||
rar_help = linuxRarHelp
|
||||
|
||||
OptionalMessageDialog.msg_no_checkbox(
|
||||
self,
|
||||
"RAR Files are Read-Only",
|
||||
"It looks like you have opened a RAR/CBR archive,\n"
|
||||
"however ComicTagger cannot currently write to them without the rar program and are marked read only!\n\n"
|
||||
f"{rar_help}",
|
||||
)
|
||||
self.rar_ro_shown = True
|
||||
|
||||
def is_list_dupe(self, path: str) -> bool:
|
||||
return self.get_current_list_row(path) >= 0
|
||||
|
||||
def get_current_list_row(self, path: str) -> int:
|
||||
for r in range(self.twList.rowCount()):
|
||||
ca = cast(ComicArchive, self.get_archive_by_row(r))
|
||||
if str(ca.path) == path:
|
||||
return r
|
||||
r = r + 1
|
||||
|
||||
return -1
|
||||
|
||||
def addPathItem(self, path):
|
||||
def add_path_item(self, path: str) -> int:
|
||||
path = str(path)
|
||||
path = os.path.abspath(path)
|
||||
# print "processing", path
|
||||
|
||||
if self.isListDupe(path):
|
||||
return self.getCurrentListRow(path)
|
||||
if self.is_list_dupe(path):
|
||||
return self.get_current_list_row(path)
|
||||
|
||||
ca = ComicArchive(
|
||||
path,
|
||||
self.settings.rar_exe_path,
|
||||
ComicTaggerSettings.getGraphic('nocover.png'))
|
||||
ca = ComicArchive(path, str(graphics_path / "nocover.png"))
|
||||
|
||||
if ca.seemsToBeAComicArchive():
|
||||
row = self.twList.rowCount()
|
||||
if ca.seems_to_be_a_comic_archive():
|
||||
row: int = self.twList.rowCount()
|
||||
self.twList.insertRow(row)
|
||||
|
||||
fi = FileInfo(ca)
|
||||
filename_item = QtWidgets.QTableWidgetItem()
|
||||
folder_item = QtWidgets.QTableWidgetItem()
|
||||
md_item = QtWidgets.QTableWidgetItem()
|
||||
readonly_item = QtWidgets.QTableWidgetItem()
|
||||
type_item = QtWidgets.QTableWidgetItem()
|
||||
|
||||
filename_item = QTableWidgetItem()
|
||||
folder_item = QTableWidgetItem()
|
||||
cix_item = FileTableWidgetItem()
|
||||
cbi_item = FileTableWidgetItem()
|
||||
readonly_item = FileTableWidgetItem()
|
||||
type_item = QTableWidgetItem()
|
||||
filename_item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
|
||||
filename_item.setData(QtCore.Qt.ItemDataRole.UserRole, ca)
|
||||
self.twList.setItem(row, FileSelectionList.fileColNum, filename_item)
|
||||
|
||||
filename_item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
|
||||
filename_item.setData(Qt.UserRole, fi)
|
||||
self.twList.setItem(
|
||||
row, FileSelectionList.fileColNum, filename_item)
|
||||
folder_item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
|
||||
self.twList.setItem(row, FileSelectionList.folderColNum, folder_item)
|
||||
|
||||
folder_item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
|
||||
self.twList.setItem(
|
||||
row, FileSelectionList.folderColNum, folder_item)
|
||||
|
||||
type_item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
|
||||
type_item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
|
||||
self.twList.setItem(row, FileSelectionList.typeColNum, type_item)
|
||||
|
||||
cix_item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
|
||||
cix_item.setTextAlignment(Qt.AlignHCenter)
|
||||
self.twList.setItem(row, FileSelectionList.CRFlagColNum, cix_item)
|
||||
md_item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
|
||||
md_item.setTextAlignment(QtCore.Qt.AlignmentFlag.AlignHCenter)
|
||||
self.twList.setItem(row, FileSelectionList.MDFlagColNum, md_item)
|
||||
|
||||
cbi_item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
|
||||
cbi_item.setTextAlignment(Qt.AlignHCenter)
|
||||
self.twList.setItem(row, FileSelectionList.CBLFlagColNum, cbi_item)
|
||||
readonly_item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
|
||||
readonly_item.setTextAlignment(QtCore.Qt.AlignmentFlag.AlignHCenter)
|
||||
self.twList.setItem(row, FileSelectionList.readonlyColNum, readonly_item)
|
||||
|
||||
readonly_item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
|
||||
readonly_item.setTextAlignment(Qt.AlignHCenter)
|
||||
self.twList.setItem(
|
||||
row, FileSelectionList.readonlyColNum, readonly_item)
|
||||
|
||||
self.updateRow(row)
|
||||
self.update_row(row)
|
||||
|
||||
return row
|
||||
return -1
|
||||
|
||||
def updateRow(self, row):
|
||||
fi = self.twList.item(row, FileSelectionList.dataColNum).data(
|
||||
Qt.UserRole) #.toPyObject()
|
||||
def update_row(self, row: int) -> None:
|
||||
if row >= 0:
|
||||
ca: ComicArchive = self.twList.item(row, FileSelectionList.dataColNum).data(QtCore.Qt.ItemDataRole.UserRole)
|
||||
|
||||
filename_item = self.twList.item(row, FileSelectionList.fileColNum)
|
||||
folder_item = self.twList.item(row, FileSelectionList.folderColNum)
|
||||
cix_item = self.twList.item(row, FileSelectionList.CRFlagColNum)
|
||||
cbi_item = self.twList.item(row, FileSelectionList.CBLFlagColNum)
|
||||
type_item = self.twList.item(row, FileSelectionList.typeColNum)
|
||||
readonly_item = self.twList.item(row, FileSelectionList.readonlyColNum)
|
||||
filename_item = self.twList.item(row, FileSelectionList.fileColNum)
|
||||
folder_item = self.twList.item(row, FileSelectionList.folderColNum)
|
||||
md_item = self.twList.item(row, FileSelectionList.MDFlagColNum)
|
||||
type_item = self.twList.item(row, FileSelectionList.typeColNum)
|
||||
readonly_item = self.twList.item(row, FileSelectionList.readonlyColNum)
|
||||
|
||||
item_text = os.path.split(fi.ca.path)[0]
|
||||
folder_item.setText(item_text)
|
||||
folder_item.setData(Qt.ToolTipRole, item_text)
|
||||
item_text = os.path.split(ca.path)[0]
|
||||
folder_item.setText(item_text)
|
||||
folder_item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
|
||||
|
||||
item_text = os.path.split(fi.ca.path)[1]
|
||||
filename_item.setText(item_text)
|
||||
filename_item.setData(Qt.ToolTipRole, item_text)
|
||||
item_text = os.path.split(ca.path)[1]
|
||||
filename_item.setText(item_text)
|
||||
filename_item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
|
||||
|
||||
if fi.ca.isZip():
|
||||
item_text = "ZIP"
|
||||
elif fi.ca.isRar():
|
||||
item_text = "RAR"
|
||||
else:
|
||||
item_text = ""
|
||||
type_item.setText(item_text)
|
||||
type_item.setData(Qt.ToolTipRole, item_text)
|
||||
item_text = ca.archiver.name()
|
||||
type_item.setText(item_text)
|
||||
type_item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
|
||||
|
||||
if fi.ca.hasCIX():
|
||||
cix_item.setCheckState(Qt.Checked)
|
||||
cix_item.setData(Qt.UserRole, True)
|
||||
else:
|
||||
cix_item.setData(Qt.UserRole, False)
|
||||
cix_item.setCheckState(Qt.Unchecked)
|
||||
styles = ", ".join(x for x in ca.get_supported_metadata() if ca.has_metadata(x))
|
||||
md_item.setText(styles)
|
||||
|
||||
if fi.ca.hasCBI():
|
||||
cbi_item.setCheckState(Qt.Checked)
|
||||
cbi_item.setData(Qt.UserRole, True)
|
||||
else:
|
||||
cbi_item.setData(Qt.UserRole, False)
|
||||
cbi_item.setCheckState(Qt.Unchecked)
|
||||
if not ca.is_writable():
|
||||
readonly_item.setCheckState(QtCore.Qt.CheckState.Checked)
|
||||
readonly_item.setData(QtCore.Qt.ItemDataRole.UserRole, True)
|
||||
readonly_item.setText(" ")
|
||||
else:
|
||||
readonly_item.setData(QtCore.Qt.ItemDataRole.UserRole, False)
|
||||
readonly_item.setCheckState(QtCore.Qt.CheckState.Unchecked)
|
||||
# This is a nbsp it sorts after a space ' '
|
||||
readonly_item.setText("\xa0")
|
||||
|
||||
if not fi.ca.isWritable():
|
||||
readonly_item.setCheckState(Qt.Checked)
|
||||
readonly_item.setData(Qt.UserRole, True)
|
||||
else:
|
||||
readonly_item.setData(Qt.UserRole, False)
|
||||
readonly_item.setCheckState(Qt.Unchecked)
|
||||
|
||||
# Reading these will force them into the ComicArchive's cache
|
||||
fi.ca.readCIX()
|
||||
fi.ca.hasCBI()
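Note: where the old update_row() kept separate CR/CBL checkbox cells, the new code collapses the tag information into one text cell built from a comprehension over the archive's supported styles. With illustrative stand-in values for ca.get_supported_metadata() and ca.has_metadata():

    supported = ["cr", "cbl", "comet"]            # stand-in style ids
    present = {"cr": True, "cbl": False, "comet": True}

    styles = ", ".join(x for x in supported if present[x])
    assert styles == "cr, comet"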
|
||||
|
||||
def getSelectedArchiveList(self):
|
||||
ca_list = []
|
||||
def get_selected_archive_list(self) -> list[ComicArchive]:
|
||||
ca_list: list[ComicArchive] = []
|
||||
for r in range(self.twList.rowCount()):
|
||||
item = self.twList.item(r, FileSelectionList.dataColNum)
|
||||
if item.isSelected():
|
||||
fi = item.data(Qt.UserRole)
|
||||
ca_list.append(fi.ca)
|
||||
ca: ComicArchive = item.data(QtCore.Qt.ItemDataRole.UserRole)
|
||||
ca_list.append(ca)
|
||||
|
||||
return ca_list
|
||||
|
||||
def updateCurrentRow(self):
|
||||
self.updateRow(self.twList.currentRow())
|
||||
def update_current_row(self) -> None:
|
||||
self.update_row(self.twList.currentRow())
|
||||
|
||||
def updateSelectedRows(self):
|
||||
def update_selected_rows(self) -> None:
|
||||
self.twList.setSortingEnabled(False)
|
||||
for r in range(self.twList.rowCount()):
|
||||
item = self.twList.item(r, FileSelectionList.dataColNum)
|
||||
if item.isSelected():
|
||||
self.updateRow(r)
|
||||
self.update_row(r)
|
||||
self.twList.setSortingEnabled(True)
|
||||
|
||||
def currentItemChangedCB(self, curr, prev):
|
||||
def current_item_changed_cb(self, curr: QtCore.QModelIndex | None, prev: QtCore.QModelIndex | None) -> None:
|
||||
if curr is not None:
|
||||
new_idx = curr.row()
|
||||
old_idx = -1
|
||||
if prev is not None:
|
||||
old_idx = prev.row()
|
||||
|
||||
new_idx = curr.row()
|
||||
old_idx = -1
|
||||
if prev is not None:
|
||||
old_idx = prev.row()
|
||||
#print("old {0} new {1}".format(old_idx, new_idx))
|
||||
|
||||
if old_idx == new_idx:
|
||||
return
|
||||
|
||||
# don't allow change if modified
|
||||
if prev is not None and new_idx != old_idx:
|
||||
if not self.modifiedFlagVerification(
|
||||
"Change Archive",
|
||||
"If you change archives now, data in the form will be lost. Are you sure?"):
|
||||
self.twList.currentItemChanged.disconnect(
|
||||
self.currentItemChangedCB)
|
||||
self.twList.setCurrentItem(prev)
|
||||
self.twList.currentItemChanged.connect(
|
||||
self.currentItemChangedCB)
|
||||
# Need to defer this revert selection, for some reason
|
||||
QTimer.singleShot(1, self.revertSelection)
|
||||
if old_idx == new_idx:
|
||||
return
|
||||
|
||||
fi = self.twList.item(new_idx, FileSelectionList.dataColNum).data(
|
||||
Qt.UserRole) #.toPyObject()
|
||||
self.selectionChanged.emit(QVariant(fi))
|
||||
# don't allow change if modified
|
||||
if prev is not None and new_idx != old_idx:
|
||||
if not self.dirty_flag_verification(
|
||||
"Change Archive", "If you change archives now, data in the form will be lost. Are you sure?"
|
||||
):
|
||||
self.twList.currentItemChanged.disconnect(self.current_item_changed_cb)
|
||||
self.twList.setCurrentItem(prev)
|
||||
self.twList.currentItemChanged.connect(self.current_item_changed_cb)
|
||||
# Need to defer this revert selection, for some reason
|
||||
QtCore.QTimer.singleShot(1, self.revert_selection)
|
||||
return
|
||||
|
||||
def revertSelection(self):
|
||||
fi = self.twList.item(new_idx, FileSelectionList.dataColNum).data(QtCore.Qt.ItemDataRole.UserRole)
|
||||
self.selectionChanged.emit(QtCore.QVariant(fi))
|
||||
|
||||
def revert_selection(self) -> None:
|
||||
self.twList.selectRow(self.twList.currentRow())
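Note: current_item_changed_cb() disconnects itself before calling setCurrentItem(prev) so that restoring the previous row cannot re-enter the callback, and the visible selection is put back one event-loop turn later via QTimer.singleShot(1, self.revert_selection). The guard pattern on its own (table and callback are assumed to exist):

    from PyQt5 import QtCore

    def restore_previous_row(table, prev_item, callback):
        table.currentItemChanged.disconnect(callback)   # avoid recursing into the callback
        table.setCurrentItem(prev_item)                  # put the old current item back
        table.currentItemChanged.connect(callback)
        # the selection highlight must be restored after the current event finishes
        QtCore.QTimer.singleShot(1, lambda: table.selectRow(table.currentRow()))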
|
||||
|
||||
def modifiedFlagVerification(self, title, desc):
|
||||
if self.modifiedFlag:
|
||||
reply = QMessageBox.question(self,
|
||||
self.tr(title),
|
||||
self.tr(desc),
|
||||
QMessageBox.Yes, QMessageBox.No)
|
||||
|
||||
if reply != QMessageBox.Yes:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
# Attempt to use a special checkbox widget in the cell.
|
||||
# Couldn't figure out how to disable it with "enabled" colors
|
||||
#w = QWidget()
|
||||
#cb = QCheckBox(w)
|
||||
# cb.setCheckState(Qt.Checked)
|
||||
#layout = QHBoxLayout()
|
||||
# layout.addWidget(cb)
|
||||
# layout.setAlignment(Qt.AlignHCenter)
|
||||
# layout.setMargin(2)
|
||||
# w.setLayout(layout)
|
||||
#self.twList.setCellWidget(row, 2, w)
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
from comicapi.genericmetadata import *
|
||||
5
comictaggerlib/graphics/__init__.py
Normal file
@@ -0,0 +1,5 @@
from __future__ import annotations

import importlib.resources

graphics_path = importlib.resources.files(__package__)
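Note: this new package replaces ComicTaggerSettings.getGraphic() as the way to reach bundled images; graphics_path is an importlib.resources traversable, so callers join file names with the / operator, as other files in this diff do:

    from PyQt5 import QtGui

    from comictaggerlib.graphics import graphics_path

    icon = QtGui.QIcon(str(graphics_path / "app.png"))    # window/dock icon
    nocover = str(graphics_path / "nocover.png")           # default cover placeholder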
102
comictaggerlib/graphics/eye.svg
Normal file
@@ -0,0 +1,102 @@
[102 lines of SVG markup omitted: Inkscape-exported "eye" icon, viewBox 0 0 469.333 469.333, fill #333333]
After Width: | Height: | Size: 2.1 KiB
106
comictaggerlib/graphics/hidden.svg
Normal file
@@ -0,0 +1,106 @@
[106 lines of SVG markup omitted: Inkscape-exported "hidden" (crossed-out eye) icon, viewBox 0 0 469.44 469.44, fill #333333]
After Width: | Height: | Size: 2.6 KiB
151
comictaggerlib/gui.py
Normal file
@@ -0,0 +1,151 @@
from __future__ import annotations

import logging.handlers
import os
import platform
import sys
import traceback
import types

import settngs

from comictaggerlib.ctsettings import ct_ns
from comictaggerlib.graphics import graphics_path
from comictalker.comictalker import ComicTalker

logger = logging.getLogger("comictagger")
try:
qt_available = True
from PyQt5 import QtCore, QtGui, QtWidgets

def show_exception_box(log_msg: str) -> None:
"""Checks if a QApplication instance is available and shows a messagebox with the exception message.
If unavailable (non-console application), log an additional notice.
"""
if QtWidgets.QApplication.instance() is not None:
errorbox = QtWidgets.QMessageBox()
errorbox.setText(log_msg)
errorbox.exec()
QtWidgets.QApplication.exit(1)
else:
logger.debug("No QApplication instance available.")

class UncaughtHook(QtCore.QObject):
_exception_caught = QtCore.pyqtSignal(object)

def __init__(self) -> None:
super().__init__()

# this registers the exception_hook() function as hook with the Python interpreter
sys.excepthook = self.exception_hook

# connect signal to execute the message box function always on main thread
self._exception_caught.connect(show_exception_box)

def exception_hook(
self, exc_type: type[BaseException], exc_value: BaseException, exc_traceback: types.TracebackType | None
) -> None:
"""Function handling uncaught exceptions.
It is triggered each time an uncaught exception occurs.
"""
if issubclass(exc_type, KeyboardInterrupt):
# ignore keyboard interrupt to support console applications
sys.__excepthook__(exc_type, exc_value, exc_traceback)
else:
exc_info = (exc_type, exc_value, exc_traceback)
trace_back = "".join(traceback.format_tb(exc_traceback))
log_msg = f"{exc_type.__name__}: {exc_value}\n\n{trace_back}"
logger.critical("Uncaught exception: %s: %s", exc_type.__name__, exc_value, exc_info=exc_info)

# trigger message box show
self._exception_caught.emit(f"Oops. An unexpected error occurred:\n{log_msg}")

qt_exception_hook = UncaughtHook()
from comictaggerlib.taggerwindow import TaggerWindow

try:
# needed here to initialize QWebEngine
from PyQt5.QtWebEngineWidgets import QWebEngineView # noqa: F401

qt_webengine_available = True
except ImportError:
qt_webengine_available = False

class Application(QtWidgets.QApplication):
openFileRequest = QtCore.pyqtSignal(QtCore.QUrl, name="openfileRequest")

# Handles "Open With" from Finder on macOS
def event(self, event: QtCore.QEvent) -> bool:
if event.type() == QtCore.QEvent.FileOpen:
logger.info(event.url().toLocalFile())
self.openFileRequest.emit(event.url())
return True
return super().event(event)

except ImportError as e:

def show_exception_box(log_msg: str) -> None: ...

logger.exception("Qt unavailable")
qt_available = False
import_error = e


def open_tagger_window(
talkers: dict[str, ComicTalker], config: settngs.Config[ct_ns], error: tuple[str, bool] | None
) -> None:
os.environ["QtWidgets.QT_AUTO_SCREEN_SCALE_FACTOR"] = "1"
args = [sys.argv[0]]
if config[0].Runtime_Options__darkmode:
args.extend(["-platform", "windows:darkmode=2"])
app = Application(args)
if error is not None:
show_exception_box(error[0])
if error[1]:
raise SystemExit(1)

# needed to catch initial open file events (macOS)
app.openFileRequest.connect(lambda x: config[0].Runtime_Options__files.append(x.toLocalFile()))

if platform.system() == "Darwin":
# Set the MacOS dock icon
app.setWindowIcon(QtGui.QIcon(str(graphics_path / "app.png")))

if platform.system() == "Windows":
# For pure python, tell windows that we're not python,
# so we can have our own taskbar icon
import ctypes

myappid = "comictagger" # arbitrary string
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid) # type: ignore[attr-defined]
# force close of console window
swp_hidewindow = 0x0080
console_wnd = ctypes.windll.kernel32.GetConsoleWindow() # type: ignore[attr-defined]
if console_wnd != 0:
ctypes.windll.user32.SetWindowPos(console_wnd, None, 0, 0, 0, 0, swp_hidewindow) # type: ignore[attr-defined]

if platform.system() != "Linux":
img = QtGui.QPixmap(str(graphics_path / "tags.png"))

splash = QtWidgets.QSplashScreen(img)
splash.show()
splash.raise_()
QtWidgets.QApplication.processEvents()

try:
tagger_window = TaggerWindow(config[0].Runtime_Options__files, config, talkers)
tagger_window.setWindowIcon(QtGui.QIcon(str(graphics_path / "app.png")))
tagger_window.show()

# Catch open file events (macOS)
app.openFileRequest.connect(tagger_window.open_file_event)

if platform.system() != "Linux":
splash.finish(tagger_window)

sys.exit(app.exec())
except Exception:
logger.exception("GUI mode failed")
QtWidgets.QMessageBox.critical(
QtWidgets.QMainWindow(), "Error", "Unhandled exception in app:\n" + traceback.format_exc()
)
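Note: gui.py installs the hook at import time (qt_exception_hook = UncaughtHook()), so exceptions escaping Qt slots are logged and surfaced in a message box instead of disappearing. A stripped-down sketch of that excepthook-to-signal bridge, assuming a QApplication is running:

    import sys

    from PyQt5 import QtCore, QtWidgets

    class MiniHook(QtCore.QObject):
        caught = QtCore.pyqtSignal(str)

        def __init__(self) -> None:
            super().__init__()
            sys.excepthook = self.hook        # route uncaught exceptions here
            self.caught.connect(self.show)    # hop back onto the GUI thread

        def hook(self, exc_type, exc_value, exc_tb) -> None:
            self.caught.emit(f"{exc_type.__name__}: {exc_value}")

        def show(self, msg: str) -> None:
            if QtWidgets.QApplication.instance() is not None:
                QtWidgets.QMessageBox.critical(None, "Error", msg)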
||||
@@ -1,80 +1,77 @@
|
||||
"""A class to manage fetching and caching of images by URL"""
|
||||
|
||||
# Copyright 2012-2014 Anthony Beville
|
||||
|
||||
#
|
||||
# Copyright 2012-2014 ComicTagger Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
import sqlite3 as lite
|
||||
import os
|
||||
import datetime
|
||||
import logging
|
||||
import os
|
||||
import pathlib
|
||||
import shutil
|
||||
import sqlite3 as lite
|
||||
import tempfile
|
||||
import urllib.request, urllib.parse, urllib.error
|
||||
import ssl
|
||||
#import urllib2
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
try:
|
||||
from PyQt5.QtNetwork import QNetworkAccessManager, QNetworkRequest
|
||||
from PyQt5.QtCore import QUrl, pyqtSignal, QObject, QByteArray
|
||||
from PyQt5 import QtGui
|
||||
except ImportError:
|
||||
# No Qt, so define a few dummy QObjects to help us compile
|
||||
class QObject():
|
||||
import requests
|
||||
|
||||
def __init__(self, *args):
|
||||
pass
|
||||
from comictaggerlib import ctversion
|
||||
|
||||
class QByteArray():
|
||||
pass
|
||||
if TYPE_CHECKING:
|
||||
from PyQt5 import QtCore, QtNetwork
|
||||
|
||||
class pyqtSignal():
|
||||
|
||||
def __init__(self, *args):
|
||||
pass
|
||||
|
||||
def emit(a, b, c):
|
||||
pass
|
||||
|
||||
from .settings import ComicTaggerSettings
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ImageFetcherException(Exception):
|
||||
pass
|
||||
class ImageFetcherException(Exception): ...
|
||||
|
||||
|
||||
class ImageFetcher(QObject):
|
||||
def fetch_complete(url: str, image_data: bytes | QtCore.QByteArray) -> None: ...
|
||||
|
||||
fetchComplete = pyqtSignal(QByteArray, int)
|
||||
|
||||
def __init__(self):
|
||||
QObject.__init__(self)
|
||||
class ImageFetcher:
|
||||
image_fetch_complete = fetch_complete
|
||||
qt_available = True
|
||||
|
||||
self.settings_folder = ComicTaggerSettings.getSettingsFolder()
|
||||
self.db_file = os.path.join(self.settings_folder, "image_url_cache.db")
|
||||
self.cache_folder = os.path.join(self.settings_folder, "image_cache")
|
||||
def __init__(self, cache_folder: pathlib.Path) -> None:
|
||||
self.db_file = cache_folder / "image_url_cache.db"
|
||||
self.cache_folder = cache_folder / "image_cache"
|
||||
|
||||
self.user_data = None
|
||||
self.fetched_url = ""
|
||||
|
||||
if self.qt_available:
|
||||
try:
|
||||
from PyQt5 import QtNetwork
|
||||
|
||||
self.qt_available = True
|
||||
except ImportError:
|
||||
self.qt_available = False
|
||||
if not os.path.exists(self.db_file):
|
||||
self.create_image_db()
|
||||
|
||||
# always use a tls context for urlopen
|
||||
self.ssl = ssl.SSLContext(ssl.PROTOCOL_TLS)
|
||||
if self.qt_available:
|
||||
self.nam = QtNetwork.QNetworkAccessManager()
|
||||
|
||||
def clearCache(self):
|
||||
def clear_cache(self) -> None:
|
||||
os.unlink(self.db_file)
|
||||
if os.path.isdir(self.cache_folder):
|
||||
shutil.rmtree(self.cache_folder)
|
||||
self.cache_folder.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
def fetch(self, url, user_data=None, blocking=False):
|
||||
def fetch(self, url: str, blocking: bool = False) -> bytes:
|
||||
"""
|
||||
If called with blocking=True, this will block until the image is
|
||||
fetched.
|
||||
@@ -82,113 +79,92 @@ class ImageFetcher(QObject):
|
||||
background, and emit a signal when done
|
||||
"""
|
||||
|
||||
self.user_data = user_data
|
||||
self.fetched_url = url
|
||||
|
||||
# first look in the DB
|
||||
image_data = self.get_image_from_cache(url)
|
||||
if blocking:
|
||||
if image_data is None:
|
||||
# Async for retrieving covers seems to work well
|
||||
if blocking or not self.qt_available:
|
||||
if not image_data:
|
||||
try:
|
||||
image_data = urllib.request.urlopen(url, context=self.ssl).read()
|
||||
image_data = requests.get(url, headers={"user-agent": "comictagger/" + ctversion.version}).content
|
||||
# save the image to the cache
|
||||
self.add_image_to_cache(self.fetched_url, image_data)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
raise ImageFetcherException("Network Error!")
|
||||
|
||||
# save the image to the cache
|
||||
self.add_image_to_cache(self.fetched_url, image_data)
|
||||
logger.exception("Fetching url failed: %s")
|
||||
raise ImageFetcherException("Network Error!") from e
|
||||
return image_data
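Note: the blocking branch above is now a plain cache-then-download lookup: check the SQLite-backed cache, fetch with requests using a comictagger user agent on a miss, and write the bytes back before returning. Reduced to its essentials (the cache helpers are the methods defined further down; ctversion supplies the app version string):

    import requests

    def fetch_blocking(self, url: str) -> bytes:
        image_data = self.get_image_from_cache(url)
        if not image_data:
            resp = requests.get(url, headers={"user-agent": "comictagger/" + ctversion.version})
            image_data = resp.content
            self.add_image_to_cache(url, image_data)
        return image_data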
|
||||
|
||||
else:
|
||||
if self.qt_available:
|
||||
from PyQt5 import QtCore, QtNetwork
|
||||
|
||||
# if we found it, just emit the signal asap
|
||||
if image_data is not None:
|
||||
self.fetchComplete.emit(QByteArray(image_data), self.user_data)
|
||||
return
|
||||
if image_data:
|
||||
ImageFetcher.image_fetch_complete(url, QtCore.QByteArray(image_data))
|
||||
return b""
|
||||
|
||||
# didn't find it. look online
|
||||
self.nam = QNetworkAccessManager()
|
||||
self.nam.finished.connect(self.finishRequest)
|
||||
self.nam.get(QNetworkRequest(QUrl(url)))
|
||||
self.nam.finished.connect(self.finish_request)
|
||||
self.nam.get(QtNetwork.QNetworkRequest(QtCore.QUrl(url)))
|
||||
|
||||
# we'll get called back when done...
|
||||
return b""
|
||||
|
||||
def finishRequest(self, reply):
|
||||
|
||||
def finish_request(self, reply: QtNetwork.QNetworkReply) -> None:
|
||||
# read in the image data
|
||||
logger.debug("request finished")
|
||||
image_data = reply.readAll()
|
||||
|
||||
# save the image to the cache
|
||||
self.add_image_to_cache(self.fetched_url, image_data)
|
||||
self.add_image_to_cache(reply.request().url().toString(), image_data)
|
||||
|
||||
self.fetchComplete.emit(QByteArray(image_data), self.user_data)
|
||||
|
||||
def create_image_db(self):
|
||||
ImageFetcher.image_fetch_complete(reply.request().url().toString(), image_data)
|
||||
|
||||
def create_image_db(self) -> None:
|
||||
# this will wipe out any existing version
|
||||
open(self.db_file, 'w').close()
|
||||
open(self.db_file, "wb").close()
|
||||
|
||||
# wipe any existing image cache folder too
|
||||
if os.path.isdir(self.cache_folder):
|
||||
shutil.rmtree(self.cache_folder)
|
||||
os.makedirs(self.cache_folder)
|
||||
|
||||
con = lite.connect(self.db_file)
|
||||
|
||||
# create tables
|
||||
with con:
|
||||
|
||||
with lite.connect(self.db_file) as con:
|
||||
cur = con.cursor()
|
||||
|
||||
cur.execute("CREATE TABLE Images(" +
|
||||
"url TEXT," +
|
||||
"filename TEXT," +
|
||||
"timestamp TEXT," +
|
||||
"PRIMARY KEY (url))"
|
||||
)
|
||||
|
||||
def add_image_to_cache(self, url, image_data):
|
||||
|
||||
con = lite.connect(self.db_file)
|
||||
|
||||
with con:
|
||||
cur.execute("CREATE TABLE Images(url TEXT,filename TEXT,timestamp TEXT,PRIMARY KEY (url))")
|
||||
|
||||
def add_image_to_cache(self, url: str, image_data: bytes | QtCore.QByteArray) -> None:
|
||||
with lite.connect(self.db_file) as con:
|
||||
cur = con.cursor()
|
||||
|
||||
timestamp = datetime.datetime.now()
|
||||
|
||||
tmp_fd, filename = tempfile.mkstemp(
|
||||
dir=self.cache_folder, prefix="img")
|
||||
f = os.fdopen(tmp_fd, 'w+b')
|
||||
f.write(image_data)
|
||||
f.close()
|
||||
tmp_fd, filename = tempfile.mkstemp(dir=self.cache_folder, prefix="img")
|
||||
with os.fdopen(tmp_fd, "w+b") as f:
|
||||
f.write(bytes(image_data))
|
||||
|
||||
cur.execute("INSERT or REPLACE INTO Images VALUES(?, ?, ?)",
|
||||
(url,
|
||||
filename,
|
||||
timestamp)
|
||||
)
|
||||
cur.execute("INSERT or REPLACE INTO Images VALUES(?, ?, ?)", (url, filename, timestamp))
|
||||
|
||||
def get_image_from_cache(self, url):
|
||||
|
||||
con = lite.connect(self.db_file)
|
||||
with con:
|
||||
def get_image_from_cache(self, url: str) -> bytes:
|
||||
with lite.connect(self.db_file) as con:
|
||||
cur = con.cursor()
|
||||
|
||||
cur.execute("SELECT filename FROM Images WHERE url=?", [url])
|
||||
row = cur.fetchone()
|
||||
|
||||
if row is None:
|
||||
return None
|
||||
else:
|
||||
filename = row[0]
|
||||
image_data = None
|
||||
return b""
|
||||
|
||||
try:
|
||||
with open(filename, 'rb') as f:
|
||||
image_data = f.read()
|
||||
f.close()
|
||||
except IOError as e:
|
||||
pass
|
||||
filename = row[0]
|
||||
image_data = b""
|
||||
|
||||
return image_data
|
||||
try:
|
||||
with open(filename, "rb") as f:
|
||||
image_data = f.read()
|
||||
f.close()
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
return image_data
|
||||
|
||||
227
comictaggerlib/imagehasher.py
Executable file → Normal file
@@ -1,83 +1,81 @@
|
||||
"""A class to manage creating image content hashes, and calculate hamming distances"""
|
||||
|
||||
# Copyright 2013 Anthony Beville
|
||||
|
||||
#
|
||||
# Copyright 2013 ComicTagger Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
import io
|
||||
import sys
|
||||
from functools import reduce
|
||||
import itertools
|
||||
import logging
|
||||
import math
|
||||
from collections.abc import Sequence
|
||||
from statistics import median
|
||||
from typing import TypeVar
|
||||
|
||||
try:
|
||||
from PIL import Image
|
||||
from PIL import WebPImagePlugin
|
||||
|
||||
pil_available = True
|
||||
except ImportError:
|
||||
pil_available = False
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ImageHasher(object):
|
||||
|
||||
def __init__(self, path=None, data=None, width=8, height=8):
|
||||
#self.hash_size = size
|
||||
class ImageHasher:
|
||||
def __init__(
|
||||
self, path: str | None = None, image: Image | None = None, data: bytes = b"", width: int = 8, height: int = 8
|
||||
) -> None:
|
||||
self.width = width
|
||||
self.height = height
|
||||
|
||||
if path is None and data is None:
|
||||
raise IOError
|
||||
else:
|
||||
try:
|
||||
if path is not None:
|
||||
self.image = Image.open(path)
|
||||
else:
|
||||
self.image = Image.open(io.BytesIO(data))
|
||||
except Exception as e:
|
||||
print("Image data seems corrupted! [{}]".format(e))
|
||||
# just generate a bogus image
|
||||
self.image = Image.new("L", (1, 1))
|
||||
if path is None and not data and not image:
|
||||
raise OSError
|
||||
|
||||
if image is not None:
|
||||
self.image = image
|
||||
return
|
||||
|
||||
def average_hash(self):
|
||||
try:
|
||||
image = self.image.resize(
|
||||
(self.width, self.height), Image.ANTIALIAS).convert("L")
|
||||
except Exception as e:
|
||||
sys.exc_clear()
|
||||
print("average_hash error:", e)
|
||||
return int(0)
|
||||
if path is not None:
|
||||
self.image = Image.open(path)
|
||||
else:
|
||||
self.image = Image.open(io.BytesIO(data))
|
||||
except Exception:
|
||||
logger.exception("Image data seems corrupted!")
|
||||
# just generate a bogus image
|
||||
self.image = Image.new("L", (1, 1))
|
||||
|
||||
def average_hash(self) -> int:
|
||||
try:
|
||||
image = self.image.resize((self.width, self.height), Image.Resampling.LANCZOS).convert("L")
|
||||
except Exception:
|
||||
logger.exception("average_hash error")
|
||||
return 0
|
||||
|
||||
pixels = list(image.getdata())
|
||||
avg = sum(pixels) / len(pixels)
|
||||
|
||||
def compare_value_to_avg(i):
|
||||
return (1 if i > avg else 0)
|
||||
diff = "".join(str(int(p > avg)) for p in pixels)
|
||||
|
||||
bitlist = list(map(compare_value_to_avg, pixels))
|
||||
result = int(diff, 2)
|
||||
|
||||
# build up an int value from the bit list, one bit at a time
|
||||
def set_bit(x, idx_val):
|
||||
(idx, val) = idx_val
|
||||
return (x | (val << idx))
|
||||
|
||||
result = reduce(set_bit, enumerate(bitlist), 0)
|
||||
|
||||
# print("{0:016x}".format(result))
|
||||
return result
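Note: the rewritten average_hash() emits one bit per pixel (1 when the pixel is brighter than the mean) and parses the bit string as a base-2 integer, replacing the old reduce/set_bit construction. A tiny worked example with fake greyscale values:

    pixels = [10, 200, 30, 250]      # fake 2x2 greyscale image
    avg = sum(pixels) / len(pixels)  # 122.5

    diff = "".join(str(int(p > avg)) for p in pixels)
    assert diff == "0101"
    assert int(diff, 2) == 5         # the resulting hash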
|
||||
|
||||
def average_hash2(self):
|
||||
pass
|
||||
def average_hash2(self) -> None:
|
||||
"""
|
||||
# Got this one from somewhere on the net. Not a clue how the 'convolve2d'
|
||||
# works!
|
||||
# Got this one from somewhere on the net. Not a clue how the 'convolve2d' works!
|
||||
|
||||
from numpy import array
|
||||
from scipy.signal import convolve2d
|
||||
@@ -91,103 +89,102 @@ class ImageHasher(object):
|
||||
result = reduce(lambda x, (y, z): x | (z << y),
|
||||
enumerate(map(lambda i: 0 if i < 0 else 1, filt_data)),
|
||||
0)
|
||||
#print("{0:016x}".format(result))
|
||||
return result
|
||||
"""
|
||||
|
||||
def dct_average_hash(self):
|
||||
pass
|
||||
def p_hash(self) -> int:
|
||||
"""
|
||||
# Algorithm source: http://syntaxcandy.blogspot.com/2012/08/perceptual-hash.html
|
||||
|
||||
1. Reduce size. Like Average Hash, pHash starts with a small image.
|
||||
However, the image is larger than 8x8; 32x32 is a good size. This
|
||||
is really done to simplify the DCT computation and not because it
|
||||
is needed to reduce the high frequencies.
|
||||
|
||||
2. Reduce color. The image is reduced to a grayscale just to further
|
||||
simplify the number of computations.
|
||||
|
||||
3. Compute the DCT. The DCT separates the image into a collection of
|
||||
frequencies and scalars. While JPEG uses an 8x8 DCT, this algorithm
|
||||
uses a 32x32 DCT.
|
||||
|
||||
4. Reduce the DCT. This is the magic step. While the DCT is 32x32,
|
||||
just keep the top-left 8x8. Those represent the lowest frequencies in
|
||||
the picture.
|
||||
|
||||
5. Compute the average value. Like the Average Hash, compute the mean DCT
|
||||
value (using only the 8x8 DCT low-frequency values and excluding the first
|
||||
term since the DC coefficient can be significantly different from the other
|
||||
values and will throw off the average). Thanks to David Starkweather for the
|
||||
added information about pHash. He wrote: "the dct hash is based on the low 2D
|
||||
DCT coefficients starting at the second from lowest, leaving out the first DC
|
||||
term. This excludes completely flat image information (i.e. solid colors) from
|
||||
being included in the hash description."
|
||||
|
||||
6. Further reduce the DCT. This is the magic step. Set the 64 hash bits to 0 or
|
||||
1 depending on whether each of the 64 DCT values is above or below the average
|
||||
value. The result doesn't tell us the actual low frequencies; it just tells us
|
||||
the very-rough relative scale of the frequencies to the mean. The result will not
|
||||
vary as long as the overall structure of the image remains the same; this can
|
||||
survive gamma and color histogram adjustments without a problem.
|
||||
|
||||
7. Construct the hash. Set the 64 bits into a 64-bit integer. The order does not
|
||||
matter, just as long as you are consistent.
|
||||
Pure python version of Perceptual Hash computation of https://github.com/JohannesBuchner/imagehash/tree/master
|
||||
Implementation follows http://www.hackerfactor.com/blog/index.php?/archives/432-Looks-Like-It.html
|
||||
"""
|
||||
"""
|
||||
import numpy
|
||||
import scipy.fftpack
|
||||
numpy.set_printoptions(threshold=10000, linewidth=200, precision=2, suppress=True)
|
||||
|
||||
# Step 1,2
|
||||
im = self.image.resize((32, 32), Image.ANTIALIAS).convert("L")
|
||||
in_data = numpy.asarray(im)
|
||||
def generate_dct2(block: Sequence[Sequence[float]], axis: int = 0) -> list[list[float]]:
|
||||
def dct1(block: Sequence[float]) -> list[float]:
|
||||
"""Perform 1D Discrete Cosine Transform (DCT) on a given block."""
|
||||
N = len(block)
|
||||
dct_block = [0.0] * N
|
||||
|
||||
# Step 3
|
||||
dct = scipy.fftpack.dct(in_data.astype(float))
|
||||
for k in range(N):
|
||||
sum_val = 0.0
|
||||
for n in range(N):
|
||||
cos_val = math.cos(math.pi * k * (2 * n + 1) / (2 * N))
|
||||
sum_val += block[n] * cos_val
|
||||
dct_block[k] = sum_val
|
||||
|
||||
# Step 4
|
||||
# Just skip the top and left rows when slicing, as suggested somewhere else...
|
||||
lofreq_dct = dct[1:9, 1:9].flatten()
|
||||
return dct_block
|
||||
|
||||
# Step 5
|
||||
avg = (lofreq_dct.sum()) / (lofreq_dct.size)
|
||||
median = numpy.median(lofreq_dct)
|
||||
"""Perform 2D Discrete Cosine Transform (DCT) on a given block along the specified axis."""
|
||||
rows = len(block)
|
||||
cols = len(block[0])
|
||||
dct_block = [[0.0] * cols for _ in range(rows)]
|
||||
|
||||
thresh = avg
|
||||
if axis == 0:
|
||||
# Apply 1D DCT on each row
|
||||
for i in range(rows):
|
||||
dct_block[i] = dct1(block[i])
|
||||
elif axis == 1:
|
||||
# Apply 1D DCT on each column
|
||||
for j in range(cols):
|
||||
column = [block[i][j] for i in range(rows)]
|
||||
dct_column = dct1(column)
|
||||
for i in range(rows):
|
||||
dct_block[i][j] = dct_column[i]
|
||||
else:
|
||||
raise ValueError("Invalid axis value. Must be either 0 or 1.")
|
||||
|
||||
# Step 6
|
||||
def compare_value_to_thresh(i):
|
||||
return (1 if i > thresh else 0)
|
||||
return dct_block
|
||||
|
||||
bitlist = map(compare_value_to_thresh, lofreq_dct)
|
||||
def convert_image_to_ndarray(image: Image.Image) -> Sequence[Sequence[float]]:
|
||||
width, height = image.size
|
||||
|
||||
#Step 7
|
||||
def set_bit(x, (idx, val)):
|
||||
return (x | (val << idx))
|
||||
pixels2 = []
|
||||
for y in range(height):
|
||||
row = []
|
||||
for x in range(width):
|
||||
pixel = image.getpixel((x, y))
|
||||
row.append(pixel)
|
||||
pixels2.append(row)
|
||||
|
||||
result = reduce(set_bit, enumerate(bitlist), long(0))
|
||||
return pixels2
|
||||
|
||||
highfreq_factor = 4
|
||||
img_size = 8 * highfreq_factor
|
||||
|
||||
try:
|
||||
image = self.image.convert("L").resize((img_size, img_size), Image.Resampling.LANCZOS)
|
||||
except Exception:
|
||||
logger.exception("p_hash error converting to greyscale and resizing")
|
||||
return 0
|
||||
|
||||
pixels = convert_image_to_ndarray(image)
|
||||
dct = generate_dct2(generate_dct2(pixels, axis=0), axis=1)
|
||||
dctlowfreq = list(itertools.chain.from_iterable(row[:8] for row in dct[:8]))
|
||||
med = median(dctlowfreq)
|
||||
# Convert to a bit string
|
||||
diff = "".join(str(int(item > med)) for item in dctlowfreq)
|
||||
|
||||
result = int(diff, 2)
|
||||
|
||||
#print("{0:016x}".format(result))
|
||||
return result
|
||||
"""
|
||||
|
||||
# accepts 2 hashes (longs or hex strings) and returns the hamming distance
|
||||
|
||||
T = TypeVar("T", int, str)
|
||||
|
||||
@staticmethod
|
||||
def hamming_distance(h1, h2):
|
||||
if isinstance(h1, int) or isinstance(h1, int):
|
||||
def hamming_distance(h1: T, h2: T) -> int:
|
||||
if isinstance(h1, int):
|
||||
n1 = h1
|
||||
else:
|
||||
n1 = int(h1, 16)
|
||||
|
||||
if isinstance(h2, int):
|
||||
n2 = h2
|
||||
else:
|
||||
# convert hex strings to ints
|
||||
n1 = int(h1, 16)
|
||||
n2 = int(h2, 16)
|
||||
|
||||
# xor the two numbers
|
||||
n = n1 ^ n2
|
||||
|
||||
# count up the 1's in the binary string
|
||||
return sum(b == '1' for b in bin(n)[2:])
|
||||
return sum(b == "1" for b in bin(n)[2:])
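Note: hamming_distance() XORs the two hashes (hex strings are converted with int(h, 16) first) and counts the set bits of the result. For example:

    h1 = 0b101100   # 0x2c
    h2 = 0b100101   # 0x25
    n = h1 ^ h2     # 0b001001 -> two differing bits
    assert sum(b == "1" for b in bin(n)[2:]) == 2
    # ImageHasher.hamming_distance("2c", "25") == 2 as well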
|
||||
|
||||
@@ -1,83 +1,85 @@
|
||||
"""A PyQT4 widget to display a popup image"""
|
||||
|
||||
# Copyright 2012-2014 Anthony Beville
|
||||
|
||||
#
|
||||
# Copyright 2012-2014 ComicTagger Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
#import sys
|
||||
#import os
|
||||
import logging
|
||||
|
||||
from PyQt5 import QtCore, QtGui, QtWidgets, uic
|
||||
from PyQt5 import QtCore, QtGui, QtWidgets, sip, uic
|
||||
|
||||
from .settings import ComicTaggerSettings
|
||||
from comictaggerlib.graphics import graphics_path
|
||||
from comictaggerlib.ui import ui_path
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ImagePopup(QtWidgets.QDialog):
|
||||
def __init__(self, parent: QtWidgets.QWidget, image_pixmap: QtGui.QPixmap) -> None:
|
||||
super().__init__(parent)
|
||||
|
||||
def __init__(self, parent, image_pixmap):
|
||||
super(ImagePopup, self).__init__(parent)
|
||||
with (ui_path / "imagepopup.ui").open(encoding="utf-8") as uifile:
|
||||
uic.loadUi(uifile, self)
|
||||
|
||||
uic.loadUi(ComicTaggerSettings.getUIFile('imagepopup.ui'), self)
|
||||
QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.CursorShape.WaitCursor))
|
||||
|
||||
QtWidgets.QApplication.setOverrideCursor(
|
||||
QtGui.QCursor(QtCore.Qt.WaitCursor))
|
||||
|
||||
# self.setWindowModality(QtCore.Qt.WindowModal)
|
||||
self.setWindowFlags(QtCore.Qt.Popup)
|
||||
self.setWindowState(QtCore.Qt.WindowFullScreen)
|
||||
self.setWindowFlags(QtCore.Qt.WindowType.Popup)
|
||||
self.setWindowState(QtCore.Qt.WindowState.WindowFullScreen)
|
||||
|
||||
self.imagePixmap = image_pixmap
|
||||
|
||||
screen_size = QtWidgets.QDesktopWidget().screenGeometry()
|
||||
screen_size = QtGui.QGuiApplication.primaryScreen().geometry()
|
||||
QtWidgets.QApplication.primaryScreen()
|
||||
self.resize(screen_size.width(), screen_size.height())
|
||||
self.move(0, 0)
|
||||
|
||||
# This is a total hack. Uses a snapshot of the desktop, and overlays a
|
||||
# translucent screen over it. Probably can do it better by setting opacity of a
|
||||
# widget
|
||||
# translucent screen over it. Probably can do it better by setting opacity of a widget
|
||||
# TODO: macOS denies this
|
||||
screen = QtWidgets.QApplication.primaryScreen()
|
||||
self.desktopBg = screen.grabWindow(
|
||||
QtWidgets.QApplication.desktop().winId(),
|
||||
0,
|
||||
0,
|
||||
screen_size.width(),
|
||||
screen_size.height())
|
||||
bg = QtGui.QPixmap(ComicTaggerSettings.getGraphic('popup_bg.png'))
|
||||
self.desktopBg = screen.grabWindow(sip.voidptr(0), 0, 0, screen_size.width(), screen_size.height())
|
||||
bg = QtGui.QPixmap(str(graphics_path / "popup_bg.png"))
|
||||
self.clientBgPixmap = bg.scaled(
|
||||
screen_size.width(), screen_size.height())
|
||||
screen_size.width(),
|
||||
screen_size.height(),
|
||||
QtCore.Qt.AspectRatioMode.IgnoreAspectRatio,
|
||||
QtCore.Qt.SmoothTransformation,
|
||||
)
|
||||
self.setMask(self.clientBgPixmap.mask())
|
||||
|
||||
self.applyImagePixmap()
|
||||
self.apply_image_pixmap()
|
||||
self.showFullScreen()
|
||||
self.raise_()
|
||||
QtWidgets.QApplication.restoreOverrideCursor()
|
||||
|
||||
def paintEvent(self, event):
|
||||
self.painter = QtGui.QPainter(self)
|
||||
self.painter.setRenderHint(QtGui.QPainter.Antialiasing)
|
||||
self.painter.drawPixmap(0, 0, self.desktopBg)
|
||||
self.painter.drawPixmap(0, 0, self.clientBgPixmap)
|
||||
self.painter.end()
|
||||
def paintEvent(self, event: QtGui.QPaintEvent) -> None:
|
||||
painter = QtGui.QPainter(self)
|
||||
painter.setRenderHint(QtGui.QPainter.RenderHint.Antialiasing)
|
||||
painter.drawPixmap(0, 0, self.desktopBg)
|
||||
painter.drawPixmap(0, 0, self.clientBgPixmap)
|
||||
painter.end()
|
||||
|
||||
def applyImagePixmap(self):
|
||||
def apply_image_pixmap(self) -> None:
|
||||
win_h = self.height()
|
||||
win_w = self.width()
|
||||
|
||||
if self.imagePixmap.width(
|
||||
) > win_w or self.imagePixmap.height() > win_h:
|
||||
if self.imagePixmap.width() > win_w or self.imagePixmap.height() > win_h:
|
||||
# scale the pixmap to fit in the frame
|
||||
display_pixmap = self.imagePixmap.scaled(
|
||||
win_w, win_h, QtCore.Qt.KeepAspectRatio)
|
||||
win_w, win_h, QtCore.Qt.AspectRatioMode.KeepAspectRatio, QtCore.Qt.SmoothTransformation
|
||||
)
|
||||
self.lblImage.setPixmap(display_pixmap)
|
||||
else:
|
||||
display_pixmap = self.imagePixmap
|
||||
@@ -87,7 +89,7 @@ class ImagePopup(QtWidgets.QDialog):
|
||||
img_w = display_pixmap.width()
|
||||
img_h = display_pixmap.height()
|
||||
self.lblImage.resize(img_w, img_h)
|
||||
self.lblImage.move((win_w - img_w) / 2, (win_h - img_h) / 2)
|
||||
self.lblImage.move(int((win_w - img_w) / 2), int((win_h - img_h) / 2))
|
||||
|
||||
def mousePressEvent(self, event):
|
||||
def mousePressEvent(self, event: QtGui.QMouseEvent) -> None:
|
||||
self.close()
|
||||
|
||||
File diff suppressed because it is too large.
@@ -1,188 +1,237 @@
|
||||
"""A PyQT4 dialog to select specific issue from list"""
|
||||
|
||||
# Copyright 2012-2014 Anthony Beville
|
||||
|
||||
#
|
||||
# Copyright 2012-2014 ComicTagger Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
#import sys
|
||||
#import os
|
||||
#import re
|
||||
import logging
|
||||
|
||||
from PyQt5 import QtCore, QtGui, QtWidgets, uic
|
||||
#from PyQt5.QtCore import QUrl, pyqtSignal, QByteArray
|
||||
#from PyQt5.QtNetwork import QNetworkAccessManager, QNetworkRequest
|
||||
|
||||
from .comicvinetalker import ComicVineTalker, ComicVineTalkerException
|
||||
from .settings import ComicTaggerSettings
|
||||
from .issuestring import IssueString
|
||||
from .coverimagewidget import CoverImageWidget
|
||||
from comictaggerlib.ui.qtutils import reduceWidgetFontSize
|
||||
#from imagefetcher import ImageFetcher
|
||||
#import utils
|
||||
from comicapi.genericmetadata import GenericMetadata
|
||||
from comicapi.issuestring import IssueString
|
||||
from comictaggerlib.coverimagewidget import CoverImageWidget
|
||||
from comictaggerlib.ctsettings import ct_ns
|
||||
from comictaggerlib.ui import qtutils, ui_path
|
||||
from comictaggerlib.ui.qtutils import new_web_view, reduce_widget_font_size
|
||||
from comictalker.comictalker import ComicTalker, TalkerError
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class IssueNumberTableWidgetItem(QtWidgets.QTableWidgetItem):
|
||||
|
||||
def __lt__(self, other):
|
||||
selfStr = self.data(QtCore.Qt.DisplayRole)
|
||||
otherStr = other.data(QtCore.Qt.DisplayRole)
|
||||
return (IssueString(selfStr).asFloat() <
|
||||
IssueString(otherStr).asFloat())
|
||||
def __lt__(self, other: object) -> bool:
|
||||
assert isinstance(other, QtWidgets.QTableWidgetItem)
|
||||
self_str: str = self.data(QtCore.Qt.ItemDataRole.DisplayRole)
|
||||
other_str: str = other.data(QtCore.Qt.ItemDataRole.DisplayRole)
|
||||
return (IssueString(self_str).as_float() or 0) < (IssueString(other_str).as_float() or 0)
|
||||
|
||||
|
||||
class IssueSelectionWindow(QtWidgets.QDialog):
|
||||
def __init__(
|
||||
self,
|
||||
parent: QtWidgets.QWidget,
|
||||
config: ct_ns,
|
||||
talker: ComicTalker,
|
||||
series_id: str,
|
||||
issue_number: str,
|
||||
) -> None:
|
||||
super().__init__(parent)
|
||||
|
||||
volume_id = 0
|
||||
|
||||
def __init__(self, parent, settings, series_id, issue_number):
|
||||
super(IssueSelectionWindow, self).__init__(parent)
|
||||
|
||||
uic.loadUi(
|
||||
ComicTaggerSettings.getUIFile('issueselectionwindow.ui'), self)
|
||||
with (ui_path / "issueselectionwindow.ui").open(encoding="utf-8") as uifile:
|
||||
uic.loadUi(uifile, self)
|
||||
|
||||
self.coverWidget = CoverImageWidget(
|
||||
self.coverImageContainer, CoverImageWidget.AltCoverMode)
|
||||
self.coverImageContainer,
|
||||
CoverImageWidget.AltCoverMode,
|
||||
config.Runtime_Options__config.user_cache_dir,
|
||||
talker,
|
||||
)
|
||||
gridlayout = QtWidgets.QGridLayout(self.coverImageContainer)
|
||||
gridlayout.addWidget(self.coverWidget)
|
||||
gridlayout.setContentsMargins(0, 0, 0, 0)
|
||||
|
||||
reduceWidgetFontSize(self.twList)
|
||||
reduceWidgetFontSize(self.teDescription, 1)
|
||||
self.teDescription: QtWidgets.QWidget
|
||||
webengine = new_web_view(self)
|
||||
if webengine:
|
||||
self.teDescription = qtutils.replaceWidget(self.splitter, self.teDescription, webengine)
|
||||
logger.info("successfully loaded QWebEngineView")
|
||||
else:
|
||||
logger.info("failed to open QWebEngineView")
|
||||
|
||||
self.setWindowFlags(self.windowFlags() |
|
||||
QtCore.Qt.WindowSystemMenuHint |
|
||||
QtCore.Qt.WindowMaximizeButtonHint)
|
||||
reduce_widget_font_size(self.twList)
|
||||
reduce_widget_font_size(self.teDescription, 1)
|
||||
|
||||
self.setWindowFlags(
|
||||
QtCore.Qt.WindowType(
|
||||
self.windowFlags()
|
||||
| QtCore.Qt.WindowType.WindowSystemMenuHint
|
||||
| QtCore.Qt.WindowType.WindowMaximizeButtonHint
|
||||
)
|
||||
)
|
||||
|
||||
self.series_id = series_id
|
||||
self.settings = settings
|
||||
self.issue_id: str = ""
|
||||
self.config = config
|
||||
self.talker = talker
|
||||
self.url_fetch_thread = None
|
||||
self.issue_list: dict[str, GenericMetadata] = {}
|
||||
|
||||
# Display talker logo and set url
|
||||
self.lblIssuesSourceName.setText(talker.attribution)
|
||||
|
||||
self.imageIssuesSourceWidget = CoverImageWidget(
|
||||
self.imageIssuesSourceLogo,
|
||||
CoverImageWidget.URLMode,
|
||||
config.Runtime_Options__config.user_cache_dir,
|
||||
talker,
|
||||
False,
|
||||
)
|
||||
self.imageIssuesSourceWidget.showControls = False
|
||||
gridlayoutIssuesSourceLogo = QtWidgets.QGridLayout(self.imageIssuesSourceLogo)
|
||||
gridlayoutIssuesSourceLogo.addWidget(self.imageIssuesSourceWidget)
|
||||
gridlayoutIssuesSourceLogo.setContentsMargins(0, 2, 0, 0)
|
||||
self.imageIssuesSourceWidget.set_url(talker.logo_url)
|
||||
|
||||
if issue_number is None or issue_number == "":
|
||||
self.issue_number = 1
|
||||
self.issue_number = "1"
|
||||
else:
|
||||
self.issue_number = issue_number
|
||||
|
||||
self.initial_id = None
|
||||
self.performQuery()
|
||||
self.initial_id: str = ""
|
||||
self.perform_query()
|
||||
|
||||
self.twList.resizeColumnsToContents()
|
||||
self.twList.currentItemChanged.connect(self.currentItemChanged)
|
||||
self.twList.cellDoubleClicked.connect(self.cellDoubleClicked)
|
||||
self.twList.currentItemChanged.connect(self.current_item_changed)
|
||||
self.twList.cellDoubleClicked.connect(self.cell_double_clicked)
|
||||
|
||||
# now that the list has been sorted, find the initial record, and
|
||||
# select it
|
||||
if self.initial_id is None:
|
||||
if not self.initial_id:
|
||||
self.twList.selectRow(0)
|
||||
else:
|
||||
for r in range(0, self.twList.rowCount()):
|
||||
issue_id = self.twList.item(r, 0).data(QtCore.Qt.UserRole)
|
||||
if (issue_id == self.initial_id):
|
||||
issue_id = self.twList.item(r, 0).data(QtCore.Qt.ItemDataRole.UserRole)
|
||||
if issue_id == self.initial_id:
|
||||
self.twList.selectRow(r)
|
||||
break
|
||||
|
||||
def performQuery(self):
|
||||
self.leFilter.textChanged.connect(self.filter)
|
||||
|
||||
QtWidgets.QApplication.setOverrideCursor(
|
||||
QtGui.QCursor(QtCore.Qt.WaitCursor))
|
||||
def filter(self, text: str) -> None:
|
||||
rows = set(range(self.twList.rowCount()))
|
||||
for r in rows:
|
||||
self.twList.showRow(r)
|
||||
if text.strip():
|
||||
shown_rows = {x.row() for x in self.twList.findItems(text, QtCore.Qt.MatchFlag.MatchContains)}
|
||||
for r in rows - shown_rows:
|
||||
self.twList.hideRow(r)
|
||||
|
||||
def perform_query(self) -> None:
|
||||
QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.CursorShape.WaitCursor))
|
||||
|
||||
try:
|
||||
comicVine = ComicVineTalker()
|
||||
volume_data = comicVine.fetchVolumeData(self.series_id)
|
||||
self.issue_list = comicVine.fetchIssuesByVolume(self.series_id)
|
||||
except ComicVineTalkerException as e:
|
||||
self.issue_list = {
|
||||
x.issue_id: x for x in self.talker.fetch_issues_in_series(self.series_id) if x.issue_id is not None
|
||||
}
|
||||
except TalkerError as e:
|
||||
QtWidgets.QApplication.restoreOverrideCursor()
|
||||
if e.code == ComicVineTalkerException.RateLimit:
|
||||
QtWidgets.QMessageBox.critical(
|
||||
self,
|
||||
self.tr("Comic Vine Error"),
|
||||
ComicVineTalker.getRateLimitMessage())
|
||||
else:
|
||||
QtWidgets.QMessageBox.critical(
|
||||
self,
|
||||
self.tr("Network Issue"),
|
||||
self.tr("Could not connect to Comic Vine to list issues!"))
|
||||
QtWidgets.QMessageBox.critical(self, f"{e.source} {e.code_name} Error", f"{e}")
|
||||
return
|
||||
|
||||
while self.twList.rowCount() > 0:
|
||||
self.twList.removeRow(0)
|
||||
self.twList.setRowCount(0)
|
||||
|
||||
self.twList.setSortingEnabled(False)
|
||||
|
||||
row = 0
|
||||
for record in self.issue_list:
|
||||
for row, issue in enumerate(self.issue_list.values()):
|
||||
self.twList.insertRow(row)
|
||||
self.twList.setItem(row, 0, IssueNumberTableWidgetItem())
|
||||
self.twList.setItem(row, 1, QtWidgets.QTableWidgetItem())
|
||||
self.twList.setItem(row, 2, QtWidgets.QTableWidgetItem())
|
||||
|
||||
item_text = record['issue_number']
|
||||
item = IssueNumberTableWidgetItem(item_text)
|
||||
item.setData(QtCore.Qt.ToolTipRole, item_text)
|
||||
item.setData(QtCore.Qt.UserRole, record['id'])
|
||||
item.setData(QtCore.Qt.DisplayRole, item_text)
|
||||
item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
|
||||
self.twList.setItem(row, 0, item)
|
||||
self.update_row(row, issue)
|
||||
|
||||
item_text = record['cover_date']
|
||||
if item_text is None:
|
||||
item_text = ""
|
||||
# remove the day of "YYYY-MM-DD"
|
||||
parts = item_text.split("-")
|
||||
if len(parts) > 1:
|
||||
item_text = parts[0] + "-" + parts[1]
|
||||
|
||||
item = QtWidgets.QTableWidgetItem(item_text)
|
||||
item.setData(QtCore.Qt.ToolTipRole, item_text)
|
||||
item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
|
||||
self.twList.setItem(row, 1, item)
|
||||
|
||||
item_text = record['name']
|
||||
if item_text is None:
|
||||
item_text = ""
|
||||
item = QtWidgets.QTableWidgetItem(item_text)
|
||||
item.setData(QtCore.Qt.ToolTipRole, item_text)
|
||||
item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
|
||||
self.twList.setItem(row, 2, item)
|
||||
|
||||
if IssueString(
|
||||
record['issue_number']).asString().lower() == IssueString(
|
||||
self.issue_number).asString().lower():
|
||||
self.initial_id = record['id']
|
||||
|
||||
row += 1
|
||||
if IssueString(issue.issue).as_string().casefold() == IssueString(self.issue_number).as_string().casefold():
|
||||
self.initial_id = issue.issue_id or ""
|
||||
|
||||
self.twList.setSortingEnabled(True)
|
||||
self.twList.sortItems(0, QtCore.Qt.AscendingOrder)
|
||||
self.twList.sortItems(0, QtCore.Qt.SortOrder.AscendingOrder)
|
||||
|
||||
QtWidgets.QApplication.restoreOverrideCursor()
|
||||
|
||||
def cellDoubleClicked(self, r, c):
|
||||
def cell_double_clicked(self, r: int, c: int) -> None:
|
||||
self.accept()
|
||||
|
||||
def currentItemChanged(self, curr, prev):
|
||||
def set_description(self, widget: QtWidgets.QWidget, text: str) -> None:
|
||||
if isinstance(widget, QtWidgets.QTextEdit):
|
||||
widget.setText(text.replace("</figure>", "</div>").replace("<figure", "<div"))
|
||||
else:
|
||||
html = text
|
||||
widget.setHtml(html, QtCore.QUrl(self.talker.website))
|
||||
|
||||
def update_row(self, row: int, issue: GenericMetadata) -> None:
|
||||
item_text = issue.issue or ""
|
||||
item = self.twList.item(row, 0)
|
||||
item.setText(item_text)
|
||||
item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
|
||||
item.setData(QtCore.Qt.ItemDataRole.UserRole, issue.issue_id)
|
||||
item.setData(QtCore.Qt.ItemDataRole.DisplayRole, item_text)
|
||||
item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
|
||||
|
||||
item_text = ""
|
||||
if issue.year is not None:
|
||||
item_text += f"-{issue.year:04}"
|
||||
if issue.month is not None:
|
||||
item_text += f"-{issue.month:02}"
|
||||
|
||||
qtw_item = self.twList.item(row, 1)
|
||||
qtw_item.setText(item_text.strip("-"))
|
||||
qtw_item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
|
||||
qtw_item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
|
||||
|
||||
item_text = issue.title or ""
|
||||
qtw_item = self.twList.item(row, 2)
|
||||
qtw_item.setText(item_text)
|
||||
qtw_item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
|
||||
qtw_item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
|
||||
|
||||
def current_item_changed(self, curr: QtCore.QModelIndex | None, prev: QtCore.QModelIndex | None) -> None:
|
||||
if curr is None:
|
||||
return
|
||||
if prev is not None and prev.row() == curr.row():
|
||||
return
|
||||
|
||||
self.issue_id = self.twList.item(curr.row(), 0).data(QtCore.Qt.UserRole)
|
||||
row = curr.row()
|
||||
self.issue_id = self.twList.item(row, 0).data(QtCore.Qt.ItemDataRole.UserRole)
|
||||
|
||||
# list selection was changed, update the the issue cover
|
||||
for record in self.issue_list:
|
||||
if record['id'] == self.issue_id:
|
||||
self.issue_number = record['issue_number']
|
||||
self.coverWidget.setIssueID(int(self.issue_id))
|
||||
if record['description'] is None:
|
||||
self.teDescription.setText("")
|
||||
else:
|
||||
self.teDescription.setText(record['description'])
|
||||
# list selection was changed, update the issue cover
|
||||
issue = self.issue_list[self.issue_id]
|
||||
if not (issue.issue and issue.year and issue.month and issue._cover_image and issue.title):
|
||||
QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.CursorShape.WaitCursor))
|
||||
try:
|
||||
issue = self.talker.fetch_comic_data(issue_id=self.issue_id)
|
||||
except TalkerError:
|
||||
pass
|
||||
QtWidgets.QApplication.restoreOverrideCursor()
|
||||
|
||||
break
|
||||
self.issue_number = issue.issue or ""
|
||||
self.coverWidget.set_issue_details(self.issue_id, [issue._cover_image or "", *issue._alternate_images])
|
||||
if issue.description is None:
|
||||
self.set_description(self.teDescription, "")
|
||||
else:
|
||||
self.set_description(self.teDescription, issue.description)
|
||||
|
||||
# Update current record information
|
||||
self.update_row(row, issue)
|
||||
|
||||
@@ -1 +0,0 @@
from comicapi.issuestring import *
comictaggerlib/log.py (Normal file, 57 lines)
@@ -0,0 +1,57 @@
from __future__ import annotations

import logging.handlers
import pathlib
import platform
import sys

from comictaggerlib.ctversion import version

logger = logging.getLogger("comictagger")


def get_filename(filename: str) -> str:
    filename, _, number = filename.rpartition(".")
    return filename.removesuffix("log") + number + ".log"


def get_file_handler(filename: pathlib.Path) -> logging.FileHandler:
    file_handler = logging.handlers.RotatingFileHandler(filename, encoding="utf-8", backupCount=10)
    file_handler.namer = get_filename

    if filename.is_file() and filename.stat().st_size > 0:
        file_handler.doRollover()
    return file_handler


def setup_logging(verbose: int, log_dir: pathlib.Path) -> None:
    logging.getLogger("comicapi").setLevel(logging.DEBUG)
    logging.getLogger("comictaggerlib").setLevel(logging.DEBUG)
    logging.getLogger("comictalker").setLevel(logging.DEBUG)

    log_file = log_dir / "ComicTagger.log"
    log_dir.mkdir(parents=True, exist_ok=True)

    stream_handler = logging.StreamHandler()
    file_handler = get_file_handler(log_file)

    if verbose > 1:
        stream_handler.setLevel(logging.DEBUG)
    elif verbose > 0:
        stream_handler.setLevel(logging.INFO)
    else:
        stream_handler.setLevel(logging.WARNING)

    logging.basicConfig(
        handlers=[stream_handler, file_handler],
        level=logging.WARNING,
        format="%(asctime)s | %(name)s | %(levelname)s | %(message)s",
        datefmt="%Y-%m-%dT%H:%M:%S",
    )

    logger.info(
        "ComicTagger Version: %s running on: %s PyInstaller: %s",
        version,
        platform.system(),
        "Yes" if getattr(sys, "frozen", None) else "No",
    )
@@ -1,41 +1,53 @@
|
||||
"""A PyQT4 dialog to a text file or log"""
|
||||
|
||||
# Copyright 2012-2014 Anthony Beville
|
||||
|
||||
#
|
||||
# Copyright 2012-2014 ComicTagger Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
#import sys
|
||||
#import os
|
||||
import logging
|
||||
|
||||
from PyQt5 import QtCore, QtGui, QtWidgets, uic
|
||||
from PyQt5 import QtCore, QtWidgets, uic
|
||||
|
||||
from .settings import ComicTaggerSettings
|
||||
from comictaggerlib.ui import qtutils, ui_path
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LogWindow(QtWidgets.QDialog):
|
||||
def __init__(self, parent: QtWidgets.QWidget) -> None:
|
||||
super().__init__(parent)
|
||||
|
||||
def __init__(self, parent):
|
||||
super(LogWindow, self).__init__(parent)
|
||||
with (ui_path / "logwindow.ui").open(encoding="utf-8") as uifile:
|
||||
uic.loadUi(uifile, self)
|
||||
|
||||
uic.loadUi(ComicTaggerSettings.getUIFile('logwindow.ui'), self)
|
||||
self.setWindowFlags(
|
||||
QtCore.Qt.WindowType(
|
||||
self.windowFlags()
|
||||
| QtCore.Qt.WindowType.WindowSystemMenuHint
|
||||
| QtCore.Qt.WindowType.WindowMaximizeButtonHint
|
||||
)
|
||||
)
|
||||
|
||||
self.setWindowFlags(self.windowFlags() |
|
||||
QtCore.Qt.WindowSystemMenuHint |
|
||||
QtCore.Qt.WindowMaximizeButtonHint)
|
||||
|
||||
def setText(self, text):
|
||||
def set_text(self, text: str | bytes | None) -> None:
|
||||
try:
|
||||
text = text.decode()
|
||||
except:
|
||||
if text is not None:
|
||||
if isinstance(text, bytes):
|
||||
text = text.decode("utf-8")
|
||||
self.textEdit.setPlainText(text)
|
||||
except AttributeError:
|
||||
pass
|
||||
self.textEdit.setPlainText(text)
|
||||
except Exception as e:
|
||||
logger.exception("Displaying raw tags failed")
|
||||
qtutils.qt_error("Displaying raw tags failed:", e)
|
||||
|
||||
comictaggerlib/main.py (Executable file → Normal file, 369 lines)
@@ -1,108 +1,323 @@
|
||||
"""A python app to (automatically) tag comic archives"""
|
||||
|
||||
# Copyright 2012-2014 Anthony Beville
|
||||
|
||||
#
|
||||
# Copyright 2012-2014 ComicTagger Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import locale
|
||||
import logging
|
||||
import logging.handlers
|
||||
import os
|
||||
import sys
|
||||
import signal
|
||||
import traceback
|
||||
import platform
|
||||
import subprocess
|
||||
import sys
|
||||
from collections.abc import Collection
|
||||
from typing import cast
|
||||
|
||||
from .settings import ComicTaggerSettings
|
||||
# Need to load setting before anything else
|
||||
SETTINGS = ComicTaggerSettings()
|
||||
import settngs
|
||||
|
||||
try:
|
||||
qt_available = True
|
||||
from PyQt5 import QtCore, QtGui, QtWidgets
|
||||
from .taggerwindow import TaggerWindow
|
||||
except ImportError as e:
|
||||
qt_available = False
|
||||
import comicapi.comicarchive
|
||||
import comicapi.utils
|
||||
import comictalker
|
||||
from comictaggerlib import cli, ctsettings
|
||||
from comictaggerlib.ctsettings import ct_ns, plugin_finder
|
||||
from comictaggerlib.ctversion import version
|
||||
from comictaggerlib.log import setup_logging
|
||||
from comictaggerlib.resulttypes import Action
|
||||
from comictalker.comictalker import ComicTalker
|
||||
|
||||
if sys.version_info < (3, 10):
|
||||
import importlib_metadata
|
||||
else:
|
||||
import importlib.metadata as importlib_metadata
|
||||
|
||||
logger = logging.getLogger("comictagger")
|
||||
|
||||
|
||||
from . import utils
|
||||
from . import cli
|
||||
from .options import Options
|
||||
from .comicvinetalker import ComicVineTalker
|
||||
logger.setLevel(logging.DEBUG)
|
||||
|
||||
def ctmain():
|
||||
opts = Options()
|
||||
opts.parseCmdLineArgs()
|
||||
|
||||
# manage the CV API key
|
||||
if opts.cv_api_key:
|
||||
if opts.cv_api_key != SETTINGS.cv_api_key:
|
||||
SETTINGS.cv_api_key = opts.cv_api_key
|
||||
SETTINGS.save()
|
||||
if opts.only_set_key:
|
||||
print("Key set")
|
||||
return
|
||||
def _lang_code_mac() -> str:
|
||||
"""
|
||||
stolen from https://github.com/mu-editor/mu
|
||||
Returns the user's language preference as defined in the Language & Region
|
||||
preference pane in macOS's System Preferences.
|
||||
"""
|
||||
|
||||
ComicVineTalker.api_key = SETTINGS.cv_api_key
|
||||
# Uses the shell command `defaults read -g AppleLocale` that prints out a
|
||||
# language code to standard output. Assumptions about the command:
|
||||
# - It exists and is in the shell's PATH.
|
||||
# - It accepts those arguments.
|
||||
# - It returns a usable language code.
|
||||
#
|
||||
# Reference documentation:
|
||||
# - The man page for the `defaults` command on macOS.
|
||||
# - The macOS underlying API:
|
||||
# https://developer.apple.com/documentation/foundation/nsuserdefaults.
|
||||
|
||||
signal.signal(signal.SIGINT, signal.SIG_DFL)
|
||||
lang_detect_command = "defaults read -g AppleLocale"
|
||||
|
||||
if not qt_available and not opts.no_gui:
|
||||
opts.no_gui = True
|
||||
print("PyQt5 is not available. ComicTagger is limited to command-line mode.", file=sys.stderr)
|
||||
|
||||
if opts.no_gui:
|
||||
cli.cli_mode(opts, SETTINGS)
|
||||
status, output = subprocess.getstatusoutput(lang_detect_command)
|
||||
if status == 0:
|
||||
# Command was successful.
|
||||
lang_code = output
|
||||
else:
|
||||
|
||||
os.environ['QT_AUTO_SCREEN_SCALE_FACTOR'] = '1'
|
||||
|
||||
#if platform.system() == "Darwin":
|
||||
# QtWidgets.QApplication.setStyle("macintosh")
|
||||
#else:
|
||||
# QtWidgets.QApplication.setStyle("Fusion")
|
||||
|
||||
app = QtWidgets.QApplication(sys.argv)
|
||||
if platform.system() == "Darwin":
|
||||
# Set the MacOS dock icon
|
||||
app.setWindowIcon(
|
||||
QtGui.QIcon(ComicTaggerSettings.getGraphic('app.png')))
|
||||
logging.warning("Language detection command failed: %r", output)
|
||||
lang_code = ""
|
||||
|
||||
if platform.system() == "Windows":
|
||||
# For pure python, tell windows that we're not python,
|
||||
# so we can have our own taskbar icon
|
||||
import ctypes
|
||||
myappid = u'comictagger' # arbitrary string
|
||||
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
|
||||
return lang_code
|
||||
|
||||
if platform.system() != "Linux":
|
||||
img = QtGui.QPixmap(ComicTaggerSettings.getGraphic('tags.png'))
|
||||
|
||||
splash = QtWidgets.QSplashScreen(img)
|
||||
splash.show()
|
||||
splash.raise_()
|
||||
app.processEvents()
|
||||
def configure_locale() -> None:
|
||||
if sys.platform == "darwin" and "LANG" not in os.environ:
|
||||
code = _lang_code_mac()
|
||||
if code != "":
|
||||
os.environ["LANG"] = f"{code}.utf-8"
|
||||
|
||||
locale.setlocale(locale.LC_ALL, "")
|
||||
sys.stdout.reconfigure(encoding=sys.getdefaultencoding()) # type: ignore[attr-defined]
|
||||
sys.stderr.reconfigure(encoding=sys.getdefaultencoding()) # type: ignore[attr-defined]
|
||||
sys.stdin.reconfigure(encoding=sys.getdefaultencoding()) # type: ignore[attr-defined]
|
||||
|
||||
|
||||
def update_publishers(config: settngs.Config[ct_ns]) -> None:
|
||||
json_file = config[0].Runtime_Options__config.user_config_dir / "publishers.json"
|
||||
if json_file.exists():
|
||||
try:
|
||||
comicapi.utils.update_publishers(json.loads(json_file.read_text("utf-8")))
|
||||
except Exception as e:
|
||||
logger.exception("Failed to load publishers from %s: %s", json_file, e)
|
||||
|
||||
|
||||
class App:
|
||||
"""docstring for App"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.config: settngs.Config[ct_ns]
|
||||
self.initial_arg_parser = ctsettings.initial_commandline_parser()
|
||||
self.config_load_success = False
|
||||
self.talkers: dict[str, ComicTalker]
|
||||
|
||||
def run(self) -> None:
|
||||
configure_locale()
|
||||
conf = self.initialize()
|
||||
self.initialize_dirs(conf.config)
|
||||
self.load_plugins(conf)
|
||||
self.register_settings()
|
||||
self.config = self.parse_settings(conf.config)
|
||||
|
||||
self.main()
|
||||
|
||||
def load_plugins(self, opts: argparse.Namespace) -> None:
|
||||
local_plugins = plugin_finder.find_plugins(opts.config.user_plugin_dir)
|
||||
self._extend_plugin_paths(local_plugins)
|
||||
|
||||
comicapi.comicarchive.load_archive_plugins(local_plugins=[p.entry_point for p in local_plugins.archivers])
|
||||
comicapi.comicarchive.load_metadata_plugins(
|
||||
version=version, local_plugins=[p.entry_point for p in local_plugins.metadata]
|
||||
)
|
||||
self.talkers = comictalker.get_talkers(
|
||||
version, opts.config.user_cache_dir, local_plugins=[p.entry_point for p in local_plugins.talkers]
|
||||
)
|
||||
|
||||
def _extend_plugin_paths(self, plugins: plugin_finder.Plugins) -> None:
|
||||
sys.path.extend(str(p.path.absolute()) for p in plugins.all_plugins())
|
||||
|
||||
def list_plugins(
|
||||
self,
|
||||
talkers: Collection[comictalker.ComicTalker],
|
||||
archivers: Collection[type[comicapi.comicarchive.Archiver]],
|
||||
metadata_styles: Collection[comicapi.comicarchive.Metadata],
|
||||
) -> None:
|
||||
if self.config[0].Runtime_Options__json:
|
||||
for talker in talkers:
|
||||
print( # noqa: T201
|
||||
json.dumps(
|
||||
{
|
||||
"type": "talker",
|
||||
"id": talker.id,
|
||||
"name": talker.name,
|
||||
"website": talker.website,
|
||||
}
|
||||
)
|
||||
)
|
||||
|
||||
for archiver in archivers:
|
||||
try:
|
||||
a = archiver()
|
||||
print( # noqa: T201
|
||||
json.dumps(
|
||||
{
|
||||
"type": "archiver",
|
||||
"enabled": a.enabled,
|
||||
"name": a.name(),
|
||||
"extension": a.extension(),
|
||||
"exe": a.exe,
|
||||
}
|
||||
)
|
||||
)
|
||||
except Exception:
|
||||
print( # noqa: T201
|
||||
json.dumps(
|
||||
{
|
||||
"type": "archiver",
|
||||
"enabled": archiver.enabled,
|
||||
"name": "",
|
||||
"extension": "",
|
||||
"exe": archiver.exe,
|
||||
}
|
||||
)
|
||||
)
|
||||
|
||||
for style in metadata_styles:
|
||||
print( # noqa: T201
|
||||
json.dumps(
|
||||
{
|
||||
"type": "metadata",
|
||||
"enabled": style.enabled,
|
||||
"name": style.name(),
|
||||
"short_name": style.short_name,
|
||||
}
|
||||
)
|
||||
)
|
||||
else:
|
||||
print("Metadata Sources: (ID: Name, URL)") # noqa: T201
|
||||
for talker in talkers:
|
||||
print(f"{talker.id:<10}: {talker.name:<21}, {talker.website}") # noqa: T201
|
||||
|
||||
print("\nComic Archive: (Name: extension, exe)") # noqa: T201
|
||||
for archiver in archivers:
|
||||
a = archiver()
|
||||
print(f"{a.name():<10}: {a.extension():<5}, {a.exe}") # noqa: T201
|
||||
|
||||
print("\nMetadata Style: (Short Name: Name)") # noqa: T201
|
||||
for style in metadata_styles:
|
||||
print(f"{style.short_name:<10}: {style.name()}") # noqa: T201
|
||||
|
||||
def initialize(self) -> argparse.Namespace:
|
||||
conf, _ = self.initial_arg_parser.parse_known_intermixed_args()
|
||||
|
||||
assert conf is not None
|
||||
setup_logging(conf.verbose, conf.config.user_log_dir)
|
||||
return conf
|
||||
|
||||
def register_settings(self) -> None:
|
||||
self.manager = settngs.Manager(
|
||||
description="A utility for reading and writing metadata to comic archives.\n\n\n"
|
||||
+ "If no options are given, %(prog)s will run in windowed mode.\nPlease keep the '-v' option separated '-so -v' not '-sov'",
|
||||
epilog="For more help visit the wiki at: https://github.com/comictagger/comictagger/wiki",
|
||||
)
|
||||
ctsettings.register_commandline_settings(self.manager)
|
||||
ctsettings.register_file_settings(self.manager)
|
||||
ctsettings.register_plugin_settings(self.manager, getattr(self, "talkers", {}))
|
||||
|
||||
def parse_settings(self, config_paths: ctsettings.ComicTaggerPaths, *args: str) -> settngs.Config[ct_ns]:
|
||||
cfg, self.config_load_success = ctsettings.parse_config(
|
||||
self.manager, config_paths.user_config_dir / "settings.json", list(args) or None
|
||||
)
|
||||
config = cast(settngs.Config[ct_ns], self.manager.get_namespace(cfg, file=True, cmdline=True))
|
||||
config[0].Runtime_Options__config = config_paths
|
||||
|
||||
config = ctsettings.validate_commandline_settings(config, self.manager)
|
||||
config = ctsettings.validate_file_settings(config)
|
||||
config = ctsettings.validate_plugin_settings(config, getattr(self, "talkers", {}))
|
||||
return config
|
||||
|
||||
def initialize_dirs(self, paths: ctsettings.ComicTaggerPaths) -> None:
|
||||
paths.user_config_dir.mkdir(parents=True, exist_ok=True)
|
||||
paths.user_cache_dir.mkdir(parents=True, exist_ok=True)
|
||||
paths.user_log_dir.mkdir(parents=True, exist_ok=True)
|
||||
paths.user_plugin_dir.mkdir(parents=True, exist_ok=True)
|
||||
logger.debug("user_config_dir: %s", paths.user_config_dir)
|
||||
logger.debug("user_cache_dir: %s", paths.user_cache_dir)
|
||||
logger.debug("user_log_dir: %s", paths.user_log_dir)
|
||||
logger.debug("user_plugin_dir: %s", paths.user_plugin_dir)
|
||||
|
||||
def main(self) -> None:
|
||||
assert self.config is not None
|
||||
# config already loaded
|
||||
error = None
|
||||
|
||||
if self.config[0].General__disable_cr:
|
||||
if "cr" in comicapi.comicarchive.metadata_styles:
|
||||
del comicapi.comicarchive.metadata_styles["cr"]
|
||||
|
||||
if len(self.talkers) < 1:
|
||||
error = error = (
|
||||
"Failed to load any talkers, please re-install and check the log located in '"
|
||||
+ str(self.config[0].Runtime_Options__config.user_log_dir)
|
||||
+ "' for more details",
|
||||
True,
|
||||
)
|
||||
|
||||
signal.signal(signal.SIGINT, signal.SIG_DFL)
|
||||
|
||||
logger.debug("Installed Packages")
|
||||
for pkg in sorted(importlib_metadata.distributions(), key=lambda x: x.name):
|
||||
logger.debug("%s\t%s", pkg.metadata["Name"], pkg.metadata["Version"])
|
||||
|
||||
comicapi.utils.load_publishers()
|
||||
update_publishers(self.config)
|
||||
|
||||
if self.config[0].Commands__command == Action.list_plugins:
|
||||
self.list_plugins(
|
||||
list(self.talkers.values()),
|
||||
comicapi.comicarchive.archivers,
|
||||
comicapi.comicarchive.metadata_styles.values(),
|
||||
)
|
||||
return
|
||||
|
||||
if self.config[0].Commands__command == Action.save_config:
|
||||
if self.config_load_success:
|
||||
settings_path = self.config[0].Runtime_Options__config.user_config_dir / "settings.json"
|
||||
if self.config_load_success:
|
||||
ctsettings.save_file(self.config, settings_path)
|
||||
print("Settings saved") # noqa: T201
|
||||
return
|
||||
|
||||
if not self.config_load_success:
|
||||
error = (
|
||||
"Failed to load settings, check the log located in '"
|
||||
+ str(self.config[0].Runtime_Options__config.user_log_dir)
|
||||
+ "' for more details",
|
||||
True,
|
||||
)
|
||||
|
||||
if not self.config[0].Runtime_Options__no_gui:
|
||||
try:
|
||||
from comictaggerlib import gui
|
||||
|
||||
if not gui.qt_available:
|
||||
raise gui.import_error
|
||||
return gui.open_tagger_window(self.talkers, self.config, error)
|
||||
except ImportError:
|
||||
self.config[0].Runtime_Options__no_gui = True
|
||||
logger.warning("PyQt5 is not available. ComicTagger is limited to command-line mode.")
|
||||
|
||||
# GUI mode is not available or CLI mode was requested
|
||||
if error and error[1]:
|
||||
print(f"A fatal error occurred please check the log for more information: {error[0]}") # noqa: T201
|
||||
raise SystemExit(1)
|
||||
|
||||
try:
|
||||
tagger_window = TaggerWindow(opts.file_list, SETTINGS, opts=opts)
|
||||
tagger_window.setWindowIcon(
|
||||
QtGui.QIcon(ComicTaggerSettings.getGraphic('app.png')))
|
||||
tagger_window.show()
|
||||
raise SystemExit(cli.CLI(self.config[0], self.talkers).run())
|
||||
except Exception:
|
||||
logger.exception("CLI mode failed")
|
||||
|
||||
if platform.system() != "Linux":
|
||||
splash.finish(tagger_window)
|
||||
|
||||
sys.exit(app.exec_())
|
||||
except Exception as e:
|
||||
QtWidgets.QMessageBox.critical(
|
||||
QtWidgets.QMainWindow(),
|
||||
"Error",
|
||||
"Unhandled exception in app:\n" +
|
||||
traceback.format_exc())
|
||||
def main() -> None:
|
||||
App().run()
|
||||
|
||||
@@ -1,160 +1,166 @@
|
||||
"""A PyQT4 dialog to select from automated issue matches"""
|
||||
|
||||
# Copyright 2012-2014 Anthony Beville
|
||||
|
||||
#
|
||||
# Copyright 2012-2014 ComicTagger Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import os
|
||||
#import sys
|
||||
|
||||
from PyQt5 import QtCore, QtGui, QtWidgets, uic
|
||||
#from PyQt5.QtCore import QUrl, pyqtSignal, QByteArray
|
||||
from PyQt5 import QtCore, QtWidgets, uic
|
||||
|
||||
from .settings import ComicTaggerSettings
|
||||
from .coverimagewidget import CoverImageWidget
|
||||
from comictaggerlib.ui.qtutils import reduceWidgetFontSize
|
||||
#from imagefetcher import ImageFetcher
|
||||
#from comicarchive import MetaDataStyle
|
||||
#from comicvinetalker import ComicVineTalker
|
||||
#import utils
|
||||
from comicapi.comicarchive import ComicArchive
|
||||
from comictaggerlib.coverimagewidget import CoverImageWidget
|
||||
from comictaggerlib.ctsettings import ct_ns
|
||||
from comictaggerlib.resulttypes import IssueResult
|
||||
from comictaggerlib.ui import ui_path
|
||||
from comictaggerlib.ui.qtutils import reduce_widget_font_size
|
||||
from comictalker.comictalker import ComicTalker
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MatchSelectionWindow(QtWidgets.QDialog):
|
||||
def __init__(
|
||||
self,
|
||||
parent: QtWidgets.QWidget,
|
||||
matches: list[IssueResult],
|
||||
comic_archive: ComicArchive,
|
||||
config: ct_ns,
|
||||
talker: ComicTalker,
|
||||
) -> None:
|
||||
super().__init__(parent)
|
||||
|
||||
volume_id = 0
|
||||
|
||||
def __init__(self, parent, matches, comic_archive):
|
||||
super(MatchSelectionWindow, self).__init__(parent)
|
||||
|
||||
uic.loadUi(
|
||||
ComicTaggerSettings.getUIFile('matchselectionwindow.ui'), self)
|
||||
with (ui_path / "matchselectionwindow.ui").open(encoding="utf-8") as uifile:
|
||||
uic.loadUi(uifile, self)
|
||||
|
||||
self.altCoverWidget = CoverImageWidget(
|
||||
self.altCoverContainer, CoverImageWidget.AltCoverMode)
|
||||
self.altCoverContainer, CoverImageWidget.AltCoverMode, config.Runtime_Options__config.user_cache_dir, talker
|
||||
)
|
||||
gridlayout = QtWidgets.QGridLayout(self.altCoverContainer)
|
||||
gridlayout.addWidget(self.altCoverWidget)
|
||||
gridlayout.setContentsMargins(0, 0, 0, 0)
|
||||
|
||||
self.archiveCoverWidget = CoverImageWidget(
|
||||
self.archiveCoverContainer, CoverImageWidget.ArchiveMode)
|
||||
self.archiveCoverWidget = CoverImageWidget(self.archiveCoverContainer, CoverImageWidget.ArchiveMode, None, None)
|
||||
gridlayout = QtWidgets.QGridLayout(self.archiveCoverContainer)
|
||||
gridlayout.addWidget(self.archiveCoverWidget)
|
||||
gridlayout.setContentsMargins(0, 0, 0, 0)
|
||||
|
||||
reduceWidgetFontSize(self.twList)
|
||||
reduceWidgetFontSize(self.teDescription, 1)
|
||||
reduce_widget_font_size(self.twList)
|
||||
reduce_widget_font_size(self.teDescription, 1)
|
||||
|
||||
self.setWindowFlags(self.windowFlags() |
|
||||
QtCore.Qt.WindowSystemMenuHint |
|
||||
QtCore.Qt.WindowMaximizeButtonHint)
|
||||
self.setWindowFlags(
|
||||
QtCore.Qt.WindowType(
|
||||
self.windowFlags()
|
||||
| QtCore.Qt.WindowType.WindowSystemMenuHint
|
||||
| QtCore.Qt.WindowType.WindowMaximizeButtonHint
|
||||
)
|
||||
)
|
||||
|
||||
self.matches = matches
|
||||
self.matches: list[IssueResult] = matches
|
||||
self.comic_archive = comic_archive
|
||||
|
||||
self.twList.currentItemChanged.connect(self.currentItemChanged)
|
||||
self.twList.cellDoubleClicked.connect(self.cellDoubleClicked)
|
||||
self.twList.currentItemChanged.connect(self.current_item_changed)
|
||||
self.twList.cellDoubleClicked.connect(self.cell_double_clicked)
|
||||
|
||||
self.updateData()
|
||||
self.update_data()
|
||||
|
||||
def updateData(self):
|
||||
|
||||
self.setCoverImage()
|
||||
self.populateTable()
|
||||
def update_data(self) -> None:
|
||||
self.set_cover_image()
|
||||
self.populate_table()
|
||||
self.twList.resizeColumnsToContents()
|
||||
self.twList.selectRow(0)
|
||||
|
||||
path = self.comic_archive.path
|
||||
self.setWindowTitle("Select correct match: {0}".format(
|
||||
os.path.split(path)[1]))
|
||||
self.setWindowTitle(f"Select correct match: {os.path.split(path)[1]}")
|
||||
|
||||
def populateTable(self):
|
||||
|
||||
while self.twList.rowCount() > 0:
|
||||
self.twList.removeRow(0)
|
||||
def populate_table(self) -> None:
|
||||
self.twList.setRowCount(0)
|
||||
|
||||
self.twList.setSortingEnabled(False)
|
||||
|
||||
row = 0
|
||||
for match in self.matches:
|
||||
for row, match in enumerate(self.matches):
|
||||
self.twList.insertRow(row)
|
||||
|
||||
item_text = match['series']
|
||||
item_text = match.series
|
||||
item = QtWidgets.QTableWidgetItem(item_text)
|
||||
item.setData(QtCore.Qt.ToolTipRole, item_text)
|
||||
item.setData(QtCore.Qt.UserRole, (match,))
|
||||
item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
|
||||
item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
|
||||
item.setData(QtCore.Qt.ItemDataRole.UserRole, (match,))
|
||||
item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
|
||||
self.twList.setItem(row, 0, item)
|
||||
|
||||
if match['publisher'] is not None:
|
||||
item_text = "{0}".format(match['publisher'])
|
||||
if match.publisher is not None:
|
||||
item_text = str(match.publisher)
|
||||
else:
|
||||
item_text = "Unknown"
|
||||
item = QtWidgets.QTableWidgetItem(item_text)
|
||||
item.setData(QtCore.Qt.ToolTipRole, item_text)
|
||||
item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
|
||||
item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
|
||||
item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
|
||||
self.twList.setItem(row, 1, item)
|
||||
|
||||
month_str = ""
|
||||
year_str = "????"
|
||||
if match['month'] is not None:
|
||||
month_str = "-{0:02d}".format(int(match['month']))
|
||||
if match['year'] is not None:
|
||||
year_str = "{0}".format(match['year'])
|
||||
if match.month is not None:
|
||||
month_str = f"-{int(match.month):02d}"
|
||||
if match.year is not None:
|
||||
year_str = str(match.year)
|
||||
|
||||
item_text = year_str + month_str
|
||||
item = QtWidgets.QTableWidgetItem(item_text)
|
||||
item.setData(QtCore.Qt.ToolTipRole, item_text)
|
||||
item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
|
||||
item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
|
||||
item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
|
||||
self.twList.setItem(row, 2, item)
|
||||
|
||||
item_text = match['issue_title']
|
||||
item_text = match.issue_title
|
||||
if item_text is None:
|
||||
item_text = ""
|
||||
item = QtWidgets.QTableWidgetItem(item_text)
|
||||
item.setData(QtCore.Qt.ToolTipRole, item_text)
|
||||
item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
|
||||
item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
|
||||
item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
|
||||
self.twList.setItem(row, 3, item)
|
||||
|
||||
row += 1
|
||||
|
||||
self.twList.resizeColumnsToContents()
|
||||
self.twList.setSortingEnabled(True)
|
||||
self.twList.sortItems(2, QtCore.Qt.AscendingOrder)
|
||||
self.twList.sortItems(2, QtCore.Qt.SortOrder.AscendingOrder)
|
||||
self.twList.selectRow(0)
|
||||
self.twList.resizeColumnsToContents()
|
||||
self.twList.horizontalHeader().setStretchLastSection(True)
|
||||
|
||||
def cellDoubleClicked(self, r, c):
|
||||
def cell_double_clicked(self, r: int, c: int) -> None:
|
||||
self.accept()
|
||||
|
||||
def currentItemChanged(self, curr, prev):
|
||||
|
||||
def current_item_changed(self, curr: QtCore.QModelIndex, prev: QtCore.QModelIndex) -> None:
|
||||
if curr is None:
|
||||
return
|
||||
if prev is not None and prev.row() == curr.row():
|
||||
return
|
||||
|
||||
self.altCoverWidget.setIssueID(self.currentMatch()['issue_id'])
|
||||
if self.currentMatch()['description'] is None:
|
||||
match = self.current_match()
|
||||
self.altCoverWidget.set_issue_details(
|
||||
match.issue_id,
|
||||
[match.image_url, *match.alt_image_urls],
|
||||
)
|
||||
if match.description is None:
|
||||
self.teDescription.setText("")
|
||||
else:
|
||||
self.teDescription.setText(self.currentMatch()['description'])
|
||||
self.teDescription.setText(match.description)
|
||||
|
||||
def setCoverImage(self):
|
||||
self.archiveCoverWidget.setArchive(self.comic_archive)
|
||||
def set_cover_image(self) -> None:
|
||||
self.archiveCoverWidget.set_archive(self.comic_archive)
|
||||
|
||||
def currentMatch(self):
|
||||
def current_match(self) -> IssueResult:
|
||||
row = self.twList.currentRow()
|
||||
match = self.twList.item(row, 0).data(
|
||||
QtCore.Qt.UserRole)[0]
|
||||
match: IssueResult = self.twList.item(row, 0).data(QtCore.Qt.ItemDataRole.UserRole)[0]
|
||||
return match
|
||||
|
||||
@@ -6,113 +6,115 @@ checked = OptionalMessageDialog.msg(self, "Disclaimer",
|
||||
"This is beta software, and you are using it at your own risk!",
|
||||
)
|
||||
|
||||
said_yes, checked = OptionalMessageDialog.question(self, "Question",
|
||||
said_yes, checked = OptionalMessageDialog.question(self, "QtWidgets.Question",
|
||||
"Are you sure you wish to do this?",
|
||||
)
|
||||
"""
|
||||
|
||||
# Copyright 2012-2014 Anthony Beville
|
||||
|
||||
#
|
||||
# Copyright 2012-2014 ComicTagger Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
from PyQt5.QtCore import *
|
||||
from PyQt5.QtGui import *
|
||||
from PyQt5.QtWidgets import *
|
||||
import logging
|
||||
|
||||
from PyQt5 import QtCore, QtWidgets
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
StyleMessage = 0
|
||||
StyleQuestion = 1
|
||||
|
||||
|
||||
class OptionalMessageDialog(QDialog):
|
||||
|
||||
def __init__(self, parent, style, title, msg,
|
||||
check_state=Qt.Unchecked, check_text=None):
|
||||
QDialog.__init__(self, parent)
|
||||
class OptionalMessageDialog(QtWidgets.QDialog):
|
||||
def __init__(
|
||||
self, parent: QtWidgets.QWidget, style: int, title: str, msg: str, checked: bool = False, check_text: str = ""
|
||||
) -> None:
|
||||
super().__init__(parent)
|
||||
|
||||
self.setWindowTitle(title)
|
||||
self.was_accepted = False
|
||||
layout = QtWidgets.QVBoxLayout(self)
|
||||
|
||||
l = QVBoxLayout(self)
|
||||
|
||||
self.theLabel = QLabel(msg)
|
||||
self.theLabel = QtWidgets.QLabel(msg)
|
||||
self.theLabel.setWordWrap(True)
|
||||
self.theLabel.setTextFormat(Qt.RichText)
|
||||
self.theLabel.setTextFormat(QtCore.Qt.TextFormat.RichText)
|
||||
self.theLabel.setOpenExternalLinks(True)
|
||||
self.theLabel.setTextInteractionFlags(
|
||||
Qt.TextSelectableByMouse | Qt.LinksAccessibleByMouse | Qt.LinksAccessibleByKeyboard)
|
||||
QtCore.Qt.TextInteractionFlag.TextSelectableByMouse
|
||||
| QtCore.Qt.TextInteractionFlag.LinksAccessibleByMouse
|
||||
| QtCore.Qt.TextInteractionFlag.LinksAccessibleByKeyboard
|
||||
)
|
||||
|
||||
l.addWidget(self.theLabel)
|
||||
l.insertSpacing(-1, 10)
|
||||
layout.addWidget(self.theLabel)
|
||||
layout.insertSpacing(-1, 10)
|
||||
|
||||
if check_text is None:
|
||||
if not check_text:
|
||||
if style == StyleQuestion:
|
||||
check_text = "Remember this answer"
|
||||
else:
|
||||
check_text = "Don't show this message again"
|
||||
|
||||
self.theCheckBox = QCheckBox(check_text)
|
||||
self.theCheckBox = QtWidgets.QCheckBox(check_text)
|
||||
|
||||
self.theCheckBox.setCheckState(check_state)
|
||||
self.theCheckBox.setChecked(checked)
|
||||
|
||||
l.addWidget(self.theCheckBox)
|
||||
layout.addWidget(self.theCheckBox)
|
||||
|
||||
btnbox_style = QDialogButtonBox.Ok
|
||||
btnbox_style: QtWidgets.QDialogButtonBox.StandardButtons | QtWidgets.QDialogButtonBox.StandardButton
|
||||
if style == StyleQuestion:
|
||||
btnbox_style = QDialogButtonBox.Yes | QDialogButtonBox.No
|
||||
btnbox_style = QtWidgets.QDialogButtonBox.StandardButton.Yes | QtWidgets.QDialogButtonBox.StandardButton.No
|
||||
else:
|
||||
btnbox_style = QtWidgets.QDialogButtonBox.StandardButton.Ok
|
||||
|
||||
self.theButtonBox = QDialogButtonBox(
|
||||
btnbox_style,
|
||||
parent=self,
|
||||
accepted=self.accept,
|
||||
rejected=self.reject)
|
||||
self.theButtonBox = QtWidgets.QDialogButtonBox(btnbox_style, parent=self)
|
||||
self.theButtonBox.accepted.connect(self.accept)
|
||||
self.theButtonBox.rejected.connect(self.reject)
|
||||
|
||||
l.addWidget(self.theButtonBox)
|
||||
layout.addWidget(self.theButtonBox)
|
||||
|
||||
def accept(self):
|
||||
def accept(self) -> None:
|
||||
self.was_accepted = True
|
||||
QDialog.accept(self)
|
||||
QtWidgets.QDialog.accept(self)
|
||||
|
||||
def reject(self):
|
||||
def reject(self) -> None:
|
||||
self.was_accepted = False
|
||||
QDialog.reject(self)
|
||||
QtWidgets.QDialog.reject(self)
|
||||
|
||||
@staticmethod
|
||||
def msg(parent, title, msg, check_state=Qt.Unchecked, check_text=None):
|
||||
def msg(parent: QtWidgets.QWidget, title: str, msg: str, checked: bool = False, check_text: str = "") -> bool:
|
||||
d = OptionalMessageDialog(parent, StyleMessage, title, msg, checked=checked, check_text=check_text)
|
||||
|
||||
d = OptionalMessageDialog(
|
||||
parent,
|
||||
StyleMessage,
|
||||
title,
|
||||
msg,
|
||||
check_state=check_state,
|
||||
check_text=check_text)
|
||||
|
||||
d.exec_()
|
||||
d.exec()
|
||||
return d.theCheckBox.isChecked()
|
||||
|
||||
@staticmethod
|
||||
def question(
|
||||
parent, title, msg, check_state=Qt.Unchecked, check_text=None):
|
||||
parent: QtWidgets.QWidget, title: str, msg: str, checked: bool = False, check_text: str = ""
|
||||
) -> tuple[bool, bool]:
|
||||
d = OptionalMessageDialog(parent, StyleQuestion, title, msg, checked=checked, check_text=check_text)
|
||||
|
||||
d = OptionalMessageDialog(
|
||||
parent,
|
||||
StyleQuestion,
|
||||
title,
|
||||
msg,
|
||||
check_state=check_state,
|
||||
check_text=check_text)
|
||||
|
||||
d.exec_()
|
||||
d.exec()
|
||||
|
||||
return d.was_accepted, d.theCheckBox.isChecked()
|
||||
|
||||
@staticmethod
|
||||
def msg_no_checkbox(
|
||||
parent: QtWidgets.QWidget, title: str, msg: str, checked: bool = False, check_text: str = ""
|
||||
) -> bool:
|
||||
d = OptionalMessageDialog(parent, StyleMessage, title, msg, checked=checked, check_text=check_text)
|
||||
d.theCheckBox.hide()
|
||||
|
||||
d.exec()
|
||||
return d.theCheckBox.isChecked()
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.