Compare commits
No commits in common. "c9de8370c24fa9435b43673014e4e8280dc74e5a" and "d089c4bb6a8c3dd0239e1c101c3180f895987914" have entirely different histories.
c9de8370c2 ... d089c4bb6a
.github/workflows/build.yaml (vendored, 4 changes)
@@ -47,7 +47,7 @@ jobs:
     strategy:
       matrix:
         python-version: [3.9]
-        os: [ubuntu-22.04, macos-13, windows-latest]
+        os: [ubuntu-latest, macos-12, windows-latest]

     steps:
       - uses: actions/checkout@v4
@@ -70,7 +70,7 @@ jobs:

       - name: Install linux dependencies
         run: |
-          sudo apt-get update && sudo apt-get upgrade && sudo apt-get install pkg-config libicu-dev libqt5gui5 libfuse2 desktop-file-utils
+          sudo apt-get update && sudo apt-get upgrade && sudo apt-get install pkg-config libicu-dev libqt5gui5 libfuse2
         if: runner.os == 'Linux'

       - name: Build and install PyPi packages
.github/workflows/package.yaml (vendored, 4 changes)
@@ -16,7 +16,7 @@ jobs:
     strategy:
       matrix:
         python-version: [3.9]
-        os: [ubuntu-22.04, macos-13, windows-latest]
+        os: [ubuntu-latest, macos-12, windows-latest]

     steps:
       - uses: actions/checkout@v4
@@ -39,7 +39,7 @@ jobs:

       - name: Install linux dependencies
         run: |
-          sudo apt-get update && sudo apt-get upgrade && sudo apt-get install pkg-config libicu-dev libqt5gui5 libfuse2 desktop-file-utils
+          sudo apt-get update && sudo apt-get upgrade && sudo apt-get install pkg-config libicu-dev libqt5gui5 libfuse2
         if: runner.os == 'Linux'

       - name: Build, Install and Test PyPi packages
@@ -1,7 +1,7 @@
 exclude: ^(scripts|comictaggerlib/graphics/resources.py)
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v5.0.0
+    rev: v4.6.0
     hooks:
       - id: trailing-whitespace
       - id: end-of-file-fixer
@@ -10,11 +10,11 @@ repos:
       - id: name-tests-test
       - id: requirements-txt-fixer
   - repo: https://github.com/asottile/setup-cfg-fmt
-    rev: v2.7.0
+    rev: v2.5.0
     hooks:
       - id: setup-cfg-fmt
   - repo: https://github.com/asottile/pyupgrade
-    rev: v3.18.0
+    rev: v3.17.0
     hooks:
       - id: pyupgrade
         args: [--py39-plus]
@@ -33,12 +33,12 @@ repos:
     hooks:
       - id: black
   - repo: https://github.com/PyCQA/flake8
-    rev: 7.1.1
+    rev: 7.1.0
     hooks:
       - id: flake8
         additional_dependencies: [flake8-encodings, flake8-builtins, flake8-print, flake8-no-nested-comprehensions]
   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v1.11.2
+    rev: v1.11.0
     hooks:
       - id: mypy
         additional_dependencies: [types-setuptools, types-requests, settngs>=0.10.4]
@@ -10,7 +10,7 @@ import comictaggerlib.main
 def generate() -> str:
     app = comictaggerlib.main.App()
     app.load_plugins(app.initial_arg_parser.parse_known_args()[0])
-    app.register_settings(True)
+    app.register_settings()
     imports, types = settngs.generate_dict(app.manager.definitions)
     imports2, types2 = settngs.generate_ns(app.manager.definitions)
     i = imports.splitlines()
@@ -27,7 +27,7 @@ if opts.APPIMAGETOOL.exists():
     raise SystemExit(0)

 urlretrieve(
-    "https://github.com/AppImage/appimagetool/releases/latest/download/appimagetool-x86_64.AppImage", opts.APPIMAGETOOL
+    "https://github.com/AppImage/AppImageKit/releases/latest/download/appimagetool-x86_64.AppImage", opts.APPIMAGETOOL
 )
 os.chmod(opts.APPIMAGETOOL, 0o0700)
@@ -4,6 +4,7 @@ import logging
 import os
 import pathlib
 import shutil
+import struct
 import tempfile
 import zipfile
 from typing import cast
@@ -46,7 +47,7 @@ class ZipArchiver(Archiver):
         try:
             data = zf.read(archive_file)
         except (zipfile.BadZipfile, OSError) as e:
-            logger.exception("Error reading zip archive [%s]: %s :: %s", e, self.path, archive_file)
+            logger.error("Error reading zip archive [%s]: %s :: %s", e, self.path, archive_file)
             raise
         return data
@@ -58,13 +59,13 @@ class ZipArchiver(Archiver):
         # zip archive w/o the indicated file. Very sucky, but maybe
         # another solution can be found
         files = self.get_filename_list()
+        if archive_file in files:
+            if not self.rebuild([archive_file]):
+                return False

         try:
             # now just add the archive file as a new one
             with zipfile.ZipFile(self.path, mode="a", allowZip64=True, compression=zipfile.ZIP_DEFLATED) as zf:
-                _patch_zipfile(zf)
-                if archive_file in files:
-                    zf.remove(archive_file)  # type: ignore
                 zf.writestr(archive_file, data)
             return True
         except (zipfile.BadZipfile, OSError) as e:
@@ -124,7 +125,7 @@ class ZipArchiver(Archiver):
             # preserve the old comment
             comment = other_archive.get_comment()
             if comment is not None:
-                if not self.set_comment(comment):
+                if not self.write_zip_comment(self.path, comment):
                     return False
         except Exception as e:
             logger.error("Error while copying to zip archive [%s]: from %s to %s", e, other_archive.path, self.path)
@@ -143,106 +144,61 @@ class ZipArchiver(Archiver):

     @classmethod
     def is_valid(cls, path: pathlib.Path) -> bool:
-        if not zipfile.is_zipfile(path):  # only checks central directory at the end of the archive
-            return False
-        try:
-            # test all the files in the zip. adds about 0.1 to execution time per zip
-            with zipfile.ZipFile(path) as zf:
-                for zipinfo in zf.filelist:
-                    zf.open(zipinfo).close()
-        except Exception:
-            return False
-        return True
-
-
-def _patch_zipfile(zf):  # type: ignore
-    zf.remove = _zip_remove.__get__(zf, zipfile.ZipFile)
-    zf._remove_members = _zip_remove_members.__get__(zf, zipfile.ZipFile)
-
-
-def _zip_remove(self, zinfo_or_arcname):  # type: ignore
-    """Remove a member from the archive."""
-
-    if self.mode not in ("w", "x", "a"):
-        raise ValueError("remove() requires mode 'w', 'x', or 'a'")
-    if not self.fp:
-        raise ValueError("Attempt to write to ZIP archive that was already closed")
-    if self._writing:
-        raise ValueError("Can't write to ZIP archive while an open writing handle exists")
-
-    # Make sure we have an existing info object
-    if isinstance(zinfo_or_arcname, zipfile.ZipInfo):
-        zinfo = zinfo_or_arcname
-        # make sure zinfo exists
-        if zinfo not in self.filelist:
-            raise KeyError("There is no item %r in the archive" % zinfo_or_arcname)
-    else:
-        # get the info object
-        zinfo = self.getinfo(zinfo_or_arcname)
-
-    return self._remove_members({zinfo})
-
-
-def _zip_remove_members(self, members, *, remove_physical=True, chunk_size=2**20):  # type: ignore
-    """Remove members in a zip file.
-    All members (as zinfo) should exist in the zip; otherwise the zip file
-    will erroneously end in an inconsistent state.
-    """
-    fp = self.fp
-    entry_offset = 0
-    member_seen = False
-
-    # get a sorted filelist by header offset, in case the dir order
-    # doesn't match the actual entry order
-    filelist = sorted(self.filelist, key=lambda x: x.header_offset)
-    for i in range(len(filelist)):
-        info = filelist[i]
-        is_member = info in members
-
-        if not (member_seen or is_member):
-            continue
-
-        # get the total size of the entry
-        try:
-            offset = filelist[i + 1].header_offset
-        except IndexError:
-            offset = self.start_dir
-        entry_size = offset - info.header_offset
-
-        if is_member:
-            member_seen = True
-            entry_offset += entry_size
-
-            # update caches
-            self.filelist.remove(info)
-            try:
-                del self.NameToInfo[info.filename]
-            except KeyError:
-                pass
-            continue
-
-        # update the header and move entry data to the new position
-        if remove_physical:
-            old_header_offset = info.header_offset
-            info.header_offset -= entry_offset
-            read_size = 0
-            while read_size < entry_size:
-                fp.seek(old_header_offset + read_size)
-                data = fp.read(min(entry_size - read_size, chunk_size))
-                fp.seek(info.header_offset + read_size)
-                fp.write(data)
-                fp.flush()
-                read_size += len(data)
-
-    # Avoid missing entry if entries have a duplicated name.
-    # Reverse the order as NameToInfo normally stores the last added one.
-    for info in reversed(self.filelist):
-        self.NameToInfo.setdefault(info.filename, info)
-
-    # update state
-    if remove_physical:
-        self.start_dir -= entry_offset
-    self._didModify = True
-
-    # seek to the start of the central dir
-    fp.seek(self.start_dir)
+        return zipfile.is_zipfile(path)
+
+    def write_zip_comment(self, filename: pathlib.Path | str, comment: str) -> bool:
+        """
+        This is a custom function for writing a comment to a zip file,
+        since the built-in one doesn't seem to work on Windows and Mac OS/X
+
+        Fortunately, the zip comment is at the end of the file, and it's
+        easy to manipulate. See this website for more info:
+        see: http://en.wikipedia.org/wiki/Zip_(file_format)#Structure
+        """
+
+        # get file size
+        statinfo = os.stat(filename)
+        file_length = statinfo.st_size
+
+        try:
+            with open(filename, mode="r+b") as file:
+                # the starting position, relative to EOF
+                pos = -4
+                found = False
+
+                # walk backwards to find the "End of Central Directory" record
+                while (not found) and (-pos != file_length):
+                    # seek, relative to EOF
+                    file.seek(pos, 2)
+                    value = file.read(4)
+
+                    # look for the end of central directory signature
+                    if bytearray(value) == bytearray([0x50, 0x4B, 0x05, 0x06]):
+                        found = True
+                    else:
+                        # not found, step back another byte
+                        pos = pos - 1
+
+                if found:
+                    # now skip forward 20 bytes to the comment length word
+                    pos += 20
+                    file.seek(pos, 2)
+
+                    # Pack the length of the comment string
+                    fmt = "H"  # one 2-byte integer
+                    comment_length = struct.pack(fmt, len(comment))  # pack integer in a binary string
+
+                    # write out the length
+                    file.write(comment_length)
+                    file.seek(pos + 2, 2)
+
+                    # write out the comment itself
+                    file.write(comment.encode("utf-8"))
+                    file.truncate()
+                else:
+                    raise Exception("Could not find the End of Central Directory record!")
+        except Exception as e:
+            logger.error("Error writing comment to zip archive [%s]: %s", e, self.path)
+            return False
+        else:
+            return True
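The EOCD record that write_zip_comment patches has a fixed layout: a 4-byte signature (PK\x05\x06), 16 bytes of directory bookkeeping, a 2-byte comment length, then the comment itself. A minimal companion sketch (not part of the diff) that reads a comment back the same way; `path` is a hypothetical file path, and unlike the native `"H"` format above it pins the field to little-endian with `"<H"`:

    import struct

    def read_zip_comment(path: str) -> str:
        # Read the whole file and scan backwards for the EOCD signature.
        with open(path, "rb") as f:
            data = f.read()
        pos = data.rfind(b"\x50\x4b\x05\x06")  # "PK\x05\x06"
        if pos < 0:
            raise ValueError("Could not find the End of Central Directory record!")
        # The comment length is a little-endian uint16 at offset 20 of the
        # record; the comment bytes start right after the 22-byte record.
        (length,) = struct.unpack_from("<H", data, pos + 20)
        return data[pos + 22 : pos + 22 + length].decode("utf-8")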
@@ -23,7 +23,9 @@ import os
 import pathlib
 import shutil
 import sys
+import traceback
 from collections.abc import Iterable
+from typing import TYPE_CHECKING

 from comicapi import utils
 from comicapi.archivers import Archiver, UnknownArchiver, ZipArchiver
@@ -31,13 +33,16 @@ from comicapi.genericmetadata import GenericMetadata
 from comicapi.tags import Tag
 from comictaggerlib.ctversion import version

+if TYPE_CHECKING:
+    from importlib.metadata import EntryPoint
+
 logger = logging.getLogger(__name__)

 archivers: list[type[Archiver]] = []
 tags: dict[str, Tag] = {}


-def load_archive_plugins(local_plugins: Iterable[type[Archiver]] = tuple()) -> None:
+def load_archive_plugins(local_plugins: Iterable[EntryPoint] = tuple()) -> None:
     if archivers:
         return
     if sys.version_info < (3, 10):
@@ -48,7 +53,7 @@ def load_archive_plugins(local_plugins: Iterable[type[Archiver]] = tuple()) -> N
     archive_plugins: list[type[Archiver]] = []
     # A list is used first matching plugin wins

-    for ep in itertools.chain(entry_points(group="comicapi.archiver")):
+    for ep in itertools.chain(local_plugins, entry_points(group="comicapi.archiver")):
         try:
             spec = importlib.util.find_spec(ep.module)
         except ValueError:
@@ -66,12 +71,11 @@ def load_archive_plugins(local_plugins: Iterable[type[Archiver]] = tuple()) -> N
         else:
             logger.exception("Failed to load archive plugin: %s", ep.name)
     archivers.clear()
-    archivers.extend(local_plugins)
     archivers.extend(archive_plugins)
     archivers.extend(builtin)


-def load_tag_plugins(version: str = f"ComicAPI/{version}", local_plugins: Iterable[type[Tag]] = tuple()) -> None:
+def load_tag_plugins(version: str = f"ComicAPI/{version}", local_plugins: Iterable[EntryPoint] = tuple()) -> None:
     if tags:
         return
     if sys.version_info < (3, 10):
@@ -81,7 +85,7 @@ def load_tag_plugins(version: str = f"ComicAPI/{version}", local_plugins: Iterab
     builtin: dict[str, Tag] = {}
     tag_plugins: dict[str, tuple[Tag, str]] = {}
     # A dict is used, last plugin wins
-    for ep in entry_points(group="comicapi.tags"):
+    for ep in itertools.chain(entry_points(group="comicapi.tags"), local_plugins):
         location = "Unknown"
         try:
             _spec = importlib.util.find_spec(ep.module)
@@ -106,9 +110,6 @@ def load_tag_plugins(version: str = f"ComicAPI/{version}", local_plugins: Iterab
             tag_plugins[tag.id] = (tag(version), location)
         except Exception:
             logger.exception("Failed to load tag plugin: %s from %s", ep.name, location)
-    # A dict is used, last plugin wins
-    for tag in local_plugins:
-        tag_plugins[tag.id] = (tag(version), "Local")

     for tag_id in set(builtin.keys()).intersection(tag_plugins):
         location = tag_plugins[tag_id][1]
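For context, the entry_points(group=...) calls in these hunks come from importlib.metadata: an installed distribution advertises plugins under the "comicapi.archiver" and "comicapi.tags" groups, and each EntryPoint resolves to a class through its module and attr. A hedged sketch (the printed values are illustrative, not taken from the diff):

    from importlib.metadata import entry_points

    # Iterate advertised archiver plugins; each entry point is "name = module:attr".
    for ep in entry_points(group="comicapi.archiver"):
        print(ep.name, ep.module, ep.attr)  # e.g. "zip", "comicapi.archivers.zip", "ZipArchiver"
        archiver_cls = ep.load()  # imports the module and returns the referenced class

Note that entry_points(group=...) needs Python 3.10 or newer, which is why both loaders guard on sys.version_info < (3, 10) for a backport.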
@@ -197,11 +198,7 @@ class ComicArchive:
         return self.archiver.name() == "ZIP"

     def seems_to_be_a_comic_archive(self) -> bool:
-        if (
-            not (isinstance(self.archiver, UnknownArchiver))
-            and self.get_number_of_pages() > 0
-            and self.archiver.is_valid(self.path)
-        ):
+        if not (isinstance(self.archiver, UnknownArchiver)) and self.get_number_of_pages() > 0:
             return True

         return False
@@ -255,8 +252,11 @@ class ComicArchive:
         if filename:
             try:
                 image_data = self.archiver.read_file(filename) or b""
-            except Exception:
-                logger.exception("Error reading in page %d. Substituting logo page.", index)
+            except Exception as e:
+                tb = traceback.extract_tb(e.__traceback__)
+                logger.error(
+                    "%s:%s: Error reading in page %d. Substituting logo page.", tb[1].filename, tb[1].lineno, index
+                )
                 image_data = ComicArchive.logo_data

         return image_data
@@ -337,42 +337,37 @@ class ComicArchive:
     ) -> None:
         md.page_count = self.get_number_of_pages()
         md.apply_default_page_list(self.get_page_name_list())
-        if not calc_page_sizes or not self.seems_to_be_a_comic_archive():
-            return
-        for p in md.pages:
-            if not self.pil_available:
-                if p.byte_size is not None:
-                    data = self.get_page(p.archive_index)
-                    p.byte_size = len(data)
-                continue
-            try:
-                from PIL import Image
-
-                self.pil_available = True
-            except ImportError:
-                self.pil_available = False
-                if p.byte_size is not None:
-                    data = self.get_page(p.archive_index)
-                    p.byte_size = len(data)
-                continue
-
-            if p.byte_size is None or p.height is None or p.width is None or p.double_page is None:
-                try:
-                    data = self.get_page(p.archive_index)
-                    p.byte_size = len(data)
-                    if not data:
-                        continue
-
-                    im = Image.open(io.BytesIO(data))
-                    w, h = im.size
-
-                    p.height = h
-                    p.width = w
-                    if detect_double_page:
-                        p.double_page = p.is_double_page()
-                except Exception as e:
-                    logger.exception("Error decoding image [%s] %s :: image %s", e, self.path, p.archive_index)
+        if calc_page_sizes:
+            for index, p in enumerate(md.pages):
+                idx = p.display_index
+                if self.pil_available:
+                    try:
+                        from PIL import Image
+
+                        self.pil_available = True
+                    except ImportError:
+                        self.pil_available = False
+                    if p.byte_size is None or p.height is None or p.width is None or p.double_page is None:
+                        data = self.get_page(idx)
+                        p.byte_size = len(data)
+                        if data:
+                            try:
+                                if isinstance(data, bytes):
+                                    im = Image.open(io.BytesIO(data))
+                                else:
+                                    im = Image.open(io.StringIO(data))
+                                w, h = im.size
+
+                                p.height = h
+                                p.width = w
+                                if detect_double_page:
+                                    p.double_page = p.is_double_page()
+                            except Exception as e:
+                                logger.warning("Error decoding image [%s] %s :: image %s", e, self.path, index)
+                else:
+                    if p.byte_size is not None:
+                        data = self.get_page(idx)
+                        p.byte_size = len(data)

     def metadata_from_filename(
         self,
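Both versions of apply_archive_info_to_metadata probe page dimensions the same way; a minimal standalone sketch of that probe, assuming `data` holds one page's image bytes (hypothetical variable):

    import io

    from PIL import Image

    def page_dimensions(data: bytes) -> tuple[int, int]:
        # Image.open is lazy: reading .size only parses the header,
        # so no full decode of the page is needed.
        with Image.open(io.BytesIO(data)) as im:
            return im.size  # (width, height)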
@@ -126,18 +126,5 @@
         "radio comics": "Mighty Comics Group",
         "red circle Comics": "Dark Circle Comics",
         "red circle": "Dark Circle Comics"
-    },
-
-    "Image Comics": {
-        "Image": "",
-        "avalon studios": "Avalon Studios",
-        "desperado publishing": "Desperado Publishing",
-        "extreme studios": "Extreme Studios",
-        "gorilla comics": "Gorilla Comics",
-        "highbrow entertainment": "Highbrow Entertainment",
-        "shadowline": "Shadowline",
-        "skybound entertainment": "Skybound Entertainment",
-        "todd mcfarlane productions": "Todd McFarlane Productions",
-        "top cow productions": "Top Cow Productions"
-    }
+    }
 }
@@ -213,11 +213,8 @@ def lex_filename(lex: Lexer) -> LexerFunc | None:
             r = lex.peek()
             if r.isdigit():
                 return lex_number
-            if is_symbol(r):
-                lex.accept_run(is_symbol)
-                lex.emit(ItemType.Symbol)
-            else:
-                return lex_text
+            lex.accept_run(is_symbol)
+            lex.emit(ItemType.Symbol)
         elif r.isnumeric():
             lex.backup()
             return lex_number
@@ -308,7 +305,7 @@ def lex_space(lex: Lexer) -> LexerFunc:
 def lex_text(lex: Lexer) -> LexerFunc:
     while True:
         r = lex.get()
-        if is_alpha_numeric(r) or r in "'":
+        if is_alpha_numeric(r):
             if r.isnumeric():  # E.g. v1
                 word = lex.input[lex.start : lex.pos]
                 if key.get(word.casefold(), None) == ItemType.InfoSpecifier:
@@ -316,7 +313,10 @@
                     lex.emit(key[word.casefold()])
                     return lex_filename
-            else:
-                lex.backup()
+            if r == "'" and lex.peek().casefold() == "s":
+                lex.get()
+            else:
+                lex.backup()
             word = lex.input[lex.start : lex.pos + 1]

             if word.casefold() in key:
@@ -43,9 +43,6 @@ logger = logging.getLogger(__name__)
 REMOVE = object()


-Credit = merge.Credit


 class PageType(merge.StrEnum):
     """
     These page info classes are exactly the same as the CIX scheme, since
|
||||
return False
|
||||
return self.archive_index == other.archive_index
|
||||
|
||||
def _get_clean_metadata(self, *attributes: str) -> PageMetadata:
|
||||
return PageMetadata(
|
||||
filename=self.filename if "filename" in attributes else "",
|
||||
type=self.type if "type" in attributes else "",
|
||||
bookmark=self.bookmark if "bookmark" in attributes else "",
|
||||
display_index=self.display_index if "display_index" in attributes else 0,
|
||||
archive_index=self.archive_index if "archive_index" in attributes else 0,
|
||||
double_page=self.double_page if "double_page" in attributes else None,
|
||||
byte_size=self.byte_size if "byte_size" in attributes else None,
|
||||
height=self.height if "height" in attributes else None,
|
||||
width=self.width if "width" in attributes else None,
|
||||
)
|
||||
|
||||
Credit = merge.Credit
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
@@ -192,7 +179,7 @@ class GenericMetadata:
     characters: set[str] = dataclasses.field(default_factory=set)
     teams: set[str] = dataclasses.field(default_factory=set)
     locations: set[str] = dataclasses.field(default_factory=set)
-    credits: list[Credit] = dataclasses.field(default_factory=list)
+    credits: list[merge.Credit] = dataclasses.field(default_factory=list)

     # Some CoMet-only items
     price: float | None = None
@@ -219,23 +206,14 @@
         tmp.__dict__.update(kwargs)
         return tmp

-    def _get_clean_metadata(self, *attributes: str) -> GenericMetadata:
+    def get_clean_metadata(self, *attributes: str) -> GenericMetadata:
         new_md = GenericMetadata()
         list_handled = []
         for attr in sorted(attributes):
             if "." in attr:
                 lst, _, name = attr.partition(".")
                 if lst in list_handled:
                     continue
                 old_value = getattr(self, lst)
                 new_value = getattr(new_md, lst)
                 if old_value:
-                    if hasattr(old_value[0], "_get_clean_metadata"):
-                        list_attributes = [x.removeprefix(lst + ".") for x in attributes if x.startswith(lst)]
-                        for x in old_value:
-                            new_value.append(x._get_clean_metadata(*list_attributes))
-                        list_handled.append(lst)
-                        continue
                     if not new_value:
                         for x in old_value:
                             new_value.append(x.__class__())
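A usage sketch of the attribute filter above, against the renamed get_clean_metadata on the plus side (values are hypothetical, and the behavior is inferred from the partial body shown): plain names keep whole fields, while dotted names such as "credits.person" address a sub-attribute of each list element.

    md = GenericMetadata()
    md.series = "Anda's Game"
    md.issue = "1"
    md.add_credit("Dara Naraghi", "Writer")

    clean = md.get_clean_metadata("series", "credits.person")
    # clean.series should be "Anda's Game"; clean.issue stays at its default,
    # and each Credit in clean.credits should keep only its person field.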
@@ -391,21 +369,19 @@
         return coverlist

     @overload
-    def add_credit(self, person: Credit) -> None: ...
+    def add_credit(self, person: merge.Credit) -> None: ...

     @overload
-    def add_credit(self, person: str, role: str, primary: bool = False, language: str = "") -> None: ...
+    def add_credit(self, person: str, role: str, primary: bool = False) -> None: ...

-    def add_credit(
-        self, person: str | Credit, role: str | None = None, primary: bool = False, language: str = ""
-    ) -> None:
+    def add_credit(self, person: str | merge.Credit, role: str | None = None, primary: bool = False) -> None:

-        credit: Credit
-        if isinstance(person, Credit):
+        credit: merge.Credit
+        if isinstance(person, merge.Credit):
             credit = person
         else:
             assert role is not None
-            credit = Credit(person=person, role=role, primary=primary, language=language)
+            credit = merge.Credit(person=person, role=role, primary=primary)

         if credit.role is None:
             raise TypeError("GenericMetadata.add_credit takes either a Credit object or a person name and role")
@@ -576,12 +552,12 @@ md_test: GenericMetadata = GenericMetadata(
     teams={"Fahrenheit"},
     locations=set(utils.split("lonely cottage ", ",")),
     credits=[
-        Credit(primary=False, person="Dara Naraghi", role="Writer"),
-        Credit(primary=False, person="Esteve Polls", role="Penciller"),
-        Credit(primary=False, person="Esteve Polls", role="Inker"),
-        Credit(primary=False, person="Neil Uyetake", role="Letterer"),
-        Credit(primary=False, person="Sam Kieth", role="Cover"),
-        Credit(primary=False, person="Ted Adams", role="Editor"),
+        merge.Credit(primary=False, person="Dara Naraghi", role="Writer"),
+        merge.Credit(primary=False, person="Esteve Polls", role="Penciller"),
+        merge.Credit(primary=False, person="Esteve Polls", role="Inker"),
+        merge.Credit(primary=False, person="Neil Uyetake", role="Letterer"),
+        merge.Credit(primary=False, person="Sam Kieth", role="Cover"),
+        merge.Credit(primary=False, person="Ted Adams", role="Editor"),
     ],
     tags=set(),
     pages=[
@@ -1,11 +1,12 @@
 from __future__ import annotations

 import dataclasses
+from collections import defaultdict
 from collections.abc import Collection
 from enum import auto
 from typing import Any

-from comicapi.utils import DefaultDict, StrEnum, norm_fold
+from comicapi.utils import StrEnum, norm_fold


 @dataclasses.dataclass
@@ -13,13 +14,9 @@ class Credit:
     person: str = ""
     role: str = ""
     primary: bool = False
-    language: str = ""  # Should be ISO 639 language code

     def __str__(self) -> str:
-        lang = ""
-        if self.language:
-            lang = f" [{self.language}]"
-        return f"{self.role}: {self.person}{lang}"
+        return f"{self.role}: {self.person}"


 class Mode(StrEnum):
@@ -54,19 +51,19 @@ def overlay(old: Any, new: Any) -> Any:
     return new


-attribute = DefaultDict(
+attribute = defaultdict(
+    lambda: overlay,
     {
         Mode.OVERLAY: overlay,
         Mode.ADD_MISSING: lambda old, new: overlay(new, old),
     },
-    default=lambda x: overlay,
 )


-lists = DefaultDict(
+lists = defaultdict(
+    lambda: overlay,
     {
         Mode.OVERLAY: merge_lists,
         Mode.ADD_MISSING: lambda old, new: merge_lists(new, old),
     },
-    default=lambda x: overlay,
 )
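The two tables above map a merge Mode to a two-argument callable, with overlay as the fallback for unlisted modes. A small usage sketch, assuming the module lives at comicapi/merge.py and that overlay(old, new) returns the new value unless it is None or empty:

    from comicapi import merge

    # OVERLAY takes the new value; ADD_MISSING swaps the arguments so the
    # old value wins whenever it is present.
    print(merge.attribute[merge.Mode.OVERLAY]("old title", "new title"))      # -> new title
    print(merge.attribute[merge.Mode.ADD_MISSING]("old title", "new title"))  # -> old title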
comicapi/tags/comet.py (new file, 323 lines)
"""A class to encapsulate CoMet data"""
|
||||
|
||||
#
|
||||
# Copyright 2012-2014 ComicTagger Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import os
|
||||
import xml.etree.ElementTree as ET
|
||||
from typing import Any
|
||||
|
||||
from comicapi import utils
|
||||
from comicapi.archivers import Archiver
|
||||
from comicapi.comicarchive import ComicArchive
|
||||
from comicapi.genericmetadata import GenericMetadata, PageMetadata, PageType
|
||||
from comicapi.tags import Tag
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class CoMet(Tag):
|
||||
enabled = True
|
||||
|
||||
id = "comet"
|
||||
|
||||
def __init__(self, version: str) -> None:
|
||||
super().__init__(version)
|
||||
|
||||
self.comet_filename = "CoMet.xml"
|
||||
self.file = "CoMet.xml"
|
||||
self.supported_attributes = {
|
||||
"series",
|
||||
"issue",
|
||||
"title",
|
||||
"volume",
|
||||
"genres",
|
||||
"description",
|
||||
"publisher",
|
||||
"language",
|
||||
"format",
|
||||
"maturity_rating",
|
||||
"month",
|
||||
"year",
|
||||
"page_count",
|
||||
"characters",
|
||||
"credits",
|
||||
"credits.person",
|
||||
"credits.primary",
|
||||
"credits.role",
|
||||
"price",
|
||||
"is_version_of",
|
||||
"rights",
|
||||
"identifier",
|
||||
"last_mark",
|
||||
"pages.type", # This is required for setting the cover image none of the other types will be saved
|
||||
"pages",
|
||||
}
|
||||
|
||||
def supports_credit_role(self, role: str) -> bool:
|
||||
return role.casefold() in self._get_parseable_credits()
|
||||
|
||||
def supports_tags(self, archive: Archiver) -> bool:
|
||||
return archive.supports_files()
|
||||
|
||||
def has_tags(self, archive: Archiver) -> bool:
|
||||
if not self.supports_tags(archive):
|
||||
return False
|
||||
has_tags = False
|
||||
# look at all xml files in root, and search for CoMet data, get first
|
||||
for n in archive.get_filename_list():
|
||||
if os.path.dirname(n) == "" and os.path.splitext(n)[1].casefold() == ".xml":
|
||||
# read in XML file, and validate it
|
||||
data = b""
|
||||
try:
|
||||
data = archive.read_file(n)
|
||||
except Exception as e:
|
||||
logger.warning("Error reading in Comet XML for validation! from %s: %s", archive.path, e)
|
||||
if self._validate_bytes(data):
|
||||
# since we found it, save it!
|
||||
self.file = n
|
||||
has_tags = True
|
||||
break
|
||||
return has_tags
|
||||
|
||||
def remove_tags(self, archive: Archiver) -> bool:
|
||||
return self.has_tags(archive) and archive.remove_file(self.file)
|
||||
|
||||
def read_tags(self, archive: Archiver) -> GenericMetadata:
|
||||
if self.has_tags(archive):
|
||||
metadata = archive.read_file(self.file) or b""
|
||||
if self._validate_bytes(metadata):
|
||||
return self._metadata_from_bytes(metadata, archive)
|
||||
return GenericMetadata()
|
||||
|
||||
def read_raw_tags(self, archive: Archiver) -> str:
|
||||
if self.has_tags(archive):
|
||||
return ET.tostring(ET.fromstring(archive.read_file(self.file)), encoding="unicode", xml_declaration=True)
|
||||
return ""
|
||||
|
||||
def write_tags(self, metadata: GenericMetadata, archive: Archiver) -> bool:
|
||||
if self.supports_tags(archive):
|
||||
success = True
|
||||
xml = b""
|
||||
if self.has_tags(archive):
|
||||
xml = archive.read_file(self.file)
|
||||
if self.file != self.comet_filename:
|
||||
success = self.remove_tags(archive)
|
||||
|
||||
return success and archive.write_file(self.comet_filename, self._bytes_from_metadata(metadata, xml))
|
||||
else:
|
||||
logger.warning(f"Archive ({archive.name()}) does not support {self.name()} metadata")
|
||||
return False
|
||||
|
||||
def name(self) -> str:
|
||||
return "Comic Metadata (CoMet)"
|
||||
|
||||
@classmethod
|
||||
def _get_parseable_credits(cls) -> list[str]:
|
||||
parsable_credits: list[str] = []
|
||||
parsable_credits.extend(GenericMetadata.writer_synonyms)
|
||||
parsable_credits.extend(GenericMetadata.penciller_synonyms)
|
||||
parsable_credits.extend(GenericMetadata.inker_synonyms)
|
||||
parsable_credits.extend(GenericMetadata.colorist_synonyms)
|
||||
parsable_credits.extend(GenericMetadata.letterer_synonyms)
|
||||
parsable_credits.extend(GenericMetadata.cover_synonyms)
|
||||
parsable_credits.extend(GenericMetadata.editor_synonyms)
|
||||
return parsable_credits
|
||||
|
||||
def _metadata_from_bytes(self, string: bytes, archive: Archiver) -> GenericMetadata:
|
||||
tree = ET.ElementTree(ET.fromstring(string))
|
||||
return self._convert_xml_to_metadata(tree, archive)
|
||||
|
||||
def _bytes_from_metadata(self, metadata: GenericMetadata, xml: bytes = b"") -> bytes:
|
||||
tree = self._convert_metadata_to_xml(metadata, xml)
|
||||
return ET.tostring(tree.getroot(), encoding="utf-8", xml_declaration=True)
|
||||
|
||||
def _convert_metadata_to_xml(self, metadata: GenericMetadata, xml: bytes = b"") -> ET.ElementTree:
|
||||
# shorthand for the metadata
|
||||
md = metadata
|
||||
|
||||
if xml:
|
||||
root = ET.fromstring(xml)
|
||||
else:
|
||||
# build a tree structure
|
||||
root = ET.Element("comet")
|
||||
root.attrib["xmlns:comet"] = "http://www.denvog.com/comet/"
|
||||
root.attrib["xmlns:xsi"] = "http://www.w3.org/2001/XMLSchema-instance"
|
||||
root.attrib["xsi:schemaLocation"] = "http://www.denvog.com http://www.denvog.com/comet/comet.xsd"
|
||||
|
||||
# helper func
|
||||
def assign(comet_entry: str, md_entry: Any) -> None:
|
||||
if md_entry is not None:
|
||||
ET.SubElement(root, comet_entry).text = str(md_entry)
|
||||
|
||||
# title is manditory
|
||||
assign("title", md.title or "")
|
||||
assign("series", md.series)
|
||||
assign("issue", md.issue) # must be int??
|
||||
assign("volume", md.volume)
|
||||
assign("description", md.description)
|
||||
assign("publisher", md.publisher)
|
||||
assign("pages", md.page_count)
|
||||
assign("format", md.format)
|
||||
assign("language", md.language)
|
||||
assign("rating", md.maturity_rating)
|
||||
assign("price", md.price)
|
||||
assign("isVersionOf", md.is_version_of)
|
||||
assign("rights", md.rights)
|
||||
assign("identifier", md.identifier)
|
||||
assign("lastMark", md.last_mark)
|
||||
assign("genre", ",".join(md.genres)) # TODO repeatable
|
||||
|
||||
for c in md.characters:
|
||||
assign("character", c.strip())
|
||||
|
||||
if md.manga is not None and md.manga == "YesAndRightToLeft":
|
||||
assign("readingDirection", "rtl")
|
||||
|
||||
if md.year is not None:
|
||||
date_str = f"{md.year:04}"
|
||||
if md.month is not None:
|
||||
date_str += f"-{md.month:02}"
|
||||
assign("date", date_str)
|
||||
|
||||
cover_index = md.get_cover_page_index_list()[0]
|
||||
assign("coverImage", md.pages[cover_index].filename)
|
||||
|
||||
# loop thru credits, and build a list for each role that CoMet supports
|
||||
for credit in metadata.credits:
|
||||
if credit.role.casefold() in set(GenericMetadata.writer_synonyms):
|
||||
ET.SubElement(root, "writer").text = str(credit.person)
|
||||
|
||||
if credit.role.casefold() in set(GenericMetadata.penciller_synonyms):
|
||||
ET.SubElement(root, "penciller").text = str(credit.person)
|
||||
|
||||
if credit.role.casefold() in set(GenericMetadata.inker_synonyms):
|
||||
ET.SubElement(root, "inker").text = str(credit.person)
|
||||
|
||||
if credit.role.casefold() in set(GenericMetadata.colorist_synonyms):
|
||||
ET.SubElement(root, "colorist").text = str(credit.person)
|
||||
|
||||
if credit.role.casefold() in set(GenericMetadata.letterer_synonyms):
|
||||
ET.SubElement(root, "letterer").text = str(credit.person)
|
||||
|
||||
if credit.role.casefold() in set(GenericMetadata.cover_synonyms):
|
||||
ET.SubElement(root, "coverDesigner").text = str(credit.person)
|
||||
|
||||
if credit.role.casefold() in set(GenericMetadata.editor_synonyms):
|
||||
ET.SubElement(root, "editor").text = str(credit.person)
|
||||
|
||||
ET.indent(root)
|
||||
|
||||
# wrap it in an ElementTree instance, and save as XML
|
||||
tree = ET.ElementTree(root)
|
||||
return tree
|
||||
|
||||
def _convert_xml_to_metadata(self, tree: ET.ElementTree, archive: Archiver) -> GenericMetadata:
|
||||
root = tree.getroot()
|
||||
|
||||
if root.tag != "comet":
|
||||
raise Exception("Not a CoMet file")
|
||||
|
||||
metadata = GenericMetadata()
|
||||
md = metadata
|
||||
|
||||
# Helper function
|
||||
def get(tag: str) -> Any:
|
||||
node = root.find(tag)
|
||||
if node is not None:
|
||||
return node.text
|
||||
return None
|
||||
|
||||
md.series = utils.xlate(get("series"))
|
||||
md.title = utils.xlate(get("title"))
|
||||
md.issue = utils.xlate(get("issue"))
|
||||
md.volume = utils.xlate_int(get("volume"))
|
||||
md.description = utils.xlate(get("description"))
|
||||
md.publisher = utils.xlate(get("publisher"))
|
||||
md.language = utils.xlate(get("language"))
|
||||
md.format = utils.xlate(get("format"))
|
||||
md.page_count = utils.xlate_int(get("pages"))
|
||||
md.maturity_rating = utils.xlate(get("rating"))
|
||||
md.price = utils.xlate_float(get("price"))
|
||||
md.is_version_of = utils.xlate(get("isVersionOf"))
|
||||
md.rights = utils.xlate(get("rights"))
|
||||
md.identifier = utils.xlate(get("identifier"))
|
||||
md.last_mark = utils.xlate(get("lastMark"))
|
||||
|
||||
_, md.month, md.year = utils.parse_date_str(utils.xlate(get("date")))
|
||||
|
||||
ca = ComicArchive(archive)
|
||||
cover_filename = utils.xlate(get("coverImage"))
|
||||
page_list = ca.get_page_name_list()
|
||||
if cover_filename in page_list:
|
||||
cover_index = page_list.index(cover_filename)
|
||||
md.pages = [
|
||||
PageMetadata(
|
||||
archive_index=cover_index,
|
||||
display_index=0,
|
||||
filename=cover_filename,
|
||||
type=PageType.FrontCover,
|
||||
bookmark="",
|
||||
)
|
||||
]
|
||||
|
||||
reading_direction = utils.xlate(get("readingDirection"))
|
||||
if reading_direction is not None and reading_direction == "rtl":
|
||||
md.manga = "YesAndRightToLeft"
|
||||
|
||||
# loop for genre tags
|
||||
for n in root:
|
||||
if n.tag == "genre":
|
||||
md.genres.add((n.text or "").strip())
|
||||
|
||||
# loop for character tags
|
||||
for n in root:
|
||||
if n.tag == "character":
|
||||
md.characters.add((n.text or "").strip())
|
||||
|
||||
# Now extract the credit info
|
||||
for n in root:
|
||||
if any(
|
||||
[
|
||||
n.tag == "writer",
|
||||
n.tag == "penciller",
|
||||
n.tag == "inker",
|
||||
n.tag == "colorist",
|
||||
n.tag == "letterer",
|
||||
n.tag == "editor",
|
||||
]
|
||||
):
|
||||
metadata.add_credit((n.text or "").strip(), n.tag.title())
|
||||
|
||||
if n.tag == "coverDesigner":
|
||||
metadata.add_credit((n.text or "").strip(), "Cover")
|
||||
|
||||
metadata.is_empty = False
|
||||
|
||||
return metadata
|
||||
|
||||
# verify that the string actually contains CoMet data in XML format
|
||||
def _validate_bytes(self, string: bytes) -> bool:
|
||||
try:
|
||||
tree = ET.ElementTree(ET.fromstring(string))
|
||||
root = tree.getroot()
|
||||
if root.tag != "comet":
|
||||
return False
|
||||
except ET.ParseError:
|
||||
return False
|
||||
|
||||
return True
|
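A minimal sketch of the document shape _validate_bytes accepts, a root <comet> element, built here with ElementTree rather than hand-written XML (values are hypothetical):

    import xml.etree.ElementTree as ET

    root = ET.Element("comet")
    ET.SubElement(root, "title").text = "Anda's Game"
    ET.SubElement(root, "writer").text = "Dara Naraghi"
    xml_bytes = ET.tostring(root, encoding="utf-8", xml_declaration=True)

    # CoMet("1.0")._validate_bytes(xml_bytes) should return True: the payload
    # parses and its root tag is "comet".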
comicapi/tags/comicbookinfo.py (new file, 229 lines)
"""A class to encapsulate the ComicBookInfo data"""
|
||||
|
||||
# Copyright 2012-2014 ComicTagger Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from typing import Any, Literal, TypedDict
|
||||
|
||||
from comicapi import utils
|
||||
from comicapi.archivers import Archiver
|
||||
from comicapi.genericmetadata import Credit, GenericMetadata
|
||||
from comicapi.tags import Tag
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
_CBILiteralType = Literal[
|
||||
"series",
|
||||
"title",
|
||||
"issue",
|
||||
"publisher",
|
||||
"publicationMonth",
|
||||
"publicationYear",
|
||||
"numberOfIssues",
|
||||
"comments",
|
||||
"genre",
|
||||
"volume",
|
||||
"numberOfVolumes",
|
||||
"language",
|
||||
"country",
|
||||
"rating",
|
||||
"credits",
|
||||
"tags",
|
||||
]
|
||||
|
||||
|
||||
class credit(TypedDict):
|
||||
person: str
|
||||
role: str
|
||||
primary: bool
|
||||
|
||||
|
||||
class _ComicBookInfoJson(TypedDict, total=False):
|
||||
series: str
|
||||
title: str
|
||||
publisher: str
|
||||
publicationMonth: int
|
||||
publicationYear: int
|
||||
issue: int
|
||||
numberOfIssues: int
|
||||
volume: int
|
||||
numberOfVolumes: int
|
||||
rating: int
|
||||
genre: str
|
||||
language: str
|
||||
country: str
|
||||
credits: list[credit]
|
||||
tags: list[str]
|
||||
comments: str
|
||||
|
||||
|
||||
_CBIContainer = TypedDict("_CBIContainer", {"appID": str, "lastModified": str, "ComicBookInfo/1.0": _ComicBookInfoJson})
|
||||
|
||||
|
||||
class ComicBookInfo(Tag):
|
||||
enabled = True
|
||||
|
||||
id = "cbi"
|
||||
|
||||
def __init__(self, version: str) -> None:
|
||||
super().__init__(version)
|
||||
|
||||
self.supported_attributes = {
|
||||
"series",
|
||||
"issue",
|
||||
"issue_count",
|
||||
"title",
|
||||
"volume",
|
||||
"volume_count",
|
||||
"genres",
|
||||
"description",
|
||||
"publisher",
|
||||
"month",
|
||||
"year",
|
||||
"language",
|
||||
"country",
|
||||
"critical_rating",
|
||||
"tags",
|
||||
"credits",
|
||||
"credits.person",
|
||||
"credits.primary",
|
||||
"credits.role",
|
||||
}
|
||||
|
||||
def supports_credit_role(self, role: str) -> bool:
|
||||
return True
|
||||
|
||||
def supports_tags(self, archive: Archiver) -> bool:
|
||||
return archive.supports_comment()
|
||||
|
||||
def has_tags(self, archive: Archiver) -> bool:
|
||||
return self.supports_tags(archive) and self._validate_string(archive.get_comment())
|
||||
|
||||
def remove_tags(self, archive: Archiver) -> bool:
|
||||
return archive.set_comment("")
|
||||
|
||||
def read_tags(self, archive: Archiver) -> GenericMetadata:
|
||||
if self.has_tags(archive):
|
||||
comment = archive.get_comment()
|
||||
if self._validate_string(comment):
|
||||
return self._metadata_from_string(comment)
|
||||
return GenericMetadata()
|
||||
|
||||
def read_raw_tags(self, archive: Archiver) -> str:
|
||||
if self.has_tags(archive):
|
||||
return json.dumps(json.loads(archive.get_comment()), indent=2)
|
||||
return ""
|
||||
|
||||
def write_tags(self, metadata: GenericMetadata, archive: Archiver) -> bool:
|
||||
if self.supports_tags(archive):
|
||||
return archive.set_comment(self._string_from_metadata(metadata))
|
||||
else:
|
||||
logger.warning(f"Archive ({archive.name()}) does not support {self.name()} metadata")
|
||||
return False
|
||||
|
||||
def name(self) -> str:
|
||||
return "ComicBookInfo"
|
||||
|
||||
def _metadata_from_string(self, string: str) -> GenericMetadata:
|
||||
cbi_container: _CBIContainer = json.loads(string)
|
||||
|
||||
metadata = GenericMetadata()
|
||||
|
||||
cbi = cbi_container["ComicBookInfo/1.0"]
|
||||
|
||||
metadata.series = utils.xlate(cbi.get("series"))
|
||||
metadata.title = utils.xlate(cbi.get("title"))
|
||||
metadata.issue = utils.xlate(cbi.get("issue"))
|
||||
metadata.publisher = utils.xlate(cbi.get("publisher"))
|
||||
metadata.month = utils.xlate_int(cbi.get("publicationMonth"))
|
||||
metadata.year = utils.xlate_int(cbi.get("publicationYear"))
|
||||
metadata.issue_count = utils.xlate_int(cbi.get("numberOfIssues"))
|
||||
metadata.description = utils.xlate(cbi.get("comments"))
|
||||
metadata.genres = set(utils.split(cbi.get("genre"), ","))
|
||||
metadata.volume = utils.xlate_int(cbi.get("volume"))
|
||||
metadata.volume_count = utils.xlate_int(cbi.get("numberOfVolumes"))
|
||||
metadata.language = utils.xlate(cbi.get("language"))
|
||||
metadata.country = utils.xlate(cbi.get("country"))
|
||||
metadata.critical_rating = utils.xlate_int(cbi.get("rating"))
|
||||
|
||||
metadata.credits = [
|
||||
Credit(
|
||||
person=x["person"] if "person" in x else "",
|
||||
role=x["role"] if "role" in x else "",
|
||||
primary=x["primary"] if "primary" in x else False,
|
||||
)
|
||||
for x in cbi.get("credits", [])
|
||||
]
|
||||
metadata.tags.update(cbi.get("tags", set()))
|
||||
|
||||
# need the language string to be ISO
|
||||
if metadata.language:
|
||||
metadata.language = utils.get_language_iso(metadata.language)
|
||||
|
||||
metadata.is_empty = False
|
||||
|
||||
return metadata
|
||||
|
||||
def _string_from_metadata(self, metadata: GenericMetadata) -> str:
|
||||
cbi_container = self._create_json_dictionary(metadata)
|
||||
return json.dumps(cbi_container)
|
||||
|
||||
def _validate_string(self, string: bytes | str) -> bool:
|
||||
"""Verify that the string actually contains CBI data in JSON format"""
|
||||
|
||||
try:
|
||||
cbi_container = json.loads(string)
|
||||
except json.JSONDecodeError:
|
||||
return False
|
||||
|
||||
return "ComicBookInfo/1.0" in cbi_container
|
||||
|
||||
def _create_json_dictionary(self, metadata: GenericMetadata) -> _CBIContainer:
|
||||
"""Create the dictionary that we will convert to JSON text"""
|
||||
|
||||
cbi_container = _CBIContainer(
|
||||
{
|
||||
"appID": "ComicTagger/1.0.0",
|
||||
"lastModified": str(datetime.now()),
|
||||
"ComicBookInfo/1.0": {},
|
||||
}
|
||||
) # TODO: ctversion.version,
|
||||
|
||||
# helper func
|
||||
def assign(cbi_entry: _CBILiteralType, md_entry: Any) -> None:
|
||||
if md_entry is not None or isinstance(md_entry, str) and md_entry != "":
|
||||
cbi_container["ComicBookInfo/1.0"][cbi_entry] = md_entry
|
||||
|
||||
assign("series", utils.xlate(metadata.series))
|
||||
assign("title", utils.xlate(metadata.title))
|
||||
assign("issue", utils.xlate(metadata.issue))
|
||||
assign("publisher", utils.xlate(metadata.publisher))
|
||||
assign("publicationMonth", utils.xlate_int(metadata.month))
|
||||
assign("publicationYear", utils.xlate_int(metadata.year))
|
||||
assign("numberOfIssues", utils.xlate_int(metadata.issue_count))
|
||||
assign("comments", utils.xlate(metadata.description))
|
||||
assign("genre", utils.xlate(",".join(metadata.genres)))
|
||||
assign("volume", utils.xlate_int(metadata.volume))
|
||||
assign("numberOfVolumes", utils.xlate_int(metadata.volume_count))
|
||||
assign("language", utils.xlate(utils.get_language_from_iso(metadata.language)))
|
||||
assign("country", utils.xlate(metadata.country))
|
||||
assign("rating", utils.xlate_int(metadata.critical_rating))
|
||||
assign("credits", [credit(person=c.person, role=c.role, primary=c.primary) for c in metadata.credits])
|
||||
assign("tags", list(metadata.tags))
|
||||
|
||||
return cbi_container
|
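ComicBookInfo stores its JSON in the archive comment rather than in a file; a sketch of a blob _validate_string accepts (values hypothetical):

    import json

    comment = json.dumps(
        {
            "appID": "ComicTagger/1.0.0",
            "lastModified": "2024-01-01 00:00:00",
            "ComicBookInfo/1.0": {"series": "Anda's Game", "issue": 1},
        }
    )
    # ComicBookInfo("1.0")._validate_string(comment) should return True:
    # the comment parses as JSON and contains the "ComicBookInfo/1.0" key.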
@@ -87,47 +87,33 @@ class ComicRack(Tag):
         return archive.supports_files()

     def has_tags(self, archive: Archiver) -> bool:
-        try:  # read_file can cause an exception
-            return (
-                self.supports_tags(archive)
-                and self.file in archive.get_filename_list()
-                and self._validate_bytes(archive.read_file(self.file))
-            )
-        except Exception:
-            return False
+        return (
+            self.supports_tags(archive)
+            and self.file in archive.get_filename_list()
+            and self._validate_bytes(archive.read_file(self.file))
+        )

     def remove_tags(self, archive: Archiver) -> bool:
         return self.has_tags(archive) and archive.remove_file(self.file)

     def read_tags(self, archive: Archiver) -> GenericMetadata:
         if self.has_tags(archive):
-            try:  # read_file can cause an exception
-                metadata = archive.read_file(self.file) or b""
-                if self._validate_bytes(metadata):
-                    return self._metadata_from_bytes(metadata)
-            except Exception:
-                ...
+            metadata = archive.read_file(self.file) or b""
+            if self._validate_bytes(metadata):
+                return self._metadata_from_bytes(metadata)
         return GenericMetadata()

     def read_raw_tags(self, archive: Archiver) -> str:
-        try:  # read_file can cause an exception
-            if self.has_tags(archive):
-                b = archive.read_file(self.file)
-                # ET.fromstring is used as xml can declare the encoding
-                return ET.tostring(ET.fromstring(b), encoding="unicode", xml_declaration=True)
-        except Exception:
-            ...
+        if self.has_tags(archive):
+            return ET.tostring(ET.fromstring(archive.read_file(self.file)), encoding="unicode", xml_declaration=True)
         return ""

     def write_tags(self, metadata: GenericMetadata, archive: Archiver) -> bool:
         if self.supports_tags(archive):
             xml = b""
-            try:  # read_file can cause an exception
-                if self.has_tags(archive):
-                    xml = archive.read_file(self.file)
-                return archive.write_file(self.file, self._bytes_from_metadata(metadata, xml))
-            except Exception:
-                ...
+            if self.has_tags(archive):
+                xml = archive.read_file(self.file)
+            return archive.write_file(self.file, self._bytes_from_metadata(metadata, xml))
         else:
             logger.warning(f"Archive ({archive.name()}) does not support {self.name()} metadata")
             return False
@@ -266,7 +252,7 @@ class ComicRack(Tag):
         else:
             pages_node = ET.SubElement(root, "Pages")

-        for page in sorted(md.pages, key=lambda x: x.archive_index):
+        for page in md.pages:
             page_node = ET.SubElement(pages_node, "Page")
             page_node.attrib = {"Image": str(page.display_index)}
             if page.bookmark:
@@ -61,7 +61,6 @@ class Tag:
             "credits.person",
             "credits.role",
             "credits.primary",
-            "credits.language",
             "price",
             "is_version_of",
             "rights",
@@ -22,10 +22,11 @@ import pathlib
 import platform
 import sys
 import unicodedata
+from collections import defaultdict
 from collections.abc import Iterable, Mapping
 from enum import Enum, auto
 from shutil import which  # noqa: F401
-from typing import Any, Callable, TypeVar, cast
+from typing import Any, TypeVar, cast

 from comicfn2dict import comicfn2dict
@@ -87,7 +88,7 @@ if sys.version_info < (3, 11):
             cls._lower_members = {x.casefold(): x for x in cls}  # type: ignore[attr-defined]
             return cls._lower_members.get(value.casefold(), None)  # type: ignore[attr-defined]

-        def __str__(self) -> str:
+        def __str__(self):
             return self.value

 else:
@@ -106,17 +107,6 @@ else:
 logger = logging.getLogger(__name__)


-class DefaultDict(dict):
-    def __init__(self, *args, default: Callable[[Any], Any] | None = None) -> None:
-        super().__init__(*args)
-        self.default = default
-
-    def __missing__(self, key: Any) -> Any:
-        if self.default is None:
-            return key
-        return self.default(key)
-
-
 class Parser(StrEnum):
     ORIGINAL = auto()
     COMPLICATED = auto()
@@ -370,9 +360,7 @@ def xlate_float(data: Any) -> float | None:
     if isinstance(data, (int, float)):
         i = data
     else:
-        i = str(data).translate(
-            DefaultDict(zip((ord(c) for c in "1234567890."), "1234567890."), default=lambda x: None)
-        )
+        i = str(data).translate(defaultdict(lambda: None, zip((ord(c) for c in "1234567890."), "1234567890.")))
     if i == "":
         return None
     try:
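What the translate table above does, in isolation: str.translate deletes any character whose table entry is None, and defaultdict supplies that None for every ordinal not in the digit/dot mapping, so only numeric characters survive. A tiny sketch:

    from collections import defaultdict

    table = defaultdict(lambda: None, zip((ord(c) for c in "1234567890."), "1234567890."))
    print("$1,234.56".translate(table))  # -> 1234.56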
@@ -505,9 +493,9 @@ def parse_version(s: str) -> tuple[int, int, int]:
     return (parts[0], parts[1], parts[2])


-_languages: dict[str | None, str | None] = DefaultDict(default=lambda x: None)
+_languages: dict[str | None, str | None] = defaultdict(lambda: None)

-_countries: dict[str | None, str | None] = DefaultDict(default=lambda x: None)
+_countries: dict[str | None, str | None] = defaultdict(lambda: None)


 def countries() -> dict[str | None, str | None]:
@@ -516,7 +504,7 @@

     for alpha_2, c in isocodes.countries.by_alpha_2:
         _countries[alpha_2] = c["name"]
-    return _countries.copy()
+    return _countries


 def languages() -> dict[str | None, str | None]:
@@ -525,13 +513,11 @@

     for alpha_2, lng in isocodes.extendend_languages._sorted_by_index(index="alpha_2"):
         _languages[alpha_2] = lng["name"]
-    return _languages.copy()
+    return _languages


 def get_language_from_iso(iso: str | None) -> str | None:
-    if not _languages:
-        return languages()[iso]
-    return _languages[iso]
+    return languages()[iso]


 def get_language_iso(string: str | None) -> str | None:
@@ -543,12 +529,10 @@
     lang = string.casefold()

     found = None

     for lng in isocodes.extendend_languages.items:
         for x in ("alpha_2", "alpha_3", "bibliographic", "common_name", "name"):
             if x in lng and lng[x].casefold() == lang:
                 found = lng
-            # break
-
         if found:
             break
@@ -558,9 +542,7 @@


 def get_country_from_iso(iso: str | None) -> str | None:
-    if not _countries:
-        return countries()[iso]
-    return _countries[iso]
+    return countries()[iso]


 def get_publisher(publisher: str) -> tuple[str, str]:
@@ -36,7 +36,6 @@ from comictaggerlib.filerenamer import FileRenamer, get_rename_dir
 from comictaggerlib.graphics import graphics_path
 from comictaggerlib.issueidentifier import IssueIdentifier
 from comictaggerlib.md import prepare_metadata
-from comictaggerlib.quick_tag import QuickTag
 from comictaggerlib.resulttypes import Action, IssueResult, MatchStatus, OnlineMatchResults, Result, Status
 from comictalker.comictalker import ComicTalker, TalkerError
@@ -94,13 +93,6 @@ class CLI:

     def run(self) -> int:
         if len(self.config.Runtime_Options__files) < 1:
-            if self.config.Commands__command == Action.print:
-                res = self.print(None)
-                if res.status != Status.success:
-                    return_code = 3
-                if self.config.Runtime_Options__json:
-                    print(json.dumps(dataclasses.asdict(res), cls=OutputEncoder, indent=2))
-                return 0
             logger.error("You must specify at least one filename. Use the -h option for more info")
             return 1
         return_code = 0
@@ -286,14 +278,7 @@ class CLI:

         return (md, tags_used)

-    def print(self, ca: ComicArchive | None) -> Result:
-        md = None
-        if ca is None:
-            if not self.config.Auto_Tag__metadata.is_empty:
-                self.output("--------- CLI tags ---------")
-                self.output(self.config.Auto_Tag__metadata)
-            return Result(Action.print, Status.success, None, md=md)  # type: ignore
+    def print(self, ca: ComicArchive) -> Result:
         if not self.config.Runtime_Options__tags_read:
             page_count = ca.get_number_of_pages()
@@ -318,6 +303,7 @@ class CLI:

         self.output()

+        md = None
         for tag_id, tag in tags.items():
             if not self.config.Runtime_Options__tags_read or tag_id in self.config.Runtime_Options__tags_read:
                 if ca.has_tags(tag_id):
@@ -411,153 +397,6 @@ class CLI:
             res.status = status
         return res

-    def try_quick_tag(self, ca: ComicArchive, md: GenericMetadata) -> GenericMetadata | None:
-        if not self.config.Runtime_Options__enable_quick_tag:
-            self.output("skipping quick tag")
-            return None
-        self.output("starting quick tag")
-        try:
-            qt = QuickTag(
-                self.config.Quick_Tag__url,
-                str(utils.parse_url(self.current_talker().website).host),
-                self.current_talker(),
-                self.config,
-                self.output,
-            )
-            ct_md = qt.id_comic(
-                ca,
-                md,
-                self.config.Quick_Tag__simple,
-                set(self.config.Quick_Tag__hash),
-                self.config.Quick_Tag__exact_only,
-                self.config.Runtime_Options__interactive,
-                self.config.Quick_Tag__aggressive_filtering,
-                self.config.Quick_Tag__max,
-            )
-            if ct_md is None:
-                ct_md = GenericMetadata()
-            return ct_md
-        except Exception:
-            logger.exception("Quick Tagging failed")
-            return None
-
-    def normal_tag(
-        self, ca: ComicArchive, tags_read: list[str], md: GenericMetadata, match_results: OnlineMatchResults
-    ) -> tuple[GenericMetadata, list[IssueResult], Result | None, OnlineMatchResults]:
-        # ct_md, results, matches, match_results
-        if md is None or md.is_empty:
-            logger.error("No metadata given to search online with!")
-            res = Result(
-                Action.save,
-                status=Status.match_failure,
-                original_path=ca.path,
-                match_status=MatchStatus.no_match,
-                tags_written=self.config.Runtime_Options__tags_write,
-                tags_read=tags_read,
-            )
-            match_results.no_matches.append(res)
-            return GenericMetadata(), [], res, match_results
-
-        ii = IssueIdentifier(ca, self.config, self.current_talker())
-
-        ii.set_output_function(functools.partial(self.output, already_logged=True))
-        if not self.config.Auto_Tag__use_year_when_identifying:
-            md.year = None
-        if self.config.Auto_Tag__ignore_leading_numbers_in_filename and md.series is not None:
-            md.series = re.sub(r"^([\d.]+)(.*)", r"\2", md.series)
-        result, matches = ii.identify(ca, md)
-
-        found_match = False
-        choices = False
-        low_confidence = False
-
-        if result == IssueIdentifier.result_no_matches:
-            pass
-        elif result == IssueIdentifier.result_found_match_but_bad_cover_score:
-            low_confidence = True
-            found_match = True
-        elif result == IssueIdentifier.result_found_match_but_not_first_page:
-            found_match = True
-        elif result == IssueIdentifier.result_multiple_matches_with_bad_image_scores:
-            low_confidence = True
-            choices = True
-        elif result == IssueIdentifier.result_one_good_match:
-            found_match = True
-        elif result == IssueIdentifier.result_multiple_good_matches:
-            choices = True
-
-        if choices:
-            if low_confidence:
-                logger.error("Online search: Multiple low confidence matches. Save aborted")
-                res = Result(
-                    Action.save,
-                    status=Status.match_failure,
-                    original_path=ca.path,
-                    online_results=matches,
-                    match_status=MatchStatus.low_confidence_match,
-                    tags_written=self.config.Runtime_Options__tags_write,
-                    tags_read=tags_read,
-                )
-                match_results.low_confidence_matches.append(res)
-                return GenericMetadata(), matches, res, match_results
-
-            logger.error("Online search: Multiple good matches. Save aborted")
-            res = Result(
-                Action.save,
-                status=Status.match_failure,
-                original_path=ca.path,
-                online_results=matches,
-                match_status=MatchStatus.multiple_match,
-                tags_written=self.config.Runtime_Options__tags_write,
-                tags_read=tags_read,
-            )
-            match_results.multiple_matches.append(res)
-            return GenericMetadata(), matches, res, match_results
-        if low_confidence and self.config.Runtime_Options__abort_on_low_confidence:
-            logger.error("Online search: Low confidence match. Save aborted")
-            res = Result(
-                Action.save,
-                status=Status.match_failure,
-                original_path=ca.path,
-                online_results=matches,
-                match_status=MatchStatus.low_confidence_match,
-                tags_written=self.config.Runtime_Options__tags_write,
-                tags_read=tags_read,
-            )
-            match_results.low_confidence_matches.append(res)
-            return GenericMetadata(), matches, res, match_results
-        if not found_match:
-            logger.error("Online search: No match found. Save aborted")
-            res = Result(
-                Action.save,
-                status=Status.match_failure,
-                original_path=ca.path,
-                online_results=matches,
-                match_status=MatchStatus.no_match,
-                tags_written=self.config.Runtime_Options__tags_write,
-                tags_read=tags_read,
-            )
-            match_results.no_matches.append(res)
-            return GenericMetadata(), matches, res, match_results
-
-        # we got here, so we have a single match
-
-        # now get the particular issue data
-        ct_md = self.fetch_metadata(matches[0].issue_id)
-        if ct_md.is_empty:
-            res = Result(
-                Action.save,
-                status=Status.fetch_data_failure,
-                original_path=ca.path,
-                online_results=matches,
-                match_status=MatchStatus.good_match,
-                tags_written=self.config.Runtime_Options__tags_write,
-                tags_read=tags_read,
-            )
-            match_results.fetch_data_failures.append(res)
-            return GenericMetadata(), matches, res, match_results
-        return ct_md, matches, None, match_results
-
     def save(self, ca: ComicArchive, match_results: OnlineMatchResults) -> tuple[Result, OnlineMatchResults]:
         if self.config.Runtime_Options__skip_existing_tags:
             for tag_id in self.config.Runtime_Options__tags_write:
@ -616,34 +455,117 @@ class CLI:
|
||||
return res, match_results
|
||||
|
||||
else:
|
||||
qt_md = self.try_quick_tag(ca, md)
|
||||
if qt_md is None or qt_md.is_empty:
|
||||
if qt_md is not None:
|
||||
self.output("Failed to find match via quick tag")
|
||||
ct_md, matches, res, match_results = self.normal_tag(ca, tags_read, md, match_results) # type: ignore[assignment]
|
||||
if res is not None:
|
||||
return res, match_results
|
||||
else:
|
||||
self.output("Successfully matched via quick tag")
|
||||
ct_md = qt_md
|
||||
matches = [
|
||||
IssueResult(
|
||||
series=ct_md.series or "",
|
||||
distance=-1,
|
||||
issue_number=ct_md.issue or "",
|
||||
issue_count=ct_md.issue_count,
|
||||
url_image_hash=-1,
|
||||
issue_title=ct_md.title or "",
|
||||
issue_id=ct_md.issue_id or "",
|
||||
series_id=ct_md.issue_id or "",
|
||||
month=ct_md.month,
|
||||
year=ct_md.year,
|
||||
publisher=None,
|
||||
image_url=ct_md._cover_image or "",
|
||||
alt_image_urls=[],
|
||||
description=ct_md.description or "",
|
||||
if md is None or md.is_empty:
|
||||
logger.error("No metadata given to search online with!")
|
||||
res = Result(
|
||||
Action.save,
|
||||
status=Status.match_failure,
|
||||
original_path=ca.path,
|
||||
match_status=MatchStatus.no_match,
|
||||
tags_written=self.config.Runtime_Options__tags_write,
|
||||
tags_read=tags_read,
|
||||
)
|
||||
match_results.no_matches.append(res)
|
||||
return res, match_results
|
||||
|
||||
ii = IssueIdentifier(ca, self.config, self.current_talker())
|
||||
|
||||
ii.set_output_function(functools.partial(self.output, already_logged=True))
|
||||
if not self.config.Auto_Tag__use_year_when_identifying:
|
||||
md.year = None
|
||||
if self.config.Auto_Tag__ignore_leading_numbers_in_filename and md.series is not None:
|
||||
md.series = re.sub(r"^([\d.]+)(.*)", r"\2", md.series)
|
||||
result, matches = ii.identify(ca, md)
|
||||
|
||||
found_match = False
|
||||
choices = False
|
||||
low_confidence = False
|
||||
|
||||
if result == IssueIdentifier.result_no_matches:
|
||||
pass
|
||||
elif result == IssueIdentifier.result_found_match_but_bad_cover_score:
|
||||
low_confidence = True
|
||||
found_match = True
|
||||
elif result == IssueIdentifier.result_found_match_but_not_first_page:
|
||||
found_match = True
|
||||
elif result == IssueIdentifier.result_multiple_matches_with_bad_image_scores:
|
||||
low_confidence = True
|
||||
choices = True
|
||||
elif result == IssueIdentifier.result_one_good_match:
|
||||
found_match = True
|
||||
elif result == IssueIdentifier.result_multiple_good_matches:
|
||||
choices = True
|
||||
|
||||
if choices:
|
||||
if low_confidence:
|
||||
logger.error("Online search: Multiple low confidence matches. Save aborted")
|
||||
res = Result(
|
||||
Action.save,
|
||||
status=Status.match_failure,
|
||||
original_path=ca.path,
|
||||
online_results=matches,
|
||||
match_status=MatchStatus.low_confidence_match,
|
||||
tags_written=self.config.Runtime_Options__tags_write,
|
||||
tags_read=tags_read,
|
||||
)
|
||||
]
|
||||
match_results.low_confidence_matches.append(res)
|
||||
return res, match_results
|
||||
|
||||
logger.error("Online search: Multiple good matches. Save aborted")
|
||||
res = Result(
|
||||
Action.save,
|
||||
status=Status.match_failure,
|
||||
original_path=ca.path,
|
||||
online_results=matches,
|
||||
match_status=MatchStatus.multiple_match,
|
||||
tags_written=self.config.Runtime_Options__tags_write,
|
||||
tags_read=tags_read,
|
||||
)
|
||||
match_results.multiple_matches.append(res)
|
||||
return res, match_results
|
||||
if low_confidence and self.config.Runtime_Options__abort_on_low_confidence:
|
||||
logger.error("Online search: Low confidence match. Save aborted")
|
||||
res = Result(
|
||||
Action.save,
|
||||
status=Status.match_failure,
|
||||
original_path=ca.path,
|
||||
online_results=matches,
|
||||
match_status=MatchStatus.low_confidence_match,
|
||||
tags_written=self.config.Runtime_Options__tags_write,
|
||||
tags_read=tags_read,
|
||||
)
|
||||
match_results.low_confidence_matches.append(res)
|
||||
return res, match_results
|
||||
if not found_match:
|
||||
logger.error("Online search: No match found. Save aborted")
|
||||
res = Result(
|
||||
Action.save,
|
||||
status=Status.match_failure,
|
||||
original_path=ca.path,
|
||||
online_results=matches,
|
||||
match_status=MatchStatus.no_match,
|
||||
tags_written=self.config.Runtime_Options__tags_write,
|
||||
tags_read=tags_read,
|
||||
)
|
||||
match_results.no_matches.append(res)
|
||||
return res, match_results
|
||||
|
||||
# we got here, so we have a single match
|
||||
|
||||
# now get the particular issue data
|
||||
ct_md = self.fetch_metadata(matches[0].issue_id)
|
||||
if ct_md.is_empty:
|
||||
res = Result(
|
||||
Action.save,
|
||||
status=Status.fetch_data_failure,
|
||||
original_path=ca.path,
|
||||
online_results=matches,
|
||||
match_status=MatchStatus.good_match,
|
||||
tags_written=self.config.Runtime_Options__tags_write,
|
||||
tags_read=tags_read,
|
||||
)
|
||||
match_results.fetch_data_failures.append(res)
|
||||
return res, match_results
|
||||
|
||||
res = Result(
|
||||
Action.save,
|
||||
|
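
Note before the next file: the save() hunks above route tagging through the quick-tag service first and fall back to the regular cover-scoring search only when it comes up empty. A condensed sketch of that fallback, using only names that appear in the hunks (the Result bookkeeping is elided):

    qt_md = self.try_quick_tag(ca, md)  # None when --enable-quick-tag is off
    if qt_md is None or qt_md.is_empty:
        if qt_md is not None:
            self.output("Failed to find match via quick tag")
        ct_md, matches, res, match_results = self.normal_tag(ca, tags_read, md, match_results)
    else:
        self.output("Successfully matched via quick tag")
        ct_md = qt_md  # then synthesized into a single IssueResult, as above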
@@ -17,13 +17,10 @@
from __future__ import annotations

import logging
import operator
from typing import Any

import natsort
from PyQt5 import QtCore, QtWidgets, uic
from PyQt5 import QtWidgets, uic

from comicapi import utils
from comicapi.genericmetadata import Credit
from comictaggerlib.ui import ui_path

logger = logging.getLogger(__name__)
@@ -33,7 +30,7 @@ class CreditEditorWindow(QtWidgets.QDialog):
ModeEdit = 0
ModeNew = 1

def __init__(self, parent: QtWidgets.QWidget, mode: int, credit: Credit) -> None:
def __init__(self, parent: QtWidgets.QWidget, mode: int, role: str, name: str, primary: bool) -> None:
super().__init__(parent)

with (ui_path / "crediteditorwindow.ui").open(encoding="utf-8") as uifile:
@@ -48,51 +45,54 @@ class CreditEditorWindow(QtWidgets.QDialog):

# Add the entries to the role combobox
self.cbRole.addItem("")
self.cbRole.addItem("Writer")
self.cbRole.addItem("Artist")
self.cbRole.addItem("Penciller")
self.cbRole.addItem("Inker")
self.cbRole.addItem("Colorist")
self.cbRole.addItem("Letterer")
self.cbRole.addItem("Cover Artist")
self.cbRole.addItem("Editor")
self.cbRole.addItem("Inker")
self.cbRole.addItem("Letterer")
self.cbRole.addItem("Penciller")
self.cbRole.addItem("Other")
self.cbRole.addItem("Plotter")
self.cbRole.addItem("Scripter")
self.cbRole.addItem("Translator")
self.cbRole.addItem("Writer")
self.cbRole.addItem("Other")

self.cbLanguage.addItem("", "")
for f in natsort.humansorted(utils.languages().items(), operator.itemgetter(1)):
self.cbLanguage.addItem(f[1], f[0])
self.leName.setText(name)

self.leName.setText(credit.person)

if credit.role is not None and credit.role != "":
i = self.cbRole.findText(credit.role)
if role is not None and role != "":
i = self.cbRole.findText(role)
if i == -1:
self.cbRole.setEditText(credit.role)
self.cbRole.setEditText(role)
else:
self.cbRole.setCurrentIndex(i)

if credit.language != "":
i = (
self.cbLanguage.findData(credit.language, QtCore.Qt.ItemDataRole.UserRole)
if self.cbLanguage.findData(credit.language, QtCore.Qt.ItemDataRole.UserRole) > -1
else self.cbLanguage.findText(credit.language)
)
if i == -1:
self.cbLanguage.setEditText(credit.language)
else:
self.cbLanguage.setCurrentIndex(i)
self.cbPrimary.setChecked(primary)

self.cbPrimary.setChecked(credit.primary)
self.cbRole.currentIndexChanged.connect(self.role_changed)
self.cbRole.editTextChanged.connect(self.role_changed)

def get_credit(self) -> Credit:
lang = self.cbLanguage.currentData() or self.cbLanguage.currentText()
return Credit(self.leName.text(), self.cbRole.currentText(), self.cbPrimary.isChecked(), lang)
self.update_primary_button()

def update_primary_button(self) -> None:
enabled = self.current_role_can_be_primary()
self.cbPrimary.setEnabled(enabled)

def current_role_can_be_primary(self) -> bool:
role = self.cbRole.currentText()
if role.casefold() in ("artist", "writer"):
return True

return False

def role_changed(self, s: Any) -> None:
self.update_primary_button()

def get_credits(self) -> tuple[str, str, bool]:
primary = self.current_role_can_be_primary() and self.cbPrimary.isChecked()
return self.cbRole.currentText(), self.leName.text(), primary

def accept(self) -> None:
if self.leName.text() == "":
QtWidgets.QMessageBox.warning(self, "Whoops", "You need to enter a name for a credit.")
if self.cbRole.currentText() == "" or self.leName.text() == "":
QtWidgets.QMessageBox.warning(self, "Whoops", "You need to enter both role and name for a credit.")
else:
QtWidgets.QDialog.accept(self)
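
Note: the paired __init__ lines above are the same dialog on the two sides of the comparison; the (role, name, primary) triple on one side becomes a single Credit value on the other. A hedged sketch of driving each variant (the Credit field order follows the get_credit() line above; the person/role values are illustrative):

    # Triple-based side, read back with get_credits() -> (role, name, primary)
    dlg = CreditEditorWindow(parent, CreditEditorWindow.ModeEdit, "Writer", "Jane Doe", True)

    # Credit-based side, read back with get_credit() -> Credit
    dlg = CreditEditorWindow(parent, CreditEditorWindow.ModeEdit, Credit("Jane Doe", "Writer", True, "en"))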
@@ -104,9 +104,6 @@ def save_file(
filename: A pathlib.Path object to save the json dictionary to
"""
file_options = settngs.clean_config(config, file=True)
if "Quick Tag" in file_options and "url" in file_options["Quick Tag"]:
file_options["Quick Tag"]["url"] = str(file_options["Quick Tag"]["url"])

try:
if not filename.exists():
filename.parent.mkdir(exist_ok=True, parents=True)
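
Context for the str() conversion above: the settings dict is persisted as JSON, and urllib3's Url is a NamedTuple, so dumping it directly would flatten it into a JSON array rather than a URL string (my reading; the diff only shows the conversion itself). A small illustration:

    import json
    from urllib3.util import parse_url

    url = parse_url("https://comic-hasher.narnian.us")
    print(json.dumps({"url": url}))       # NamedTuple: encodes as an array, does not round-trip
    print(json.dumps({"url": str(url)}))  # {"url": "https://comic-hasher.narnian.us"}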
@@ -27,7 +27,7 @@ import settngs

from comicapi import utils
from comicapi.comicarchive import tags
from comictaggerlib import ctversion, quick_tag
from comictaggerlib import ctversion
from comictaggerlib.ctsettings.settngs_namespace import SettngsNS as ct_ns
from comictaggerlib.ctsettings.types import ComicTaggerPaths, tag
from comictaggerlib.resulttypes import Action
@@ -51,12 +51,6 @@ def initial_commandline_parser() -> argparse.ArgumentParser:
default=0,
help="Be noisy when doing what it does. Use a second time to enable debug logs.\nShort option cannot be combined with other options.",
)
parser.add_argument(
"--enable-quick-tag",
action=argparse.BooleanOptionalAction,
default=False,
help='Enable the experimental "quick tagger"',
)
return parser


@@ -76,13 +70,6 @@ def register_runtime(parser: settngs.Manager) -> None:
help="Be noisy when doing what it does. Use a second time to enable debug logs.\nShort option cannot be combined with other options.",
file=False,
)
parser.add_setting(
"--enable-quick-tag",
action=argparse.BooleanOptionalAction,
default=False,
help='Enable the experimental "quick tagger"',
file=False,
)
parser.add_setting("-q", "--quiet", action="store_true", help="Don't say much (for print mode).", file=False)
parser.add_setting(
"-j",
@@ -253,11 +240,9 @@ def register_commands(parser: settngs.Manager) -> None:
)


def register_commandline_settings(parser: settngs.Manager, enable_quick_tag: bool) -> None:
def register_commandline_settings(parser: settngs.Manager) -> None:
parser.add_group("Commands", register_commands, True)
parser.add_persistent_group("Runtime Options", register_runtime)
if enable_quick_tag:
parser.add_group("Quick Tag", quick_tag.settings)


def validate_commandline_settings(config: settngs.Config[ct_ns], parser: settngs.Manager) -> settngs.Config[ct_ns]:
@@ -287,11 +272,12 @@ def validate_commandline_settings(config: settngs.Config[ct_ns], parser: settngs
if config[0].Runtime_Options__tags_read and not config[0].Runtime_Options__tags_write:
config[0].Runtime_Options__tags_write = config[0].Runtime_Options__tags_read

if config[0].Runtime_Options__no_gui and not config[0].Runtime_Options__files:
if config[0].Commands__command == Action.print and not config[0].Auto_Tag__metadata.is_empty:
...  # allow printing the metadata provided on the commandline
elif config[0].Commands__command not in (Action.save_config, Action.list_plugins):
parser.exit(message="Command requires at least one filename!\n", status=1)
if (
config[0].Commands__command not in (Action.save_config, Action.list_plugins)
and config[0].Runtime_Options__no_gui
and not config[0].Runtime_Options__files
):
parser.exit(message="Command requires at least one filename!\n", status=1)

if config[0].Commands__command == Action.delete and not config[0].Runtime_Options__tags_write:
parser.exit(message="Please specify the tags to delete with --tags-write\n", status=1)
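
Note: register_commandline_settings gains (or loses, depending on the side of the comparison) an enable_quick_tag parameter. The idea is two-phase parsing: a tiny bootstrap parser sees --enable-quick-tag first, and only then is the Quick Tag group registered on the real parser. A minimal sketch of that pattern in plain argparse (illustrative names, not ComicTagger's API):

    import argparse

    # Phase 1: pull out just the feature flag, ignore everything else.
    bootstrap = argparse.ArgumentParser(add_help=False)
    bootstrap.add_argument("--enable-quick-tag", action=argparse.BooleanOptionalAction, default=False)
    flags, remaining = bootstrap.parse_known_args()

    # Phase 2: build the real parser, registering optional groups behind the flag.
    parser = argparse.ArgumentParser()
    if flags.enable_quick_tag:
        quick = parser.add_argument_group("Quick Tag")
        quick.add_argument("--url", default="https://comic-hasher.narnian.us")
    opts = parser.parse_args(remaining)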
@@ -27,8 +27,8 @@ def general(parser: settngs.Manager) -> None:
def internal(parser: settngs.Manager) -> None:
# automatic settings
parser.add_setting("install_id", default=uuid.uuid4().hex, cmdline=False)
parser.add_setting("write_tags", default=["cr"], cmdline=False)
parser.add_setting("read_tags", default=["cr"], cmdline=False)
parser.add_setting("write_tags", default=["cbi"], cmdline=False)
parser.add_setting("read_tags", default=["cbi"], cmdline=False)
parser.add_setting("last_opened_folder", default="", cmdline=False)
parser.add_setting("window_width", default=0, cmdline=False)
parser.add_setting("window_height", default=0, cmdline=False)
@@ -79,7 +79,7 @@ def identifier(parser: settngs.Manager) -> None:
def dialog(parser: settngs.Manager) -> None:
parser.add_setting("show_disclaimer", default=True, cmdline=False)
parser.add_setting("dont_notify_about_this_version", default="", cmdline=False)
parser.add_setting("notify_plugin_changes", default=True, cmdline=False)
parser.add_setting("ask_about_usage_stats", default=True, cmdline=False)


def filename(parser: settngs.Manager) -> None:
@@ -356,7 +356,7 @@ def migrate_settings(config: settngs.Config[ct_ns]) -> settngs.Config[ct_ns]:
elif isinstance(write_Tags, str):
config[0].internal__write_tags = [write_Tags]
else:
config[0].internal__write_tags = ["cr"]
config[0].internal__write_tags = ["cbi"]

read_tags = config[0].internal__read_tags
if not isinstance(read_tags, list):
@@ -365,7 +365,7 @@ def migrate_settings(config: settngs.Config[ct_ns]) -> settngs.Config[ct_ns]:
elif isinstance(read_tags, str):
config[0].internal__read_tags = [read_tags]
else:
config[0].internal__read_tags = ["cr"]
config[0].internal__read_tags = ["cbi"]

return config

@@ -4,50 +4,18 @@

from __future__ import annotations

import importlib.util
import configparser
import importlib.metadata
import logging
import pathlib
import platform
import re
import sys
from collections.abc import Generator, Iterable
from typing import Any, NamedTuple, TypeVar

if sys.version_info < (3, 10):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
from collections.abc import Generator
from typing import Any, NamedTuple

logger = logging.getLogger(__name__)

NORMALIZE_PACKAGE_NAME_RE = re.compile(r"[-_.]+")
PLUGIN_GROUPS = frozenset(("comictagger.talker", "comicapi.archiver", "comicapi.tags"))
icu_available = importlib.util.find_spec("icu") is not None


def _custom_key(tup: Any) -> Any:
import natsort

lst = []
for x in natsort.os_sort_keygen()(tup):
ret = x
if len(x) > 1 and isinstance(x[1], int) and isinstance(x[0], str) and x[0] == "":
ret = ("a", *x[1:])

lst.append(ret)
return tuple(lst)


T = TypeVar("T")


def os_sorted(lst: Iterable[T]) -> Iterable[T]:
import natsort

key = _custom_key
if icu_available or platform.system() == "Windows":
key = natsort.os_sort_keygen()
return sorted(lst, key=key)


class FailedToLoadPlugin(Exception):
@@ -79,12 +47,9 @@ class Plugin(NamedTuple):

package: str
version: str
entry_point: importlib_metadata.EntryPoint
entry_point: importlib.metadata.EntryPoint
path: pathlib.Path

def load(self) -> LoadedPlugin:
return LoadedPlugin(self, self.entry_point.load())


class LoadedPlugin(NamedTuple):
"""Represents a plugin after being imported."""
@@ -106,11 +71,11 @@ class LoadedPlugin(NamedTuple):
class Plugins(NamedTuple):
"""Classified plugins."""

archivers: list[LoadedPlugin]
tags: list[LoadedPlugin]
talkers: list[LoadedPlugin]
archivers: list[Plugin]
tags: list[Plugin]
talkers: list[Plugin]

def all_plugins(self) -> Generator[LoadedPlugin]:
def all_plugins(self) -> Generator[Plugin]:
"""Return an iterator over all :class:`LoadedPlugin`s."""
yield from self.archivers
yield from self.tags
@@ -118,62 +83,65 @@ class Plugins(NamedTuple):

def versions_str(self) -> str:
"""Return a user-displayed list of plugin versions."""
return ", ".join(sorted({f"{plugin.plugin.package}: {plugin.plugin.version}" for plugin in self.all_plugins()}))
return ", ".join(sorted({f"{plugin.package}: {plugin.version}" for plugin in self.all_plugins()}))


def _find_local_plugins(plugin_path: pathlib.Path) -> Generator[Plugin]:
logger.debug("Checking for distributions in %s", plugin_path)
for dist in importlib_metadata.distributions(path=[str(plugin_path)]):
logger.debug("found distribution %s", dist.name)
eps = dist.entry_points
for group in PLUGIN_GROUPS:
for ep in eps.select(group=group):
logger.debug("found EntryPoint group %s %s=%s", group, ep.name, ep.value)
yield Plugin(plugin_path.name, dist.version, ep, plugin_path)

cfg = configparser.ConfigParser(interpolation=None)
cfg.read(plugin_path / "setup.cfg")

for group in PLUGIN_GROUPS:
for plugin_s in cfg.get("options.entry_points", group, fallback="").splitlines():
if not plugin_s:
continue

name, _, entry_str = plugin_s.partition("=")
name, entry_str = name.strip(), entry_str.strip()
ep = importlib.metadata.EntryPoint(name, entry_str, group)
yield Plugin(plugin_path.name, cfg.get("metadata", "version", fallback="0.0.1"), ep, plugin_path)


def _check_required_plugins(plugins: list[Plugin], expected: frozenset[str]) -> None:
plugin_names = {normalize_pypi_name(plugin.package) for plugin in plugins}
expected_names = {normalize_pypi_name(name) for name in expected}
missing_plugins = expected_names - plugin_names

if missing_plugins:
raise Exception(
"required plugins were not installed!\n"
+ f"- installed: {', '.join(sorted(plugin_names))}\n"
+ f"- expected: {', '.join(sorted(expected_names))}\n"
+ f"- missing: {', '.join(sorted(missing_plugins))}"
)


def find_plugins(plugin_folder: pathlib.Path) -> Plugins:
"""Discovers all plugins (but does not load them)."""
ret: list[LoadedPlugin] = []
if not plugin_folder.is_dir():
return _classify_plugins(ret)

zips = [x for x in plugin_folder.iterdir() if x.is_file() and x.suffix in (".zip", ".whl")]

for plugin_path in os_sorted(zips):
logger.debug("looking for plugins in %s", plugin_path)
ret: list[Plugin] = []
for plugin_path in plugin_folder.glob("*/setup.cfg"):
try:
sys.path.append(str(plugin_path))
for plugin in _find_local_plugins(plugin_path):
logger.debug("Attempting to load %s from %s", plugin.entry_point.name, plugin.path)
ret.append(plugin.load())
ret.extend(_find_local_plugins(plugin_path.parent))
except Exception as err:
logger.exception(FailedToLoadPlugin(plugin_path.name, err))
finally:
sys.path.remove(str(plugin_path))
for mod in list(sys.modules.values()):
if (
mod is not None
and hasattr(mod, "__spec__")
and mod.__spec__
and str(plugin_path) in (mod.__spec__.origin or "")
):
sys.modules.pop(mod.__name__)
FailedToLoadPlugin(plugin_path.parent.name, err)

# for determinism, sort the list
ret.sort()

return _classify_plugins(ret)


def _classify_plugins(plugins: list[LoadedPlugin]) -> Plugins:
def _classify_plugins(plugins: list[Plugin]) -> Plugins:
archivers = []
tags = []
talkers = []

for p in plugins:
if p.plugin.entry_point.group == "comictagger.talker":
if p.entry_point.group == "comictagger.talker":
talkers.append(p)
elif p.plugin.entry_point.group == "comicapi.tags":
elif p.entry_point.group == "comicapi.tags":
tags.append(p)
elif p.plugin.entry_point.group == "comicapi.archiver":
elif p.entry_point.group == "comicapi.archiver":
archivers.append(p)
else:
logger.warning(NotImplementedError(f"what plugin type? {p}"))
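
Note: both sides of the plugin_finder hunks end in importlib EntryPoint objects; one side reads packaged distribution metadata out of zip/wheel files, the other parses a plugin folder's setup.cfg by hand. A hedged sketch of the metadata-based discovery (standalone names, Python 3.10+ importlib.metadata):

    import pathlib
    import importlib.metadata

    PLUGIN_GROUPS = frozenset(("comictagger.talker", "comicapi.archiver", "comicapi.tags"))

    def iter_plugin_entry_points(plugin_path: pathlib.Path):
        # distributions(path=...) searches the given wheel/zip as if it were on sys.path
        for dist in importlib.metadata.distributions(path=[str(plugin_path)]):
            for group in PLUGIN_GROUPS:
                for ep in dist.entry_points.select(group=group):
                    yield dist.version, ep  # defer ep.load() until the plugin is used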
@@ -3,7 +3,6 @@ from __future__ import annotations
import typing

import settngs
import urllib3.util.url

import comicapi.genericmetadata
import comicapi.merge
@@ -20,7 +19,6 @@ class SettngsNS(settngs.TypedNS):

Runtime_Options__config: comictaggerlib.ctsettings.types.ComicTaggerPaths
Runtime_Options__verbose: int
Runtime_Options__enable_quick_tag: bool
Runtime_Options__quiet: bool
Runtime_Options__json: bool
Runtime_Options__raw: bool
@@ -39,13 +37,6 @@ class SettngsNS(settngs.TypedNS):
Runtime_Options__skip_existing_tags: bool
Runtime_Options__files: list[str]

Quick_Tag__url: urllib3.util.url.Url
Quick_Tag__max: int
Quick_Tag__simple: bool
Quick_Tag__aggressive_filtering: bool
Quick_Tag__hash: list[comictaggerlib.quick_tag.HashType]
Quick_Tag__exact_only: bool

internal__install_id: str
internal__write_tags: list[str]
internal__read_tags: list[str]
@@ -123,14 +114,13 @@ class SettngsNS(settngs.TypedNS):

Dialog_Flags__show_disclaimer: bool
Dialog_Flags__dont_notify_about_this_version: str
Dialog_Flags__notify_plugin_changes: bool
Dialog_Flags__ask_about_usage_stats: bool

Archive__rar: str

Source_comicvine__comicvine_key: str | None
Source_comicvine__comicvine_url: str | None
Source_comicvine__cv_use_series_start_as_volume: bool
Source_comicvine__comicvine_custom_parameters: str | None


class Commands(typing.TypedDict):
@@ -142,7 +132,6 @@ class Commands(typing.TypedDict):
class Runtime_Options(typing.TypedDict):
config: comictaggerlib.ctsettings.types.ComicTaggerPaths
verbose: int
enable_quick_tag: bool
quiet: bool
json: bool
raw: bool
@@ -162,15 +151,6 @@ class Runtime_Options(typing.TypedDict):
files: list[str]


class Quick_Tag(typing.TypedDict):
url: urllib3.util.url.Url
max: int
simple: bool
aggressive_filtering: bool
hash: list[comictaggerlib.quick_tag.HashType]
exact_only: bool


class internal(typing.TypedDict):
install_id: str
write_tags: list[str]
@@ -265,7 +245,7 @@ class General(typing.TypedDict):
class Dialog_Flags(typing.TypedDict):
show_disclaimer: bool
dont_notify_about_this_version: str
notify_plugin_changes: bool
ask_about_usage_stats: bool


class Archive(typing.TypedDict):
@@ -276,7 +256,6 @@ class Source_comicvine(typing.TypedDict):
comicvine_key: str | None
comicvine_url: str | None
cv_use_series_start_as_volume: bool
comicvine_custom_parameters: str | None


SettngsDict = typing.TypedDict(
@@ -284,7 +263,6 @@ SettngsDict = typing.TypedDict(
{
"Commands": Commands,
"Runtime Options": Runtime_Options,
"Quick Tag": Quick_Tag,
"internal": internal,
"Issue Identifier": Issue_Identifier,
"Filename Parsing": Filename_Parsing,
@@ -1,7 +1,6 @@
from __future__ import annotations

import argparse
import logging
import pathlib
import sys
import types
@@ -16,8 +15,6 @@ from comicapi import utils
from comicapi.comicarchive import tags
from comicapi.genericmetadata import REMOVE, GenericMetadata

logger = logging.getLogger(__name__)

if sys.version_info < (3, 10):

@typing.no_type_check
@@ -118,7 +115,8 @@ class ComicTaggerPaths(AppDirs):
@property
def user_cache_dir(self) -> pathlib.Path:
if self.path:
return self.path / "cache"
path = self.path / "cache"
return path
return pathlib.Path(super().user_cache_dir)

@property
@@ -130,14 +128,16 @@ class ComicTaggerPaths(AppDirs):
@property
def user_log_dir(self) -> pathlib.Path:
if self.path:
return self.path / "log"
path = self.path / "log"
return path
return pathlib.Path(super().user_log_dir)

@property
def user_plugin_dir(self) -> pathlib.Path:
if self.path:
return self.path / "plugins"
return pathlib.Path(super().user_config_dir) / "plugins"
path = self.path / "plugins"
return path
return pathlib.Path(super().user_config_dir)

@property
def site_data_dir(self) -> pathlib.Path:
@@ -198,48 +198,44 @@ def parse_metadata_from_string(mdstr: str) -> GenericMetadata:

md = GenericMetadata()

try:
if not mdstr:
return md
if mdstr[0] == "@":
p = pathlib.Path(mdstr[1:])
if not p.is_file():
raise argparse.ArgumentTypeError("Invalid filepath")
mdstr = p.read_text()
if mdstr[0] != "{":
mdstr = "{" + mdstr + "}"
if not mdstr:
return md
if mdstr[0] == "@":
p = pathlib.Path(mdstr[1:])
if not p.is_file():
raise argparse.ArgumentTypeError("Invalid filepath")
mdstr = p.read_text()
if mdstr[0] != "{":
mdstr = "{" + mdstr + "}"

md_dict = yaml.safe_load(mdstr)
md_dict = yaml.safe_load(mdstr)

empty = True
# Map the dict to the metadata object
for key, value in md_dict.items():
if hasattr(md, key):
t = get_type(key)
if value is None:
value = REMOVE
elif isinstance(t, tuple):
if value == "":
value = t[0]()
else:
if isinstance(value, str):
value = [value]
if not isinstance(value, Collection):
raise argparse.ArgumentTypeError(f"Invalid syntax for tag '{key}'")
values = list(value)
for idx, v in enumerate(values):
if not isinstance(v, t[1]):
values[idx] = convert_value(t[1], v)
value = t[0](values)
empty = True
# Map the dict to the metadata object
for key, value in md_dict.items():
if hasattr(md, key):
t = get_type(key)
if value is None:
value = REMOVE
elif isinstance(t, tuple):
if value == "":
value = t[0]()
else:
value = convert_value(t, value)

empty = False
setattr(md, key, value)
if isinstance(value, str):
value = [value]
if not isinstance(value, Collection):
raise argparse.ArgumentTypeError(f"Invalid syntax for tag '{key}'")
values = list(value)
for idx, v in enumerate(values):
if not isinstance(v, t[1]):
values[idx] = convert_value(t[1], v)
value = t[0](values)
else:
raise argparse.ArgumentTypeError(f"'{key}' is not a valid tag name")
md.is_empty = empty
except Exception as e:
logger.exception("Unable to read metadata from the commandline '%s'", mdstr)
raise Exception("Unable to read metadata from the commandline") from e
value = convert_value(t, value)

empty = False
setattr(md, key, value)
else:
raise argparse.ArgumentTypeError(f"'{key}' is not a valid tag name")
md.is_empty = empty
return md
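
Note: parse_metadata_from_string above is lenient about its input: a bare YAML fragment gets wrapped in braces, an @-prefixed argument is read from a file, and a null value becomes REMOVE (clearing that tag on write). Hedged usage examples, all following the logic shown:

    parse_metadata_from_string('series: Example Series, issue: "2"')    # wrapped to {...} internally
    parse_metadata_from_string('{series: Example Series, issue: "2"}')  # same result
    parse_metadata_from_string("@metadata.yaml")                        # fragment loaded from a file
    parse_metadata_from_string("title:")                                # null -> REMOVE for that tag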
@@ -73,8 +73,6 @@ class MetadataFormatter(string.Formatter):
return cast(str, super().format_field(value, format_spec))

def convert_field(self, value: Any, conversion: str | None) -> str:
if value is None:
return ""
if isinstance(value, Iterable) and not isinstance(value, (str, tuple)):
if conversion == "C":
if isinstance(value, Sized):
@@ -184,11 +182,8 @@ class MetadataFormatter(string.Formatter):

# given the field_name, find the object it references
# and the argument it came from
try:
obj, arg_used = self.get_field(field_name, args, kwargs)
used_args.add(arg_used)
except Exception:
obj = None
obj, arg_used = self.get_field(field_name, args, kwargs)
used_args.add(arg_used)

obj = self.none_replacement(obj, replacement, r)
# do any conversion on the resulting object
@@ -173,20 +173,15 @@ class FileSelectionList(QtWidgets.QWidget):
self.listCleared.emit()

def add_path_list(self, pathlist: list[str]) -> None:
if not pathlist:
return
filelist = utils.get_recursive_filelist(pathlist)
# we now have a list of files to add

progdialog = None
if len(filelist) > 3:
# Prog dialog on Linux flakes out for small range, so scale up
progdialog = QtWidgets.QProgressDialog("", "Cancel", 0, len(filelist), parent=self)
progdialog.setWindowTitle("Adding Files")
progdialog.setWindowModality(QtCore.Qt.WindowModality.WindowModal)
progdialog.setMinimumDuration(300)
progdialog.show()
center_window_on_parent(progdialog)
# Prog dialog on Linux flakes out for small range, so scale up
progdialog = QtWidgets.QProgressDialog("", "Cancel", 0, len(filelist), parent=self)
progdialog.setWindowTitle("Adding Files")
progdialog.setWindowModality(QtCore.Qt.WindowModality.WindowModal)
progdialog.setMinimumDuration(300)
center_window_on_parent(progdialog)

QtCore.QCoreApplication.processEvents()
first_added = None
@@ -194,11 +189,10 @@ class FileSelectionList(QtWidgets.QWidget):
self.twList.setSortingEnabled(False)
for idx, f in enumerate(filelist):
QtCore.QCoreApplication.processEvents()
if progdialog is not None:
if progdialog.wasCanceled():
break
progdialog.setValue(idx + 1)
progdialog.setLabelText(f)
if progdialog.wasCanceled():
break
progdialog.setValue(idx + 1)
progdialog.setLabelText(f)
QtCore.QCoreApplication.processEvents()
row = self.add_path_item(f)
if row is not None:
@@ -207,8 +201,7 @@ class FileSelectionList(QtWidgets.QWidget):
if first_added is None and row != -1:
first_added = row

if progdialog is not None:
progdialog.hide()
progdialog.hide()
QtCore.QCoreApplication.processEvents()

if first_added is not None:
@@ -73,23 +73,24 @@ class ImageHasher:

return result

def difference_hash(self) -> int:
try:
image = self.image.resize((self.width + 1, self.height), Image.Resampling.LANCZOS).convert("L")
except Exception:
logger.exception("difference_hash error")
return 0
def average_hash2(self) -> None:
"""
# Got this one from somewhere on the net. Not a clue how the 'convolve2d' works!

pixels = list(image.getdata())
diff = ""
for y in range(self.height):
for x in range(self.width):
idx = x + (self.width + 1) * y
diff += str(int(pixels[idx] < pixels[idx + 1]))
from numpy import array
from scipy.signal import convolve2d

result = int(diff, 2)
im = self.image.resize((self.width, self.height), Image.ANTIALIAS).convert('L')

in_data = array((im.getdata())).reshape(self.width, self.height)
filt = array([[0,1,0],[1,-4,1],[0,1,0]])
filt_data = convolve2d(in_data,filt,mode='same',boundary='symm').flatten()

result = reduce(lambda x, (y, z): x | (z << y),
enumerate(map(lambda i: 0 if i < 0 else 1, filt_data)),
0)
return result
"""

def p_hash(self) -> int:
"""
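
Note: the difference_hash lines above are a standard dHash: resize to (width+1) x height in grayscale, then emit one bit per pixel for whether it is darker than its right-hand neighbour. A self-contained sketch of the same idea (assumes Pillow; 8x8 is the usual hash size):

    from PIL import Image

    def dhash(path: str, size: int = 8) -> int:
        # One extra column so each of the size*size compared pixels has a right neighbour.
        img = Image.open(path).resize((size + 1, size), Image.Resampling.LANCZOS).convert("L")
        px = list(img.getdata())
        bits = ""
        for y in range(size):
            for x in range(size):
                idx = x + (size + 1) * y  # row stride is size + 1
                bits += "1" if px[idx] < px[idx + 1] else "0"
        return int(bits, 2)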
@@ -28,7 +28,6 @@ def setup_logging(verbose: int, log_dir: pathlib.Path) -> None:
logging.getLogger("comicapi").setLevel(logging.DEBUG)
logging.getLogger("comictaggerlib").setLevel(logging.DEBUG)
logging.getLogger("comictalker").setLevel(logging.DEBUG)
logging.getLogger("pyrate_limiter").setLevel(logging.DEBUG)

log_file = log_dir / "ComicTagger.log"
log_dir.mkdir(parents=True, exist_ok=True)
@@ -44,6 +44,7 @@ if sys.version_info < (3, 10):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata

logger = logging.getLogger("comictagger")


@@ -88,9 +89,9 @@ def configure_locale() -> None:
os.environ["LANG"] = f"{code}.utf-8"

locale.setlocale(locale.LC_ALL, "")
sys.stdout.reconfigure(encoding=sys.getdefaultencoding())  # type: ignore[union-attr]
sys.stderr.reconfigure(encoding=sys.getdefaultencoding())  # type: ignore[union-attr]
sys.stdin.reconfigure(encoding=sys.getdefaultencoding())  # type: ignore[union-attr]
sys.stdout.reconfigure(encoding=sys.getdefaultencoding())  # type: ignore[attr-defined]
sys.stderr.reconfigure(encoding=sys.getdefaultencoding())  # type: ignore[attr-defined]
sys.stdin.reconfigure(encoding=sys.getdefaultencoding())  # type: ignore[attr-defined]


def update_publishers(config: settngs.Config[ct_ns]) -> None:
@@ -116,19 +117,25 @@ class App:
conf = self.initialize()
self.initialize_dirs(conf.config)
self.load_plugins(conf)
self.register_settings(conf.enable_quick_tag)
self.register_settings()
self.config = self.parse_settings(conf.config)

self.main()

def load_plugins(self, opts: argparse.Namespace) -> None:
local_plugins = plugin_finder.find_plugins(opts.config.user_plugin_dir)
self._extend_plugin_paths(local_plugins)

comicapi.comicarchive.load_archive_plugins(local_plugins=[p.obj for p in local_plugins.archivers])
comicapi.comicarchive.load_tag_plugins(version=version, local_plugins=[p.obj for p in local_plugins.tags])
self.talkers = comictalker.get_talkers(
version, opts.config.user_cache_dir, local_plugins=[p.obj for p in local_plugins.talkers]
comicapi.comicarchive.load_archive_plugins(local_plugins=[p.entry_point for p in local_plugins.archivers])
comicapi.comicarchive.load_tag_plugins(
version=version, local_plugins=[p.entry_point for p in local_plugins.tags]
)
self.talkers = comictalker.get_talkers(
version, opts.config.user_cache_dir, local_plugins=[p.entry_point for p in local_plugins.talkers]
)

def _extend_plugin_paths(self, plugins: plugin_finder.Plugins) -> None:
sys.path.extend(str(p.path.absolute()) for p in plugins.all_plugins())

def list_plugins(
self,
@@ -208,13 +215,13 @@ class App:
setup_logging(conf.verbose, conf.config.user_log_dir)
return conf

def register_settings(self, enable_quick_tag: bool) -> None:
def register_settings(self) -> None:
self.manager = settngs.Manager(
description="A utility for reading and writing metadata to comic archives.\n\n\n"
+ "If no options are given, %(prog)s will run in windowed mode.\nPlease keep the '-v' option separated '-so -v' not '-sov'",
epilog="For more help visit the wiki at: https://github.com/comictagger/comictagger/wiki",
)
ctsettings.register_commandline_settings(self.manager, enable_quick_tag)
ctsettings.register_commandline_settings(self.manager)
ctsettings.register_file_settings(self.manager)
ctsettings.register_plugin_settings(self.manager, getattr(self, "talkers", {}))

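
Note: the load_plugins hunk is where the plugin_finder change becomes visible. One side consumes already-imported objects (p.obj, produced by plugin.load() during discovery); the other passes raw entry points (p.entry_point) for the consumers to import themselves. A sketch of the object-passing shape, assuming LoadedPlugin's second field is named obj, as the p.obj usages above imply:

    plugins = plugin_finder.find_plugins(config.user_plugin_dir)  # LoadedPlugin records
    archivers = [p.obj for p in plugins.archivers]                # p.obj == p.plugin.entry_point.load()
    comicapi.comicarchive.load_archive_plugins(local_plugins=archivers)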
@@ -411,5 +411,3 @@ class PageListEditor(QtWidgets.QWidget):

for md_field, widget in self.md_attributes.items():
enable_widget(widget, md_field in enabled_widgets)

self.listWidget.setDragEnabled(not ("pages.image_index" not in enabled_widgets and "pages" in enabled_widgets))
@@ -1,391 +0,0 @@
from __future__ import annotations

import argparse
import itertools
import logging
from enum import auto
from io import BytesIO
from typing import Callable, TypedDict, cast
from urllib.parse import urljoin

import requests
import settngs

from comicapi import comicarchive, utils
from comicapi.genericmetadata import GenericMetadata
from comicapi.issuestring import IssueString
from comictaggerlib.ctsettings.settngs_namespace import SettngsNS
from comictaggerlib.imagehasher import ImageHasher
from comictalker import ComicTalker

logger = logging.getLogger(__name__)

__version__ = "0.1"


class HashType(utils.StrEnum):
AHASH = auto()
DHASH = auto()
PHASH = auto()


class SimpleResult(TypedDict):
Distance: int
# Mapping of domains (eg comicvine.gamespot.com) to IDs
IDList: dict[str, list[str]]


class Hash(TypedDict):
Hash: int
Kind: str


class Result(TypedDict):
# Mapping of domains (eg comicvine.gamespot.com) to IDs
IDs: dict[str, list[str]]
Distance: int
Hash: Hash


def ihash(types: str) -> list[HashType]:
result: list[HashType] = []
types = types.casefold()
choices = ", ".join(HashType)
for typ in utils.split(types, ","):
if typ not in list(HashType):
raise argparse.ArgumentTypeError(f"invalid choice: {typ} (choose from {choices.upper()})")
result.append(HashType[typ.upper()])

if not result:
raise argparse.ArgumentTypeError(f"invalid choice: {types} (choose from {choices.upper()})")
return result


def settings(manager: settngs.Manager) -> None:
manager.add_setting(
"--url",
"-u",
default="https://comic-hasher.narnian.us",
type=utils.parse_url,
help="Website to use for searching cover hashes",
)
manager.add_setting(
"--max",
default=8,
type=int,
help="Maximum score to allow. Lower score means more accurate",
)
manager.add_setting(
"--simple",
default=False,
action=argparse.BooleanOptionalAction,
help="Whether to retrieve simple results or full results",
)
manager.add_setting(
"--aggressive-filtering",
default=False,
action=argparse.BooleanOptionalAction,
help="Will filter out worse matches if better matches are found",
)
manager.add_setting(
"--hash",
default="ahash, dhash, phash",
type=ihash,
help="Pick what hashes you want to use to search (default: %(default)s)",
)
manager.add_setting(
"--exact-only",
default=True,
action=argparse.BooleanOptionalAction,
help="Skip non-exact matches if we have exact matches",
)


class QuickTag:
def __init__(
self, url: utils.Url, domain: str, talker: ComicTalker, config: SettngsNS, output: Callable[[str], None]
):
self.output = output
self.url = url
self.talker = talker
self.domain = domain
self.config = config

def id_comic(
self,
ca: comicarchive.ComicArchive,
tags: GenericMetadata,
simple: bool,
hashes: set[HashType],
exact_only: bool,
interactive: bool,
aggressive_filtering: bool,
max_hamming_distance: int,
) -> GenericMetadata | None:
if not ca.seems_to_be_a_comic_archive():
raise Exception(f"{ca.path} is not an archive")
from PIL import Image

cover_index = tags.get_cover_page_index_list()[0]
cover_image = Image.open(BytesIO(ca.get_page(cover_index)))

self.output(f"Tagging: {ca.path}")

self.output("hashing cover")
phash = dhash = ahash = ""
hasher = ImageHasher(image=cover_image)
if HashType.AHASH in hashes:
ahash = hex(hasher.average_hash())[2:]
if HashType.DHASH in hashes:
dhash = hex(hasher.difference_hash())[2:]
if HashType.PHASH in hashes:
phash = hex(hasher.p_hash())[2:]

logger.info(f"Searching with {ahash=}, {dhash=}, {phash=}")

self.output("Searching hashes")
results = self.SearchHashes(simple, max_hamming_distance, ahash, dhash, phash, exact_only)
logger.debug(f"{results=}")

if simple:
filtered_simple_results = self.filter_simple_results(
cast(list[SimpleResult], results), interactive, aggressive_filtering
)
metadata_simple_results = self.get_simple_results(filtered_simple_results)
chosen_result = self.display_simple_results(metadata_simple_results, tags, interactive)
else:
filtered_results = self.filter_results(cast(list[Result], results), interactive, aggressive_filtering)
metadata_results = self.get_results(filtered_results)
chosen_result = self.display_results(metadata_results, tags, interactive)

return self.talker.fetch_comic_data(issue_id=chosen_result.issue_id)

def SearchHashes(
self, simple: bool, max_hamming_distance: int, ahash: str, dhash: str, phash: str, exact_only: bool
) -> list[SimpleResult] | list[Result]:

resp = requests.get(
urljoin(self.url.url, "/match_cover_hash"),
params={
"simple": str(simple),
"max": str(max_hamming_distance),
"ahash": ahash,
"dhash": dhash,
"phash": phash,
"exactOnly": str(exact_only),
},
)
if resp.status_code != 200:
try:
text = resp.json()["msg"]
except Exception:
text = resp.text
if text == "No hashes found":
return []
logger.error("message from server: %s", text)
raise Exception(f"Failed to retrieve results from the server: {text}")
return resp.json()["results"]

def get_mds(self, results: list[SimpleResult] | list[Result]) -> list[GenericMetadata]:
md_results: list[GenericMetadata] = []
results.sort(key=lambda r: r["Distance"])
all_ids = set()
for res in results:
all_ids.update(res.get("IDList", res.get("IDs", {})).get(self.domain, []))  # type: ignore[attr-defined]

self.output(f"Retrieving basic {self.talker.name} data")
# Try to do a bulk fetch of basic issue data
if hasattr(self.talker, "fetch_comics"):
md_results = self.talker.fetch_comics(issue_ids=list(all_ids))
else:
for md_id in all_ids:
md_results.append(self.talker.fetch_comic_data(issue_id=md_id))
return md_results

def get_simple_results(self, results: list[SimpleResult]) -> list[tuple[int, GenericMetadata]]:
md_results = []
mds = self.get_mds(results)

# Re-associate the md to the distance
for res in results:
for md in mds:
if md.issue_id in res["IDList"].get(self.domain, []):
md_results.append((res["Distance"], md))
return md_results

def get_results(self, results: list[Result]) -> list[tuple[int, Hash, GenericMetadata]]:
md_results = []
mds = self.get_mds(results)

# Re-associate the md to the distance
for res in results:
for md in mds:
if md.issue_id in res["IDs"].get(self.domain, []):
md_results.append((res["Distance"], res["Hash"], md))
return md_results

def filter_simple_results(
self, results: list[SimpleResult], interactive: bool, aggressive_filtering: bool
) -> list[SimpleResult]:
# If there is a single exact match return it
exact = [r for r in results if r["Distance"] == 0]
if len(exact) == 1:
logger.info("Exact result found. Ignoring any others")
return exact

# If there are more than 4 results and any are better than 6 return the first group of results
if len(results) > 4:
dist: list[tuple[int, list[SimpleResult]]] = []
filtered_results: list[SimpleResult] = []
for distance, group in itertools.groupby(results, key=lambda r: r["Distance"]):
dist.append((distance, list(group)))
if aggressive_filtering and dist[0][0] < 6:
logger.info(f"Aggressive filtering is enabled. Dropping matches above {dist[0]}")
for _, res in dist[:1]:
filtered_results.extend(res)
logger.debug(f"{filtered_results=}")
return filtered_results
return results

def filter_results(self, results: list[Result], interactive: bool, aggressive_filtering: bool) -> list[Result]:
ahash_results = sorted([r for r in results if r["Hash"]["Kind"] == "ahash"], key=lambda r: r["Distance"])
dhash_results = sorted([r for r in results if r["Hash"]["Kind"] == "dhash"], key=lambda r: r["Distance"])
phash_results = sorted([r for r in results if r["Hash"]["Kind"] == "phash"], key=lambda r: r["Distance"])
hash_results = [phash_results, dhash_results, ahash_results]

# If any of the hash types have a single exact match return it. Prefer phash for no particular reason
for hashed_result in hash_results:
exact = [r for r in hashed_result if r["Distance"] == 0]
if len(exact) == 1:
logger.info(f"Exact {exact[0]['Hash']['Kind']} result found. Ignoring any others")
return exact

results_filtered = False
# If any of the hash types have more than 4 results and they have results better than 6 return the first group of results for each hash type
for i, hashed_results in enumerate(hash_results):
filtered_results: list[Result] = []
if len(hashed_results) > 4:
dist: list[tuple[int, list[Result]]] = []
for distance, group in itertools.groupby(hashed_results, key=lambda r: r["Distance"]):
dist.append((distance, list(group)))
if aggressive_filtering and dist[0][0] < 6:
logger.info(
f"Aggressive filtering is enabled. Dropping {dist[0][1][0]['Hash']['Kind']} matches above {dist[0][0]}"
)
for _, res in dist[:1]:
filtered_results.extend(res)

if filtered_results:
hash_results[i] = filtered_results
results_filtered = True
if results_filtered:
logger.debug(f"filtered_results={list(itertools.chain(*hash_results))}")
return list(itertools.chain(*hash_results))

def display_simple_results(
self, md_results: list[tuple[int, GenericMetadata]], tags: GenericMetadata, interactive: bool
) -> GenericMetadata:
if len(md_results) < 1:
return GenericMetadata()
if len(md_results) == 1 and md_results[0][0] <= 4:
self.output("Found a single match <=4. Assuming it's correct")
return md_results[0][1]
series_match: list[GenericMetadata] = []
for score, md in md_results:
if (
score < 10
and tags.series
and md.series
and utils.titles_match(tags.series, md.series)
and IssueString(tags.issue).as_string() == IssueString(md.issue).as_string()
):
series_match.append(md)
if len(series_match) == 1:
self.output(f"Found match with series name {series_match[0].series!r}")
return series_match[0]

if not interactive:
return GenericMetadata()

md_results.sort(key=lambda r: (r[0], len(r[1].publisher or "")))
for counter, r in enumerate(md_results, 1):
self.output(
" {:2}. score: {} [{:15}] ({:02}/{:04}) - {} #{} - {}".format(
counter,
r[0],
r[1].publisher,
r[1].month or 0,
r[1].year or 0,
r[1].series,
r[1].issue,
r[1].title,
),
)
while True:
i = input(
f'Please select a result to tag the comic with or "q" to quit: [1-{len(md_results)}] ',
).casefold()
if i.isdigit() and int(i) in range(1, len(md_results) + 1):
break
if i == "q":
logger.warning("User quit without saving metadata")
return GenericMetadata()

return md_results[int(i) - 1][1]

def display_results(
self,
md_results: list[tuple[int, Hash, GenericMetadata]],
tags: GenericMetadata,
interactive: bool,
) -> GenericMetadata:
if len(md_results) < 1:
return GenericMetadata()
if len(md_results) == 1 and md_results[0][0] <= 4:
self.output("Found a single match <=4. Assuming it's correct")
return md_results[0][2]
series_match: dict[str, tuple[int, Hash, GenericMetadata]] = {}
for score, cover_hash, md in md_results:
if (
score < 10
and tags.series
and md.series
and utils.titles_match(tags.series, md.series)
and IssueString(tags.issue).as_string() == IssueString(md.issue).as_string()
):
assert md.issue_id
series_match[md.issue_id] = (score, cover_hash, md)

if len(series_match) == 1:
score, cover_hash, md = list(series_match.values())[0]
self.output(f"Found {cover_hash['Kind']} {score=} match with series name {md.series!r}")
return md
if not interactive:
return GenericMetadata()
md_results.sort(key=lambda r: (r[0], len(r[2].publisher or ""), r[1]["Kind"]))
for counter, r in enumerate(md_results, 1):
self.output(
" {:2}. score: {} {}: {:064b} [{:15}] ({:02}/{:04}) - {} #{} - {}".format(
counter,
r[0],
r[1]["Kind"],
r[1]["Hash"],
r[2].publisher or "",
r[2].month or 0,
r[2].year or 0,
r[2].series or "",
r[2].issue or "",
r[2].title or "",
),
)
while True:
i = input(
f'Please select a result to tag the comic with or "q" to quit: [1-{len(md_results)}] ',
).casefold()
if i.isdigit() and int(i) in range(1, len(md_results) + 1):
break
if i == "q":
self.output("User quit without saving metadata")
return GenericMetadata()

return md_results[int(i) - 1][2]
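
Note: SearchHashes in the removed file is a single GET against the comic-hasher service, so it can be exercised directly with requests (the URL is the default from settings() above; the phash value here is a made-up placeholder):

    import requests

    resp = requests.get(
        "https://comic-hasher.narnian.us/match_cover_hash",
        params={"simple": "False", "max": "8", "ahash": "", "dhash": "", "phash": "ffd49c2fc0ab31e2", "exactOnly": "True"},
    )
    resp.raise_for_status()
    for result in resp.json()["results"]:
        print(result["Distance"], result["IDs"])  # full (non-simple) results carry IDs, Distance and Hash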
@@ -22,7 +22,6 @@ import os
import pathlib
import platform
import shutil
import urllib.parse
from typing import Any, cast

import settngs
@@ -105,7 +104,7 @@ Accepts the following variables:
{characters} (string)
{teams} (string)
{locations} (string)
{credits} (list of dict({'role': string, 'person': string, 'primary': boolean, 'language': str}))
{credits} (list of dict({'role': string, 'person': string, 'primary': boolean}))
{writer} (string)
{penciller} (string)
{inker} (string)
@@ -206,20 +205,6 @@ class SettingsWindow(QtWidgets.QDialog):
self.filename_parser_test()
self.update_rar_path()

dirs = self.config.values.Runtime_Options__config
self.lbl_config_dir.setText(
f"Config Dir: <a href='file://{urllib.parse.quote(str(dirs.user_config_dir))}'>{dirs.user_config_dir}</a>"
)
self.lbl_cache_dir.setText(
f"Cache Dir: <a href='file://{urllib.parse.quote(str(dirs.user_cache_dir))}'>{dirs.user_cache_dir}</a>"
)
self.lbl_log_dir.setText(
f"Log Dir: <a href='file://{urllib.parse.quote(str(dirs.user_log_dir))}'>{dirs.user_log_dir}</a>"
)
self.lbl_plugin_dir.setText(
f"Plugin Dir: <a href='file://{urllib.parse.quote(str(dirs.user_plugin_dir))}'>{dirs.user_plugin_dir}</a>"
)

# Set General as start tab
self.tabWidget.setCurrentIndex(0)

@@ -25,7 +25,6 @@ import platform
import re
import sys
import webbrowser
from collections.abc import Sequence
from typing import Any, Callable, cast

import natsort
@@ -38,7 +37,7 @@ import comictaggerlib.ui
from comicapi import utils
from comicapi.comicarchive import ComicArchive, tags
from comicapi.filenameparser import FileNameParser
from comicapi.genericmetadata import Credit, GenericMetadata
from comicapi.genericmetadata import GenericMetadata
from comicapi.issuestring import IssueString
from comictaggerlib import ctsettings, ctversion
from comictaggerlib.applicationlogwindow import ApplicationLogWindow, QTextEditLogger
@@ -126,8 +125,7 @@ class TaggerWindow(QtWidgets.QMainWindow):
            "teams": self.teTeams,
            "locations": self.teLocations,
            "credits": (self.twCredits, self.btnAddCredit, self.btnEditCredit, self.btnRemoveCredit),
            "credits.person": 3,
            "credits.language": 2,
            "credits.person": 2,
            "credits.role": 1,
            "credits.primary": 0,
            "tags": self.teTags,
@@ -222,16 +220,16 @@ class TaggerWindow(QtWidgets.QMainWindow):
        if config[0].Runtime_Options__tags_read:
            config[0].internal__read_tags = config[0].Runtime_Options__tags_read

        for tag_id in config[0].internal__write_tags.copy():
            if tag_id not in self.enabled_tags():
        for tag_id in config[0].internal__write_tags:
            if tag_id not in tags:
                config[0].internal__write_tags.remove(tag_id)

        for tag_id in config[0].internal__read_tags.copy():
            if tag_id not in self.enabled_tags():
        for tag_id in config[0].internal__read_tags:
            if tag_id not in tags:
                config[0].internal__read_tags.remove(tag_id)

        self.selected_write_tags: list[str] = config[0].internal__write_tags or [self.enabled_tags()[0]]
        self.selected_read_tags: list[str] = config[0].internal__read_tags or [self.enabled_tags()[0]]
        self.selected_write_tags: list[str] = config[0].internal__write_tags
        self.selected_read_tags: list[str] = config[0].internal__read_tags

        self.setAcceptDrops(True)
        self.view_tag_actions, self.remove_tag_actions = self.tag_actions()
@@ -335,26 +333,10 @@ class TaggerWindow(QtWidgets.QMainWindow):
                """,
            )
            self.config[0].Dialog_Flags__show_disclaimer = not checked
        if self.config[0].Dialog_Flags__notify_plugin_changes and getattr(sys, "frozen", False):
            checked = OptionalMessageDialog.msg(
                self,
                "Plugins Have moved!",
                f"""
Due to techinical issues the Metron and GCD plugins are no longer bundled in ComicTagger!<br/><br/>
You will need to download the .zip or .whl from the GitHub release page to:<br/><pre>{str(self.config[0].Runtime_Options__config.user_plugin_dir)}</pre><br/>
GCD: <a href="https://github.com/comictagger/gcd_talker/releases">https://github.com/comictagger/gcd_talker/releases</a><br/>
Metron: <a href="https://github.com/comictagger/metron_talker/releases">https://github.com/comictagger/metron_talker/releases</a><br/><br/>
For more information on installing plugins see the wiki page:<br/><a href="https://github.com/comictagger/comictagger/wiki/Installing-plugins">https://github.com/comictagger/comictagger/wiki/Installing-plugins</a>
                """,
            )
            self.config[0].Dialog_Flags__notify_plugin_changes = not checked

        if self.config[0].General__check_for_new_version:
            self.check_latest_version_online()

    def enabled_tags(self) -> Sequence[str]:
        return [tag.id for tag in tags.values() if tag.enabled]

    def tag_actions(self) -> tuple[dict[str, QtWidgets.QAction], dict[str, QtWidgets.QAction]]:
        view_raw_tags: dict[str, QtWidgets.QAction] = {}
        remove_raw_tags: dict[str, QtWidgets.QAction] = {}
@@ -492,14 +474,14 @@ class TaggerWindow(QtWidgets.QMainWindow):
    def repackage_archive(self) -> None:
        ca_list = self.fileSelectionList.get_selected_archive_list()
        non_zip_count = 0
        to_zip = []
        largest_page_size = 0
        zip_list = []
        for ca in ca_list:
            largest_page_size = max(largest_page_size, len(ca.get_page_name_list()))
            if not ca.is_zip():
                to_zip.append(ca)
                non_zip_count += 1
            else:
                zip_list.append(ca)

        if not to_zip:
        if non_zip_count == 0:
            QtWidgets.QMessageBox.information(
                self, self.tr("Export as Zip Archive"), self.tr("Only ZIP archives are selected!")
            )
@@ -512,11 +494,11 @@ class TaggerWindow(QtWidgets.QMainWindow):
        ):
            return

        if to_zip:
        if non_zip_count != 0:
            EW = ExportWindow(
                self,
                (
                    f"You have selected {len(to_zip)} archive(s) to export to Zip format. "
                    f"You have selected {non_zip_count} archive(s) to export to Zip format. "
                    """ New archives will be created in the same folder as the original.

Please choose config below, and select OK.
@@ -528,13 +510,11 @@ class TaggerWindow(QtWidgets.QMainWindow):
            if not EW.exec():
                return

            prog_dialog = None
            if len(to_zip) > 3 or largest_page_size > 24:
                prog_dialog = QtWidgets.QProgressDialog("", "Cancel", 0, non_zip_count, self)
                prog_dialog.setWindowTitle("Exporting as ZIP")
                prog_dialog.setWindowModality(QtCore.Qt.WindowModality.WindowModal)
                prog_dialog.setMinimumDuration(300)
                center_window_on_parent(prog_dialog)
            prog_dialog = QtWidgets.QProgressDialog("", "Cancel", 0, non_zip_count, self)
            prog_dialog.setWindowTitle("Exporting as ZIP")
            prog_dialog.setWindowModality(QtCore.Qt.WindowModality.WindowModal)
            prog_dialog.setMinimumDuration(300)
            center_window_on_parent(prog_dialog)
            QtCore.QCoreApplication.processEvents()

            new_archives_to_add = []
@@ -542,16 +522,13 @@ class TaggerWindow(QtWidgets.QMainWindow):
            skipped_list = []
            failed_list = []
            success_count = 0
            logger.debug("Exporting %d comics to zip", len(to_zip))

            for prog_idx, ca in enumerate(to_zip, 1):
                logger.debug("Exporting comic %d: %s", prog_idx, ca.path)
            for prog_idx, ca in enumerate(zip_list, 1):
                QtCore.QCoreApplication.processEvents()
                if prog_dialog is not None:
                    if prog_dialog.wasCanceled():
                        break
                    prog_dialog.setValue(prog_idx)
                    prog_dialog.setLabelText(str(ca.path))
                if prog_dialog.wasCanceled():
                    break
                prog_dialog.setValue(prog_idx)
                prog_dialog.setLabelText(str(ca.path))
                QtCore.QCoreApplication.processEvents()

                export_name = ca.path.with_suffix(".cbz")
@@ -565,7 +542,6 @@ class TaggerWindow(QtWidgets.QMainWindow):
                    export_name = utils.unique_file(export_name)

                if export:
                    logger.debug("Exporting %s to %s", ca.path, export_name)
                    if ca.export_as_zip(export_name):
                        success_count += 1
                        if EW.addToList:
@@ -580,9 +556,9 @@ class TaggerWindow(QtWidgets.QMainWindow):
                    if export_name.exists():
                        export_name.unlink(missing_ok=True)

            if prog_dialog is not None:
                prog_dialog.hide()
            prog_dialog.hide()
            QtCore.QCoreApplication.processEvents()
            self.fileSelectionList.add_path_list(new_archives_to_add)
            self.fileSelectionList.remove_archive_list(archives_to_remove)

            summary = f"Successfully created {success_count} Zip archive(s)."
@@ -604,7 +580,6 @@ class TaggerWindow(QtWidgets.QMainWindow):
            dlg.set_text(summary)
            dlg.setWindowTitle("Archive Export to Zip Summary")
            dlg.exec()
            self.fileSelectionList.add_path_list(new_archives_to_add)

    def about_app(self) -> None:
        website = "https://github.com/comictagger/comictagger"
@@ -872,48 +847,37 @@ class TaggerWindow(QtWidgets.QMainWindow):

        for row, credit in enumerate(md.credits):
            # if the role-person pair already exists, just skip adding it to the list
            if self.is_dupe_credit(None, credit.role.title(), credit.person):
            if self.is_dupe_credit(credit.role.title(), credit.person):
                continue

            self.add_new_credit_entry(row, credit)
            self.add_new_credit_entry(row, credit.role.title(), credit.person, credit.primary)

        self.twCredits.setSortingEnabled(True)
        self.update_credit_colors()

    def add_new_credit_entry(self, row: int, credit: Credit) -> None:
    def add_new_credit_entry(self, row: int, role: str, name: str, primary_flag: bool = False) -> None:
        self.twCredits.insertRow(row)

        item = QtWidgets.QTableWidgetItem(credit.role)
        item_text = role
        item = QtWidgets.QTableWidgetItem(item_text)
        item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
        item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, credit.role)
        self.twCredits.setItem(row, self.md_attributes["credits.role"], item)
        item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
        self.twCredits.setItem(row, 1, item)

        language = utils.get_language_from_iso(credit.language) or credit.language
        item = QtWidgets.QTableWidgetItem(language)
        item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, credit.language)
        item.setData(QtCore.Qt.ItemDataRole.UserRole, credit.language)
        item_text = name
        item = QtWidgets.QTableWidgetItem(item_text)
        item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
        item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
        self.twCredits.setItem(row, self.md_attributes["credits.language"], item)

        item = QtWidgets.QTableWidgetItem(credit.person)
        item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, credit.person)
        item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
        self.twCredits.setItem(row, self.md_attributes["credits.person"], item)
        self.twCredits.setItem(row, 2, item)

        item = QtWidgets.QTableWidgetItem("")
        item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
        self.twCredits.setItem(row, self.md_attributes["credits.primary"], item)
        self.update_credit_primary_flag(row, credit.primary)
        self.twCredits.setItem(row, 0, item)
        self.update_credit_primary_flag(row, primary_flag)

    def is_dupe_credit(self, row: int | None, role: str, name: str) -> bool:
    def is_dupe_credit(self, role: str, name: str) -> bool:
        for r in range(self.twCredits.rowCount()):
            if r == row:
                continue

            if (
                self.twCredits.item(r, self.md_attributes["credits.role"]).text() == role
                and self.twCredits.item(r, self.md_attributes["credits.person"]).text() == name
            ):
            if self.twCredits.item(r, 1).text() == role and self.twCredits.item(r, 2).text() == name:
                return True

        return False
@@ -968,29 +932,14 @@ class TaggerWindow(QtWidgets.QMainWindow):

        # get the credits from the table
        md.credits = []

        for row in range(self.twCredits.rowCount()):
            role = self.twCredits.item(row, self.md_attributes["credits.role"]).text()
            lang = (
                self.twCredits.item(row, self.md_attributes["credits.language"]).data(QtCore.Qt.ItemDataRole.UserRole)
                or self.twCredits.item(row, self.md_attributes["credits.language"]).text()
            )
            name = self.twCredits.item(row, self.md_attributes["credits.person"]).text()
            primary_flag = self.twCredits.item(row, self.md_attributes["credits.primary"]).text() != ""
            role = self.twCredits.item(row, 1).text()
            name = self.twCredits.item(row, 2).text()
            primary_flag = self.twCredits.item(row, 0).text() != ""

            md.add_credit(name, role, bool(primary_flag), lang)
            md.add_credit(name, role, bool(primary_flag))

        md.pages = self.page_list_editor.get_page_list()

        # Preserve hidden md values
        md.data_origin = self.metadata.data_origin
        md.issue_id = self.metadata.issue_id
        md.series_id = self.metadata.series_id

        md.price = self.metadata.price
        md.identifier = self.metadata.identifier
        md.rights = self.metadata.rights

        self.metadata = md

    def use_filename(self) -> None:
@@ -1192,9 +1141,9 @@ class TaggerWindow(QtWidgets.QMainWindow):
        if self.comic_archive is not None:
            self.load_archive(self.comic_archive)
        else:
            self.cbSelectedReadTags.dropdownClosed.disconnect()
            self.cbSelectedReadTags.itemChanged.disconnect()
            self.adjust_tags_combo()
            self.cbSelectedReadTags.dropdownClosed.connect(self.select_read_tags)
            self.cbSelectedReadTags.itemChanged.connect(self.select_read_tags)

    def select_write_tags(self) -> None:
        self.selected_write_tags = self.cbSelectedWriteTags.currentData()
@@ -1251,51 +1200,52 @@ class TaggerWindow(QtWidgets.QMainWindow):
    def update_credit_primary_flag(self, row: int, primary: bool) -> None:
        # if we're clearing a flag do it and quit
        if not primary:
            self.twCredits.item(row, self.md_attributes["credits.primary"]).setText("")
            self.twCredits.item(row, 0).setText("")
            return

        # otherwise, we need to check for, and clear, other primaries with same role
        role = str(self.twCredits.item(row, self.md_attributes["credits.role"]).text())
        role = str(self.twCredits.item(row, 1).text())
        r = 0
        for r in range(self.twCredits.rowCount()):
            if (
                self.twCredits.item(r, self.md_attributes["credits.primary"]).text() != ""
                and str(self.twCredits.item(r, self.md_attributes["credits.role"]).text()).casefold() == role.casefold()
                self.twCredits.item(r, 0).text() != ""
                and str(self.twCredits.item(r, 1).text()).casefold() == role.casefold()
            ):
                self.twCredits.item(r, self.md_attributes["credits.primary"]).setText("")
                self.twCredits.item(r, 0).setText("")

        # Now set our new primary
        self.twCredits.item(row, self.md_attributes["credits.primary"]).setText("Yes")
        self.twCredits.item(row, 0).setText("Yes")

    def modify_credits(self, edit: bool) -> None:
        row = self.twCredits.rowCount()
        old = Credit()
        if edit:
            row = self.twCredits.currentRow()
            lang = str(
                self.twCredits.item(row, self.md_attributes["credits.language"]).data(QtCore.Qt.ItemDataRole.UserRole)
                or utils.get_language_iso(self.twCredits.item(row, self.md_attributes["credits.language"]).text())
            )
            old = Credit(
                self.twCredits.item(row, self.md_attributes["credits.person"]).text(),
                self.twCredits.item(row, self.md_attributes["credits.role"]).text(),
                self.twCredits.item(row, self.md_attributes["credits.primary"]).text() != "",
                lang,
            )
            role = self.twCredits.item(row, 1).text()
            name = self.twCredits.item(row, 2).text()
            primary = self.twCredits.item(row, 0).text() != ""
        else:
            row = self.twCredits.rowCount()
            role = ""
            name = ""
            primary = False

        editor = CreditEditorWindow(self, CreditEditorWindow.ModeEdit, old)
        editor = CreditEditorWindow(self, CreditEditorWindow.ModeEdit, role, name, primary)
        editor.setModal(True)
        editor.exec()
        if editor.result():
            new = editor.get_credit()
            new_role, new_name, new_primary = editor.get_credits()

            if new == old:
            if new_name == name and new_role == role and new_primary == primary:
                # nothing has changed, just quit
                return

            # name and role is the same, but primary flag changed
            if new_name == name and new_role == role:
                self.update_credit_primary_flag(row, new_primary)
                return

            # check for dupes
            ok_to_mod = True
            if self.is_dupe_credit(row, new.role, new.person):
            if self.is_dupe_credit(new_role, new_name):
                # delete the dupe credit from list
                qmsg = QtWidgets.QMessageBox()
                qmsg.setText("Duplicate Credit!")
@@ -1317,18 +1267,13 @@ class TaggerWindow(QtWidgets.QMainWindow):
            if ok_to_mod:
                # modify it
                if edit:
                    lang = utils.get_language_from_iso(new.language) or new.language
                    self.twCredits.item(row, self.md_attributes["credits.role"]).setText(new.role)
                    self.twCredits.item(row, self.md_attributes["credits.person"]).setText(new.person)
                    self.twCredits.item(row, self.md_attributes["credits.language"]).setText(lang)
                    self.twCredits.item(row, self.md_attributes["credits.language"]).setData(
                        QtCore.Qt.ItemDataRole.UserRole, new.language
                    )
                    self.update_credit_primary_flag(row, new.primary)
                    self.twCredits.item(row, 1).setText(new_role)
                    self.twCredits.item(row, 2).setText(new_name)
                    self.update_credit_primary_flag(row, new_primary)
                else:
                    # add new entry
                    row = self.twCredits.rowCount()
                    self.add_new_credit_entry(row, new)
                    self.add_new_credit_entry(row, new_role, new_name, new_primary)

            self.update_credit_colors()
            self.set_dirty_flag()
@@ -1373,7 +1318,7 @@ class TaggerWindow(QtWidgets.QMainWindow):

    def adjust_tags_combo(self) -> None:
        """Select the enabled tags. Since tags are merged in an overlay fashion the last item in the list takes priority. We reverse the order for display to the user"""
        unchecked = set(self.enabled_tags()) - set(self.selected_read_tags)
        unchecked = set(tags.keys()) - set(self.selected_read_tags)
        for i, tag_id in enumerate(reversed(self.selected_read_tags)):
            if not tags[tag_id].enabled:
                continue
@@ -1383,15 +1328,19 @@ class TaggerWindow(QtWidgets.QMainWindow):
            if item_idx != i:
                self.cbSelectedReadTags.moveItem(item_idx, row=i)
        for tag_id in unchecked:
            if not tags[tag_id].enabled:
                continue
            self.cbSelectedReadTags.setItemChecked(self.cbSelectedReadTags.findData(tag_id), False)

        # select the current tag_id
        unchecked = set(self.enabled_tags()) - set(self.selected_write_tags)
        unchecked = set(tags.keys()) - set(self.selected_write_tags)
        for tag_id in self.selected_write_tags:
            if not tags[tag_id].enabled:
                continue
            self.cbSelectedWriteTags.setItemChecked(self.cbSelectedWriteTags.findData(tag_id), True)
        for tag_id in unchecked:
            if not tags[tag_id].enabled:
                continue
            self.cbSelectedWriteTags.setItemChecked(self.cbSelectedWriteTags.findData(tag_id), False)
        self.update_tag_tweaks()

@@ -1574,7 +1523,7 @@ class TaggerWindow(QtWidgets.QMainWindow):
                    # Abandon any further tag removals to prevent any greater damage to archive
                    break
            ca.reset_cache()
            ca.load_cache(self.enabled_tags())
            ca.load_cache(set(tags))

            progdialog.hide()
            QtCore.QCoreApplication.processEvents()
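Note: a recurring change in the hunks above replaces hard-coded credit-table column indexes (0/1/2) with lookups through the md_attributes mapping, which also makes room for the new Language column. A minimal sketch of the idea, using the column numbers from the mapping above (credit_cell is a hypothetical helper, not ComicTagger API):

md_attributes = {"credits.primary": 0, "credits.role": 1, "credits.language": 2, "credits.person": 3}

def credit_cell(table, row: int, field: str):
    # One place resolves a logical field to its QTableWidget column,
    # so inserting a new column renumbers nothing else.
    return table.item(row, md_attributes[field])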
@@ -97,7 +97,7 @@ tr:nth-child(even) {
<tr><td>{characters}</td><td>string</td></tr>
<tr><td>{teams}</td><td>string</td></tr>
<tr><td>{locations}</td><td>string</td></tr>
<tr><td>{credits}</td><td>list of dict({'role': string, 'person': string, 'primary': boolean, 'language': str})</td></tr>
<tr><td>{credits}</td><td>list of dict({'role': string, 'person': string, 'primary': boolean})</td></tr>
<tr><td>{writer}</td><td>(string)</td></tr>
<tr><td>{penciller}</td><td>(string)</td></tr>
<tr><td>{inker}</td><td>(string)</td></tr>
@@ -2,80 +2,80 @@
<ui version="4.0">
 <class>dialogCreditEditor</class>
 <widget class="QDialog" name="dialogCreditEditor">
  <property name="geometry">
   <rect>
    <x>0</x>
    <y>0</y>
    <width>400</width>
    <height>196</height>
   </rect>
  </property>
  <property name="windowTitle">
   <string>Modify Credit</string>
  </property>
  <property name="sizeGripEnabled">
   <bool>false</bool>
  </property>
  <layout class="QVBoxLayout" name="verticalLayout">
   <item>
    <layout class="QFormLayout" name="formLayout">
     <item row="1" column="0">
      <widget class="QLabel" name="label">
       <property name="text">
        <string>Role</string>
       </property>
      </widget>
     </item>
     <item row="1" column="1">
      <widget class="QComboBox" name="cbRole">
       <property name="editable">
        <bool>true</bool>
       </property>
      </widget>
     </item>
     <item row="2" column="1">
      <widget class="QLineEdit" name="leName"/>
     </item>
     <item row="2" column="0">
      <widget class="QLabel" name="label_2">
       <property name="text">
        <string>Name</string>
       </property>
      </widget>
     </item>
     <item row="4" column="1">
      <widget class="QCheckBox" name="cbPrimary">
       <property name="text">
        <string>Primary</string>
       </property>
      </widget>
     </item>
     <item row="3" column="1">
      <widget class="QComboBox" name="cbLanguage">
       <property name="editable">
        <bool>true</bool>
       </property>
      </widget>
     </item>
     <item row="3" column="0">
      <widget class="QLabel" name="label_3">
       <property name="text">
        <string>Language</string>
       </property>
      </widget>
     </item>
    </layout>
   </item>
   <item>
    <widget class="QDialogButtonBox" name="buttonBox">
     <property name="orientation">
      <enum>Qt::Horizontal</enum>
     </property>
     <property name="standardButtons">
      <set>QDialogButtonBox::Cancel|QDialogButtonBox::Ok</set>
     </property>
    </widget>
   </item>
  </layout>
  <widget class="QDialogButtonBox" name="buttonBox">
   <property name="geometry">
    <rect>
     <x>180</x>
     <y>140</y>
     <width>191</width>
     <height>30</height>
    </rect>
   </property>
   <property name="orientation">
    <enum>Qt::Horizontal</enum>
   </property>
   <property name="standardButtons">
    <set>QDialogButtonBox::Cancel|QDialogButtonBox::Ok</set>
   </property>
  </widget>
  <widget class="QWidget" name="formLayoutWidget">
   <property name="geometry">
    <rect>
     <x>30</x>
     <y>30</y>
     <width>341</width>
     <height>91</height>
    </rect>
   </property>
   <layout class="QFormLayout" name="formLayout">
    <item row="1" column="0">
     <widget class="QLabel" name="label">
      <property name="text">
       <string>Role</string>
      </property>
     </widget>
    </item>
    <item row="1" column="1">
     <widget class="QComboBox" name="cbRole">
      <property name="editable">
       <bool>true</bool>
      </property>
     </widget>
    </item>
    <item row="2" column="1">
     <widget class="QLineEdit" name="leName"/>
    </item>
    <item row="2" column="0">
     <widget class="QLabel" name="label_2">
      <property name="text">
       <string>Name</string>
      </property>
     </widget>
    </item>
    <item row="3" column="1">
     <widget class="QCheckBox" name="cbPrimary">
      <property name="text">
       <string>Primary</string>
      </property>
     </widget>
    </item>
   </layout>
  </widget>
 </widget>
 <tabstops>
  <tabstop>cbRole</tabstop>
  <tabstop>leName</tabstop>
  <tabstop>cbLanguage</tabstop>
  <tabstop>cbPrimary</tabstop>
 </tabstops>
 <resources/>
 <connections>
  <connection>
@@ -123,49 +123,10 @@
      </widget>
     </item>
     <item>
      <widget class="QGroupBox" name="groupBox_9">
       <layout class="QVBoxLayout" name="verticalLayout_3">
        <item>
         <widget class="QLabel" name="lbl_config_dir">
          <property name="text">
           <string>Config Directory:</string>
          </property>
          <property name="openExternalLinks">
           <bool>true</bool>
          </property>
         </widget>
        </item>
        <item>
         <widget class="QLabel" name="lbl_cache_dir">
          <property name="text">
           <string>Cache Directory:</string>
          </property>
          <property name="openExternalLinks">
           <bool>true</bool>
          </property>
         </widget>
        </item>
        <item>
         <widget class="QLabel" name="lbl_log_dir">
          <property name="text">
           <string>Log Directory:</string>
          </property>
          <property name="openExternalLinks">
           <bool>true</bool>
          </property>
         </widget>
        </item>
        <item>
         <widget class="QLabel" name="lbl_plugin_dir">
          <property name="text">
           <string>Plugin Directory:</string>
          </property>
          <property name="openExternalLinks">
           <bool>true</bool>
          </property>
         </widget>
        </item>
       </layout>
      <widget class="Line" name="line_3">
       <property name="orientation">
        <enum>Qt::Horizontal</enum>
       </property>
      </widget>
     </item>
     <item>
@@ -919,7 +919,7 @@ Source</string>
       <number>0</number>
      </property>
      <property name="columnCount">
       <number>4</number>
       <number>3</number>
      </property>
      <attribute name="horizontalHeaderMinimumSectionSize">
       <number>2</number>
@@ -937,12 +937,7 @@ Source</string>
      </column>
      <column>
       <property name="text">
        <string>Role</string>
       </property>
      </column>
      <column>
       <property name="text">
        <string>Language</string>
        <string>Credit</string>
       </property>
      </column>
      <column>
@@ -9,9 +9,9 @@ from collections.abc import Sequence
from packaging.version import InvalidVersion, parse

if sys.version_info < (3, 10):
    from importlib_metadata import entry_points
    from importlib_metadata import EntryPoint, entry_points
else:
    from importlib.metadata import entry_points
    from importlib.metadata import entry_points, EntryPoint

from comictalker.comictalker import ComicTalker, TalkerError

@@ -24,14 +24,14 @@ __all__ = [


def get_talkers(
    version: str, cache: pathlib.Path, local_plugins: Sequence[type[ComicTalker]] = tuple()
    version: str, cache: pathlib.Path, local_plugins: Sequence[EntryPoint] = tuple()
) -> dict[str, ComicTalker]:
    """Returns all comic talker instances"""
    talkers: dict[str, ComicTalker] = {}
    ct_version = parse(version)

    # A dict is used, last plugin wins
    for talker in itertools.chain(entry_points(group="comictagger.talker")):
    for talker in itertools.chain(entry_points(group="comictagger.talker"), local_plugins):
        try:
            talker_cls = talker.load()
            obj = talker_cls(version, cache)
@@ -56,26 +56,4 @@ def get_talkers(
        except Exception:
            logger.exception("Failed to load talker: %s", talker.name)

    # A dict is used, last plugin wins
    for talker_cls in local_plugins:
        try:
            obj = talker_cls(version, cache)
            try:
                if ct_version >= parse(talker_cls.comictagger_min_ver):
                    talkers[talker_cls.id] = obj
                else:
                    logger.error(
                        f"Minimum ComicTagger version required of {talker_cls.comictagger_min_ver} for talker {talker_cls.id} is not met, will NOT load talker"
                    )
            except InvalidVersion:
                logger.warning(
                    f"Invalid minimum required ComicTagger version number for talker: {talker_cls.id} - version: {talker_cls.comictagger_min_ver}, will load talker anyway"
                )
                # Attempt to use the talker anyway
                # TODO flag this problem for later display to the user
                talkers[talker_cls.id] = obj

        except Exception:
            logger.exception("Failed to load talker: %s", talker_cls.id)

    return talkers
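Note: this hunk changes get_talkers() so local plugins arrive as talker classes rather than importlib EntryPoint objects, loaded in a second pass after the entry-point group. A hedged sketch of the discovery pattern, assuming only that plugin classes expose an id attribute (discover itself is illustrative, not ComicTagger API; the entry-point group name is the real one):

import itertools
import sys

if sys.version_info < (3, 10):
    from importlib_metadata import entry_points
else:
    from importlib.metadata import entry_points


def discover(local_plugins=()):
    plugins = {}
    # A dict is used, so the last plugin registered under an id wins.
    for ep in itertools.chain(entry_points(group="comictagger.talker")):
        try:
            cls = ep.load()  # import whatever the entry point refers to
            plugins[cls.id] = cls
        except Exception:
            continue  # a broken plugin must not break discovery
    for cls in local_plugins:  # classes passed in directly, per the new signature
        plugins[cls.id] = cls
    return plugins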
@@ -295,9 +295,4 @@ class ComicCacher:
            set_slots += key + " = ?"

        sql_ins = f"INSERT OR REPLACE INTO {tablename} ({keys}) VALUES ({ins_slots})"
        if not data.get("complete", True):
            sql_ins += f" ON CONFLICT DO UPDATE SET {set_slots} WHERE complete != ?"
            vals.extend(vals)
            vals.append(True)  # If the cache is complete and this isn't complete we don't update it

        cur.execute(sql_ins, vals)
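Note: the hunk above guards the cache against regression: an insert flagged incomplete may only overwrite a row that is itself incomplete, which is why the parameter list is doubled (vals.extend(vals) feeds the SET slots) and a final True is appended for the WHERE comparison. A sketch of the same upsert on a toy table (assumes SQLite 3.35+, where the conflict target of DO UPDATE may be omitted; the series table here is illustrative):

import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE series (id TEXT PRIMARY KEY, data TEXT, complete BOOL)")


def upsert(id_, data, complete):
    vals = [id_, data, complete]
    sql = "INSERT OR REPLACE INTO series (id, data, complete) VALUES (?, ?, ?)"
    if not complete:
        sql += " ON CONFLICT DO UPDATE SET id = ?, data = ?, complete = ? WHERE complete != ?"
        vals.extend([id_, data, complete])  # same values again, for the SET slots
        vals.append(True)                   # a complete row is never downgraded
    con.execute(sql, vals)


upsert("1", "full record", True)
upsert("1", "partial record", False)  # skipped: the complete row wins
print(con.execute("SELECT data FROM series").fetchone())  # ('full record',)

The test_cache_overwrite test later in this diff exercises exactly this behavior.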
@@ -21,7 +21,7 @@ from urllib.parse import urlsplit
logger = logging.getLogger(__name__)


def fix_url(url: str | None) -> str:
def fix_url(url: str) -> str:
    if not url:
        return ""
    tmp_url = urlsplit(url)
@@ -47,10 +47,9 @@ def cleanup_html(string: str | None, remove_html_tables: bool = False) -> str:

    # put in our own
    string = re.sub(r"<br>|</li>", "\n", string, flags=re.IGNORECASE)
    string = re.sub(r"<li>", "* ", string, flags=re.IGNORECASE)
    string = re.sub(r"</p>", "\n\n", string, flags=re.IGNORECASE)
    string = re.sub(r"<h([1-6])>", lambda m: "#" * int(m.group(1)) + " ", string, flags=re.IGNORECASE)
    string = re.sub(r"</h[1-6]>", "\n", string, flags=re.IGNORECASE)
    string = re.sub(r"<h([1-6])>", "*", string, flags=re.IGNORECASE)
    string = re.sub(r"</h[1-6]>", "*\n", string, flags=re.IGNORECASE)

    # remove the tables
    p = re.compile(r"<table[^<]*?>.*?</table>")
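Note: the cleanup_html change swaps the old asterisk wrapping of <hN> headings for Markdown heading markers whose depth follows N. The substitution pair behaves like this (a standalone check, not ComicTagger code):

import re

s = "<h2>Story arcs</h2>"
s = re.sub(r"<h([1-6])>", lambda m: "#" * int(m.group(1)) + " ", s, flags=re.IGNORECASE)
s = re.sub(r"</h[1-6]>", "\n", s, flags=re.IGNORECASE)
print(repr(s))  # '## Story arcs\n'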
@@ -22,8 +22,8 @@ import json
import logging
import pathlib
import time
from typing import Any, Callable, Generic, TypeVar, cast
from urllib.parse import parse_qsl, urljoin
from typing import Any, Callable, Generic, TypeVar
from urllib.parse import urljoin

import settngs
from pyrate_limiter import Limiter, RequestRate
@@ -43,8 +43,6 @@ except ImportError:
    import requests
logger = logging.getLogger(__name__)

TWITTER_TOO_MANY_REQUESTS = 420


class CVTypeID:
    Volume = "4050"  # CV uses volume to mean series
@@ -92,7 +90,7 @@ class CVPersonCredit(TypedDict):
    role: str


class CVSeries(TypedDict, total=False):
class CVSeries(TypedDict):
    api_detail_url: str
    site_detail_url: str
    aliases: str
@@ -159,8 +157,8 @@ class CVResult(TypedDict, Generic[T]):

# https://comicvine.gamespot.com/forums/api-developers-2334/api-rate-limiting-1746419/
# "Space out your requests so AT LEAST one second passes between each and you can make requests all day."
custom_limiter = Limiter(RequestRate(10, 10), RequestRate(200, 1 * 60 * 60))
default_limiter = Limiter(RequestRate(1, 10), RequestRate(100, 1 * 60 * 60))
custom_limiter = Limiter(RequestRate(10, 10))
default_limiter = Limiter(RequestRate(1, 5))


class ComicVineTalker(ComicTalker):
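Note: the limiter hunk stacks a second, hourly window on each bucket: the shared default key becomes 1 request per 10 seconds plus 100 per hour, and personal keys keep 10 per 10 seconds but gain 200 per hour. A hedged sketch of the pyrate_limiter 2.x API used here (fetch is illustrative):

from pyrate_limiter import Limiter, RequestRate

# Several RequestRate windows can be stacked; all of them must have
# headroom before an acquisition succeeds.
limiter = Limiter(RequestRate(1, 10), RequestRate(100, 1 * 60 * 60))


def fetch(url: str) -> None:
    # delay=True blocks until the bucket has room instead of raising.
    with limiter.ratelimit("cv", delay=True):
        ...  # perform the request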
@@ -172,8 +170,8 @@ class ComicVineTalker(ComicTalker):
    about: str = (
        f"<a href='{website}'>{name}</a> has the largest collection of comic book data available through "
        f"its public facing API. "
        f"<p>NOTE: Using the default API key will severely limit access times. A personal API "
        f"key will allow for a <b>10 times increase</b> in online search speed. See the "
        f"<p>NOTE: Using the default API key will serverly limit access times. A personal API "
        f"key will allow for a <b>5 times increase</b> in online search speed. See the "
        "<a href='https://github.com/comictagger/comictagger/wiki/UserGuide#comic-vine'>Wiki page</a> for "
        "more information.</p>"
    )
@@ -185,11 +183,6 @@ class ComicVineTalker(ComicTalker):
        self.default_api_url = self.api_url = f"{self.website}/api/"
        self.default_api_key = self.api_key = "27431e6787042105bd3e47e169a624521f89f3a4"
        self.use_series_start_as_volume: bool = False
        self.total_requests_made: dict[str, int] = utils.DefaultDict(default=lambda x: 0)
        self.custom_url_parameters: dict[str, str] = {}

    def _log_total_requests(self) -> None:
        logger.debug("Total requests made to cv: %s", dict(self.total_requests_made))

    def register_settings(self, parser: settngs.Manager) -> None:
        parser.add_setting(
@@ -212,19 +205,12 @@
            display_name="API URL",
            help=f"Use the given Comic Vine URL. (default: {self.default_api_url})",
        )
        parser.add_setting(
            f"--{self.id}-custom-parameters",
            display_name="Custom URL Parameters",
            help="Custom url parameters to add to the url, must already be url encoded. (eg. refresh_cache=true)",
        )

    def parse_settings(self, settings: dict[str, Any]) -> dict[str, Any]:
        settings = super().parse_settings(settings)

        self.use_series_start_as_volume = settings["cv_use_series_start_as_volume"]

        self.custom_url_parameters = dict(parse_qsl(settings[f"{self.id}_custom_parameters"]))

        # Set a different limit if using the default API key
        if self.api_key == self.default_api_key:
            self.limiter = default_limiter
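Note: parse_settings above turns the new url-encoded custom-parameters string into a dict via parse_qsl, and _get_url_content later merges it underneath the real request parameters. Behavior in isolation (the values here are made up):

from urllib.parse import parse_qsl

custom = dict(parse_qsl("refresh_cache=true&note=a%20b"))
print(custom)  # {'refresh_cache': 'true', 'note': 'a b'}

params = {"api_key": "...", "format": "json"}
final_params = custom.copy()
final_params.update(params)  # real parameters win on any name clash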
@@ -238,28 +224,24 @@
        if not url:
            url = self.default_api_url
        try:
            test_url = urljoin(url, "team/1/")
            test_url = urljoin(url, "issue/1/")

            self.total_requests_made[test_url] += 1
            cv_response: CVResult = requests.get(  # type: ignore[type-arg]
                test_url,
                headers={"user-agent": "comictagger/" + self.version},
                params={
                    "api_key": settings[f"{self.id}_key"] or self.default_api_key,
                    "format": "json",
                    "field_list": "name",
                },
                timeout=10,
            ).json()

            # Bogus request, but if the key is wrong, you get error 100: "Invalid API Key"
            if cv_response["status_code"] != 100:
                self._log_total_requests()
                return "The API key is valid", True
            else:
                self._log_total_requests()
                return "The API key is INVALID!", False
        except Exception:
            self._log_total_requests()
            return "Failed to connect to the URL!", False

    def search_for_series(
@@ -286,9 +268,7 @@
        cached_search_results = cvc.get_search_results(self.id, series_name)

        if len(cached_search_results) > 0:
            logger.debug("Search for %s cached: True", repr(series_name))
            return self._format_search_results([json.loads(x[0].data) for x in cached_search_results])
        logger.debug("Search for %s cached: False", repr(series_name))

        params = {  # CV uses volume to mean series
            "api_key": self.api_key,
@@ -387,46 +367,10 @@
    def fetch_issues_by_series_issue_num_and_year(
        self, series_id_list: list[str], issue_number: str, year: str | int | None
    ) -> list[GenericMetadata]:
        logger.debug("Fetching comics by series ids: %s and number: %s", series_id_list, issue_number)
        # before we search online, look in our cache, since we might already have this info
        cvc = ComicCacher(self.cache_folder, self.version)
        cached_results: list[GenericMetadata] = []
        needed_volumes: set[int] = set()
        for series_id in series_id_list:
            series = cvc.get_series_info(series_id, self.id, expire_stale=False)
            issues = []
            # Explicitly mark count_of_issues at an impossible value
            cvseries = CVSeries(id=int(series_id), count_of_issues=-1)
            if series:
                cvseries = cast(CVSeries, json.loads(series[0].data))
                issues = cvc.get_series_issues_info(series_id, self.id, expire_stale=True)
            issue_found = False
            for issue, _ in issues:
                cvissue = cast(CVIssue, json.loads(issue.data))
                if cvissue.get("issue_number") == issue_number:
                    cached_results.append(
                        self._map_comic_issue_to_metadata(
                            cvissue,
                            self._fetch_series([int(cvissue["volume"]["id"])])[0][0],
                        ),
                    )
                    issue_found = True
                    break
            if not issues:
                needed_volumes.add(int(series_id))  # we got no results from cache, we definitely need to check online

            # If we didn't find the issue and we don't have all the issues we don't know if the issue exists, we have to check
            if (not issue_found) and cvseries.get("count_of_issues") != len(issues):
                needed_volumes.add(int(series_id))

        logger.debug("Found %d issues cached need %d issues", len(cached_results), len(needed_volumes))
        if not needed_volumes:
            return cached_results

        series_filter = ""
        for vid in needed_volumes:
        for vid in series_id_list:
            series_filter += str(vid) + "|"
        flt = f"volume:{series_filter[:-1]},issue_number:{issue_number}"  # CV uses volume to mean series
        flt = f"volume:{series_filter},issue_number:{issue_number}"  # CV uses volume to mean series

        int_year = utils.xlate_int(year)
        if int_year is not None:
@@ -459,162 +403,20 @@
            filtered_issues_result.extend(cv_response["results"])
            current_result_count += cv_response["number_of_page_results"]

        cvc.add_issues_info(
            self.id,
            [
                Issue(str(x["id"]), str(x["volume"]["id"]), json.dumps(x).encode("utf-8"))
                for x in filtered_issues_result
            ],
            False,
        )

        formatted_filtered_issues_result = [
            self._map_comic_issue_to_metadata(x, self._fetch_series_data(x["volume"]["id"])[0])
            for x in filtered_issues_result
        ]
        formatted_filtered_issues_result.extend(cached_results)

        return formatted_filtered_issues_result
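Note: one of the fixes above concerns the filter string, which is assembled by appending "ID|" in a loop; the new code trims the trailing separator with series_filter[:-1], where the old string ended in a bare pipe. The same result, spelled with str.join (ids here are made up):

needed_volumes = {23437, 140529}
issue_number = "1"
flt = f"volume:{'|'.join(str(v) for v in needed_volumes)},issue_number:{issue_number}"
# e.g. 'volume:23437|140529,issue_number:1'  (CV uses volume to mean series)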
    def fetch_comics(self, *, issue_ids: list[str]) -> list[GenericMetadata]:
        logger.debug("Fetching comic IDs: %s", issue_ids)
        # before we search online, look in our cache, since we might already have this info
        cvc = ComicCacher(self.cache_folder, self.version)
        cached_results: list[GenericMetadata] = []
        needed_issues: list[int] = []
        for issue_id in issue_ids:
            cached_issue = cvc.get_issue_info(issue_id, self.id)

            if cached_issue and cached_issue[1]:
                cached_results.append(
                    self._map_comic_issue_to_metadata(
                        json.loads(cached_issue[0].data),
                        self._fetch_series([int(cached_issue[0].series_id)])[0][0],
                    ),
                )
            else:
                needed_issues.append(int(issue_id))  # CV uses integers for it's IDs

        logger.debug("Found %d issues cached need %d issues", len(cached_results), len(needed_issues))
        if not needed_issues:
            return cached_results

        issue_filter = ""
        for iid in needed_issues:
            issue_filter += str(iid) + "|"
        flt = "id:" + issue_filter.rstrip("|")

        issue_url = urljoin(self.api_url, "issues/")
        params: dict[str, Any] = {
            "api_key": self.api_key,
            "format": "json",
            "filter": flt,
        }
        cv_response: CVResult[list[CVIssue]] = self._get_cv_content(issue_url, params)

        issue_results = cv_response["results"]
        page = 1
        offset = 0
        current_result_count = cv_response["number_of_page_results"]
        total_result_count = cv_response["number_of_total_results"]

        # see if we need to keep asking for more pages...
        while current_result_count < total_result_count:
            page += 1
            offset += cv_response["number_of_page_results"]

            params["offset"] = offset
            cv_response = self._get_cv_content(issue_url, params)

            issue_results.extend(cv_response["results"])
            current_result_count += cv_response["number_of_page_results"]

        series_info = {s[0].id: s[0] for s in self._fetch_series([int(i["volume"]["id"]) for i in issue_results])}

        for issue in issue_results:
            cvc.add_issues_info(
                self.id,
                [
                    Issue(
                        id=str(issue["id"]),
                        series_id=str(issue["volume"]["id"]),
                        data=json.dumps(issue).encode("utf-8"),
                    ),
                ],
                False,  # The /issues/ endpoint never provides credits
            )
            cached_results.append(
                self._map_comic_issue_to_metadata(issue, series_info[str(issue["volume"]["id"])]),
            )

        return cached_results

    def _fetch_series(self, series_ids: list[int]) -> list[tuple[ComicSeries, bool]]:
        # before we search online, look in our cache, since we might already have this info
        cvc = ComicCacher(self.cache_folder, self.version)
        cached_results: list[tuple[ComicSeries, bool]] = []
        needed_series: list[int] = []
        for series_id in series_ids:
            cached_series = cvc.get_series_info(str(series_id), self.id)
            if cached_series is not None:
                cached_results.append((self._format_series(json.loads(cached_series[0].data)), cached_series[1]))
            else:
                needed_series.append(series_id)

        if needed_series == []:
            return cached_results

        series_filter = ""
        for vid in needed_series:
            series_filter += str(vid) + "|"
        flt = "id:" + series_filter.rstrip("|")  # CV uses volume to mean series

        series_url = urljoin(self.api_url, "volumes/")  # CV uses volume to mean series
        params: dict[str, Any] = {
            "api_key": self.api_key,
            "format": "json",
            "filter": flt,
        }
        cv_response: CVResult[list[CVSeries]] = self._get_cv_content(series_url, params)

        series_results = cv_response["results"]
        page = 1
        offset = 0
        current_result_count = cv_response["number_of_page_results"]
        total_result_count = cv_response["number_of_total_results"]

        # see if we need to keep asking for more pages...
        while current_result_count < total_result_count:
            page += 1
            offset += cv_response["number_of_page_results"]

            params["offset"] = offset
            cv_response = self._get_cv_content(series_url, params)

            series_results.extend(cv_response["results"])
            current_result_count += cv_response["number_of_page_results"]

        if series_results:
            for series in series_results:
                cvc.add_series_info(
                    self.id,
                    Series(id=str(series["id"]), data=json.dumps(series).encode("utf-8")),
                    True,
                )
                cached_results.append((self._format_series(series), True))

        return cached_results
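Note: both fetch paths above page through results the same way: keep requesting with a growing offset until the number of collected results reaches number_of_total_results. A compact sketch of that loop (get_page stands in for self._get_cv_content):

def fetch_all_pages(get_page, params):
    response = get_page(params)
    results = list(response["results"])
    fetched = response["number_of_page_results"]
    while fetched < response["number_of_total_results"]:
        params["offset"] = fetched          # skip what we already have
        response = get_page(params)
        results.extend(response["results"])
        fetched += response["number_of_page_results"]
    return results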
    def _get_cv_content(self, url: str, params: dict[str, Any]) -> CVResult[T]:
        """
        Get the content from the CV server.
        """
        ratelimit_key = url
        if self.api_key == self.default_api_key:
            ratelimit_key = "cv"
        with self.limiter.ratelimit(ratelimit_key, delay=True):

        with self.limiter.ratelimit("cv", delay=True):
            cv_response: CVResult[T] = self._get_url_content(url, params)

            if cv_response["status_code"] != 1:
                logger.debug(
                    f"{self.name} query failed with error #{cv_response['status_code']}: [{cv_response['error']}]."
@@ -626,25 +428,19 @@
    def _get_url_content(self, url: str, params: dict[str, Any]) -> Any:
        # if there is a 500 error, try a few more times before giving up
        limit_counter = 0
        final_params = self.custom_url_parameters.copy()
        final_params.update(params)

        for tries in range(1, 5):
            try:
                self.total_requests_made[url.removeprefix(self.api_url)] += 1
                resp = requests.get(
                    url, params=final_params, headers={"user-agent": "comictagger/" + self.version}, timeout=10
                )
                resp = requests.get(url, params=params, headers={"user-agent": "comictagger/" + self.version})
                if resp.status_code == 200:
                    return resp.json()
                elif resp.status_code == 500:
                if resp.status_code == 500:
                    logger.debug(f"Try #{tries}: ")
                    time.sleep(1)
                    logger.debug(str(resp.status_code))

                elif resp.status_code in (requests.status_codes.codes.TOO_MANY_REQUESTS, TWITTER_TOO_MANY_REQUESTS):
                if resp.status_code == requests.status_codes.codes.TOO_MANY_REQUESTS:
                    logger.info(f"{self.name} rate limit encountered. Waiting for 10 seconds\n")
                    self._log_total_requests()
                    time.sleep(10)
                    limit_counter += 1
                    if limit_counter > 3:
@@ -667,10 +463,8 @@
            except json.JSONDecodeError as e:
                logger.debug(f"JSON decode error: {e}")
                raise TalkerDataError(self.name, 2, "ComicVine did not provide json")
        except Exception as e:
            raise TalkerNetworkError(self.name, 5, str(e))

        raise TalkerNetworkError(self.name, 5, "Unknown error occurred")
        raise TalkerNetworkError(self.name, 5)

    def _format_search_results(self, search_results: list[CVSeries]) -> list[ComicSeries]:
        formatted_results = []
@@ -709,20 +503,17 @@
        )

    def _fetch_issues_in_series(self, series_id: str) -> list[tuple[GenericMetadata, bool]]:
        logger.debug("Fetching all issues in series: %s", series_id)
        # before we search online, look in our cache, since we might already have this info
        cvc = ComicCacher(self.cache_folder, self.version)
        cached_results = cvc.get_series_issues_info(series_id, self.id)
        cached_series_issues_result = cvc.get_series_issues_info(series_id, self.id)

        series = self._fetch_series_data(int(series_id))[0]

        logger.debug(
            "Found %d issues cached need %d issues",
            len(cached_results),
            cast(int, series.count_of_issues) - len(cached_results),
        )
        if len(cached_results) == series.count_of_issues:
            return [(self._map_comic_issue_to_metadata(json.loads(x[0].data), series), x[1]) for x in cached_results]
        if len(cached_series_issues_result) == series.count_of_issues:
            return [
                (self._map_comic_issue_to_metadata(json.loads(x[0].data), series), x[1])
                for x in cached_series_issues_result
            ]

        params = {  # CV uses volume to mean series
            "api_key": self.api_key,
@@ -766,12 +557,10 @@
        return [(x, False) for x in formatted_series_issues_result]

    def _fetch_series_data(self, series_id: int) -> tuple[ComicSeries, bool]:
        logger.debug("Fetching series info: %s", series_id)
        # before we search online, look in our cache, since we might already have this info
        cvc = ComicCacher(self.cache_folder, self.version)
        cached_series = cvc.get_series_info(str(series_id), self.id)

        logger.debug("Series cached: %s", bool(cached_series))
        if cached_series is not None:
            return (self._format_series(json.loads(cached_series[0].data)), cached_series[1])

@@ -793,7 +582,6 @@
        return self._format_series(series_results), True

    def _fetch_issue_data(self, series_id: int, issue_number: str) -> GenericMetadata:
        logger.debug("Fetching issue by series ID: %s and issue number: %s", series_id, issue_number)
        issues_list_results = self._fetch_issues_in_series(str(series_id))

        # Loop through issue list to find the required issue info
@@ -814,12 +602,10 @@
        return GenericMetadata()

    def _fetch_issue_data_by_issue_id(self, issue_id: str) -> GenericMetadata:
        logger.debug("Fetching issue by issue ID: %s", issue_id)
        # before we search online, look in our cache, since we might already have this info
        cvc = ComicCacher(self.cache_folder, self.version)
        cached_issue = cvc.get_issue_info(issue_id, self.id)

        logger.debug("Issue cached: %s", bool(cached_issue and cached_issue[1]))
        if cached_issue and cached_issue[1]:
            return self._map_comic_issue_to_metadata(
                json.loads(cached_issue[0].data), self._fetch_series_data(int(cached_issue[0].series_id))[0]
50 setup.cfg
@@ -55,9 +55,7 @@ install_requires =
python_requires = >=3.9

[options.packages.find]
exclude =
    tests*
    testing*
exclude = tests; testing

[options.entry_points]
console_scripts = comictagger=comictaggerlib.main:main
@@ -68,55 +66,43 @@ comicapi.archiver =
    folder = comicapi.archivers.folder:FolderArchiver
comicapi.tags =
    cr = comicapi.tags.comicrack:ComicRack
    cbi = comicapi.tags.comicbookinfo:ComicBookInfo
    comet = comicapi.tags.comet:CoMet
comictagger.talker =
    comicvine = comictalker.talkers.comicvine:ComicVineTalker
pyinstaller40 =
    hook-dirs = comictaggerlib.__pyinstaller:get_hook_dirs

[options.extras_require]
7z =
7Z =
    py7zr
CBR =
    rarfile>=4.0
GUI =
    PyQt5
ICU =
    pyicu;sys_platform == 'linux' or sys_platform == 'darwin'
QTW =
    PyQt5
    PyQtWebEngine
all =
    PyQt5
    PyQtWebEngine
    comicinfoxml==0.4.*
    comicinfoxml>=0.2.0
    gcd-talker>0.1.0
    metron-talker>0.1.5
    pillow-avif-plugin>=1.4.1
    pillow-jxl-plugin>=1.2.5
    py7zr
    rarfile>=4.0
    pyicu;sys_platform == 'linux' or sys_platform == 'darwin'
archived_tags =
    ct-archived-tags
avif =
    pillow-avif-plugin>=1.4.1
cbr =
    rarfile>=4.0
cix =
    comicinfoxml==0.4.*
    comicinfoxml>=0.2.0
gcd =
    gcd-talker>0.1.0
gui =
    PyQt5
icu =
    pyicu;sys_platform == 'linux' or sys_platform == 'darwin'
jxl =
    pillow-jxl-plugin>=1.2.5
    gcd-talker>=0.1.0
metron =
    metron-talker>0.1.5
pyinstaller =
    PyQt5
    PyQtWebEngine
    comicinfoxml==0.4.*
    pillow-avif-plugin>=1.4.1
    pillow-jxl-plugin>=1.2.5
    py7zr
    rarfile>=4.0
    pyicu;sys_platform == 'linux' or sys_platform == 'darwin'
qtw =
    PyQt5
    PyQtWebEngine
    metron-talker>=0.1.3

[options.package_data]
comicapi =
@@ -255,7 +241,7 @@ depends =
deps =
    pyinstaller>=5.6.2,!=6.0.0
extras =
    pyinstaller
    all
commands =
    pyrcc5 comictaggerlib/graphics/graphics.qrc -o comictaggerlib/graphics/resources.py
    pyinstaller -y build-tools/comictagger.spec
@@ -118,7 +118,7 @@ cv_volume_result: dict[str, Any] = {
    "results": {
        "aliases": None,
        "api_detail_url": "https://comicvine.gamespot.com/api/volume/4050-23437/",
        "count_of_issues": 1,
        "count_of_issues": 6,
        "date_added": "2008-10-16 05:25:47",
        "date_last_updated": "2012-01-18 17:21:57",
        "deck": None,
@@ -25,38 +25,6 @@ datadir = importlib.resources.files(__package__).joinpath("data")
cbz_path = datadir.joinpath("Cory Doctorow's Futuristic Tales of the Here and Now #001 - Anda's Game (2007).cbz")

names: list[tuple[str, str, dict[str, str | bool], tuple[bool, bool]]] = [
    (
        "De Psy #6 Bonjour l'angoisse!.cbz",
        "'",
        {
            "issue": "6",
            "series": "De Psy",
            "title": "Bonjour l'angoisse!",
            "volume": "",
            "year": "",
            "remainder": "",
            "issue_count": "",
            "alternate": "",
            "archive": "cbz",
        },
        (False, True),
    ),
    (
        "Airfiles #4 The 'Big Show'.cbz",
        "'",
        {
            "issue": "4",
            "series": "Airfiles",
            "title": "The 'Big Show'",
            "volume": "",
            "year": "",
            "remainder": "",
            "issue_count": "",
            "alternate": "",
            "archive": "cbz",
        },
        (False, True),
    ),
    (
        "Conceptions #1 Conceptions I.cbz",
        "&",
@@ -1146,22 +1114,6 @@ for p in names:
    )

file_renames = [
    (
        "{series} #{issue} - {title} ({year}) ({price!c})",  # conversion on None
        False,
        False,
        "universal",
        "Cory Doctorow's Futuristic Tales of the Here and Now #001 - Anda's Game (2007).cbz",
        does_not_raise(),
    ),
    (
        "{country[0]} {price} {year}",  # Indexing a None value
        False,
        False,
        "universal",
        "2007.cbz",
        does_not_raise(),
    ),
    (
        "{series!c} {price} {year}",  # Capitalize
        False,
        False,
@@ -8,7 +8,6 @@ import pytest
from importlib_metadata import entry_points

import comicapi.archivers.rar
import comicapi.archivers.zip
import comicapi.comicarchive
import comicapi.genericmetadata
from testing.filenames import datadir
@@ -52,6 +51,15 @@ def test_write_cr(tmp_comic):
    md = tmp_comic.read_tags("cr")


def test_write_cbi(tmp_comic):
    md = tmp_comic.read_tags("cr")
    md.apply_default_page_list(tmp_comic.get_page_name_list())

    assert tmp_comic.write_tags(md, "cbi")

    md = tmp_comic.read_tags("cbi")


@pytest.mark.xfail(not (comicapi.archivers.rar.rar_support and shutil.which("rar")), reason="rar support")
def test_save_cr_rar(tmp_path, md_saved):
    cbr_path = datadir / "fake_cbr.cbr"
@@ -69,6 +77,20 @@ def test_save_cr_rar(tmp_path, md_saved):
    assert md == md_saved


@pytest.mark.xfail(not (comicapi.archivers.rar.rar_support and shutil.which("rar")), reason="rar support")
def test_save_cbi_rar(tmp_path, md_saved):
    cbr_path = pathlib.Path(str(datadir)) / "fake_cbr.cbr"
    shutil.copy(cbr_path, tmp_path)

    tmp_comic = comicapi.comicarchive.ComicArchive(tmp_path / cbr_path.name)
    assert tmp_comic.seems_to_be_a_comic_archive()
    assert tmp_comic.write_tags(comicapi.genericmetadata.md_test, "cbi")

    md = tmp_comic.read_tags("cbi")
    supported_attributes = comicapi.comicarchive.tags["cbi"].supported_attributes
    assert md.get_clean_metadata(*supported_attributes) == md_saved.get_clean_metadata(*supported_attributes)


def test_page_type_write(tmp_comic):
    md = tmp_comic.read_tags("cr")
    t = md.pages[0]
@@ -79,14 +101,12 @@ def test_page_type_write(tmp_comic):
    md = tmp_comic.read_tags("cr")


def test_invalid_zip(tmp_comic: comicapi.comicarchive.ComicArchive):
def test_invalid_zip(tmp_comic):
    with open(tmp_comic.path, mode="b+r") as f:
        # This only corrupts the first file. If it is never read then no exception will be caused
        f.write(b"PK\000\000")

    result = tmp_comic.write_tags(comicapi.genericmetadata.md_test, "cr")  # This is not the first file
    assert result
    assert not tmp_comic.seems_to_be_a_comic_archive()  # Calls archiver.is_valid
    result = tmp_comic.write_tags(comicapi.genericmetadata.md_test, "cr")
    assert not result


archivers = []
@ -28,32 +28,10 @@ def test_search_results(comic_cache):
@pytest.mark.parametrize("series_info", search_results)
def test_series_info(comic_cache, series_info):
    comic_cache.add_series_info(
        series=comictalker.comiccacher.Series(id=series_info["id"], data=json.dumps(series_info).encode("utf-8")),
        series=comictalker.comiccacher.Series(id=series_info["id"], data=json.dumps(series_info)),
        source="test",
        complete=True,
    )
    vi = series_info.copy()
    cache_result = json.loads(comic_cache.get_series_info(series_id=series_info["id"], source="test")[0].data)
    assert vi == cache_result


@pytest.mark.parametrize("series_info", search_results)
def test_cache_overwrite(comic_cache, series_info):
    vi = series_info.copy()
    comic_cache.add_series_info(
        series=comictalker.comiccacher.Series(id=series_info["id"], data=json.dumps(series_info).encode("utf-8")),
        source="test",
        complete=True,
    )  # Populate the cache

    # Try to insert an incomplete series with different data
    series_info["name"] = "test 3"
    comic_cache.add_series_info(
        series=comictalker.comiccacher.Series(id=series_info["id"], data=json.dumps(series_info).encode("utf-8")),
        source="test",
        complete=False,
    )
    cache_result = json.loads(comic_cache.get_series_info(series_id=series_info["id"], source="test")[0].data)

    # Validate that the Series marked complete is still in the cache
    assert vi == cache_result
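Two details in the hunk above are easy to miss: the payload toggles between json.dumps(...) and its UTF-8-encoded form (json.loads has accepted both str and bytes since Python 3.6, so the round-trip assert holds either way), and a record inserted with complete=False must not displace one inserted with complete=True. A toy cache reproducing that rule, illustrative only and not comictalker.comiccacher:

import json

cache: dict[str, tuple[str, bool]] = {}


def add_series_info(series_id: str, data: str, complete: bool) -> None:
    existing = cache.get(series_id)
    if existing is not None and existing[1] and not complete:
        return  # an incomplete insert never replaces a complete record
    cache[series_id] = (data, complete)


info = {"id": "23437", "name": "test"}
add_series_info(info["id"], json.dumps(info), complete=True)
add_series_info(info["id"], json.dumps({**info, "name": "test 3"}), complete=False)

# the complete record is still cached; storing data.encode("utf-8") instead
# would round-trip identically through json.loads
assert json.loads(cache[info["id"]][0]) == info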
@ -46,24 +46,13 @@ def test_fetch_issue_data_by_issue_id(comicvine_api):
    assert result == testing.comicvine.cv_md


def test_fetch_issues_in_series_issue_num_and_year(comicvine_api, cv_requests_get):
def test_fetch_issues_in_series_issue_num_and_year(comicvine_api):
    results = comicvine_api.fetch_issues_by_series_issue_num_and_year([23437], "1", None)
    cv_expected = testing.comicvine.comic_issue_result.copy()

    assert results[0].series == cv_expected.series
    assert results[0] == cv_expected
    assert cv_requests_get.call_count == 2

    results = comicvine_api.fetch_issues_by_series_issue_num_and_year([23437], "1", None)

    assert results[0].series == cv_expected.series
    assert results[0] == cv_expected
    assert cv_requests_get.call_count == 2  # verify caching works

    results = comicvine_api.fetch_issues_by_series_issue_num_and_year([23437], "2", None)

    assert not results
    assert cv_requests_get.call_count == 2  # verify negative caching works
    for r, e in zip(results, [cv_expected]):
        assert r.series == e.series
        assert r == e

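The call_count assertions above only work because the fixture hands the test a Mock wrapping the fake transport, so every outbound requests.get is counted. The general pattern, as a generic sketch rather than the project's cv_requests_get fixture:

import unittest.mock

import requests


def fake_get(*args, **kwargs):
    # hypothetical stand-in for a URL-dispatching handler
    return None


mock_get = unittest.mock.Mock(side_effect=fake_get)
requests.get = mock_get  # a pytest fixture would use monkeypatch.setattr(requests, "get", mock_get)

requests.get("https://example.com/api")
requests.get("https://example.com/api")
assert mock_get.call_count == 2  # a caching client would hold this constant on repeat queries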
cv_issue = [
@ -70,7 +70,7 @@ def no_requests(monkeypatch) -> None:


@pytest.fixture
def cv_requests_get(monkeypatch, cbz, comic_cache) -> unittest.mock.Mock:
def comicvine_api(monkeypatch, cbz, comic_cache, mock_version, config) -> comictalker.talkers.comicvine.ComicVineTalker:
    # Any arguments may be passed and mock_get() will always return our
    # mocked object, which only has the .json() method, or None for invalid URLs

@ -88,18 +88,16 @@ def cv_requests_get(monkeypatch, cbz, comic_cache) -> unittest.mock.Mock:
            return comicvine.MockResponse(cv_result)
        if args[0].startswith("https://comicvine.gamespot.com/api/issue/4000-140529"):
            return comicvine.MockResponse(comicvine.cv_issue_result)
        flt = kwargs.get("params", {}).get("filter", "").split(",")
        if (
            args[0].startswith("https://comicvine.gamespot.com/api/issues/")
            and "params" in kwargs
            and "filter" in kwargs["params"]
            and "volume:23437" in flt
            and "23437" in kwargs["params"]["filter"]
        ):
            if "issue_number" not in kwargs["params"]["filter"] or ("issue_number:1" in flt):
                cv_list = make_list(comicvine.cv_issue_result)
                for cv in cv_list["results"]:
                    comicvine.filter_field_list(cv, kwargs)
                return comicvine.MockResponse(cv_list)
            cv_list = make_list(comicvine.cv_issue_result)
            for cv in cv_list["results"]:
                comicvine.filter_field_list(cv, kwargs)
            return comicvine.MockResponse(cv_list)
        if (
            args[0].startswith("https://comicvine.gamespot.com/api/search")
            and "params" in kwargs
@ -128,11 +126,6 @@ def cv_requests_get(monkeypatch, cbz, comic_cache) -> unittest.mock.Mock:

    # apply the monkeypatch for requests.get to mock_get
    monkeypatch.setattr(requests, "get", m_get)
    return m_get


@pytest.fixture
def comicvine_api(monkeypatch, cv_requests_get, mock_version, config) -> comictalker.talkers.comicvine.ComicVineTalker:
    monkeypatch.setattr(comictalker.talkers.comicvine, "custom_limiter", Limiter(RequestRate(100, 1)))
    monkeypatch.setattr(comictalker.talkers.comicvine, "default_limiter", Limiter(RequestRate(100, 1)))

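The two setattr calls above relax ComicVine's rate limiting for the test run: pyrate_limiter's Limiter delays or rejects decorated calls once the RequestRate is exhausted, so swapping in 100 requests per second makes the suite effectively unthrottled. A standalone sketch of that mechanism, assuming the pyrate-limiter 2.x API the code above appears to use:

from pyrate_limiter import Duration, Limiter, RequestRate

# generous rate, mirroring the fixture's Limiter(RequestRate(100, 1))
test_limiter = Limiter(RequestRate(100, Duration.SECOND))


@test_limiter.ratelimit("comicvine", delay=True)  # delay=True blocks instead of raising
def fetch() -> str:
    return "ok"


print(fetch())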
@ -204,7 +197,7 @@ def config(tmp_path):
    from comictaggerlib.main import App

    app = App()
    app.register_settings(False)
    app.register_settings()

    defaults = app.parse_settings(comictaggerlib.ctsettings.ComicTaggerPaths(tmp_path / "config"), "")
    defaults[0].Runtime_Options__config.user_config_dir.mkdir(parents=True, exist_ok=True)
@ -219,15 +212,15 @@ def plugin_config(tmp_path):
    from comictaggerlib.main import App

    ns = Namespace(config=comictaggerlib.ctsettings.ComicTaggerPaths(tmp_path / "config"))
    ns.config.user_config_dir.mkdir(parents=True, exist_ok=True)
    ns.config.user_cache_dir.mkdir(parents=True, exist_ok=True)
    ns.config.user_log_dir.mkdir(parents=True, exist_ok=True)
    ns.config.user_plugin_dir.mkdir(parents=True, exist_ok=True)
    app = App()
    app.load_plugins(ns)
    app.register_settings(False)
    app.register_settings()

    defaults = app.parse_settings(ns.config, "")
    defaults[0].Runtime_Options__config.user_config_dir.mkdir(parents=True, exist_ok=True)
    defaults[0].Runtime_Options__config.user_cache_dir.mkdir(parents=True, exist_ok=True)
    defaults[0].Runtime_Options__config.user_log_dir.mkdir(parents=True, exist_ok=True)
    defaults[0].Runtime_Options__config.user_plugin_dir.mkdir(parents=True, exist_ok=True)
    yield (defaults, app.talkers)

@ -52,8 +52,6 @@ def test_save(

    # This is inserted here because otherwise several other tests
    # unrelated to comicvine would need to be re-worked:
    # the comicvine response is mocked to a single issue for the caching tests,
    # and adding the remaining 5 issues is more work
    md_saved.issue_count = 1
    md_saved.credits.insert(
        1,
        comicapi.genericmetadata.Credit(
@ -9,15 +9,12 @@ from comictaggerlib.md import prepare_metadata

tags = []

for x in entry_points(group="comicapi.tags"):
for x in entry_points(group="comicapi.tag"):
    tag = x.load()
    supported = tag.enabled
    exe_found = True
    tags.append(pytest.param(tag, marks=pytest.mark.xfail(not supported, reason="tags not enabled")))

if not tags:
    raise Exception("No tags found")

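The loop above is the whole plugin-discovery story for tags: every entry point registered under the group (named "comicapi.tags" on one side of this diff and "comicapi.tag" on the other) is loaded and parametrized, with unsupported tags marked xfail rather than skipped. A small sketch of what load() does, using the group name from the newer side:

from importlib_metadata import entry_points

# Each entry point is declared as "name = module:attr" in the package metadata;
# load() imports the module and returns the attribute, so a broken or missing
# dependency fails here, at import time, which is why the loop above raises
# when it ends up with an empty list.
for ep in entry_points(group="comicapi.tags"):
    print(ep.name, "->", ep.value)
    tag_cls = ep.load()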
@pytest.mark.parametrize("tag_type", tags)
def test_metadata(mock_version, tmp_comic, md_saved, tag_type):
@ -25,24 +22,20 @@ def test_metadata(mock_version, tmp_comic, md_saved, tag_type):
    supported_attributes = tag.supported_attributes
    tag.write_tags(comicapi.genericmetadata.md_test, tmp_comic.archiver)
    written_metadata = tag.read_tags(tmp_comic.archiver)
    md = md_saved._get_clean_metadata(*supported_attributes)
    md = md_saved.get_clean_metadata(*supported_attributes)

    # Hack back in the pages variable because CoMet supports identifying the cover by the filename
    if tag.id == "comet":
        md.pages = [
            comicapi.genericmetadata.PageMetadata(
                archive_index=0,
                bookmark="",
                display_index=0,
                filename="!cover.jpg",
                type=comicapi.genericmetadata.PageType.FrontCover,
            comicapi.genericmetadata.ImageMetadata(
                image_index=0, filename="!cover.jpg", type=comicapi.genericmetadata.PageType.FrontCover
            )
        ]
        written_metadata = written_metadata._get_clean_metadata(*supported_attributes).replace(
        written_metadata = written_metadata.get_clean_metadata(*supported_attributes).replace(
            pages=written_metadata.pages
        )
    else:
        written_metadata = written_metadata._get_clean_metadata(*supported_attributes)
        written_metadata = written_metadata.get_clean_metadata(*supported_attributes)

    assert written_metadata == md
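The .replace(pages=...) chaining above implies the metadata object is dataclass-like, returning a modified copy rather than mutating in place. A minimal sketch of that pattern, with illustrative names rather than the real comicapi.genericmetadata definitions:

import dataclasses


@dataclasses.dataclass
class Meta:
    series: str = ""
    pages: list = dataclasses.field(default_factory=list)

    def replace(self, **kwargs) -> "Meta":
        # dataclasses.replace builds a new instance with the given fields swapped
        return dataclasses.replace(self, **kwargs)


m = Meta(series="Anda's Game")
m2 = m.replace(pages=[{"image_index": 0, "filename": "!cover.jpg"}])
assert m.pages == [] and m2.series == "Anda's Game"  # original untouched, copy updated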