Compare commits

...

9 Commits

Author SHA1 Message Date
ca8f36d105 Fix toasts and modal dialogs
Toasts calculated the duration bar in Python; this is now a QPropertyAnimation.

Series/Issue Selection windows now use signals/slots to communicate
2025-01-14 21:23:17 -08:00
13ee9e9ad8 Use signals a little better and avoid QDialog.exec 2025-01-14 17:41:00 -08:00
a9da87bff3 Improve canceling during a ratelimit 2025-01-04 15:14:32 -08:00
d011975fd0 Fix typo 2024-12-24 21:17:59 -08:00
4d767f026a Fix dark mode 2024-12-23 20:10:18 -08:00
b1c164add0 Skip GUI tests on Windows and Linux 2024-12-22 20:27:23 -08:00
e184353493 Display toast notification longer 2024-12-18 22:34:03 -08:00
94ca1fd58b Add tests 2024-12-18 21:15:51 -08:00
63c836a327 Display message when a ratelimit is hit 2024-12-16 01:03:31 -08:00
55 changed files with 6390 additions and 901 deletions

View File

@ -18,12 +18,10 @@ from __future__ import annotations
import logging
import os
from typing import Callable
from PyQt5 import QtCore, QtGui, QtWidgets, uic
from comicapi.comicarchive import ComicArchive, tags
from comicapi.genericmetadata import GenericMetadata
from comictaggerlib.coverimagewidget import CoverImageWidget
from comictaggerlib.ctsettings import ct_ns
from comictaggerlib.md import prepare_metadata
@ -40,7 +38,6 @@ class AutoTagMatchWindow(QtWidgets.QDialog):
parent: QtWidgets.QWidget,
match_set_list: list[Result],
read_tags: list[str],
fetch_func: Callable[[IssueResult], GenericMetadata],
config: ct_ns,
talker: ComicTalker,
) -> None:
@ -79,7 +76,7 @@ class AutoTagMatchWindow(QtWidgets.QDialog):
self.match_set_list = match_set_list
self._tags = read_tags
self.fetch_func = fetch_func
self.talker = talker
self.current_match_set_idx = 0
@ -226,7 +223,7 @@ class AutoTagMatchWindow(QtWidgets.QDialog):
def save_match(self) -> None:
match = self.current_match()
ca = ComicArchive(self.current_match_set.original_path)
md, error = self.parent().read_selected_tags(self._tags, ca)
md, _, error = self.parent().read_selected_tags(self._tags, ca)
if error is not None:
logger.error("Failed to load tags for %s: %s", ca.path, error)
QtWidgets.QApplication.restoreOverrideCursor()
@ -248,7 +245,7 @@ class AutoTagMatchWindow(QtWidgets.QDialog):
# now get the particular issue data
try:
self.current_match_set.md = ct_md = self.fetch_func(match)
self.current_match_set.md = ct_md = self.talker.fetch_comic_data(issue_id=match.issue_id)
except TalkerError as e:
QtWidgets.QApplication.restoreOverrideCursor()
QtWidgets.QMessageBox.critical(self, f"{e.source} {e.code_name} Error", f"{e}")

View File

@ -17,16 +17,206 @@
from __future__ import annotations
import logging
import pathlib
import re
from PyQt5 import QtCore, QtWidgets, uic
from comicapi import utils
from comicapi.comicarchive import ComicArchive, tags
from comicapi.genericmetadata import GenericMetadata
from comictaggerlib.coverimagewidget import CoverImageWidget
from comictaggerlib.ctsettings.settngs_namespace import SettngsNS
from comictaggerlib.issueidentifier import IssueIdentifierCancelled
from comictaggerlib.md import read_selected_tags
from comictaggerlib.resulttypes import Action, OnlineMatchResults, Result, Status
from comictaggerlib.tag import identify_comic
from comictaggerlib.ui import ui_path
from comictalker.comictalker import ComicTalker
from comictalker.comictalker import ComicTalker, RLCallBack
logger = logging.getLogger(__name__)
class AutoTagThread(QtCore.QThread):
    """Background worker that auto-tags a list of comic archives.

    For each archive it reads existing tags (falling back to filename
    parsing), identifies the comic online via ``identify_comic``, and writes
    the resulting metadata back. Progress, log text, rate-limit waits and the
    final results are all reported to the GUI through Qt signals.
    """

    # Emitted once at the end of run(): (OnlineMatchResults, archives to remove)
    autoTagComplete = QtCore.pyqtSignal(OnlineMatchResults, list)
    # Emitted for every line of log text produced while tagging
    autoTagLogMsg = QtCore.pyqtSignal(str)
    autoTagProgress = QtCore.pyqtSignal(object, object, object, bytes, bytes)  # see progress_callback
    # Emitted when the talker reports a rate limit: (full_time, sleep_time)
    ratelimit = QtCore.pyqtSignal(float, float)

    def __init__(
        self, series_override: str, ca_list: list[ComicArchive], config: SettngsNS, talker: ComicTalker
    ) -> None:
        QtCore.QThread.__init__(self)
        self.series_override = series_override  # search series name forced by the dialog ("" = use tags/filename)
        self.ca_list = ca_list  # archives to process, in order
        self.config = config
        self.talker = talker
        self.canceled = False  # set by cancel(); checked between steps to stop early

    def log_output(self, text: str) -> None:
        """Forward one line of log text to the GUI thread."""
        self.autoTagLogMsg.emit(str(text))

    def progress_callback(
        self, cur: int | None, total: int | None, path: pathlib.Path | None, archive_image: bytes, remote_image: bytes
    ) -> None:
        """Forward progress to the GUI: archive index/total, current path and cover image payloads."""
        self.autoTagProgress.emit(cur, total, path, archive_image, remote_image)

    def run(self) -> None:
        """Thread entry point: tag every archive, then emit autoTagComplete."""
        match_results = OnlineMatchResults()
        archives_to_remove = []
        for prog_idx, ca in enumerate(self.ca_list):
            self.log_output("==========================================================================\n")
            # NOTE(review): prog_idx is 0-based, so this prints "0 of N" for the first archive — confirm intended
            self.log_output(f"Auto-Tagging {prog_idx} of {len(self.ca_list)}\n")
            self.log_output(f"{ca.path}\n")
            try:
                # Use the cover page recorded in the first read-tag style; fall back to page 0
                cover_idx = ca.read_tags(self.config.internal__read_tags[0]).get_cover_page_index_list()[0]
            except Exception as e:
                cover_idx = 0
                logger.error("Failed to load metadata for %s: %s", ca.path, e)
            image_data = ca.get_page(cover_idx)
            self.progress_callback(prog_idx, len(self.ca_list), ca.path, image_data, b"")
            if self.canceled:
                break
            if ca.is_writable():
                success, match_results = self.identify_and_tag_single_archive(ca, match_results)
                if self.canceled:
                    break
                # NOTE(review): `success` is a Result dataclass instance, which is always truthy —
                # should this check success.status instead? confirm against Status handling
                if success and self.config.internal__remove_archive_after_successful_match:
                    archives_to_remove.append(ca)
        self.autoTagComplete.emit(match_results, archives_to_remove)

    def on_rate_limit(self, full_time: float, sleep_time: float) -> None:
        """Talker rate-limit hook: abort if canceled, otherwise log and notify the GUI.

        Raises IssueIdentifierCancelled when the user has requested cancellation.
        """
        if self.canceled:
            raise IssueIdentifierCancelled
        self.log_output(
            f"Rate limit reached: {full_time:.0f}s until next request. Waiting {sleep_time:.0f}s for ratelimit"
        )
        self.ratelimit.emit(full_time, sleep_time)

    def identify_and_tag_single_archive(
        self, ca: ComicArchive, match_results: OnlineMatchResults
    ) -> tuple[Result, OnlineMatchResults]:
        """Identify one archive online and write tags on a good match.

        Returns the per-archive Result plus the (mutated) accumulated
        OnlineMatchResults.
        """
        # Forward talker rate-limit waits to on_rate_limit, at most every 60s
        ratelimit_callback = RLCallBack(
            self.on_rate_limit,
            60,
        )

        # read in tags, and parse file name if not there
        md, tags_used, error = read_selected_tags(self.config.internal__read_tags, ca)
        if error is not None:
            # NOTE(review): QMessageBox shown from a worker thread — Qt expects GUI calls
            # on the main thread; confirm this is safe or route through a signal
            QtWidgets.QMessageBox.warning(
                None,
                "Aborting...",
                f"One or more of the read tags failed to load for {ca.path}. Aborting to prevent any possible further damage. Check log for details.",
            )
            logger.error("Failed to load tags from %s: %s", ca.path, error)
            return (
                Result(
                    Action.save,
                    original_path=ca.path,
                    status=Status.read_failure,
                ),
                match_results,
            )

        if md.is_empty:
            # No usable tags: derive metadata from the file name instead
            md = ca.metadata_from_filename(
                self.config.Filename_Parsing__filename_parser,
                self.config.Filename_Parsing__remove_c2c,
                self.config.Filename_Parsing__remove_fcbd,
                self.config.Filename_Parsing__remove_publisher,
                self.config.Filename_Parsing__split_words,
                self.config.Filename_Parsing__allow_issue_start_with_letter,
                self.config.Filename_Parsing__protofolius_issue_number_scheme,
            )
        if self.config.Auto_Tag__ignore_leading_numbers_in_filename and md.series is not None:
            # remove all leading numbers
            md.series = re.sub(r"(^[\d.]*)(.*)", r"\2", md.series)

        # use the dialog specified search string
        if self.series_override:
            md.series = self.series_override
        if not self.config.Auto_Tag__use_year_when_identifying:
            md.year = None
        # If it's empty we need it to stay empty for identify_comic to report the correct error
        if (md.issue is None or md.issue == "") and not md.is_empty:
            if self.config.Auto_Tag__assume_issue_one:
                md.issue = "1"
            else:
                # Fall back to the volume number as the issue number
                md.issue = utils.xlate(md.volume)

        def on_progress(x: int, y: int, image: bytes) -> None:
            # We don't (currently) care about the progress of an individual comic here;
            # we just want the cover for the AutoTagProgressWindow.
            self.progress_callback(None, None, None, b"", image)

        if self.canceled:
            return (
                Result(
                    Action.save,
                    original_path=ca.path,
                    status=Status.read_failure,
                ),
                match_results,
            )
        try:
            res, match_results = identify_comic(
                ca,
                md,
                tags_used,
                match_results,
                self.config,
                self.talker,
                self.log_output,
                on_rate_limit=ratelimit_callback,
                on_progress=on_progress,
            )
        except IssueIdentifierCancelled:
            # User canceled during a rate-limit wait or identification step
            return (
                Result(
                    Action.save,
                    original_path=ca.path,
                    status=Status.fetch_data_failure,
                ),
                match_results,
            )
        if self.canceled:
            return res, match_results

        if res.status == Status.success:
            assert res.md

            def write_Tags(ca: ComicArchive, md: GenericMetadata) -> bool:
                # Write every configured tag style; stop at the first failure
                for tag_id in self.config.Runtime_Options__tags_write:
                    # write out the new data
                    if not ca.write_tags(md, tag_id):
                        self.log_output(f"{tags[tag_id].name()} save failed! Aborting any additional tag saves.\n")
                        return False
                return True

            # Save tags
            if write_Tags(ca, res.md):
                match_results.good_matches.append(res)
                res.tags_written = self.config.Runtime_Options__tags_write
                self.log_output("Save complete!\n")
            else:
                res.status = Status.write_failure
                match_results.write_failures.append(res)

            # Drop any cached state now that the archive contents changed
            ca.reset_cache()
            ca.load_cache({*self.config.Runtime_Options__tags_read})

        return res, match_results

    def cancel(self) -> None:
        """Request cancellation; checked between archives and inside rate-limit waits."""
        self.canceled = True
class AutoTagProgressWindow(QtWidgets.QDialog):
def __init__(self, parent: QtWidgets.QWidget, talker: ComicTalker) -> None:
super().__init__(parent)
@ -46,8 +236,6 @@ class AutoTagProgressWindow(QtWidgets.QDialog):
gridlayout.addWidget(self.testCoverWidget)
gridlayout.setContentsMargins(0, 0, 0, 0)
self.isdone = False
self.setWindowFlags(
QtCore.Qt.WindowType(
self.windowFlags()
@ -67,6 +255,20 @@ class AutoTagProgressWindow(QtWidgets.QDialog):
QtCore.QCoreApplication.processEvents()
QtCore.QCoreApplication.processEvents()
# @QtCore.pyqtSlot(int, int, 'Optional[pathlib.Path]', bytes, bytes)
def on_progress(
    self, x: int | None, y: int | None, title: pathlib.Path | None, archive_image: bytes, remote_image: bytes
) -> None:
    """Apply a progress update from the auto-tag worker.

    When both counts are present the progress bar is updated; a truthy
    title becomes the window title; non-empty image payloads refresh the
    archive and remote cover widgets respectively.
    """
    self.progressBar: QtWidgets.QProgressBar
    counts_known = x is not None and y is not None
    if counts_known:
        self.progressBar.setValue(x)
        self.progressBar.setMaximum(y)
    if title:
        self.setWindowTitle(str(title))
    for payload, apply_image in (
        (archive_image, self.set_archive_image),
        (remote_image, self.set_test_image),
    ):
        if payload:
            apply_image(payload)
def reject(self) -> None:
    """Close the dialog via the base implementation, then flag completion."""
    super().reject()
    self.isdone = True

View File

@ -17,14 +17,13 @@
from __future__ import annotations
import dataclasses
import functools
import json
import logging
import os
import pathlib
import re
import sys
from collections.abc import Collection
from functools import partial
from typing import Any, TextIO
from comicapi import merge, utils
@ -34,10 +33,10 @@ from comictaggerlib.cbltransformer import CBLTransformer
from comictaggerlib.ctsettings import ct_ns
from comictaggerlib.filerenamer import FileRenamer, get_rename_dir
from comictaggerlib.graphics import graphics_path
from comictaggerlib.issueidentifier import IssueIdentifier
from comictaggerlib.md import prepare_metadata
from comictaggerlib.quick_tag import QuickTag
from comictaggerlib.resulttypes import Action, IssueResult, MatchStatus, OnlineMatchResults, Result, Status
from comictaggerlib.tag import identify_comic
from comictalker.comictalker import ComicTalker, TalkerError
logger = logging.getLogger(__name__)
@ -130,7 +129,7 @@ class CLI:
def fetch_metadata(self, issue_id: str) -> GenericMetadata:
# now get the particular issue data
try:
ct_md = self.current_talker().fetch_comic_data(issue_id)
ct_md = self.current_talker().fetch_comic_data(issue_id=issue_id, on_rate_limit=None)
except TalkerError as e:
logger.exception(f"Error retrieving issue details. Save aborted.\n{e}")
return GenericMetadata()
@ -441,123 +440,6 @@ class CLI:
logger.exception("Quick Tagging failed")
return None
def normal_tag(
self, ca: ComicArchive, tags_read: list[str], md: GenericMetadata, match_results: OnlineMatchResults
) -> tuple[GenericMetadata, list[IssueResult], Result | None, OnlineMatchResults]:
# ct_md, results, matches, match_results
if md is None or md.is_empty:
logger.error("No metadata given to search online with!")
res = Result(
Action.save,
status=Status.match_failure,
original_path=ca.path,
match_status=MatchStatus.no_match,
tags_written=self.config.Runtime_Options__tags_write,
tags_read=tags_read,
)
match_results.no_matches.append(res)
return GenericMetadata(), [], res, match_results
ii = IssueIdentifier(ca, self.config, self.current_talker())
ii.set_output_function(functools.partial(self.output, already_logged=True))
if not self.config.Auto_Tag__use_year_when_identifying:
md.year = None
if self.config.Auto_Tag__ignore_leading_numbers_in_filename and md.series is not None:
md.series = re.sub(r"^([\d.]+)(.*)", r"\2", md.series)
result, matches = ii.identify(ca, md)
found_match = False
choices = False
low_confidence = False
if result == IssueIdentifier.result_no_matches:
pass
elif result == IssueIdentifier.result_found_match_but_bad_cover_score:
low_confidence = True
found_match = True
elif result == IssueIdentifier.result_found_match_but_not_first_page:
found_match = True
elif result == IssueIdentifier.result_multiple_matches_with_bad_image_scores:
low_confidence = True
choices = True
elif result == IssueIdentifier.result_one_good_match:
found_match = True
elif result == IssueIdentifier.result_multiple_good_matches:
choices = True
if choices:
if low_confidence:
logger.error("Online search: Multiple low confidence matches. Save aborted")
res = Result(
Action.save,
status=Status.match_failure,
original_path=ca.path,
online_results=matches,
match_status=MatchStatus.low_confidence_match,
tags_written=self.config.Runtime_Options__tags_write,
tags_read=tags_read,
)
match_results.low_confidence_matches.append(res)
return GenericMetadata(), matches, res, match_results
logger.error("Online search: Multiple good matches. Save aborted")
res = Result(
Action.save,
status=Status.match_failure,
original_path=ca.path,
online_results=matches,
match_status=MatchStatus.multiple_match,
tags_written=self.config.Runtime_Options__tags_write,
tags_read=tags_read,
)
match_results.multiple_matches.append(res)
return GenericMetadata(), matches, res, match_results
if low_confidence and self.config.Runtime_Options__abort_on_low_confidence:
logger.error("Online search: Low confidence match. Save aborted")
res = Result(
Action.save,
status=Status.match_failure,
original_path=ca.path,
online_results=matches,
match_status=MatchStatus.low_confidence_match,
tags_written=self.config.Runtime_Options__tags_write,
tags_read=tags_read,
)
match_results.low_confidence_matches.append(res)
return GenericMetadata(), matches, res, match_results
if not found_match:
logger.error("Online search: No match found. Save aborted")
res = Result(
Action.save,
status=Status.match_failure,
original_path=ca.path,
online_results=matches,
match_status=MatchStatus.no_match,
tags_written=self.config.Runtime_Options__tags_write,
tags_read=tags_read,
)
match_results.no_matches.append(res)
return GenericMetadata(), matches, res, match_results
# we got here, so we have a single match
# now get the particular issue data
ct_md = self.fetch_metadata(matches[0].issue_id)
if ct_md.is_empty:
res = Result(
Action.save,
status=Status.fetch_data_failure,
original_path=ca.path,
online_results=matches,
match_status=MatchStatus.good_match,
tags_written=self.config.Runtime_Options__tags_write,
tags_read=tags_read,
)
match_results.fetch_data_failures.append(res)
return GenericMetadata(), matches, res, match_results
return ct_md, matches, None, match_results
def save(self, ca: ComicArchive, match_results: OnlineMatchResults) -> tuple[Result, OnlineMatchResults]:
if self.config.Runtime_Options__skip_existing_tags:
for tag_id in self.config.Runtime_Options__tags_write:
@ -568,7 +450,6 @@ class CLI:
Action.save,
original_path=ca.path,
status=Status.existing_tags,
tags_written=self.config.Runtime_Options__tags_write,
),
match_results,
)
@ -581,22 +462,30 @@ class CLI:
if self.config.Auto_Tag__assume_issue_one:
md.issue = "1"
matches: list[IssueResult] = []
# matches: list[IssueResult] = []
# now, search online
ct_md = GenericMetadata()
res = Result(
Action.save,
status=Status.success,
original_path=ca.path,
md=prepare_metadata(md, ct_md, self.config),
tags_read=tags_read,
)
if self.config.Auto_Tag__online:
if self.config.Auto_Tag__issue_id is not None:
# we were given the actual issue ID to search with
try:
ct_md = self.current_talker().fetch_comic_data(self.config.Auto_Tag__issue_id)
ct_md = self.current_talker().fetch_comic_data(
issue_id=self.config.Auto_Tag__issue_id, on_rate_limit=None
)
except TalkerError as e:
logger.exception(f"Error retrieving issue details. Save aborted.\n{e}")
res = Result(
Action.save,
original_path=ca.path,
status=Status.fetch_data_failure,
tags_written=self.config.Runtime_Options__tags_write,
tags_read=tags_read,
)
match_results.fetch_data_failures.append(res)
@ -609,53 +498,69 @@ class CLI:
status=Status.match_failure,
original_path=ca.path,
match_status=MatchStatus.no_match,
tags_written=self.config.Runtime_Options__tags_write,
tags_read=tags_read,
)
match_results.no_matches.append(res)
return res, match_results
res = Result(
Action.save,
status=Status.success,
original_path=ca.path,
match_status=MatchStatus.good_match,
md=prepare_metadata(md, ct_md, self.config),
tags_read=tags_read,
)
else:
qt_md = self.try_quick_tag(ca, md)
if qt_md is None or qt_md.is_empty:
if qt_md is not None and not qt_md.is_empty:
ct_md = qt_md
res = Result(
Action.save,
status=Status.success,
original_path=ca.path,
online_results=[
IssueResult(
series=ct_md.series or "",
distance=-1,
issue_number=ct_md.issue or "",
issue_count=ct_md.issue_count,
url_image_hash=-1,
issue_title=ct_md.title or "",
issue_id=ct_md.issue_id or "",
series_id=ct_md.issue_id or "",
month=ct_md.month,
year=ct_md.year,
publisher=ct_md.publisher,
image_url=ct_md._cover_image or "",
alt_image_urls=[],
description=ct_md.description or "",
)
],
match_status=MatchStatus.good_match,
md=prepare_metadata(md, ct_md, self.config),
tags_read=tags_read,
)
else:
if qt_md is not None:
self.output("Failed to find match via quick tag")
ct_md, matches, res, match_results = self.normal_tag(ca, tags_read, md, match_results) # type: ignore[assignment]
if res is not None:
return res, match_results
else:
self.output("Successfully matched via quick tag")
ct_md = qt_md
matches = [
IssueResult(
series=ct_md.series or "",
distance=-1,
issue_number=ct_md.issue or "",
issue_count=ct_md.issue_count,
url_image_hash=-1,
issue_title=ct_md.title or "",
issue_id=ct_md.issue_id or "",
series_id=ct_md.issue_id or "",
month=ct_md.month,
year=ct_md.year,
publisher=None,
image_url=ct_md._cover_image or "",
alt_image_urls=[],
description=ct_md.description or "",
)
]
res = Result(
Action.save,
status=Status.success,
original_path=ca.path,
online_results=matches,
match_status=MatchStatus.good_match,
md=prepare_metadata(md, ct_md, self.config),
tags_written=self.config.Runtime_Options__tags_write,
tags_read=tags_read,
)
res, match_results = identify_comic(
ca,
md,
tags_read,
match_results,
self.config,
self.current_talker(),
partial(self.output, already_logged=True),
on_rate_limit=None,
)
if res.status != Status.success:
return res, match_results
self.output("Successfully matched via quick tag")
assert res.md
res.tags_written = self.config.Runtime_Options__tags_write
# ok, done building our metadata. time to save
if self.write_tags(ca, res.md):
match_results.good_matches.append(res)

View File

@ -128,7 +128,8 @@ def open_tagger_window(
ctypes.windll.user32.SetWindowPos(console_wnd, None, 0, 0, 0, 0, swp_hidewindow) # type: ignore[attr-defined]
if platform.system() != "Linux":
img = QtGui.QPixmap(str(graphics_path / "tags.png"))
img = QtGui.QPixmap()
img.loadFromData((graphics_path / "tags.png").read_bytes())
splash = QtWidgets.QSplashScreen(img)
splash.show()

View File

@ -16,8 +16,12 @@
# limitations under the License.
from __future__ import annotations
import dataclasses
import functools
import io
import logging
import pathlib
from enum import Enum, auto
from operator import attrgetter
from typing import Any, Callable
@ -27,11 +31,10 @@ from comicapi import utils
from comicapi.comicarchive import ComicArchive
from comicapi.genericmetadata import ComicSeries, GenericMetadata
from comicapi.issuestring import IssueString
from comictaggerlib.ctsettings import ct_ns
from comictaggerlib.imagefetcher import ImageFetcher, ImageFetcherException
from comictaggerlib.imagehasher import ImageHasher
from comictaggerlib.resulttypes import IssueResult
from comictalker.comictalker import ComicTalker, TalkerError
from comictalker.comictalker import ComicTalker, RLCallBack, TalkerError
logger = logging.getLogger(__name__)
@ -69,25 +72,36 @@ class IssueIdentifierNetworkError(Exception): ...
class IssueIdentifierCancelled(Exception): ...
class IssueIdentifier:
result_no_matches = 0
result_found_match_but_bad_cover_score = 1
result_found_match_but_not_first_page = 2
result_multiple_matches_with_bad_image_scores = 3
result_one_good_match = 4
result_multiple_good_matches = 5
class Result(Enum):
single_good_match = auto()
no_matches = auto()
single_bad_cover_score = auto()
multiple_bad_cover_scores = auto()
multiple_good_matches = auto()
@dataclasses.dataclass
class IssueIdentifierOptions:
    """Configuration bundle consumed by IssueIdentifier.

    Replaces direct access to the full application settings namespace with
    only the values the identifier actually reads.
    """

    series_match_search_thresh: int  # threshold passed to talker.search_for_series
    series_match_identify_thresh: int  # used to eliminate series names that are too long for the search string
    use_publisher_filter: bool  # when True, publisher_filter is applied to drop unlikely publishers
    publisher_filter: list[str]  # publisher names; compared stripped and casefolded
    quiet: bool  # when True, log_msg writes to the logger only, skipping the output function
    cache_dir: pathlib.Path  # cache directory handed to ImageFetcher for cover downloads
    border_crop_percent: int  # percent of border to crop when hashing covers (_crop_border)
    talker: ComicTalker  # metadata source used for series searches and issue fetches
class IssueIdentifier:
def __init__(
self,
comic_archive: ComicArchive,
config: ct_ns,
talker: ComicTalker,
metadata: GenericMetadata = GenericMetadata(),
config: IssueIdentifierOptions,
on_rate_limit: RLCallBack | None,
output: Callable[[str], Any] = print,
on_progress: Callable[[int, int, bytes], Any] | None = None,
) -> None:
self.config = config
self.talker = talker
self.comic_archive: ComicArchive = comic_archive
self.md = metadata
self.talker = config.talker
self.image_hasher = 1
self.only_use_additional_meta_data = False
@ -108,30 +122,24 @@ class IssueIdentifier:
# used to eliminate series names that are too long based on our search
# string
self.series_match_thresh = config.Issue_Identifier__series_match_identify_thresh
self.series_match_thresh = config.series_match_identify_thresh
# used to eliminate unlikely publishers
self.use_publisher_filter = config.Auto_Tag__use_publisher_filter
self.publisher_filter = [s.strip().casefold() for s in config.Auto_Tag__publisher_filter]
self.use_publisher_filter = config.use_publisher_filter
self.publisher_filter = [s.strip().casefold() for s in config.publisher_filter]
self.additional_metadata = GenericMetadata()
self.output_function: Callable[[str], None] = print
self.progress_callback: Callable[[int, int], None] | None = None
self.cover_url_callback: Callable[[bytes], None] | None = None
self.search_result = self.result_no_matches
self.output_function = output
self.progress_callback: Callable[[int, int, bytes], Any] = lambda *x: ...
if on_progress:
self.progress_callback = on_progress
self.on_rate_limit = on_rate_limit
self.search_result = Result.no_matches
self.cancel = False
self.current_progress = (0, 0)
self.match_list: list[IssueResult] = []
def set_output_function(self, func: Callable[[str], None]) -> None:
self.output_function = func
def set_progress_callback(self, cb_func: Callable[[int, int], None]) -> None:
self.progress_callback = cb_func
def set_cover_url_callback(self, cb_func: Callable[[bytes], None]) -> None:
self.cover_url_callback = cb_func
def calculate_hash(self, image_data: bytes) -> int:
if self.image_hasher == 3:
return ImageHasher(data=image_data).p_hash()
@ -161,23 +169,23 @@ class IssueIdentifier:
# Always send to logger so that we have a record for troubleshooting
logger.info(log_msg, **kwargs)
# If we are verbose or quiet we don't need to call the output function
if self.config.Runtime_Options__verbose > 0 or self.config.Runtime_Options__quiet:
# If we are quiet we don't need to call the output function
if self.config.quiet:
return
# default output is stdout
self.output_function(*args, **kwargs)
def identify(self, ca: ComicArchive, md: GenericMetadata) -> tuple[int, list[IssueResult]]:
def identify(self, ca: ComicArchive, md: GenericMetadata) -> tuple[Result, list[IssueResult]]:
if not self._check_requirements(ca):
return self.result_no_matches, []
return Result.no_matches, []
terms, images, extra_images = self._get_search_terms(ca, md)
# we need, at minimum, a series and issue number
if not (terms["series"] and terms["issue_number"]):
self.log_msg("Not enough info for a search!")
return self.result_no_matches, []
return Result.no_matches, []
self._print_terms(terms, images)
@ -207,28 +215,28 @@ class IssueIdentifier:
self.log_msg("--------------------------------------------------------------------------")
self._print_match(final_cover_matching[0])
self.log_msg("--------------------------------------------------------------------------")
search_result = self.result_found_match_but_bad_cover_score
search_result = Result.single_bad_cover_score
else:
self.log_msg("--------------------------------------------------------------------------")
self.log_msg("Multiple bad cover matches! Need to use other info...")
self.log_msg("--------------------------------------------------------------------------")
search_result = self.result_multiple_matches_with_bad_image_scores
search_result = Result.multiple_bad_cover_scores
else:
if len(final_cover_matching) == 1:
self.log_msg("--------------------------------------------------------------------------")
self._print_match(final_cover_matching[0])
self.log_msg("--------------------------------------------------------------------------")
search_result = self.result_one_good_match
search_result = Result.single_good_match
elif len(self.match_list) == 0:
self.log_msg("--------------------------------------------------------------------------")
self.log_msg("No matches found :(")
self.log_msg("--------------------------------------------------------------------------")
search_result = self.result_no_matches
search_result = Result.no_matches
else:
# we've got multiple good matches:
self.log_msg("More than one likely candidate.")
search_result = self.result_multiple_good_matches
search_result = Result.multiple_good_matches
self.log_msg("--------------------------------------------------------------------------")
for match_item in final_cover_matching:
self._print_match(match_item)
@ -289,14 +297,16 @@ class IssueIdentifier:
remote_hashes: list[tuple[str, int]] = []
for url in urls:
try:
alt_url_image_data = ImageFetcher(self.config.Runtime_Options__config.user_cache_dir).fetch(
url, blocking=True
)
alt_url_image_data = ImageFetcher(self.config.cache_dir).fetch(url, blocking=True)
except ImageFetcherException as e:
self.log_msg(f"Network issue while fetching alt. cover image from {self.talker.name}. Aborting...")
raise IssueIdentifierNetworkError from e
self._user_canceled(self.cover_url_callback, alt_url_image_data)
self._user_canceled(
functools.partial(
self.progress_callback, self.current_progress[0], self.current_progress[1], alt_url_image_data
)
)
remote_hashes.append((url, self.calculate_hash(alt_url_image_data)))
@ -318,7 +328,7 @@ class IssueIdentifier:
if not primary_img_url:
return Score(score=100, url="", remote_hash=0)
self._user_canceled()
# self._user_canceled()
urls = [primary_img_url]
if use_alt_urls:
@ -381,7 +391,7 @@ class IssueIdentifier:
images.append(("double page", im))
# Check and remove black borders. Helps in identifying comics with an excessive black border like https://comicvine.gamespot.com/marvel-graphic-novel-1-the-death-of-captain-marvel/4000-21782/
cropped = self._crop_border(cover_image, self.config.Issue_Identifier__border_crop_percent)
cropped = self._crop_border(cover_image, self.config.border_crop_percent)
if cropped is not None:
images.append(("black border cropped", cropped))
@ -421,11 +431,11 @@ class IssueIdentifier:
) -> tuple[SearchKeys, list[tuple[str, Image.Image]], list[tuple[str, Image.Image]]]:
return self._get_search_keys(md), self._get_images(ca, md), self._get_extra_images(ca, md)
def _user_canceled(self, callback: Callable[..., Any] | None = None, *args: Any) -> Any:
def _user_canceled(self, callback: Callable[[], Any] | None = None) -> Any:
if self.cancel:
raise IssueIdentifierCancelled
if callback is not None:
return callback(*args)
return callback()
def _print_terms(self, keys: SearchKeys, images: list[tuple[str, Image.Image]]) -> None:
assert keys["series"]
@ -508,7 +518,8 @@ class IssueIdentifier:
if use_alternates:
alternate = " Alternate"
for series, issue in issues:
self._user_canceled(self.progress_callback, counter, len(issues))
self.current_progress = counter, len(issues)
self._user_canceled(functools.partial(self.progress_callback, counter, len(issues), b""))
counter += 1
self.log_msg(
@ -566,8 +577,9 @@ class IssueIdentifier:
try:
search_results = self.talker.search_for_series(
terms["series"],
callback=lambda x, y: self._user_canceled(self.progress_callback, x, y),
series_match_thresh=self.config.Issue_Identifier__series_match_search_thresh,
callback=lambda x, y: self._user_canceled(functools.partial(self.progress_callback, x, y, b"")),
series_match_thresh=self.config.series_match_search_thresh,
on_rate_limit=self.on_rate_limit,
)
except TalkerError as e:
self.log_msg(f"Error searching for series.\n{e}")
@ -584,13 +596,16 @@ class IssueIdentifier:
self.log_msg(f"Searching in {len(filtered_series)} series")
self._user_canceled(self.progress_callback, 0, len(filtered_series))
self._user_canceled(functools.partial(self.progress_callback, 0, len(filtered_series), b""))
series_by_id = {series.id: series for series in filtered_series}
try:
talker_result = self.talker.fetch_issues_by_series_issue_num_and_year(
list(series_by_id.keys()), terms["issue_number"], terms["year"]
list(series_by_id.keys()),
terms["issue_number"],
terms["year"],
on_rate_limit=self.on_rate_limit,
)
except TalkerError as e:
self.log_msg(f"Issue with while searching for series details. Aborting...\n{e}")
@ -601,7 +616,7 @@ class IssueIdentifier:
if not talker_result:
return []
self._user_canceled(self.progress_callback, 0, 0)
self._user_canceled(functools.partial(self.progress_callback, 0, 0, b""))
issues: list[tuple[ComicSeries, GenericMetadata]] = []

View File

@ -18,15 +18,15 @@ from __future__ import annotations
import logging
from PyQt5 import QtCore, QtGui, QtWidgets, uic
from PyQt5 import QtCore, QtGui, QtWidgets
from comicapi.genericmetadata import GenericMetadata
from comicapi.issuestring import IssueString
from comictaggerlib.coverimagewidget import CoverImageWidget
from comictaggerlib.ctsettings import ct_ns
from comictaggerlib.ui import qtutils, ui_path
from comictaggerlib.ui.qtutils import new_web_view
from comictalker.comictalker import ComicTalker, TalkerError
from comictaggerlib.seriesselectionwindow import SelectionWindow
from comictaggerlib.ui import ui_path
from comictalker.comictalker import ComicTalker, RLCallBack, TalkerError
logger = logging.getLogger(__name__)
@ -39,117 +39,84 @@ class IssueNumberTableWidgetItem(QtWidgets.QTableWidgetItem):
return (IssueString(self_str).as_float() or 0) < (IssueString(other_str).as_float() or 0)
class IssueSelectionWindow(QtWidgets.QDialog):
class QueryThread(QtCore.QThread):
    """Worker thread that fetches the issue list of a series from the talker.

    The fetched issues are delivered through the *finish* signal; rate-limit
    waits are forwarded through *on_ratelimit*.
    """

    def __init__(
        self,
        talker: ComicTalker,
        series_id: str,
        # NOTE(review): these are annotated as pyqtSignal but callers pass bound
        # signals (pyqtBoundSignal) — confirm and tighten the annotations
        finish: QtCore.pyqtSignal,
        on_ratelimit: QtCore.pyqtSignal,
    ) -> None:
        super().__init__()
        self.series_id = series_id
        self.talker = talker
        self.finish = finish
        self.on_ratelimit = on_ratelimit

    def run(self) -> None:
        """Fetch all issues for series_id, keeping only those with an issue_id.

        Emits ``finish`` with the resulting list, or shows an error dialog and
        returns on TalkerError.
        """
        # NOTE(review): setOverrideCursor and QMessageBox are GUI calls made from a
        # worker thread — Qt expects GUI work on the main thread; confirm safe
        QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.CursorShape.WaitCursor))
        try:
            issue_list = [
                x
                for x in self.talker.fetch_issues_in_series(
                    # Forward rate-limit waits to the GUI, at most every 10s
                    self.series_id, on_rate_limit=RLCallBack(lambda x, y: self.on_ratelimit.emit(x, y), 10)
                )
                if x.issue_id is not None
            ]
        except TalkerError as e:
            QtWidgets.QApplication.restoreOverrideCursor()
            QtWidgets.QMessageBox.critical(None, f"{e.source} {e.code_name} Error", f"{e}")
            return
        QtWidgets.QApplication.restoreOverrideCursor()
        self.finish.emit(issue_list)
class IssueSelectionWindow(SelectionWindow):
ui_file = ui_path / "issueselectionwindow.ui"
CoverImageMode = CoverImageWidget.AltCoverMode
finish = QtCore.pyqtSignal(list)
def __init__(
self,
parent: QtWidgets.QWidget,
config: ct_ns,
talker: ComicTalker,
series_id: str,
issue_number: str,
series_id: str = "",
issue_number: str = "",
) -> None:
super().__init__(parent)
with (ui_path / "issueselectionwindow.ui").open(encoding="utf-8") as uifile:
uic.loadUi(uifile, self)
self.coverWidget = CoverImageWidget(
self.coverImageContainer,
CoverImageWidget.AltCoverMode,
config.Runtime_Options__config.user_cache_dir,
)
gridlayout = QtWidgets.QGridLayout(self.coverImageContainer)
gridlayout.addWidget(self.coverWidget)
gridlayout.setContentsMargins(0, 0, 0, 0)
self.teDescription: QtWidgets.QWidget
webengine = new_web_view(self)
if webengine:
self.teDescription = qtutils.replaceWidget(self.splitter, self.teDescription, webengine)
logger.info("successfully loaded QWebEngineView")
else:
logger.info("failed to open QWebEngineView")
self.setWindowFlags(
QtCore.Qt.WindowType(
self.windowFlags()
| QtCore.Qt.WindowType.WindowSystemMenuHint
| QtCore.Qt.WindowType.WindowMaximizeButtonHint
)
)
super().__init__(parent, config, talker)
self.series_id = series_id
self.issue_id: str = ""
self.config = config
self.talker = talker
self.issue_list: dict[str, GenericMetadata] = {}
# Display talker logo and set url
self.lblIssuesSourceName.setText(talker.attribution)
self.imageIssuesSourceWidget = CoverImageWidget(
self.imageIssuesSourceLogo,
CoverImageWidget.URLMode,
config.Runtime_Options__config.user_cache_dir,
False,
)
self.imageIssuesSourceWidget.showControls = False
gridlayoutIssuesSourceLogo = QtWidgets.QGridLayout(self.imageIssuesSourceLogo)
gridlayoutIssuesSourceLogo.addWidget(self.imageIssuesSourceWidget)
gridlayoutIssuesSourceLogo.setContentsMargins(0, 2, 0, 0)
self.imageIssuesSourceWidget.set_url(talker.logo_url)
self.issue_number = issue_number
if issue_number is None or issue_number == "":
self.issue_number = "1"
else:
self.issue_number = issue_number
self.initial_id: str = ""
self.leFilter.textChanged.connect(self.filter)
self.finish.connect(self.query_finished)
def showEvent(self, event: QtGui.QShowEvent) -> None:
self.perform_query()
self.twList.resizeColumnsToContents()
self.twList.currentItemChanged.connect(self.current_item_changed)
self.twList.cellDoubleClicked.connect(self.cell_double_clicked)
# now that the list has been sorted, find the initial record, and
# select it
if not self.initial_id:
self.twList.selectRow(0)
else:
for r in range(0, self.twList.rowCount()):
issue_id = self.twList.item(r, 0).data(QtCore.Qt.ItemDataRole.UserRole)
if issue_id == self.initial_id:
self.twList.selectRow(r)
break
self.leFilter.textChanged.connect(self.filter)
def filter(self, text: str) -> None:
rows = set(range(self.twList.rowCount()))
for r in rows:
self.twList.showRow(r)
if text.strip():
shown_rows = {x.row() for x in self.twList.findItems(text, QtCore.Qt.MatchFlag.MatchContains)}
for r in rows - shown_rows:
self.twList.hideRow(r)
def perform_query(self) -> None:
QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.CursorShape.WaitCursor))
try:
self.issue_list = {
x.issue_id: x for x in self.talker.fetch_issues_in_series(self.series_id) if x.issue_id is not None
}
except TalkerError as e:
QtWidgets.QApplication.restoreOverrideCursor()
QtWidgets.QMessageBox.critical(self, f"{e.source} {e.code_name} Error", f"{e}")
return
def perform_query(self) -> None: # type: ignore[override]
self.querythread = QueryThread(
self.talker,
self.series_id,
self.finish,
self.ratelimit,
)
self.querythread.start()
def query_finished(self, issues: list[GenericMetadata]) -> None:
self.twList.setRowCount(0)
self.twList.setSortingEnabled(False)
for row, issue in enumerate(self.issue_list.values()):
self.issue_list = {i.issue_id: i for i in issues if i.issue_id is not None}
self.twList.clear()
for row, issue in enumerate(issues):
self.twList.insertRow(row)
self.twList.setItem(row, 0, IssueNumberTableWidgetItem())
self.twList.setItem(row, 1, QtWidgets.QTableWidgetItem())
@ -162,20 +129,21 @@ class IssueSelectionWindow(QtWidgets.QDialog):
self.twList.setSortingEnabled(True)
self.twList.sortItems(0, QtCore.Qt.SortOrder.AscendingOrder)
QtWidgets.QApplication.restoreOverrideCursor()
self.twList: QtWidgets.QTableWidget
if self.initial_id:
for r in range(0, self.twList.rowCount()):
item = self.twList.item(r, 0)
issue_id = item.data(QtCore.Qt.ItemDataRole.UserRole)
if issue_id == self.initial_id:
self.twList.selectRow(r)
self.twList.scrollToItem(item, QtWidgets.QAbstractItemView.ScrollHint.EnsureVisible)
break
def cell_double_clicked(self, r: int, c: int) -> None:
self.accept()
def set_description(self, widget: QtWidgets.QWidget, text: str) -> None:
if isinstance(widget, QtWidgets.QTextEdit):
widget.setText(text.replace("</figure>", "</div>").replace("<figure", "<div"))
else:
html = text
widget.setHtml(html, QtCore.QUrl(self.talker.website))
def update_row(self, row: int, issue: GenericMetadata) -> None:
def update_row(self, row: int, issue: GenericMetadata) -> None: # type: ignore[override]
self.twList.setStyleSheet(self.twList.styleSheet())
item_text = issue.issue or ""
item = self.twList.item(row, 0)
item.setText(item_text)
@ -201,31 +169,19 @@ class IssueSelectionWindow(QtWidgets.QDialog):
qtw_item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
qtw_item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
def current_item_changed(self, curr: QtCore.QModelIndex | None, prev: QtCore.QModelIndex | None) -> None:
if curr is None:
return
if prev is not None and prev.row() == curr.row():
return
row = curr.row()
def _fetch(self, row: int) -> GenericMetadata: # type: ignore[override]
self.issue_id = self.twList.item(row, 0).data(QtCore.Qt.ItemDataRole.UserRole)
# list selection was changed, update the issue cover
issue = self.issue_list[self.issue_id]
if not (issue.issue and issue.year and issue.month and issue._cover_image and issue.title):
QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.CursorShape.WaitCursor))
try:
issue = self.talker.fetch_comic_data(issue_id=self.issue_id)
issue = self.talker.fetch_comic_data(
issue_id=self.issue_id, on_rate_limit=RLCallBack(self.on_ratelimit, 10)
)
except TalkerError:
pass
QtWidgets.QApplication.restoreOverrideCursor()
self.issue_number = issue.issue or ""
self.coverWidget.set_issue_details(self.issue_id, [issue._cover_image or "", *issue._alternate_images])
if issue.description is None:
self.set_description(self.teDescription, "")
else:
self.set_description(self.teDescription, issue.description)
# Update current record information
self.update_row(row, issue)
self.cover_widget.set_issue_details(self.issue_id, [issue._cover_image or "", *issue._alternate_images])
self.set_description(self.teDescription, issue.description or "")
return issue

View File

@ -28,7 +28,6 @@ def setup_logging(verbose: int, log_dir: pathlib.Path) -> None:
logging.getLogger("comicapi").setLevel(logging.DEBUG)
logging.getLogger("comictaggerlib").setLevel(logging.DEBUG)
logging.getLogger("comictalker").setLevel(logging.DEBUG)
logging.getLogger("pyrate_limiter").setLevel(logging.DEBUG)
log_file = log_dir / "ComicTagger.log"
log_dir.mkdir(parents=True, exist_ok=True)

View File

@ -2,7 +2,8 @@ from __future__ import annotations
from datetime import datetime
from comicapi import utils
from comicapi import merge, utils
from comicapi.comicarchive import ComicArchive
from comicapi.genericmetadata import GenericMetadata
from comictaggerlib import ctversion
from comictaggerlib.cbltransformer import CBLTransformer
@ -37,3 +38,25 @@ def prepare_metadata(md: GenericMetadata, new_md: GenericMetadata, config: Settn
notes=utils.combine_notes(final_md.notes, notes, "Tagged with ComicTagger"),
description=cleanup_html(final_md.description, config.Metadata_Options__remove_html_tables) or None,
)
def read_selected_tags(
tag_ids: list[str], ca: ComicArchive, mode: merge.Mode = merge.Mode.OVERLAY, merge_lists: bool = False
) -> tuple[GenericMetadata, list[str], Exception | None]:
md = GenericMetadata()
error = None
tags_used = []
try:
for tag_id in tag_ids:
metadata = ca.read_tags(tag_id)
if not metadata.is_empty:
md.overlay(
metadata,
mode=mode,
merge_lists=merge_lists,
)
tags_used.append(tag_id)
except Exception as e:
error = e
return md, tags_used, error

View File

@ -158,7 +158,7 @@ class QuickTag:
metadata_results = self.get_results(filtered_results)
chosen_result = self.display_results(metadata_results, tags, interactive)
return self.talker.fetch_comic_data(issue_id=chosen_result.issue_id)
return self.talker.fetch_comic_data(issue_id=chosen_result.issue_id, on_rate_limit=None)
def SearchHashes(
self, simple: bool, max_hamming_distance: int, ahash: str, dhash: str, phash: str, exact_only: bool
@ -196,10 +196,10 @@ class QuickTag:
self.output(f"Retrieving basic {self.talker.name} data")
# Try to do a bulk feth of basic issue data
if hasattr(self.talker, "fetch_comics"):
md_results = self.talker.fetch_comics(issue_ids=list(all_ids))
md_results = self.talker.fetch_comics(issue_ids=list(all_ids), on_rate_limit=None)
else:
for md_id in all_ids:
md_results.append(self.talker.fetch_comic_data(issue_id=md_id))
md_results.append(self.talker.fetch_comic_data(issue_id=md_id, on_rate_limit=None))
return md_results
def get_simple_results(self, results: list[SimpleResult]) -> list[tuple[int, GenericMetadata]]:

View File

@ -82,7 +82,7 @@ class RenameWindow(QtWidgets.QDialog):
new_ext = ca.extension()
if md is None or md.is_empty:
md, error = self.parent().read_selected_tags(self.read_tag_ids, ca)
md, _, error = self.parent().read_selected_tags(self.read_tag_ids, ca)
if error is not None:
logger.error("Failed to load tags from %s: %s", ca.path, error)
QtWidgets.QMessageBox.warning(

View File

@ -18,24 +18,26 @@ from __future__ import annotations
import itertools
import logging
from abc import ABCMeta, abstractmethod
from collections import deque
import natsort
from PyQt5 import QtCore, QtGui, QtWidgets, uic
from PyQt5.QtCore import QUrl, pyqtSignal
from PyQt5.QtCore import Qt, QUrl, pyqtSignal
from comicapi import utils
from comicapi.comicarchive import ComicArchive
from comicapi.genericmetadata import ComicSeries, GenericMetadata
from comictaggerlib.coverimagewidget import CoverImageWidget
from comictaggerlib.ctsettings import ct_ns
from comictaggerlib.issueidentifier import IssueIdentifier
from comictaggerlib.issueselectionwindow import IssueSelectionWindow
from comictaggerlib.ctsettings.settngs_namespace import SettngsNS
from comictaggerlib.issueidentifier import IssueIdentifier, IssueIdentifierOptions
from comictaggerlib.issueidentifier import Result as IIResult
from comictaggerlib.matchselectionwindow import MatchSelectionWindow
from comictaggerlib.progresswindow import IDProgressWindow
from comictaggerlib.resulttypes import IssueResult
from comictaggerlib.ui import qtutils, ui_path
from comictalker.comictalker import ComicTalker, TalkerError
from comictalker.comictalker import ComicTalker, RLCallBack, TalkerError
logger = logging.getLogger(__name__)
@ -43,6 +45,7 @@ logger = logging.getLogger(__name__)
class SearchThread(QtCore.QThread):
searchComplete = pyqtSignal()
progressUpdate = pyqtSignal(int, int)
ratelimit = pyqtSignal(float, float)
def __init__(
self, talker: ComicTalker, series_name: str, refresh: bool, literal: bool = False, series_match_thresh: int = 90
@ -61,7 +64,12 @@ class SearchThread(QtCore.QThread):
try:
self.ct_error = False
self.ct_search_results = self.talker.search_for_series(
self.series_name, self.prog_callback, self.refresh, self.literal, self.series_match_thresh
self.series_name,
callback=self.prog_callback,
refresh_cache=self.refresh,
literal=self.literal,
series_match_thresh=self.series_match_thresh,
on_rate_limit=RLCallBack(self.on_ratelimit, 10),
)
except TalkerError as e:
self.ct_search_results = []
@ -74,60 +82,94 @@ class SearchThread(QtCore.QThread):
def prog_callback(self, current: int, total: int) -> None:
self.progressUpdate.emit(current, total)
def on_ratelimit(self, full_time: float, sleep_time: float) -> None:
self.ratelimit.emit(full_time, sleep_time)
class IdentifyThread(QtCore.QThread):
identifyComplete = pyqtSignal((int, list))
ratelimit = pyqtSignal(float, float)
identifyComplete = pyqtSignal(IIResult, list)
identifyLogMsg = pyqtSignal(str)
identifyProgress = pyqtSignal(int, int)
def __init__(self, identifier: IssueIdentifier, ca: ComicArchive, md: GenericMetadata) -> None:
def __init__(self, ca: ComicArchive, config: SettngsNS, talker: ComicTalker, md: GenericMetadata) -> None:
QtCore.QThread.__init__(self)
self.identifier = identifier
self.identifier.set_output_function(self.log_output)
self.identifier.set_progress_callback(self.progress_callback)
iio = IssueIdentifierOptions(
series_match_search_thresh=config.Issue_Identifier__series_match_search_thresh,
series_match_identify_thresh=config.Issue_Identifier__series_match_identify_thresh,
use_publisher_filter=config.Auto_Tag__use_publisher_filter,
publisher_filter=config.Auto_Tag__publisher_filter,
quiet=config.Runtime_Options__quiet,
cache_dir=config.Runtime_Options__config.user_cache_dir,
border_crop_percent=config.Issue_Identifier__border_crop_percent,
talker=talker,
)
self.identifier = IssueIdentifier(
iio,
on_rate_limit=RLCallBack(self.on_ratelimit, 10),
output=self.log_output,
on_progress=self.progress_callback,
)
self.ca = ca
self.md = md
def log_output(self, text: str) -> None:
self.identifyLogMsg.emit(str(text))
def progress_callback(self, cur: int, total: int) -> None:
def progress_callback(self, cur: int, total: int, image: bytes) -> None:
self.identifyProgress.emit(cur, total)
def run(self) -> None:
self.identifyComplete.emit(*self.identifier.identify(self.ca, self.md))
def cancel(self) -> None:
self.identifier.cancel = True
def on_ratelimit(self, full_time: float, sleep_time: float) -> None:
self.ratelimit.emit(full_time, sleep_time)
class SelectionWindow(QtWidgets.QDialog):
__metaclass__ = ABCMeta
ui_file = ui_path / "seriesselectionwindow.ui"
CoverImageMode = CoverImageWidget.URLMode
ratelimit = pyqtSignal(float, float)
class SeriesSelectionWindow(QtWidgets.QDialog):
def __init__(
self,
parent: QtWidgets.QWidget,
series_name: str,
issue_number: str,
year: int | None,
issue_count: int | None,
comic_archive: ComicArchive | None,
config: ct_ns,
talker: ComicTalker,
series_name: str = "",
issue_number: str = "",
comic_archive: ComicArchive | None = None,
year: int | None = None,
issue_count: int | None = None,
autoselect: bool = False,
literal: bool = False,
) -> None:
super().__init__(parent)
self.setWindowModality(Qt.WindowModality.WindowModal)
with (ui_path / "seriesselectionwindow.ui").open(encoding="utf-8") as uifile:
with self.ui_file.open(encoding="utf-8") as uifile:
uic.loadUi(uifile, self)
self.imageWidget = CoverImageWidget(
self.imageContainer, CoverImageWidget.URLMode, config.Runtime_Options__config.user_cache_dir
self.cover_widget = CoverImageWidget(
self.coverImageContainer,
self.CoverImageMode,
config.Runtime_Options__config.user_cache_dir,
)
gridlayout = QtWidgets.QGridLayout(self.imageContainer)
gridlayout.addWidget(self.imageWidget)
gridlayout = QtWidgets.QGridLayout(self.coverImageContainer)
gridlayout.addWidget(self.cover_widget)
gridlayout.setContentsMargins(0, 0, 0, 0)
self.teDetails: QtWidgets.QWidget
self.teDescription: QtWidgets.QWidget
webengine = qtutils.new_web_view(self)
if webengine:
self.teDetails = qtutils.replaceWidget(self.splitter, self.teDetails, webengine)
self.teDescription = qtutils.replaceWidget(self.splitter, self.teDescription, webengine)
logger.info("successfully loaded QWebEngineView")
else:
logger.info("failed to open QWebEngineView")
self.setWindowFlags(
QtCore.Qt.WindowType(
@ -138,29 +180,11 @@ class SeriesSelectionWindow(QtWidgets.QDialog):
)
self.config = config
self.series_name = series_name
self.issue_number = issue_number
self.issue_id: str = ""
self.year = year
self.issue_count = issue_count
self.series_id: str = ""
self.comic_archive = comic_archive
self.immediate_autoselect = autoselect
self.series_list: dict[str, ComicSeries] = {}
self.literal = literal
self.ii: IssueIdentifier | None = None
self.iddialog: IDProgressWindow | None = None
self.id_thread: IdentifyThread | None = None
self.progdialog: QtWidgets.QProgressDialog | None = None
self.search_thread: SearchThread | None = None
self.use_filter = self.config.Auto_Tag__use_publisher_filter
# Load to retrieve settings
self.talker = talker
self.issue_id: str = ""
# Display talker logo and set url
self.lblSourceName.setText(talker.attribution)
self.lblIssuesSourceName.setText(talker.attribution)
self.imageSourceWidget = CoverImageWidget(
self.imageSourceLogo,
@ -177,19 +201,27 @@ class SeriesSelectionWindow(QtWidgets.QDialog):
# Set the minimum row height to the default.
# this way rows will be more consistent when resizeRowsToContents is called
self.twList.verticalHeader().setMinimumSectionSize(self.twList.verticalHeader().defaultSectionSize())
self.twList.resizeColumnsToContents()
self.twList.currentItemChanged.connect(self.current_item_changed)
self.twList.cellDoubleClicked.connect(self.cell_double_clicked)
self.btnRequery.clicked.connect(self.requery)
self.btnIssues.clicked.connect(self.show_issues)
self.btnAutoSelect.clicked.connect(self.auto_select)
self.cbxFilter.setChecked(self.use_filter)
self.cbxFilter.toggled.connect(self.filter_toggled)
self.update_buttons()
self.leFilter.textChanged.connect(self.filter)
self.twList.selectRow(0)
self.leFilter.textChanged.connect(self.filter)
@abstractmethod
def perform_query(self, refresh: bool = False) -> None: ...
@abstractmethod
def cell_double_clicked(self, r: int, c: int) -> None: ...
@abstractmethod
def update_row(self, row: int, series: ComicSeries) -> None: ...
def set_description(self, widget: QtWidgets.QWidget, text: str) -> None:
if isinstance(widget, QtWidgets.QTextEdit):
widget.setText(text.replace("</figure>", "</div>").replace("<figure", "<div"))
else:
html = text
widget.setHtml(html, QUrl(self.talker.website))
def filter(self, text: str) -> None:
rows = set(range(self.twList.rowCount()))
@ -200,6 +232,187 @@ class SeriesSelectionWindow(QtWidgets.QDialog):
for r in rows - shown_rows:
self.twList.hideRow(r)
@abstractmethod
def _fetch(self, row: int) -> ComicSeries: ...
def on_ratelimit(self, full_time: float, sleep_time: float) -> None:
self.ratelimit.emit(full_time, sleep_time)
def current_item_changed(self, curr: QtCore.QModelIndex | None, prev: QtCore.QModelIndex | None) -> None:
if curr is None:
return
if prev is not None and prev.row() == curr.row():
return
row = curr.row()
item = self._fetch(row)
QtWidgets.QApplication.restoreOverrideCursor()
# Update current record information
self.update_row(row, item)
class SeriesSelectionWindow(SelectionWindow):
ui_file = ui_path / "seriesselectionwindow.ui"
CoverImageMode = CoverImageWidget.URLMode
def __init__(
self,
parent: QtWidgets.QWidget,
config: ct_ns,
talker: ComicTalker,
series_name: str = "",
issue_number: str = "",
comic_archive: ComicArchive | None = None,
year: int | None = None,
issue_count: int | None = None,
autoselect: bool = False,
literal: bool = False,
) -> None:
super().__init__(
parent,
config,
talker,
series_name,
issue_number,
comic_archive,
year,
issue_count,
autoselect,
literal,
)
self.series_name = series_name
self.issue_number = issue_number
self.year = year
self.issue_count = issue_count
self.series_id: str = ""
self.comic_archive = comic_archive
self.immediate_autoselect = autoselect
self.series_list: dict[str, ComicSeries] = {}
self.literal = literal
self.iddialog: IDProgressWindow | None = None
self.id_thread: IdentifyThread | None = None
self.progdialog: QtWidgets.QProgressDialog | None = None
self.search_thread: SearchThread | None = None
self.use_publisher_filter = self.config.Auto_Tag__use_publisher_filter
self.btnRequery.clicked.connect(self.requery)
self.btnIssues.clicked.connect(self.show_issues)
self.btnAutoSelect.clicked.connect(self.auto_select)
self.cbxPublisherFilter.setChecked(self.use_publisher_filter)
self.cbxPublisherFilter.toggled.connect(self.publisher_filter_toggled)
self.ratelimit.connect(self.ratelimit_message)
self.update_buttons()
def showEvent(self, event: QtGui.QShowEvent) -> None:
self.perform_query()
if not self.series_list:
QtCore.QCoreApplication.processEvents()
QtWidgets.QMessageBox.information(self, "Search Result", "No matches found!")
QtCore.QTimer.singleShot(200, self.close_me)
elif self.immediate_autoselect:
# defer the immediate autoselect so this dialog has time to pop up
QtCore.QCoreApplication.processEvents()
QtCore.QTimer.singleShot(10, self.do_immediate_autoselect)
def perform_query(self, refresh: bool = False) -> None:
self.search_thread = SearchThread(
self.talker,
self.series_name,
refresh,
self.literal,
self.config.Issue_Identifier__series_match_search_thresh,
)
self.search_thread.searchComplete.connect(self.search_complete)
self.search_thread.progressUpdate.connect(self.search_progress_update)
self.search_thread.ratelimit.connect(self.ratelimit)
self.search_thread.start()
self.progdialog = QtWidgets.QProgressDialog("Searching Online", "Cancel", 0, 100, self)
self.progdialog.setWindowTitle("Online Search")
self.progdialog.canceled.connect(self.search_canceled)
self.progdialog.setModal(True)
self.progdialog.setMinimumDuration(300)
if refresh or self.search_thread.isRunning():
self.progdialog.exec()
else:
self.progdialog = None
def cell_double_clicked(self, r: int, c: int) -> None:
self.show_issues()
def update_row(self, row: int, series: ComicSeries) -> None:
item_text = series.name
item = self.twList.item(row, 0)
item.setText(item_text)
item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
item.setData(QtCore.Qt.ItemDataRole.UserRole, series.id)
item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
item_text = f"{series.start_year:04}" if series.start_year is not None else ""
item = self.twList.item(row, 1)
item.setText(item_text)
item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
item_text = f"{series.count_of_issues:04}" if series.count_of_issues is not None else ""
item = self.twList.item(row, 2)
item.setText(item_text)
item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
item.setData(QtCore.Qt.ItemDataRole.DisplayRole, series.count_of_issues)
item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
item_text = series.publisher if series.publisher is not None else ""
item = self.twList.item(row, 3)
item.setText(item_text)
item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
def set_description(self, widget: QtWidgets.QWidget, text: str) -> None:
if isinstance(widget, QtWidgets.QTextEdit):
widget.setText(text.replace("</figure>", "</div>").replace("<figure", "<div"))
else:
html = text
widget.setHtml(html, QUrl(self.talker.website))
def filter(self, text: str) -> None:
rows = set(range(self.twList.rowCount()))
for r in rows:
self.twList.showRow(r)
if text.strip():
shown_rows = {x.row() for x in self.twList.findItems(text, QtCore.Qt.MatchFlag.MatchContains)}
for r in rows - shown_rows:
self.twList.hideRow(r)
def _fetch(self, row: int) -> ComicSeries:
self.series_id = self.twList.item(row, 0).data(QtCore.Qt.ItemDataRole.UserRole)
# list selection was changed, update the info on the series
series = self.series_list[self.series_id]
if not (
series.name
and series.start_year
and series.count_of_issues
and series.publisher
and series.description
and series.image_url
):
QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.CursorShape.WaitCursor))
try:
series = self.talker.fetch_series(self.series_id, on_rate_limit=RLCallBack(self.on_ratelimit, 10))
except TalkerError:
pass
self.set_description(self.teDescription, series.description or "")
self.cover_widget.set_url(series.image_url)
return series
def update_buttons(self) -> None:
enabled = bool(self.series_list)
@ -214,8 +427,8 @@ class SeriesSelectionWindow(QtWidgets.QDialog):
self.perform_query(refresh=True)
self.twList.selectRow(0)
def filter_toggled(self) -> None:
self.use_filter = not self.use_filter
def publisher_filter_toggled(self) -> None:
self.use_publisher_filter = self.cbxPublisherFilter.isChecked()
self.perform_query(refresh=False)
def auto_select(self) -> None:
@ -229,70 +442,53 @@ class SeriesSelectionWindow(QtWidgets.QDialog):
self.iddialog = IDProgressWindow(self)
self.iddialog.setModal(True)
self.iddialog.rejected.connect(self.identify_cancel)
self.iddialog.show()
self.ii = IssueIdentifier(self.comic_archive, self.config, self.talker)
md = GenericMetadata()
md.series = self.series_name
md.issue = self.issue_number
md.year = self.year
md.issue_count = self.issue_count
self.id_thread = IdentifyThread(self.ii, self.comic_archive, md)
self.id_thread = IdentifyThread(self.comic_archive, self.config, self.talker, md)
self.id_thread.identifyComplete.connect(self.identify_complete)
self.id_thread.identifyLogMsg.connect(self.log_id_output)
self.id_thread.identifyLogMsg.connect(self.log_output)
self.id_thread.identifyProgress.connect(self.identify_progress)
self.id_thread.ratelimit.connect(self.ratelimit)
self.iddialog.rejected.connect(self.id_thread.cancel)
self.id_thread.start()
self.iddialog.exec()
def log_id_output(self, text: str) -> None:
if self.iddialog is not None:
self.iddialog.textEdit.append(text.rstrip())
self.iddialog.textEdit.ensureCursorVisible()
QtCore.QCoreApplication.processEvents()
QtCore.QCoreApplication.processEvents()
QtCore.QCoreApplication.processEvents()
self.selector = None
def identify_progress(self, cur: int, total: int) -> None:
if self.iddialog is not None:
self.iddialog.progressBar.setMaximum(total)
self.iddialog.progressBar.setValue(cur)
def identify_cancel(self) -> None:
if self.ii is not None:
self.ii.cancel = True
def identify_complete(self, result: int, issues: list[IssueResult]) -> None:
def identify_complete(self, result: IIResult, issues: list[IssueResult]) -> None:
if self.iddialog is not None and self.comic_archive is not None:
found_match = None
choices = False
if result == IssueIdentifier.result_no_matches:
if result == IIResult.no_matches:
QtWidgets.QMessageBox.information(self, "Auto-Select Result", " No issues found :-(")
elif result == IssueIdentifier.result_found_match_but_bad_cover_score:
elif result == IIResult.single_bad_cover_score:
QtWidgets.QMessageBox.information(
self,
"Auto-Select Result",
" Found a match, but cover doesn't seem the same. Verify before committing!",
)
found_match = issues[0]
elif result == IssueIdentifier.result_found_match_but_not_first_page:
QtWidgets.QMessageBox.information(
self, "Auto-Select Result", " Found a match, but not with the first page of the archive."
)
found_match = issues[0]
elif result == IssueIdentifier.result_multiple_matches_with_bad_image_scores:
elif result == IIResult.multiple_bad_cover_scores:
QtWidgets.QMessageBox.information(
self, "Auto-Select Result", " Found some possibilities, but no confidence. Proceed manually."
)
choices = True
elif result == IssueIdentifier.result_one_good_match:
elif result == IIResult.single_good_match:
found_match = issues[0]
elif result == IssueIdentifier.result_multiple_good_matches:
elif result == IIResult.multiple_good_matches:
QtWidgets.QMessageBox.information(
self, "Auto-Select Result", " Found multiple likely matches. Please select."
)
@ -302,7 +498,6 @@ class SeriesSelectionWindow(QtWidgets.QDialog):
selector = MatchSelectionWindow(
self, issues, self.comic_archive, talker=self.talker, config=self.config
)
selector.setModal(True)
selector.exec()
if selector.result():
# we should now have a list index
@ -317,23 +512,28 @@ class SeriesSelectionWindow(QtWidgets.QDialog):
self.show_issues()
def show_issues(self) -> None:
selector = IssueSelectionWindow(self, self.config, self.talker, self.series_id, self.issue_number)
from comictaggerlib.issueselectionwindow import IssueSelectionWindow
self.selector = IssueSelectionWindow(self, self.config, self.talker, self.series_id, self.issue_number)
self.selector.ratelimit.connect(self.ratelimit)
title = ""
for series in self.series_list.values():
if series.id == self.series_id:
title = f"{series.name} ({series.start_year:04}) - " if series.start_year else f"{series.name} - "
break
selector.setWindowTitle(title + "Select Issue")
selector.setModal(True)
selector.exec()
if selector.result():
self.selector.setWindowTitle(title + "Select Issue")
self.selector.finished.connect(self.issue_selected)
self.selector.show()
def issue_selected(self, result) -> None:
if result and self.selector:
# we should now have a series ID
self.issue_number = selector.issue_number
self.issue_id = selector.issue_id
self.issue_number = self.selector.issue_number
self.issue_id = self.selector.issue_id
self.accept()
else:
self.imageWidget.update_content()
self.cover_widget.update_content()
def select_by_id(self) -> None:
for r in range(self.twList.rowCount()):
@ -341,29 +541,6 @@ class SeriesSelectionWindow(QtWidgets.QDialog):
self.twList.selectRow(r)
break
def perform_query(self, refresh: bool = False) -> None:
self.search_thread = SearchThread(
self.talker,
self.series_name,
refresh,
self.literal,
self.config.Issue_Identifier__series_match_search_thresh,
)
self.search_thread.searchComplete.connect(self.search_complete)
self.search_thread.progressUpdate.connect(self.search_progress_update)
self.search_thread.start()
self.progdialog = QtWidgets.QProgressDialog("Searching Online", "Cancel", 0, 100, self)
self.progdialog.setWindowTitle("Online Search")
self.progdialog.canceled.connect(self.search_canceled)
self.progdialog.setModal(True)
self.progdialog.setMinimumDuration(300)
if refresh or self.search_thread.isRunning():
self.progdialog.exec()
else:
self.progdialog = None
def search_canceled(self) -> None:
if self.progdialog is not None:
logger.info("query cancelled")
@ -379,8 +556,10 @@ class SeriesSelectionWindow(QtWidgets.QDialog):
def search_progress_update(self, current: int, total: int) -> None:
if self.progdialog is not None:
QtCore.QCoreApplication.processEvents()
self.progdialog.setMaximum(total)
self.progdialog.setValue(current + 1)
QtCore.QCoreApplication.processEvents()
def search_complete(self) -> None:
if self.progdialog is not None:
@ -398,7 +577,7 @@ class SeriesSelectionWindow(QtWidgets.QDialog):
tmp_list = self.search_thread.ct_search_results if self.search_thread is not None else []
self.series_list = {x.id: x for x in tmp_list}
# filter the publishers if enabled set
if self.use_filter:
if self.use_publisher_filter:
try:
publisher_filter = {s.strip().casefold() for s in self.config.Auto_Tag__publisher_filter}
# use '' as publisher name if None
@ -487,59 +666,10 @@ class SeriesSelectionWindow(QtWidgets.QDialog):
# Resize row height so the whole series can still be seen
self.twList.resizeRowsToContents()
def showEvent(self, event: QtGui.QShowEvent) -> None:
self.perform_query()
if not self.series_list:
QtCore.QCoreApplication.processEvents()
QtWidgets.QMessageBox.information(self, "Search Result", "No matches found!")
QtCore.QTimer.singleShot(200, self.close_me)
elif self.immediate_autoselect:
# defer the immediate autoselect so this dialog has time to pop up
QtCore.QCoreApplication.processEvents()
QtCore.QTimer.singleShot(10, self.do_immediate_autoselect)
def do_immediate_autoselect(self) -> None:
self.immediate_autoselect = False
self.auto_select()
def cell_double_clicked(self, r: int, c: int) -> None:
self.show_issues()
def set_description(self, widget: QtWidgets.QWidget, text: str) -> None:
if isinstance(widget, QtWidgets.QTextEdit):
widget.setText(text.replace("</figure>", "</div>").replace("<figure", "<div"))
else:
html = text
widget.setHtml(html, QUrl(self.talker.website))
def update_row(self, row: int, series: ComicSeries) -> None:
item_text = series.name
item = self.twList.item(row, 0)
item.setText(item_text)
item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
item.setData(QtCore.Qt.ItemDataRole.UserRole, series.id)
item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
item_text = f"{series.start_year:04}" if series.start_year is not None else ""
item = self.twList.item(row, 1)
item.setText(item_text)
item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
item_text = f"{series.count_of_issues:04}" if series.count_of_issues is not None else ""
item = self.twList.item(row, 2)
item.setText(item_text)
item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
item.setData(QtCore.Qt.ItemDataRole.DisplayRole, series.count_of_issues)
item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
item_text = series.publisher if series.publisher is not None else ""
item = self.twList.item(row, 3)
item.setText(item_text)
item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text)
item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
def current_item_changed(self, curr: QtCore.QModelIndex | None, prev: QtCore.QModelIndex | None) -> None:
if curr is None:
return
@ -547,31 +677,22 @@ class SeriesSelectionWindow(QtWidgets.QDialog):
return
row = curr.row()
self.series_id = self.twList.item(row, 0).data(QtCore.Qt.ItemDataRole.UserRole)
# list selection was changed, update the info on the series
series = self.series_list[self.series_id]
if not (
series.name
and series.start_year
and series.count_of_issues
and series.publisher
and series.description
and series.image_url
):
QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.CursorShape.WaitCursor))
# Changing of usernames and passwords with using cache can cause talker errors to crash out
try:
series = self.talker.fetch_series(self.series_id)
except TalkerError:
pass
item = self._fetch(row)
QtWidgets.QApplication.restoreOverrideCursor()
if series.description is None:
self.set_description(self.teDetails, "")
else:
self.set_description(self.teDetails, series.description)
self.imageWidget.set_url(series.image_url)
# Update current record information
self.update_row(row, series)
self.update_row(row, item)
def ratelimit_message(self, full_time: float, sleep_time: float) -> None:
    """Log a human-readable notice that the talker hit its rate limit."""
    notice = f"Rate limit reached: {full_time:.0f}s until next request. Waiting {sleep_time:.0f}s for ratelimit"
    self.log_output(notice)
def log_output(self, text: str) -> None:
    """Append a line of progress text to the identify dialog, if one is open.

    Trailing whitespace is stripped before appending; the view is scrolled so
    the newest line stays visible, and pending Qt events are processed so the
    UI repaints during a long-running search.
    """
    if self.iddialog is not None:
        self.iddialog.textEdit.append(text.rstrip())
        self.iddialog.textEdit.ensureCursorVisible()
        QtCore.QCoreApplication.processEvents()
QtCore.QCoreApplication.processEvents()
QtCore.QCoreApplication.processEvents()

View File

@ -669,7 +669,6 @@ class SettingsWindow(QtWidgets.QDialog):
def show_template_help(self) -> None:
template_help_win = TemplateHelpWindow(self)
template_help_win.setModal(False)
template_help_win.show()

123
comictaggerlib/tag.py Normal file
View File

@ -0,0 +1,123 @@
from __future__ import annotations
import logging
import re
from typing import Any, Callable
from comicapi.comicarchive import ComicArchive
from comicapi.genericmetadata import GenericMetadata
from comictaggerlib.ctsettings import ct_ns
from comictaggerlib.issueidentifier import IssueIdentifier, IssueIdentifierOptions
from comictaggerlib.issueidentifier import Result as IIResult
from comictaggerlib.md import prepare_metadata
from comictaggerlib.resulttypes import Action, MatchStatus, OnlineMatchResults, Result, Status
from comictalker.comictalker import ComicTalker, RLCallBack, TalkerError
logger = logging.getLogger(__name__)
def identify_comic(
    ca: ComicArchive,
    md: GenericMetadata,
    tags_read: list[str],
    match_results: OnlineMatchResults,
    config: ct_ns,
    talker: ComicTalker,
    output: Callable[[str], Any],
    on_rate_limit: RLCallBack | None,
    on_progress: Callable[[int, int, bytes], Any] | None = None,
) -> tuple[Result, OnlineMatchResults]:
    """Identify a comic archive online and fetch its full issue metadata.

    Runs an :class:`IssueIdentifier` search against ``ca`` seeded with ``md``,
    records the outcome in the appropriate bucket of ``match_results`` and, on
    a usable single match, fetches the complete issue metadata from ``talker``
    and merges it via :func:`prepare_metadata`.

    Args:
        ca: The archive being identified.
        md: Seed metadata for the search; must be non-empty.
        tags_read: Tag ids the seed metadata came from (recorded on the result).
        match_results: Accumulator for per-category outcomes; mutated in place
            and also returned.
        config: Settings namespace controlling identification behavior.
        talker: Metadata source used for both the search and the final fetch.
        output: Sink for human-readable progress text from the identifier.
        on_rate_limit: Optional callback invoked while a rate limit is waited out.
        on_progress: Optional ``(current, total, image_bytes)`` progress callback.

    Returns:
        A ``(result, match_results)`` tuple; ``match_results`` is the same
        (mutated) object that was passed in.
    """
    if md is None or md.is_empty:
        # Nothing to search with: record the failure and bail out early.
        logger.error("No metadata given to search online with!")
        res = Result(
            Action.save,
            status=Status.match_failure,
            original_path=ca.path,
            match_status=MatchStatus.no_match,
            tags_read=tags_read,
        )
        match_results.no_matches.append(res)
        return res, match_results

    iio = IssueIdentifierOptions(
        series_match_search_thresh=config.Issue_Identifier__series_match_search_thresh,
        series_match_identify_thresh=config.Issue_Identifier__series_match_identify_thresh,
        use_publisher_filter=config.Auto_Tag__use_publisher_filter,
        publisher_filter=config.Auto_Tag__publisher_filter,
        quiet=config.Runtime_Options__quiet,
        cache_dir=config.Runtime_Options__config.user_cache_dir,
        border_crop_percent=config.Issue_Identifier__border_crop_percent,
        talker=talker,
    )
    ii = IssueIdentifier(
        iio,
        output=output,
        on_rate_limit=on_rate_limit,
        on_progress=on_progress,
    )

    if not config.Auto_Tag__use_year_when_identifying:
        md.year = None
    if config.Auto_Tag__ignore_leading_numbers_in_filename and md.series is not None:
        # Strip leading digits/dots so e.g. "52 Batman" searches as "Batman".
        md.series = re.sub(r"^([\d.]+)", "", md.series)
    result, matches = ii.identify(ca, md)

    res = Result(
        Action.save,
        status=Status.match_failure,
        original_path=ca.path,
        online_results=matches,
        tags_read=tags_read,
    )
    if result == IIResult.multiple_bad_cover_scores:
        res.match_status = MatchStatus.low_confidence_match
        logger.error("Online search: Multiple low confidence matches. Save aborted")
        match_results.low_confidence_matches.append(res)
        return res, match_results

    if result == IIResult.single_bad_cover_score and config.Runtime_Options__abort_on_low_confidence:
        logger.error("Online search: Low confidence match. Save aborted")
        res.match_status = MatchStatus.low_confidence_match
        match_results.low_confidence_matches.append(res)
        return res, match_results

    if result == IIResult.multiple_good_matches:
        logger.error("Online search: Multiple good matches. Save aborted")
        res.match_status = MatchStatus.multiple_match
        match_results.multiple_matches.append(res)
        return res, match_results

    if result == IIResult.no_matches:
        logger.error("Online search: No match found. Save aborted")
        res.match_status = MatchStatus.no_match
        match_results.no_matches.append(res)
        return res, match_results

    # we got here, so we have a single match

    # now get the particular issue data
    try:
        ct_md = talker.fetch_comic_data(issue_id=matches[0].issue_id, on_rate_limit=on_rate_limit)
    except TalkerError as e:
        # Lazy %-formatting: logger.exception already records the traceback.
        logger.exception("Error retrieving issue details. Save aborted.\n%s", e)
        # Fall through with empty metadata so the failure is recorded below.
        ct_md = GenericMetadata()

    ct_md = prepare_metadata(md, ct_md, config)

    if ct_md.is_empty:
        res.status = Status.fetch_data_failure
        res.match_status = MatchStatus.good_match
        match_results.fetch_data_failures.append(res)
        return res, match_results

    res.status = Status.success
    res.md = ct_md
    if result == IIResult.single_good_match:
        # NOTE(review): a single_bad_cover_score match (with abort disabled)
        # keeps the Result's default match_status here — confirm intentional.
        res.match_status = MatchStatus.good_match

    return res, match_results

View File

@ -22,7 +22,6 @@ import operator
import os
import pickle
import platform
import re
import sys
import webbrowser
from collections.abc import Sequence
@ -43,7 +42,7 @@ from comicapi.issuestring import IssueString
from comictaggerlib import ctsettings, ctversion
from comictaggerlib.applicationlogwindow import ApplicationLogWindow, QTextEditLogger
from comictaggerlib.autotagmatchwindow import AutoTagMatchWindow
from comictaggerlib.autotagprogresswindow import AutoTagProgressWindow
from comictaggerlib.autotagprogresswindow import AutoTagProgressWindow, AutoTagThread
from comictaggerlib.autotagstartwindow import AutoTagStartWindow
from comictaggerlib.cbltransformer import CBLTransformer
from comictaggerlib.coverimagewidget import CoverImageWidget
@ -52,20 +51,20 @@ from comictaggerlib.ctsettings import ct_ns
from comictaggerlib.exportwindow import ExportConflictOpts, ExportWindow
from comictaggerlib.fileselectionlist import FileSelectionList
from comictaggerlib.graphics import graphics_path
from comictaggerlib.issueidentifier import IssueIdentifier
from comictaggerlib.logwindow import LogWindow
from comictaggerlib.md import prepare_metadata
from comictaggerlib.md import prepare_metadata, read_selected_tags
from comictaggerlib.optionalmsgdialog import OptionalMessageDialog
from comictaggerlib.pagebrowser import PageBrowserWindow
from comictaggerlib.pagelisteditor import PageListEditor
from comictaggerlib.renamewindow import RenameWindow
from comictaggerlib.resulttypes import Action, MatchStatus, OnlineMatchResults, Result, Status
from comictaggerlib.resulttypes import OnlineMatchResults
from comictaggerlib.seriesselectionwindow import SeriesSelectionWindow
from comictaggerlib.settingswindow import SettingsWindow
from comictaggerlib.ui import ui_path
from comictaggerlib.ui import qtutils, ui_path
from comictaggerlib.ui.pyqttoast import Toast, ToastPreset
from comictaggerlib.ui.qtutils import center_window_on_parent, enable_widget
from comictaggerlib.versionchecker import VersionChecker
from comictalker.comictalker import ComicTalker, TalkerError
from comictalker.comictalker import ComicTalker, RLCallBack, TalkerError
logger = logging.getLogger(__name__)
@ -77,6 +76,8 @@ def execute(f: Callable[[], Any]) -> None:
class TaggerWindow(QtWidgets.QMainWindow):
appName = "ComicTagger"
version = ctversion.version
ratelimit = QtCore.pyqtSignal(float, float)
query_finished = QtCore.pyqtSignal(GenericMetadata, str)
def __init__(
self,
@ -288,6 +289,9 @@ class TaggerWindow(QtWidgets.QMainWindow):
self.page_list_editor.set_blur(self.config[0].General__blur)
self.ratelimit.connect(self.on_ratelimit)
self.query_finished.connect(self.apply_query_metadata)
def _sync_blur(*args: Any) -> None:
self.config[0].General__blur = self.page_list_editor.blur
@ -1069,50 +1073,102 @@ class TaggerWindow(QtWidgets.QMainWindow):
issue_count = utils.xlate_int(self.leIssueCount.text())
selector = SeriesSelectionWindow(
self.selector = SeriesSelectionWindow(
self,
series_name,
issue_number,
year,
issue_count,
self.comic_archive,
self.config[0],
self.current_talker(),
series_name,
issue_number,
self.comic_archive,
year,
issue_count,
autoselect,
literal,
)
self.selector.ratelimit.connect(self.on_ratelimit)
selector.setWindowTitle(f"Search: '{series_name}' - Select Series")
self.selector.setWindowTitle(f"Search: '{series_name}' - Select Series")
self.selector.finished.connect(self.finish_query)
selector.setModal(True)
selector.exec()
self.selector.show()
if selector.result():
# we should now have a series ID
QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.CursorShape.WaitCursor))
def finish_query(self, result) -> None:
if result and self.selector:
# copy the form onto metadata object
self.form_to_metadata()
class QueryThread(QtCore.QThread):
def __init__(
self,
talker: ComicTalker,
issue_id: str,
series_id: str,
issue_number: str,
finish: QtCore.pyqtSignal,
on_rate_limit: QtCore.pyqtSignal,
) -> None:
super().__init__()
self.issue_id = issue_id
self.series_id = series_id
self.issue_number = issue_number
self.talker = talker
self.finish = finish
self.on_rate_limit = on_rate_limit
try:
new_metadata = self.current_talker().fetch_comic_data(
issue_id=selector.issue_id, series_id=selector.series_id, issue_number=selector.issue_number
)
except TalkerError as e:
QtWidgets.QApplication.restoreOverrideCursor()
QtWidgets.QMessageBox.critical(self, f"{e.source} {e.code_name} Error", f"{e}")
return
QtWidgets.QApplication.restoreOverrideCursor()
def run(self) -> None:
try:
new_metadata = self.talker.fetch_comic_data(
issue_id=self.issue_id,
series_id=self.series_id,
issue_number=self.issue_number,
on_rate_limit=RLCallBack(lambda x, y: self.on_rate_limit.emit(x, y), 60),
)
except TalkerError as e:
QtWidgets.QMessageBox.critical(None, f"{e.source} {e.code_name} Error", f"{e}")
return
self.finish.emit(new_metadata, self.issue_number)
if new_metadata is None or new_metadata.is_empty:
QtWidgets.QMessageBox.critical(
self, "Search", f"Could not find an issue {selector.issue_number} for that series"
)
return
self.querythread = QueryThread(
self.current_talker(),
self.selector.issue_id,
self.selector.series_id,
self.selector.issue_number,
self.query_finished,
self.ratelimit,
)
self.querythread.start()
self.metadata = prepare_metadata(self.metadata, new_metadata, self.config[0])
# Now push the new combined data into the edit controls
self.metadata_to_form()
def apply_query_metadata(self, new_metadata: GenericMetadata, issue_number: str) -> None:
    """Merge metadata fetched by the query thread into the form.

    Connected to the ``query_finished`` signal. Restores the cursor, snapshots
    the current form into ``self.metadata``, then overlays ``new_metadata`` on
    top of it and pushes the combined result back into the edit controls.

    Args:
        new_metadata: Metadata returned by the talker; may be empty on failure.
        issue_number: The issue number that was searched for (used in the
            failure message shown to the user).
    """
    QtWidgets.QApplication.restoreOverrideCursor()

    # copy the form onto metadata object
    self.form_to_metadata()

    if new_metadata is None or new_metadata.is_empty:
        # Report the issue number that was searched for, not the (empty)
        # metadata object itself.
        QtWidgets.QMessageBox.critical(None, "Search", f"Could not find an issue {issue_number} for that series")
        return

    self.metadata = prepare_metadata(self.metadata, new_metadata, self.config[0])

    # Now push the new combined data into the edit controls
    self.metadata_to_form()
def on_ratelimit(self, full_time: float, sleep_time: float) -> None:
    """Pop a warning toast that lasts roughly as long as the rate-limit wait."""
    toast = Toast(self)
    preset = ToastPreset.WARNING_DARK if qtutils.is_dark_mode() else ToastPreset.WARNING
    toast.applyPreset(preset)

    # Convert to milliseconds, add 200ms because python is slow
    toast.setDuration(abs(int(sleep_time * 1000) + 200))
    toast.setResetDurationOnHover(False)
    toast.setFadeOutDuration(50)
    toast.setTitle("Rate Limit Hit!")
    toast.setText(
        f"Rate limit reached: {full_time:.0f}s until next request. Waiting {sleep_time:.0f}s for ratelimit"
    )
    toast.setPositionRelativeToWidget(self)
    toast.show()

    # Keep a reference so the toast is not garbage-collected while visible.
    self.toast = toast
def write_tags(self) -> None:
if self.metadata is not None and self.comic_archive is not None:
@ -1155,7 +1211,7 @@ class TaggerWindow(QtWidgets.QMainWindow):
self.update_menus()
# Only try to read if write was successful
self.metadata, error = self.read_selected_tags(self.selected_read_tags, self.comic_archive)
self.metadata, _, error = self.read_selected_tags(self.selected_read_tags, self.comic_archive)
if error is not None:
QtWidgets.QMessageBox.warning(
self,
@ -1340,7 +1396,6 @@ class TaggerWindow(QtWidgets.QMainWindow):
def show_settings(self) -> None:
settingswin = SettingsWindow(self, self.config, self.talkers)
settingswin.setModal(True)
settingswin.exec()
settingswin.result()
self.adjust_source_combo()
@ -1640,7 +1695,7 @@ class TaggerWindow(QtWidgets.QMainWindow):
success_count = 0
for prog_idx, ca in enumerate(ca_list, 1):
ca_saved = False
md, error = self.read_selected_tags(src_tag_ids, ca)
md, _, error = self.read_selected_tags(src_tag_ids, ca)
if error is not None:
failed_list.append(ca.path)
continue
@ -1696,193 +1751,6 @@ class TaggerWindow(QtWidgets.QMainWindow):
QtCore.QCoreApplication.processEvents()
QtCore.QCoreApplication.processEvents()
def identify_and_tag_single_archive(
self, ca: ComicArchive, match_results: OnlineMatchResults, dlg: AutoTagStartWindow
) -> tuple[bool, OnlineMatchResults]:
success = False
ii = IssueIdentifier(ca, self.config[0], self.current_talker())
# read in tags, and parse file name if not there
md, error = self.read_selected_tags(self.selected_read_tags, ca)
if error is not None:
QtWidgets.QMessageBox.warning(
self,
"Aborting...",
f"One or more of the read tags failed to load for {ca.path}. Aborting to prevent any possible further damage. Check log for details.",
)
logger.error("Failed to load tags from %s: %s", self.ca.path, error)
return False, match_results
if md.is_empty:
md = ca.metadata_from_filename(
self.config[0].Filename_Parsing__filename_parser,
self.config[0].Filename_Parsing__remove_c2c,
self.config[0].Filename_Parsing__remove_fcbd,
self.config[0].Filename_Parsing__remove_publisher,
dlg.split_words,
self.config[0].Filename_Parsing__allow_issue_start_with_letter,
self.config[0].Filename_Parsing__protofolius_issue_number_scheme,
)
if dlg.ignore_leading_digits_in_filename and md.series is not None:
# remove all leading numbers
md.series = re.sub(r"(^[\d.]*)(.*)", r"\2", md.series)
# use the dialog specified search string
if dlg.search_string:
md.series = dlg.search_string
if md is None or md.is_empty:
logger.error("No metadata given to search online with!")
return False, match_results
if dlg.dont_use_year:
md.year = None
if md.issue is None or md.issue == "":
if dlg.assume_issue_one:
md.issue = "1"
else:
md.issue = utils.xlate(md.volume)
ii.set_output_function(self.auto_tag_log)
if self.atprogdialog is not None:
ii.set_cover_url_callback(self.atprogdialog.set_test_image)
ii.series_match_thresh = dlg.name_length_match_tolerance
result, matches = ii.identify(ca, md)
found_match = False
choices = False
low_confidence = False
if result == ii.result_no_matches:
pass
elif result == ii.result_found_match_but_bad_cover_score:
low_confidence = True
found_match = True
elif result == ii.result_found_match_but_not_first_page:
found_match = True
elif result == ii.result_multiple_matches_with_bad_image_scores:
low_confidence = True
choices = True
elif result == ii.result_one_good_match:
found_match = True
elif result == ii.result_multiple_good_matches:
choices = True
if choices:
if low_confidence:
self.auto_tag_log("Online search: Multiple low-confidence matches. Save aborted\n")
match_results.low_confidence_matches.append(
Result(
Action.save,
Status.match_failure,
ca.path,
online_results=matches,
match_status=MatchStatus.low_confidence_match,
)
)
else:
self.auto_tag_log("Online search: Multiple matches. Save aborted\n")
match_results.multiple_matches.append(
Result(
Action.save,
Status.match_failure,
ca.path,
online_results=matches,
match_status=MatchStatus.multiple_match,
)
)
elif low_confidence and not dlg.auto_save_on_low:
self.auto_tag_log("Online search: Low confidence match. Save aborted\n")
match_results.low_confidence_matches.append(
Result(
Action.save,
Status.match_failure,
ca.path,
online_results=matches,
match_status=MatchStatus.low_confidence_match,
)
)
elif not found_match:
self.auto_tag_log("Online search: No match found. Save aborted\n")
match_results.no_matches.append(
Result(
Action.save,
Status.match_failure,
ca.path,
online_results=matches,
match_status=MatchStatus.no_match,
)
)
else:
# a single match!
if low_confidence:
self.auto_tag_log("Online search: Low confidence match, but saving anyways, as indicated...\n")
# now get the particular issue data
QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.CursorShape.WaitCursor))
try:
ct_md = self.current_talker().fetch_comic_data(matches[0].issue_id)
except TalkerError:
logger.exception("Save aborted.")
return False, match_results
QtWidgets.QApplication.restoreOverrideCursor()
if ct_md is None or ct_md.is_empty:
match_results.fetch_data_failures.append(
Result(
Action.save,
Status.fetch_data_failure,
ca.path,
online_results=matches,
match_status=MatchStatus.good_match,
)
)
if ct_md is not None:
temp_opts = cast(ct_ns, settngs.get_namespace(self.config, True, True, True, False)[0])
temp_opts.Auto_Tag__clear_tags = dlg.cbxClearMetadata.isChecked()
md = prepare_metadata(md, ct_md, temp_opts)
res = Result(
Action.save,
status=Status.success,
original_path=ca.path,
online_results=matches,
match_status=MatchStatus.good_match,
md=md,
tags_written=self.selected_write_tags,
)
def write_Tags() -> bool:
for tag_id in self.selected_write_tags:
# write out the new data
if not ca.write_tags(md, tag_id):
self.auto_tag_log(
f"{tags[tag_id].name()} save failed! Aborting any additional tag saves.\n"
)
return False
return True
# Save tags
if write_Tags():
match_results.good_matches.append(res)
success = True
self.auto_tag_log("Save complete!\n")
else:
res.status = Status.write_failure
match_results.write_failures.append(res)
ca.reset_cache()
ca.load_cache({*self.selected_read_tags, *self.selected_write_tags})
return success, match_results
def auto_tag(self) -> None:
ca_list = self.fileSelectionList.get_selected_archive_list()
tag_names = ", ".join([tags[tag_id].name() for tag_id in self.selected_write_tags])
@ -1911,48 +1779,40 @@ class TaggerWindow(QtWidgets.QMainWindow):
return
self.atprogdialog = AutoTagProgressWindow(self, self.current_talker())
self.atprogdialog.setModal(True)
self.atprogdialog.show()
self.atprogdialog.open()
self.atprogdialog.progressBar.setMaximum(len(ca_list))
self.atprogdialog.setWindowTitle("Auto-Tagging")
center_window_on_parent(self.atprogdialog)
temp_opts = cast(ct_ns, settngs.get_namespace(self.config, True, True, True, False)[0])
temp_opts.Auto_Tag__clear_tags = atstartdlg.cbxClearMetadata.isChecked()
temp_opts.Issue_Identifier__series_match_identify_thresh = atstartdlg.name_length_match_tolerance
temp_opts.Auto_Tag__ignore_leading_numbers_in_filename = atstartdlg.ignore_leading_digits_in_filename
temp_opts.Auto_Tag__use_year_when_identifying = not atstartdlg.dont_use_year
temp_opts.Auto_Tag__assume_issue_one = atstartdlg.assume_issue_one
temp_opts.internal__remove_archive_after_successful_match = atstartdlg.remove_after_success
temp_opts.Runtime_Options__tags_read = self.selected_read_tags
temp_opts.Runtime_Options__tags_write = self.selected_write_tags
self.autotagthread = AutoTagThread(atstartdlg.search_string, ca_list, self.config[0], self.current_talker())
self.autotagthread.autoTagComplete.connect(self.auto_tag_finished)
self.autotagthread.autoTagLogMsg.connect(self.auto_tag_log)
self.autotagthread.autoTagProgress.connect(self.atprogdialog.on_progress)
self.autotagthread.ratelimit.connect(self.ratelimit)
self.atprogdialog.rejected.connect(self.autotagthread.cancel)
self.auto_tag_log("==========================================================================\n")
self.auto_tag_log(f"Auto-Tagging Started for {len(ca_list)} items\n")
self.autotagthread.start()
match_results = OnlineMatchResults()
archives_to_remove = []
for prog_idx, ca in enumerate(ca_list):
self.auto_tag_log("==========================================================================\n")
self.auto_tag_log(f"Auto-Tagging {prog_idx} of {len(ca_list)}\n")
self.auto_tag_log(f"{ca.path}\n")
try:
cover_idx = ca.read_tags(self.selected_read_tags[0]).get_cover_page_index_list()[0]
except Exception as e:
cover_idx = 0
logger.error("Failed to load metadata for %s: %s", ca.path, e)
image_data = ca.get_page(cover_idx)
self.atprogdialog.set_archive_image(image_data)
self.atprogdialog.set_test_image(b"")
def auto_tag_finished(self, match_results: OnlineMatchResults, archives_to_remove: list[ComicArchive]) -> None:
tag_names = ", ".join([tags[tag_id].name() for tag_id in self.selected_write_tags])
if self.atprogdialog:
self.atprogdialog.close()
QtCore.QCoreApplication.processEvents()
if self.atprogdialog.isdone:
break
self.atprogdialog.progressBar.setValue(prog_idx)
self.atprogdialog.label.setText(str(ca.path))
QtCore.QCoreApplication.processEvents()
if ca.is_writable():
success, match_results = self.identify_and_tag_single_archive(ca, match_results, atstartdlg)
if success and atstartdlg.remove_after_success:
archives_to_remove.append(ca)
self.atprogdialog.close()
if atstartdlg.remove_after_success:
self.fileSelectionList.remove_archive_list(archives_to_remove)
self.fileSelectionList.remove_archive_list(archives_to_remove)
self.fileSelectionList.update_selected_rows()
new_ca = self.fileSelectionList.get_current_archive()
@ -1998,11 +1858,9 @@ class TaggerWindow(QtWidgets.QMainWindow):
self,
match_results.multiple_matches,
self.selected_write_tags,
lambda match: self.current_talker().fetch_comic_data(match.issue_id),
self.config[0],
self.current_talker(),
)
matchdlg.setModal(True)
matchdlg.exec()
self.fileSelectionList.update_selected_rows()
new_ca = self.fileSelectionList.get_current_archive()
@ -2120,7 +1978,6 @@ class TaggerWindow(QtWidgets.QMainWindow):
"File Rename", "If you rename files now, unsaved data in the form will be lost. Are you sure?"
):
dlg = RenameWindow(self, ca_list, self.selected_read_tags, self.config, self.talkers)
dlg.setModal(True)
if dlg.exec() and self.comic_archive is not None:
self.fileSelectionList.update_selected_rows()
self.load_archive(self.comic_archive)
@ -2139,28 +1996,19 @@ class TaggerWindow(QtWidgets.QMainWindow):
self.config[0].internal__last_opened_folder = os.path.abspath(os.path.split(comic_archive.path)[0])
self.comic_archive = comic_archive
self.metadata, error = self.read_selected_tags(self.selected_read_tags, self.comic_archive)
self.metadata, _, error = self.read_selected_tags(self.selected_read_tags, self.comic_archive)
if error is not None:
logger.error("Failed to load tags from %s: %s", self.comic_archive.path, error)
self.exception(f"Failed to load tags from {self.comic_archive.path}, see log for details\n\n")
self.update_ui_for_archive()
def read_selected_tags(self, tag_ids: list[str], ca: ComicArchive) -> tuple[GenericMetadata, Exception | None]:
md = GenericMetadata()
error = None
try:
for tag_id in tag_ids:
metadata = ca.read_tags(tag_id)
md.overlay(
metadata,
mode=self.config[0].Metadata_Options__tag_merge,
merge_lists=self.config[0].Metadata_Options__tag_merge_lists,
)
except Exception as e:
error = e
return md, error
def read_selected_tags(
self, tag_ids: list[str], ca: ComicArchive
) -> tuple[GenericMetadata, list[str], Exception | None]:
return read_selected_tags(
tag_ids, ca, self.config[0].Metadata_Options__tag_merge, self.config[0].Metadata_Options__tag_merge_lists
)
def file_list_cleared(self) -> None:
self.reset_app()

View File

@ -49,6 +49,15 @@
<verstretch>7</verstretch>
</sizepolicy>
</property>
<property name="styleSheet">
<string notr="true">QTableWidget[rowCount=&quot;0&quot;] {
background-image: url(&quot;:/graphics/about.png&quot;);
background-attachment: fixed;
background-position: top center;
background-repeat: no-repeat;
background-color: white;
}</string>
</property>
<property name="selectionMode">
<enum>QAbstractItemView::SingleSelection</enum>
</property>
@ -153,7 +162,7 @@
</widget>
</item>
<item>
<widget class="QWidget" name="imageIssuesSourceLogo" native="true">
<widget class="QWidget" name="imageSourceLogo" native="true">
<property name="minimumSize">
<size>
<width>300</width>

BIN
comictaggerlib/ui/pyqttoast/.DS_Store vendored Normal file

Binary file not shown.

View File

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2024 Niklas Henning
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -0,0 +1,231 @@
# PyQt Toast
[![PyPI](https://img.shields.io/badge/pypi-v1.3.2-blue)](https://pypi.org/project/pyqt-toast-notification/)
[![Python](https://img.shields.io/badge/python-3.7+-blue)](https://github.com/niklashenning/pyqttoast)
[![Build](https://img.shields.io/badge/build-passing-neon)](https://github.com/niklashenning/pyqttoast)
[![Coverage](https://img.shields.io/badge/coverage-95%25-green)](https://github.com/niklashenning/pyqttoast)
[![License](https://img.shields.io/badge/license-MIT-green)](https://github.com/niklashenning/pyqttoast/blob/master/LICENSE)
A fully customizable and modern toast notification library for PyQt and PySide
![pyqttoast](https://github.com/niklashenning/pyqt-toast/assets/58544929/c104f10e-08df-4665-98d8-3785822a20dc)
## Features
* Supports showing multiple toasts at the same time
* Supports queueing of toasts
* Supports 7 different positions
* Supports multiple screens
* Supports positioning relative to widgets
* Modern and fully customizable UI
* Works with `PyQt5`, `PyQt6`, `PySide2`, and `PySide6`
## Installation
```
pip install pyqt-toast-notification
```
## Usage
Import the `Toast` class, instantiate it, and show the toast notification with the `show()` method:
```python
from PyQt6.QtWidgets import QMainWindow, QPushButton
from pyqttoast import Toast, ToastPreset
class Window(QMainWindow):
def __init__(self):
super().__init__(parent=None)
# Add button and connect click event
self.button = QPushButton(self)
self.button.setText('Show toast')
self.button.clicked.connect(self.show_toast)
# Shows a toast notification every time the button is clicked
def show_toast(self):
toast = Toast(self)
toast.setDuration(5000) # Hide after 5 seconds
toast.setTitle('Success! Confirmation email sent.')
toast.setText('Check your email to complete signup.')
toast.applyPreset(ToastPreset.SUCCESS) # Apply style preset
toast.show()
```
> **IMPORTANT:** <br>An instance of `Toast` can only be shown **once**. If you want to show another one, even if the content is exactly the same, you have to create another instance.
## Customization
* **Setting the position of the toasts (<u>static</u>):**
```python
Toast.setPosition(ToastPosition.BOTTOM_MIDDLE) # Default: ToastPosition.BOTTOM_RIGHT
```
> **AVAILABLE POSITIONS:** <br> `BOTTOM_LEFT`, `BOTTOM_MIDDLE`, `BOTTOM_RIGHT`, `TOP_LEFT`, `TOP_MIDDLE`, `TOP_RIGHT`, `CENTER`
* **Setting whether the toasts should always be shown on the main screen (<u>static</u>):**
```python
Toast.setAlwaysOnMainScreen(True) # Default: False
```
* **Positioning the toasts relative to a widget instead of a screen (<u>static</u>):**
```python
Toast.setPositionRelativeToWidget(some_widget) # Default: None
```
* **Setting a limit on how many toasts can be shown at the same time (<u>static</u>):**
```python
Toast.setMaximumOnScreen(5) # Default: 3
```
> If you try to show more toasts than the maximum amount on screen, they will get added to a queue and get shown as soon as one of the currently showing toasts is closed.
* **Setting the vertical spacing between the toasts (<u>static</u>):**
```python
Toast.setSpacing(20) # Default: 10
```
* **Setting the x and y offset of the toast position (<u>static</u>):**
```python
Toast.setOffset(30, 55) # Default: 20, 45
```
* **Making the toast show forever until it is closed:**
```python
toast.setDuration(0) # Default: 5000
```
* **Enabling or disabling the duration bar:**
```python
toast.setShowDurationBar(False) # Default: True
```
* **Adding an icon:**
```python
toast.setIcon(ToastIcon.SUCCESS) # Default: ToastIcon.INFORMATION
toast.setShowIcon(True) # Default: False
# Or setting a custom icon:
toast.setIcon(QPixmap('path/to/your/icon.png'))
# If you want to show the icon without recoloring it, set the icon color to None:
toast.setIconColor(None) # Default: #5C5C5C
```
> **AVAILABLE ICONS:** <br> `SUCCESS`, `WARNING`, `ERROR`, `INFORMATION`, `CLOSE`
* **Setting the icon size:**
```python
toast.setIconSize(QSize(14, 14)) # Default: QSize(18, 18)
```
* **Enabling or disabling the icon separator:**
```python
toast.setShowIconSeparator(False) # Default: True
```
* **Setting the close button alignment:**
```python
toast.setCloseButtonAlignment(ToastButtonAlignment.MIDDLE) # Default: ToastButtonAlignment.TOP
```
> **AVAILABLE ALIGNMENTS:** <br> `TOP`, `MIDDLE`, `BOTTOM`
* **Enabling or disabling the close button:**
```python
toast.setShowCloseButton(False) # Default: True
```
* **Customizing the duration of the fade animations (milliseconds):**
```python
toast.setFadeInDuration(100) # Default: 250
toast.setFadeOutDuration(150) # Default: 250
```
* **Enabling or disabling duration reset on hover:**
```python
toast.setResetDurationOnHover(False) # Default: True
```
* **Making the corners rounded:**
```python
toast.setBorderRadius(3) # Default: 0
```
* **Setting custom colors:**
```python
toast.setBackgroundColor(QColor('#292929')) # Default: #E7F4F9
toast.setTitleColor(QColor('#FFFFFF')) # Default: #000000
toast.setTextColor(QColor('#D0D0D0')) # Default: #5C5C5C
toast.setDurationBarColor(QColor('#3E9141')) # Default: #5C5C5C
toast.setIconColor(QColor('#3E9141')) # Default: #5C5C5C
toast.setIconSeparatorColor(QColor('#585858')) # Default: #D9D9D9
toast.setCloseButtonIconColor(QColor('#C9C9C9')) # Default: #000000
```
* **Setting custom fonts:**
```python
# Init font
font = QFont('Times', 10, QFont.Weight.Bold)
# Set fonts
toast.setTitleFont(font) # Default: QFont('Arial', 9, QFont.Weight.Bold)
toast.setTextFont(font) # Default: QFont('Arial', 9)
```
* **Applying a style preset:**
```python
toast.applyPreset(ToastPreset.ERROR)
```
> **AVAILABLE PRESETS:** <br> `SUCCESS`, `WARNING`, `ERROR`, `INFORMATION`, `SUCCESS_DARK`, `WARNING_DARK`, `ERROR_DARK`, `INFORMATION_DARK`
* **Setting toast size constraints:**
```python
# Minimum and maximum size
toast.setMinimumWidth(100)
toast.setMaximumWidth(350)
toast.setMinimumHeight(50)
toast.setMaximumHeight(120)
# Fixed size (not recommended)
toast.setFixedSize(QSize(350, 80))
```
**<br>Other customization options:**
| Option | Description | Default |
|-------------------------------|---------------------------------------------------------------------------------|----------------------------|
| `setFixedScreen()` | Fixed screen where the toasts will be shown (static) | `None` |
| `setMovePositionWithWidget()` | Whether the toasts should move with widget if positioned relative to a widget | `True` |
| `setIconSeparatorWidth()` | Width of the icon separator that separates the icon and text section | `2` |
| `setCloseButtonIcon()` | Icon of the close button | `ToastIcon.CLOSE` |
| `setCloseButtonIconSize()` | Size of the close button icon | `QSize(10, 10)` |
| `setCloseButtonSize()` | Size of the close button | `QSize(24, 24)` |
| `setStayOnTop()` | Whether the toast stays on top of other windows even when they are focused | `True` |
| `setTextSectionSpacing()` | Vertical spacing between the title and the text | `8` |
| `setMargins()` | Margins around the whole toast content | `QMargins(20, 18, 10, 18)` |
| `setIconMargins()` | Margins around the icon | `QMargins(0, 0, 15, 0)` |
| `setIconSectionMargins()` | Margins around the icon section (the area with the icon and the icon separator) | `QMargins(0, 0, 15, 0)` |
| `setTextSectionMargins()` | Margins around the text section (the area with the title and the text) | `QMargins(0, 0, 15, 0)` |
| `setCloseButtonMargins()` | Margins around the close button | `QMargins(0, -8, 0, -8)` |
## Demo
https://github.com/niklashenning/pyqt-toast/assets/58544929/f4d7f4a4-6d69-4087-ae19-da54b6da499d
The demos for PyQt5, PyQt6, and PySide6 can be found in the [demo](demo) folder.
## Tests
Installing the required test dependencies [PyQt6](https://pypi.org/project/PyQt6/), [pytest](https://github.com/pytest-dev/pytest), and [coveragepy](https://github.com/nedbat/coveragepy):
```
pip install PyQt6 pytest coverage
```
To run the tests with coverage, clone this repository, go into the main directory and run:
```
coverage run -m pytest
coverage report --ignore-errors -m
```
## License
This software is licensed under the [MIT license](https://github.com/niklashenning/pyqttoast/blob/master/LICENSE).

View File

@ -0,0 +1,11 @@
from __future__ import annotations
from .toast import Toast, ToastButtonAlignment, ToastIcon, ToastPosition, ToastPreset
__all__ = [
"Toast",
"ToastButtonAlignment",
"ToastIcon",
"ToastPosition",
"ToastPreset",
]

View File

@ -0,0 +1,41 @@
from __future__ import annotations
from PyQt5.QtGui import QColor
# Duration (ms) of the animation used when repositioning visible toasts.
UPDATE_POSITION_DURATION = 200
# Size (px) of the manually drawn drop shadow border around a toast.
DROP_SHADOW_SIZE = 5

# Accent colors used by the light and dark toast presets.
SUCCESS_ACCENT_COLOR = QColor("#3E9141")
WARNING_ACCENT_COLOR = QColor("#E8B849")
ERROR_ACCENT_COLOR = QColor("#BA2626")
INFORMATION_ACCENT_COLOR = QColor("#007FFF")
DEFAULT_ACCENT_COLOR = QColor("#5C5C5C")

# Default colors for the light theme.
DEFAULT_BACKGROUND_COLOR = QColor("#E7F4F9")
DEFAULT_TITLE_COLOR = QColor("#000000")
DEFAULT_TEXT_COLOR = QColor("#5C5C5C")
DEFAULT_ICON_SEPARATOR_COLOR = QColor("#D9D9D9")
DEFAULT_CLOSE_BUTTON_ICON_COLOR = QColor("#000000")

# Default colors for the dark theme (the *_DARK presets).
DEFAULT_BACKGROUND_COLOR_DARK = QColor("#292929")
DEFAULT_TITLE_COLOR_DARK = QColor("#FFFFFF")
DEFAULT_TEXT_COLOR_DARK = QColor("#D0D0D0")
DEFAULT_ICON_SEPARATOR_COLOR_DARK = QColor("#585858")
DEFAULT_CLOSE_BUTTON_ICON_COLOR_DARK = QColor("#C9C9C9")

__all__ = [
    "UPDATE_POSITION_DURATION",
    "DROP_SHADOW_SIZE",
    "SUCCESS_ACCENT_COLOR",
    "WARNING_ACCENT_COLOR",
    "ERROR_ACCENT_COLOR",
    "INFORMATION_ACCENT_COLOR",
    "DEFAULT_ACCENT_COLOR",
    "DEFAULT_BACKGROUND_COLOR",
    "DEFAULT_TITLE_COLOR",
    "DEFAULT_TEXT_COLOR",
    "DEFAULT_ICON_SEPARATOR_COLOR",
    "DEFAULT_CLOSE_BUTTON_ICON_COLOR",
    "DEFAULT_BACKGROUND_COLOR_DARK",
    "DEFAULT_TITLE_COLOR_DARK",
    "DEFAULT_TEXT_COLOR_DARK",
    "DEFAULT_ICON_SEPARATOR_COLOR_DARK",
    "DEFAULT_CLOSE_BUTTON_ICON_COLOR_DARK",
]

View File

@ -0,0 +1,5 @@
from __future__ import annotations
import importlib.resources
css_path = importlib.resources.files(__package__)

View File

@ -0,0 +1,24 @@
#drop-shadow-layer-1 {
background: rgba(0, 0, 0, 3);
border-radius: 8px;
}
#drop-shadow-layer-2 {
background: rgba(0, 0, 0, 5);
border-radius: 8px;
}
#drop-shadow-layer-3 {
background: rgba(0, 0, 0, 6);
border-radius: 8px;
}
#drop-shadow-layer-4 {
background: rgba(0, 0, 0, 9);
border-radius: 8px;
}
#drop-shadow-layer-5 {
background: rgba(0, 0, 0, 10);
border-radius: 8px;
}

View File

@ -0,0 +1,7 @@
#toast-close-button {
background: transparent;
}
#toast-icon-widget {
background: transparent;
}

View File

@ -0,0 +1,57 @@
from __future__ import annotations
from PyQt5.QtCore import QSize
from PyQt5.QtWidgets import QWidget
from .css import css_path
class DropShadow(QWidget):
    """Soft drop shadow rendered as five stacked, translucent rounded rectangles.

    The shadow is drawn manually with plain widgets because Qt allows only a
    single graphics effect per widget, so a QGraphicsDropShadowEffect cannot
    be combined with the toast's other effects.
    """

    def __init__(self, parent: QWidget | None = None) -> None:
        """Create a new DropShadow instance

        :param parent: the parent widget
        """
        super().__init__(parent)

        # Drawn manually since only one graphics effect can be applied.
        # Create the five layers innermost-last; each layer's object name
        # selects its per-layer opacity from drop_shadow.css.
        for index in range(1, 6):
            layer = QWidget(self)
            layer.setObjectName(f"drop-shadow-layer-{index}")
            setattr(self, f"layer_{index}", layer)

        # Apply stylesheet
        self.setStyleSheet((css_path / "drop_shadow.css").read_text(encoding="utf-8"))

    def resize(self, size: QSize) -> None:
        """Resize the drop shadow widget

        :param size: new size
        """
        super().resize(size)
        layers = (self.layer_1, self.layer_2, self.layer_3, self.layer_4, self.layer_5)
        # Each successive layer is inset by one pixel on every side, producing
        # the graduated shadow edge.
        for inset, layer in enumerate(layers):
            layer.resize(size.width() - 2 * inset, size.height() - 2 * inset)
            layer.move(inset, inset)

View File

@ -0,0 +1,7 @@
from __future__ import annotations
import os
def get_hook_dirs() -> list[str]:
    """Return the directories PyInstaller should scan for this package's hooks.

    :return: a single-element list containing the directory of this module
    """
    hook_dir = os.path.dirname(__file__)
    return [hook_dir]

View File

@ -0,0 +1,5 @@
from __future__ import annotations
from PyInstaller.utils.hooks import collect_data_files
datas = collect_data_files("pyqttoast", excludes=["hooks"])

View File

@ -0,0 +1,55 @@
from __future__ import annotations
from PyQt5.QtGui import QColor, QImage, QPixmap, qRgba
from .icons import icon_path
from .toast_enums import ToastIcon
class IconUtils:
    """Helpers for loading the bundled toast icons and recoloring icon images."""

    @staticmethod
    def get_icon_from_enum(enum_icon: ToastIcon) -> QPixmap:
        """Get a QPixmap from a ToastIcon

        :param enum_icon: ToastIcon
        :return: pixmap of the ToastIcon (empty pixmap for an unknown value)
        """
        filenames = {
            ToastIcon.SUCCESS: "success.png",
            ToastIcon.WARNING: "warning.png",
            ToastIcon.ERROR: "error.png",
            ToastIcon.INFORMATION: "information.png",
            ToastIcon.CLOSE: "close.png",
        }
        pixmap = QPixmap()
        filename = filenames.get(enum_icon)
        if filename is not None:
            # Icons ship inside the package; read the raw bytes from there.
            pixmap.loadFromData((icon_path / filename).read_bytes())
        return pixmap

    @staticmethod
    def recolor_image(image: QImage, color: QColor | None) -> QImage:
        """Recolor an image in place, keeping each pixel's alpha, and return it.

        :param image: image to recolor
        :param color: new color (None if the image should not be recolored)
        :return: the recolored image (the same QImage object that was passed in)
        """
        # Leave image as is if color is None
        if color is None:
            return image

        # The replacement RGB is constant; only the alpha varies per pixel.
        red, green, blue = color.red(), color.green(), color.blue()
        for col in range(image.width()):
            for row in range(image.height()):
                alpha = image.pixelColor(col, row).alpha()
                image.setPixelColor(col, row, QColor.fromRgba(qRgba(red, green, blue, alpha)))
        return image

View File

@ -0,0 +1,5 @@
from __future__ import annotations
import importlib.resources
icon_path = importlib.resources.files(__package__)

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.6 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.4 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.9 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.2 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.8 KiB

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,38 @@
from __future__ import annotations
from enum import Enum
class ToastPreset(Enum):
    """Predefined visual style of a toast.

    The ``*_DARK`` variants are the same presets with colors intended for
    dark application themes.
    """

    SUCCESS = 1
    WARNING = 2
    ERROR = 3
    INFORMATION = 4
    SUCCESS_DARK = 5
    WARNING_DARK = 6
    ERROR_DARK = 7
    INFORMATION_DARK = 8
class ToastIcon(Enum):
    """Built-in icons shipped with the package (one .png per member, plus the
    close-button icon)."""

    SUCCESS = 1
    WARNING = 2
    ERROR = 3
    INFORMATION = 4
    CLOSE = 5
class ToastPosition(Enum):
    """Screen (or parent-widget) position where toasts are shown."""

    BOTTOM_LEFT = 1
    BOTTOM_MIDDLE = 2
    BOTTOM_RIGHT = 3
    TOP_LEFT = 4
    TOP_MIDDLE = 5
    TOP_RIGHT = 6
    CENTER = 7
class ToastButtonAlignment(Enum):
    """Vertical alignment of the close button within the toast."""

    TOP = 1
    MIDDLE = 2
    BOTTOM = 3

View File

@ -9,6 +9,7 @@ import webbrowser
from collections.abc import Sequence
from PyQt5.QtCore import QUrl
from PyQt5.QtGui import QPalette
from PyQt5.QtWidgets import QWidget
logger = logging.getLogger(__name__)
@ -122,6 +123,12 @@ if qt_available:
# And the move call repositions the window
window.move(hpos + main_window_size.left(), vpos + main_window_size.top())
def is_dark_mode() -> bool:
    """Guess whether the application theme is dark.

    Uses the default palette: the theme is considered dark when the window
    text color is lighter than the window background color.
    """
    default_palette = QPalette()
    foreground = default_palette.color(QPalette.WindowText)
    background = default_palette.color(QPalette.Window)
    return foreground.lightness() > background.lightness()
def get_qimage_from_data(image_data: bytes) -> QtGui.QImage:
img = QtGui.QImage()
success = img.loadFromData(image_data)

View File

@ -31,7 +31,7 @@
<number>0</number>
</property>
<item alignment="Qt::AlignTop">
<widget class="QWidget" name="imageContainer" native="true">
<widget class="QWidget" name="coverImageContainer" native="true">
<property name="sizePolicy">
<sizepolicy hsizetype="Preferred" vsizetype="Preferred">
<horstretch>0</horstretch>
@ -66,7 +66,7 @@
</widget>
</item>
<item>
<widget class="QLabel" name="lblSourceName">
<widget class="QLabel" name="lblIssuesSourceName">
<property name="sizePolicy">
<sizepolicy hsizetype="Preferred" vsizetype="Preferred">
<horstretch>0</horstretch>
@ -177,7 +177,7 @@
</property>
</column>
</widget>
<widget class="QTextEdit" name="teDetails">
<widget class="QTextEdit" name="teDescription">
<property name="sizePolicy">
<sizepolicy hsizetype="Expanding" vsizetype="Expanding">
<horstretch>0</horstretch>
@ -223,7 +223,7 @@
</widget>
</item>
<item>
<widget class="QCheckBox" name="cbxFilter">
<widget class="QCheckBox" name="cbxPublisherFilter">
<property name="toolTip">
<string>Filter the publishers based on the publisher filter.</string>
</property>

View File

@ -15,7 +15,7 @@ from __future__ import annotations
import logging
import pathlib
from typing import Any, Callable
from typing import Any, Callable, NamedTuple
import settngs
@ -25,6 +25,11 @@ from comictalker.talker_utils import fix_url
logger = logging.getLogger(__name__)
class RLCallBack(NamedTuple):
    """Callback bundle passed to talker fetch methods as ``on_rate_limit``,
    used to notify the caller while a rate-limit delay is in progress.
    """

    # Invoked when a rate limit is hit; takes two floats
    # (NOTE(review): presumably the total delay and the time remaining —
    # confirm against the limiter implementation).
    callback: Callable[[float, float], None]
    # Interval in seconds (NOTE(review): presumably how often the callback is
    # re-invoked while waiting — confirm against the limiter implementation).
    interval: float
class TalkerError(Exception):
"""Base class exception for information sources.
@ -170,6 +175,8 @@ class ComicTalker:
refresh_cache: bool = False,
literal: bool = False,
series_match_thresh: int = 90,
*,
on_rate_limit: RLCallBack | None = None,
) -> list[ComicSeries]:
"""
This function should return a list of series that match the given series name
@ -191,7 +198,12 @@ class ComicTalker:
raise NotImplementedError
def fetch_comic_data(
self, issue_id: str | None = None, series_id: str | None = None, issue_number: str = ""
self,
issue_id: str | None = None,
series_id: str | None = None,
issue_number: str = "",
*,
on_rate_limit: RLCallBack | None = None,
) -> GenericMetadata:
"""
This function should return an instance of GenericMetadata for a single issue.
@ -210,19 +222,34 @@ class ComicTalker:
"""
raise NotImplementedError
def fetch_series(self, series_id: str) -> ComicSeries:
def fetch_series(
self,
series_id: str,
*,
on_rate_limit: RLCallBack | None = None,
) -> ComicSeries:
"""
This function should return an instance of ComicSeries from the given series ID.
Caching MUST be implemented on this function.
"""
raise NotImplementedError
def fetch_issues_in_series(self, series_id: str) -> list[GenericMetadata]:
def fetch_issues_in_series(
self,
series_id: str,
*,
on_rate_limit: RLCallBack | None = None,
) -> list[GenericMetadata]:
"""Expected to return a list of issues with a given series ID"""
raise NotImplementedError
def fetch_issues_by_series_issue_num_and_year(
self, series_id_list: list[str], issue_number: str, year: int | None
self,
series_id_list: list[str],
issue_number: str,
year: int | None,
*,
on_rate_limit: RLCallBack | None = None,
) -> list[GenericMetadata]:
"""
This function should return a single issue for each series id in

View File

@ -27,7 +27,6 @@ from typing import Any, Callable, Generic, TypeVar, cast
from urllib.parse import parse_qsl, urljoin
import settngs
from pyrate_limiter import Limiter, RequestRate
from typing_extensions import Required, TypedDict
from comicapi import utils
@ -36,7 +35,8 @@ from comicapi.issuestring import IssueString
from comicapi.utils import LocationParseError, parse_url
from comictalker import talker_utils
from comictalker.comiccacher import ComicCacher, Issue, Series
from comictalker.comictalker import ComicTalker, TalkerDataError, TalkerNetworkError
from comictalker.comictalker import ComicTalker, RLCallBack, TalkerDataError, TalkerNetworkError
from comictalker.vendor.pyrate_limiter import Limiter, RequestRate
try:
import niquests as requests
@ -101,7 +101,7 @@ class CVSeries(TypedDict, total=False):
description: str
id: Required[int]
image: CVImage
name: str
name: Required[str]
publisher: CVPublisher
start_year: str
resource_type: str
@ -270,6 +270,8 @@ class ComicVineTalker(ComicTalker):
refresh_cache: bool = False,
literal: bool = False,
series_match_thresh: int = 90,
*,
on_rate_limit: RLCallBack | None = None,
) -> list[ComicSeries]:
# Sanitize the series name for comicvine searching, comicvine search ignore symbols
search_series_name = utils.sanitize_title(series_name, basic=literal)
@ -301,7 +303,11 @@ class ComicVineTalker(ComicTalker):
"limit": 100,
}
cv_response: CVResult[list[CVSeries]] = self._get_cv_content(urljoin(self.api_url, "search"), params)
cv_response: CVResult[list[CVSeries]] = self._get_cv_content(
urljoin(self.api_url, "search"),
params,
on_rate_limit=on_rate_limit,
)
search_results: list[CVSeries] = []
@ -346,7 +352,11 @@ class ComicVineTalker(ComicTalker):
page += 1
params["page"] = page
cv_response = self._get_cv_content(urljoin(self.api_url, "search"), params)
cv_response = self._get_cv_content(
urljoin(self.api_url, "search"),
params,
on_rate_limit=on_rate_limit,
)
search_results.extend(cv_response["results"])
current_result_count += cv_response["number_of_page_results"]
@ -369,24 +379,56 @@ class ComicVineTalker(ComicTalker):
return formatted_search_results
def fetch_comic_data(
self, issue_id: str | None = None, series_id: str | None = None, issue_number: str = ""
self,
issue_id: str | None = None,
series_id: str | None = None,
issue_number: str = "",
on_rate_limit: RLCallBack | None = None,
) -> GenericMetadata:
comic_data = GenericMetadata()
if issue_id:
comic_data = self._fetch_issue_data_by_issue_id(issue_id)
comic_data = self._fetch_issue_data_by_issue_id(
issue_id,
on_rate_limit=on_rate_limit,
)
elif issue_number and series_id:
comic_data = self._fetch_issue_data(int(series_id), issue_number)
comic_data = self._fetch_issue_data(
int(series_id),
issue_number,
on_rate_limit=on_rate_limit,
)
return comic_data
def fetch_series(self, series_id: str) -> ComicSeries:
return self._fetch_series_data(int(series_id))[0]
def fetch_series(
self,
series_id: str,
on_rate_limit: RLCallBack | None = None,
) -> ComicSeries:
return self._fetch_series_data(
int(series_id),
on_rate_limit=on_rate_limit,
)[0]
def fetch_issues_in_series(self, series_id: str) -> list[GenericMetadata]:
return [x[0] for x in self._fetch_issues_in_series(series_id)]
def fetch_issues_in_series(
self,
series_id: str,
on_rate_limit: RLCallBack | None = None,
) -> list[GenericMetadata]:
return [
x[0]
for x in self._fetch_issues_in_series(
series_id,
on_rate_limit=on_rate_limit,
)
]
def fetch_issues_by_series_issue_num_and_year(
self, series_id_list: list[str], issue_number: str, year: str | int | None
self,
series_id_list: list[str],
issue_number: str,
year: str | int | None,
on_rate_limit: RLCallBack | None = None,
) -> list[GenericMetadata]:
logger.debug("Fetching comics by series ids: %s and number: %s", series_id_list, issue_number)
# before we search online, look in our cache, since we might already have this info
@ -408,7 +450,12 @@ class ComicVineTalker(ComicTalker):
cached_results.append(
self._map_comic_issue_to_metadata(
cvissue,
self._fetch_series([int(cvissue["volume"]["id"])])[0][0],
self._fetch_series(
[int(cvissue["volume"]["id"])],
on_rate_limit=on_rate_limit,
)[
0
][0],
),
)
issue_found = True
@ -440,7 +487,11 @@ class ComicVineTalker(ComicTalker):
"filter": flt,
}
cv_response: CVResult[list[CVIssue]] = self._get_cv_content(urljoin(self.api_url, "issues/"), params)
cv_response: CVResult[list[CVIssue]] = self._get_cv_content(
urljoin(self.api_url, "issues/"),
params,
on_rate_limit=on_rate_limit,
)
current_result_count = cv_response["number_of_page_results"]
total_result_count = cv_response["number_of_total_results"]
@ -455,7 +506,11 @@ class ComicVineTalker(ComicTalker):
offset += cv_response["number_of_page_results"]
params["offset"] = offset
cv_response = self._get_cv_content(urljoin(self.api_url, "issues/"), params)
cv_response = self._get_cv_content(
urljoin(self.api_url, "issues/"),
params,
on_rate_limit=on_rate_limit,
)
filtered_issues_result.extend(cv_response["results"])
current_result_count += cv_response["number_of_page_results"]
@ -470,14 +525,25 @@ class ComicVineTalker(ComicTalker):
)
formatted_filtered_issues_result = [
self._map_comic_issue_to_metadata(x, self._fetch_series_data(x["volume"]["id"])[0])
self._map_comic_issue_to_metadata(
x,
self._fetch_series_data(
x["volume"]["id"],
on_rate_limit=on_rate_limit,
)[0],
)
for x in filtered_issues_result
]
formatted_filtered_issues_result.extend(cached_results)
return formatted_filtered_issues_result
def fetch_comics(self, *, issue_ids: list[str]) -> list[GenericMetadata]:
def fetch_comics(
self,
*,
issue_ids: list[str],
on_rate_limit: RLCallBack | None = None,
) -> list[GenericMetadata]:
logger.debug("Fetching comic IDs: %s", issue_ids)
# before we search online, look in our cache, since we might already have this info
cvc = ComicCacher(self.cache_folder, self.version)
@ -490,7 +556,12 @@ class ComicVineTalker(ComicTalker):
cached_results.append(
self._map_comic_issue_to_metadata(
json.loads(cached_issue[0].data),
self._fetch_series([int(cached_issue[0].series_id)])[0][0],
self._fetch_series(
[int(cached_issue[0].series_id)],
on_rate_limit=on_rate_limit,
)[
0
][0],
),
)
else:
@ -511,7 +582,11 @@ class ComicVineTalker(ComicTalker):
"format": "json",
"filter": flt,
}
cv_response: CVResult[list[CVIssue]] = self._get_cv_content(issue_url, params)
cv_response: CVResult[list[CVIssue]] = self._get_cv_content(
issue_url,
params,
on_rate_limit=on_rate_limit,
)
issue_results = cv_response["results"]
page = 1
@ -525,12 +600,22 @@ class ComicVineTalker(ComicTalker):
offset += cv_response["number_of_page_results"]
params["offset"] = offset
cv_response = self._get_cv_content(issue_url, params)
cv_response = self._get_cv_content(
issue_url,
params,
on_rate_limit=on_rate_limit,
)
issue_results.extend(cv_response["results"])
current_result_count += cv_response["number_of_page_results"]
series_info = {s[0].id: s[0] for s in self._fetch_series([int(i["volume"]["id"]) for i in issue_results])}
series_info = {
s[0].id: s[0]
for s in self._fetch_series(
[int(i["volume"]["id"]) for i in issue_results],
on_rate_limit=on_rate_limit,
)
}
for issue in issue_results:
cvc.add_issues_info(
@ -550,7 +635,11 @@ class ComicVineTalker(ComicTalker):
return cached_results
def _fetch_series(self, series_ids: list[int]) -> list[tuple[ComicSeries, bool]]:
def _fetch_series(
self,
series_ids: list[int],
on_rate_limit: RLCallBack | None,
) -> list[tuple[ComicSeries, bool]]:
# before we search online, look in our cache, since we might already have this info
cvc = ComicCacher(self.cache_folder, self.version)
cached_results: list[tuple[ComicSeries, bool]] = []
@ -576,7 +665,11 @@ class ComicVineTalker(ComicTalker):
"format": "json",
"filter": flt,
}
cv_response: CVResult[list[CVSeries]] = self._get_cv_content(series_url, params)
cv_response: CVResult[list[CVSeries]] = self._get_cv_content(
series_url,
params,
on_rate_limit=on_rate_limit,
)
series_results = cv_response["results"]
page = 1
@ -590,7 +683,11 @@ class ComicVineTalker(ComicTalker):
offset += cv_response["number_of_page_results"]
params["offset"] = offset
cv_response = self._get_cv_content(series_url, params)
cv_response = self._get_cv_content(
series_url,
params,
on_rate_limit=on_rate_limit,
)
series_results.extend(cv_response["results"])
current_result_count += cv_response["number_of_page_results"]
@ -606,14 +703,24 @@ class ComicVineTalker(ComicTalker):
return cached_results
def _get_cv_content(self, url: str, params: dict[str, Any]) -> CVResult[T]:
def _get_cv_content(
self,
url: str,
params: dict[str, Any],
*,
on_rate_limit: RLCallBack | None,
) -> CVResult[T]:
"""
Get the content from the CV server.
"""
ratelimit_key = url
if self.api_key == self.default_api_key:
ratelimit_key = "cv"
with self.limiter.ratelimit(ratelimit_key, delay=True):
with self.limiter.ratelimit(
ratelimit_key,
delay=True,
on_rate_limit=on_rate_limit,
):
cv_response: CVResult[T] = self._get_url_content(url, params)
if cv_response["status_code"] != 1:
@ -644,7 +751,7 @@ class ComicVineTalker(ComicTalker):
logger.debug(str(resp.status_code))
elif resp.status_code in (requests.status_codes.codes.TOO_MANY_REQUESTS, TWITTER_TOO_MANY_REQUESTS):
logger.info(f"{self.name} rate limit encountered. Waiting for 10 seconds\n")
logger.info(f"{self.name} rate limit encountered. Waiting 10 seconds\n")
self._log_total_requests()
time.sleep(10)
limit_counter += 1
@ -709,13 +816,20 @@ class ComicVineTalker(ComicTalker):
format=None,
)
def _fetch_issues_in_series(self, series_id: str) -> list[tuple[GenericMetadata, bool]]:
def _fetch_issues_in_series(
self,
series_id: str,
on_rate_limit: RLCallBack | None,
) -> list[tuple[GenericMetadata, bool]]:
logger.debug("Fetching all issues in series: %s", series_id)
# before we search online, look in our cache, since we might already have this info
cvc = ComicCacher(self.cache_folder, self.version)
cached_results = cvc.get_series_issues_info(series_id, self.id)
series = self._fetch_series_data(int(series_id))[0]
series = self._fetch_series_data(
int(series_id),
on_rate_limit=on_rate_limit,
)[0]
logger.debug(
"Found %d issues cached need %d issues",
@ -731,7 +845,11 @@ class ComicVineTalker(ComicTalker):
"format": "json",
"offset": 0,
}
cv_response: CVResult[list[CVIssue]] = self._get_cv_content(urljoin(self.api_url, "issues/"), params)
cv_response: CVResult[list[CVIssue]] = self._get_cv_content(
urljoin(self.api_url, "issues/"),
params,
on_rate_limit=on_rate_limit,
)
current_result_count = cv_response["number_of_page_results"]
total_result_count = cv_response["number_of_total_results"]
@ -746,13 +864,23 @@ class ComicVineTalker(ComicTalker):
offset += cv_response["number_of_page_results"]
params["offset"] = offset
cv_response = self._get_cv_content(urljoin(self.api_url, "issues/"), params)
cv_response = self._get_cv_content(
urljoin(self.api_url, "issues/"),
params,
on_rate_limit=on_rate_limit,
)
series_issues_result.extend(cv_response["results"])
current_result_count += cv_response["number_of_page_results"]
# Format to expected output
formatted_series_issues_result = [
self._map_comic_issue_to_metadata(x, self._fetch_series_data(x["volume"]["id"])[0])
self._map_comic_issue_to_metadata(
x,
self._fetch_series_data(
x["volume"]["id"],
on_rate_limit=on_rate_limit,
)[0],
)
for x in series_issues_result
]
@ -766,7 +894,11 @@ class ComicVineTalker(ComicTalker):
)
return [(x, False) for x in formatted_series_issues_result]
def _fetch_series_data(self, series_id: int) -> tuple[ComicSeries, bool]:
def _fetch_series_data(
self,
series_id: int,
on_rate_limit: RLCallBack | None,
) -> tuple[ComicSeries, bool]:
logger.debug("Fetching series info: %s", series_id)
# before we search online, look in our cache, since we might already have this info
cvc = ComicCacher(self.cache_folder, self.version)
@ -782,7 +914,11 @@ class ComicVineTalker(ComicTalker):
"api_key": self.api_key,
"format": "json",
}
cv_response: CVResult[CVSeries] = self._get_cv_content(series_url, params)
cv_response: CVResult[CVSeries] = self._get_cv_content(
series_url,
params,
on_rate_limit=on_rate_limit,
)
series_results = cv_response["results"]
@ -793,9 +929,17 @@ class ComicVineTalker(ComicTalker):
return self._format_series(series_results), True
def _fetch_issue_data(self, series_id: int, issue_number: str) -> GenericMetadata:
def _fetch_issue_data(
self,
series_id: int,
issue_number: str,
on_rate_limit: RLCallBack | None,
) -> GenericMetadata:
logger.debug("Fetching issue by series ID: %s and issue number: %s", series_id, issue_number)
issues_list_results = self._fetch_issues_in_series(str(series_id))
issues_list_results = self._fetch_issues_in_series(
str(series_id),
on_rate_limit=on_rate_limit,
)
# Loop through issue list to find the required issue info
f_record = (GenericMetadata(), False)
@ -811,10 +955,17 @@ class ComicVineTalker(ComicTalker):
return f_record[0]
if f_record[0].issue_id is not None:
return self._fetch_issue_data_by_issue_id(f_record[0].issue_id)
return self._fetch_issue_data_by_issue_id(
f_record[0].issue_id,
on_rate_limit=on_rate_limit,
)
return GenericMetadata()
def _fetch_issue_data_by_issue_id(self, issue_id: str) -> GenericMetadata:
def _fetch_issue_data_by_issue_id(
self,
issue_id: str,
on_rate_limit: RLCallBack | None,
) -> GenericMetadata:
logger.debug("Fetching issue by issue ID: %s", issue_id)
# before we search online, look in our cache, since we might already have this info
cvc = ComicCacher(self.cache_folder, self.version)
@ -823,12 +974,20 @@ class ComicVineTalker(ComicTalker):
logger.debug("Issue cached: %s", bool(cached_issue and cached_issue[1]))
if cached_issue and cached_issue[1]:
return self._map_comic_issue_to_metadata(
json.loads(cached_issue[0].data), self._fetch_series_data(int(cached_issue[0].series_id))[0]
json.loads(cached_issue[0].data),
self._fetch_series_data(
int(cached_issue[0].series_id),
on_rate_limit=on_rate_limit,
)[0],
)
issue_url = urljoin(self.api_url, f"issue/{CVTypeID.Issue}-{issue_id}")
params = {"api_key": self.api_key, "format": "json"}
cv_response: CVResult[CVIssue] = self._get_cv_content(issue_url, params)
cv_response: CVResult[CVIssue] = self._get_cv_content(
issue_url,
params,
on_rate_limit=on_rate_limit,
)
issue_results = cv_response["results"]
@ -846,7 +1005,11 @@ class ComicVineTalker(ComicTalker):
# Now, map the GenericMetadata data to generic metadata
return self._map_comic_issue_to_metadata(
issue_results, self._fetch_series_data(int(issue_results["volume"]["id"]))[0]
issue_results,
self._fetch_series_data(
int(issue_results["volume"]["id"]),
on_rate_limit=on_rate_limit,
)[0],
)
def _map_comic_issue_to_metadata(self, issue: CVIssue, series: ComicSeries) -> GenericMetadata:

1
comictalker/vendor/__init__.py vendored Normal file
View File

@ -0,0 +1 @@
from __future__ import annotations

View File

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2021 vutran1710
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -0,0 +1,402 @@
<img align="left" width="95" height="120" src="docs/_static/logo.png">
# PyrateLimiter
The request rate limiter using Leaky-bucket algorithm.
Full project documentation can be found at [pyratelimiter.readthedocs.io](https://pyratelimiter.readthedocs.io).
[![PyPI version](https://badge.fury.io/py/pyrate-limiter.svg)](https://badge.fury.io/py/pyrate-limiter)
[![PyPI - Python Versions](https://img.shields.io/pypi/pyversions/pyrate-limiter)](https://pypi.org/project/pyrate-limiter)
[![codecov](https://codecov.io/gh/vutran1710/PyrateLimiter/branch/master/graph/badge.svg?token=E0Q0YBSINS)](https://codecov.io/gh/vutran1710/PyrateLimiter)
[![Maintenance](https://img.shields.io/badge/Maintained%3F-yes-green.svg)](https://github.com/vutran1710/PyrateLimiter/graphs/commit-activity)
[![PyPI license](https://img.shields.io/pypi/l/ansicolortags.svg)](https://pypi.python.org/pypi/pyrate-limiter/)
<br>
## Contents
- [PyrateLimiter](#pyratelimiter)
- [Contents](#contents)
- [Features](#features)
- [Installation](#installation)
- [Basic usage](#basic-usage)
- [Defining rate limits](#defining-rate-limits)
- [Applying rate limits](#applying-rate-limits)
- [Identities](#identities)
- [Handling exceeded limits](#handling-exceeded-limits)
- [Bucket analogy](#bucket-analogy)
- [Rate limit exceptions](#rate-limit-exceptions)
- [Rate limit delays](#rate-limit-delays)
- [Additional usage options](#additional-usage-options)
- [Decorator](#decorator)
- [Contextmanager](#contextmanager)
- [Async decorator/contextmanager](#async-decoratorcontextmanager)
- [Backends](#backends)
- [Memory](#memory)
- [SQLite](#sqlite)
- [Redis](#redis)
- [Custom backends](#custom-backends)
- [Additional features](#additional-features)
- [Time sources](#time-sources)
- [Examples](#examples)
## Features
* Tracks any number of rate limits and intervals you want to define
* Independently tracks rate limits for multiple services or resources
* Handles exceeded rate limits by either raising errors or adding delays
* Several usage options including a normal function call, a decorator, or a contextmanager
* Async support
* Includes optional SQLite and Redis backends, which can be used to persist limit tracking across
multiple threads, processes, or application restarts
## Installation
Install using pip:
```
pip install pyrate-limiter
```
Or using conda:
```
conda install --channel conda-forge pyrate-limiter
```
## Basic usage
### Defining rate limits
Consider some public API (like LinkedIn, GitHub, etc.) that has rate limits like the following:
```
- 500 requests per hour
- 1000 requests per day
- 10000 requests per month
```
You can define these rates using the `RequestRate` class, and add them to a `Limiter`:
``` python
from pyrate_limiter import Duration, RequestRate, Limiter
hourly_rate = RequestRate(500, Duration.HOUR) # 500 requests per hour
daily_rate = RequestRate(1000, Duration.DAY) # 1000 requests per day
monthly_rate = RequestRate(10000, Duration.MONTH) # 10000 requests per month
limiter = Limiter(hourly_rate, daily_rate, monthly_rate)
```
or
``` python
from pyrate_limiter import Duration, RequestRate, Limiter
rate_limits = (
RequestRate(500, Duration.HOUR), # 500 requests per hour
RequestRate(1000, Duration.DAY), # 1000 requests per day
RequestRate(10000, Duration.MONTH), # 10000 requests per month
)
limiter = Limiter(*rate_limits)
```
Note that these rates need to be ordered by interval length; in other words, an hourly rate must
come before a daily rate, etc.
### Applying rate limits
Then, use `Limiter.try_acquire()` wherever you are making requests (or other rate-limited operations).
This will raise an exception if the rate limit is exceeded.
```python
import requests
def request_function():
limiter.try_acquire('identity')
requests.get('https://example.com')
while True:
request_function()
```
Alternatively, you can use `Limiter.ratelimit()` as a function decorator:
```python
@limiter.ratelimit('identity')
def request_function():
requests.get('https://example.com')
```
See [Additional usage options](#additional-usage-options) below for more details.
### Identities
Note that both `try_acquire()` and `ratelimit()` take one or more `identity` arguments. Typically this is
the name of the service or resource that is being rate-limited. This allows you to track rate limits
for these resources independently. For example, if you have a service that is rate-limited by user:
```python
def request_function(user_ids):
limiter.try_acquire(*user_ids)
for user_id in user_ids:
requests.get(f'https://example.com?user_id={user_id}')
```
## Handling exceeded limits
When a rate limit is exceeded, you have two options: raise an exception, or add delays.
### Bucket analogy
<img height="300" align="right" src="https://upload.wikimedia.org/wikipedia/commons/c/c4/Leaky_bucket_analogy.JPG">
At this point it's useful to introduce the analogy of "buckets" used for rate-limiting. Here is a
quick summary:
* This library implements the [Leaky Bucket algorithm](https://en.wikipedia.org/wiki/Leaky_bucket).
* It is named after the idea of representing some kind of fixed capacity -- like a network or service -- as a bucket.
* The bucket "leaks" at a constant rate. For web services, this represents the **ideal or permitted request rate**.
* The bucket is "filled" at an intermittent, unpredictable rate, representing the **actual rate of requests**.
* When the bucket is "full", it will overflow, representing **canceled or delayed requests**.
### Rate limit exceptions
By default, a `BucketFullException` will be raised when a rate limit is exceeded.
The error contains a `meta_info` attribute with the following information:
* `identity`: The identity it received
* `rate`: The specific rate that has been exceeded
* `remaining_time`: The remaining time until the next request can be sent
Here's an example that will raise an exception on the 4th request:
```python
from pyrate_limiter import (Duration, RequestRate,
Limiter, BucketFullException)
rate = RequestRate(3, Duration.SECOND)
limiter = Limiter(rate)
for _ in range(4):
try:
limiter.try_acquire('vutran')
except BucketFullException as err:
print(err)
# Output: Bucket for vutran with Rate 3/1 is already full
print(err.meta_info)
# Output: {'identity': 'vutran', 'rate': '3/1', 'remaining_time': 2.9,
# 'error': 'Bucket for vutran with Rate 3/1 is already full'}
```
The rate part of the output is constructed as: `limit / interval`. On the above example, the limit
is 3 and the interval is 1, hence the `Rate 3/1`.
### Rate limit delays
You may want to simply slow down your requests to stay within the rate limits instead of canceling
them. In that case you can use the `delay` argument. Note that this is only available for
`Limiter.ratelimit()`:
```python
@limiter.ratelimit('identity', delay=True)
def my_function():
do_stuff()
```
If you exceed a rate limit with a long interval (daily, monthly, etc.), you may not want to delay
that long. In this case, you can set a `max_delay` (in seconds) that you are willing to wait in
between calls:
```python
@limiter.ratelimit('identity', delay=True, max_delay=360)
def my_function():
do_stuff()
```
In this case, calls may be delayed by at most 360 seconds to stay within the rate limits; any longer
than that, and a `BucketFullException` will be raised instead. Without specifying `max_delay`, calls
will be delayed as long as necessary.
## Additional usage options
Besides `Limiter.try_acquire()`, some additional usage options are available using `Limiter.ratelimit()`:
### Decorator
`Limiter.ratelimit()` can be used as a decorator:
```python
@limiter.ratelimit('identity')
def my_function():
do_stuff()
```
As with `Limiter.try_acquire()`, if calls to the wrapped function exceed the rate limits you
defined, a `BucketFullException` will be raised.
### Contextmanager
`Limiter.ratelimit()` also works as a contextmanager:
```python
def my_function():
with limiter.ratelimit('identity', delay=True):
do_stuff()
```
### Async decorator/contextmanager
`Limiter.ratelimit()` also support async functions, either as a decorator or contextmanager:
```python
@limiter.ratelimit('identity', delay=True)
async def my_function():
await do_stuff()
async def my_function():
async with limiter.ratelimit('identity'):
await do_stuff()
```
When delays are enabled for an async function, `asyncio.sleep()` will be used instead of `time.sleep()`.
## Backends
A few different bucket backends are available, which can be selected using the `bucket_class`
argument for `Limiter`. Any additional backend-specific arguments can be passed
via `bucket_kwargs`.
### Memory
The default bucket is stored in memory, backed by a `queue.Queue`. A list implementation is also available:
```python
from pyrate_limiter import Limiter, MemoryListBucket
limiter = Limiter(bucket_class=MemoryListBucket)
```
### SQLite
If you need to persist the bucket state, a SQLite backend is available.
By default it will store the state in the system temp directory, and you can use
the `path` argument to use a different location:
```python
from pyrate_limiter import Limiter, SQLiteBucket
limiter = Limiter(bucket_class=SQLiteBucket)
```
By default, the database will be stored in the system temp directory. You can specify a different
path via `bucket_kwargs`:
```python
limiter = Limiter(
bucket_class=SQLiteBucket,
bucket_kwargs={'path': '/path/to/db.sqlite'},
)
```
#### Concurrency
This backend is thread-safe.
If you want to use SQLite with multiprocessing, some additional protections are needed. For
these cases, a separate `FileLockSQLiteBucket` class is available. This requires installing the
[py-filelock](https://py-filelock.readthedocs.io) library.
```python
limiter = Limiter(bucket_class=FileLockSQLiteBucket)
```
### Redis
If you have a larger, distributed application, Redis is an ideal backend. This
option requires [redis-py](https://github.com/andymccurdy/redis-py).
Note that this backend requires a `bucket_name` argument, which will be used as a prefix for the
Redis keys created. This can be used to disambiguate between multiple services using the same Redis
instance with pyrate-limiter.
**Important**: you might want to consider adding an `expire_time` for each bucket. In a scenario where some `identity` produces a request rate that is too sparse, it is good practice to expire the bucket holding that identity's info in order to save memory.
```python
from pyrate_limiter import Limiter, RedisBucket, Duration, RequestRate
rates = [
RequestRate(5, 10 * Duration.SECOND),
RequestRate(8, 20 * Duration.SECOND),
]
limiter = Limiter(
    *rates,
bucket_class=RedisBucket,
bucket_kwargs={
'bucket_name':
'my_service',
'expire_time': rates[-1].interval,
},
)
```
#### Connection settings
If you need to pass additional connection settings, you can use the `redis_pool` bucket argument:
```python
from redis import ConnectionPool
redis_pool = ConnectionPool(host='localhost', port=6379, db=0)
rate = RequestRate(5, 10 * Duration.SECOND)
limiter = Limiter(
rate,
bucket_class=RedisBucket,
bucket_kwargs={'redis_pool': redis_pool, 'bucket_name': 'my_service'},
)
```
#### Redis clusters
Redis clusters are also supported, which requires
[redis-py-cluster](https://github.com/Grokzen/redis-py-cluster):
```python
from pyrate_limiter import Limiter, RedisClusterBucket
limiter = Limiter(bucket_class=RedisClusterBucket)
```
### Custom backends
If these don't suit your needs, you can also create your own bucket backend by extending `pyrate_limiter.bucket.AbstractBucket`.
## Additional features
### Time sources
By default, monotonic time is used, to ensure requests are always logged in the correct order.
You can specify a custom time source with the `time_function` argument. For example, you may want to
use the current UTC time for consistency across a distributed application using a Redis backend.
```python
from datetime import datetime
from pyrate_limiter import Duration, Limiter, RequestRate
rate = RequestRate(5, Duration.SECOND)
limiter_datetime = Limiter(rate, time_function=lambda: datetime.utcnow().timestamp())
```
Or simply use the basic `time.time()` function:
```python
from time import time
rate = RequestRate(5, Duration.SECOND)
limiter_time = Limiter(rate, time_function=time)
```
## Examples
To prove that pyrate-limiter is working as expected, here is a complete example to demonstrate
rate-limiting with delays:
```python
from time import perf_counter as time
from pyrate_limiter import Duration, Limiter, RequestRate
limiter = Limiter(RequestRate(5, Duration.SECOND))
n_requests = 27
@limiter.ratelimit("test", delay=True)
def limited_function(start_time):
print(f"t + {(time() - start_time):.5f}")
start_time = time()
for _ in range(n_requests):
limited_function(start_time)
print(f"Ran {n_requests} requests in {time() - start_time:.5f} seconds")
```
And an equivalent example for async usage:
```python
import asyncio
from time import perf_counter as time
from pyrate_limiter import Duration, Limiter, RequestRate
limiter = Limiter(RequestRate(5, Duration.SECOND))
n_requests = 27
@limiter.ratelimit("test", delay=True)
async def limited_function(start_time):
print(f"t + {(time() - start_time):.5f}")
async def test_ratelimit():
start_time = time()
tasks = [limited_function(start_time) for _ in range(n_requests)]
await asyncio.gather(*tasks)
print(f"Ran {n_requests} requests in {time() - start_time:.5f} seconds")
asyncio.run(test_ratelimit())
```

View File

@ -0,0 +1,10 @@
# flake8: noqa
"""PyrateLimiter
"""
from __future__ import annotations
from .bucket import *
from .constants import *
from .exceptions import *
from .limiter import *
from .request_rate import *

View File

@ -0,0 +1,134 @@
""" Implement this class to create
a workable bucket for Limiter to use
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from queue import Queue
from threading import RLock
class AbstractBucket(ABC):
    """Interface that every bucket backend must implement.

    A bucket stores timestamps of past requests; a Limiter inspects it to
    decide whether a new request may proceed.
    """

    def __init__(self, maxsize: int = 0, **_kwargs):
        self._maxsize = maxsize

    def maxsize(self) -> int:
        """Return the maximum number of items this bucket can hold."""
        return self._maxsize

    @abstractmethod
    def size(self) -> int:
        """Return the number of items currently held by the bucket."""

    @abstractmethod
    def put(self, item: float) -> int:
        """Store an item (typically the current time); return 1 on success, else 0."""

    @abstractmethod
    def get(self, number: int) -> int:
        """Remove up to ``number`` items in FIFO order; return how many were removed."""

    @abstractmethod
    def all_items(self) -> list[float]:
        """Return copies of every item currently in the bucket."""

    @abstractmethod
    def flush(self) -> None:
        """Remove all items from the bucket."""

    def inspect_expired_items(self, time: float) -> tuple[int, float]:
        """Count the items that are still inside the time window starting at ``time``.

        Returns:
            The number of unexpired items, and the time (seconds) until the
            next item expires; ``(0, 0.0)`` when every item has expired.
        """
        volume = self.size()
        for position, timestamp in enumerate(self.all_items()):
            if timestamp > time:
                # Items are stored oldest-first, so everything from this
                # position onward is still within the window.
                return volume - position, round(timestamp - time, 3)
        return 0, 0.0

    def lock_acquire(self):
        """Hook: acquire a lock before a new transaction (no-op by default)."""

    def lock_release(self):
        """Hook: release the lock after a transaction (no-op by default)."""
class MemoryQueueBucket(AbstractBucket):
    """A bucket that resides in memory using python's built-in Queue class"""

    def __init__(self, maxsize: int = 0, **_kwargs):
        # Forward maxsize to the base class so maxsize() reports the real
        # capacity; previously super().__init__() was called without it, so
        # maxsize() always returned 0, unlike MemoryListBucket.
        super().__init__(maxsize=maxsize)
        self._q: Queue = Queue(maxsize=maxsize)

    def size(self) -> int:
        """Return the number of items currently queued."""
        return self._q.qsize()

    def put(self, item: float):
        """Add an item (a timestamp) to the bucket."""
        return self._q.put(item)

    def get(self, number: int) -> int:
        """Remove ``number`` items in FIFO order; return how many were removed."""
        counter = 0
        for _ in range(number):
            self._q.get()
            counter += 1
        return counter

    def all_items(self) -> list[float]:
        """Return a snapshot copy of all items in the bucket."""
        return list(self._q.queue)

    def flush(self):
        """Drain the queue completely."""
        while not self._q.empty():
            self._q.get()
class MemoryListBucket(AbstractBucket):
    """In-memory bucket backed by a plain list, guarded by a re-entrant lock."""

    def __init__(self, maxsize: int = 0, **_kwargs):
        super().__init__(maxsize=maxsize)
        self._q: list[float] = []
        self._lock = RLock()

    def size(self) -> int:
        """Return the number of items currently stored."""
        return len(self._q)

    def put(self, item: float):
        """Append ``item`` if there is room; return 1 on success, 0 when full."""
        with self._lock:
            if self.size() >= self.maxsize():
                return 0
            self._q.append(item)
            return 1

    def get(self, number: int) -> int:
        """Pop ``number`` items from the front (FIFO); return how many were removed."""
        with self._lock:
            removed = 0
            while removed < number:
                self._q.pop(0)
                removed += 1
            return removed

    def all_items(self) -> list[float]:
        """Return a snapshot copy of all items in the bucket."""
        return list(self._q)

    def flush(self):
        """Reset the bucket to an empty list."""
        self._q = []

View File

@ -0,0 +1,9 @@
from __future__ import annotations
class Duration:
    """Common time intervals, expressed in seconds."""

    SECOND = 1
    MINUTE = 60 * SECOND
    HOUR = 60 * MINUTE
    DAY = 24 * HOUR
    MONTH = 30 * DAY

View File

@ -0,0 +1,32 @@
# pylint: disable=C0114,C0115
from __future__ import annotations
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from .request_rate import RequestRate
class BucketFullException(Exception):
    """Raised when a request would exceed one of the configured rate limits."""

    def __init__(self, identity: str, rate: RequestRate, remaining_time: float):
        message = f"Bucket for {identity} with Rate {rate} is already full"
        super().__init__(message)
        # Structured details so callers can build their own messaging or
        # compute how long to wait before retrying.
        self.meta_info: dict[str, str | float] = {
            "error": message,
            "identity": identity,
            "rate": str(rate),
            "remaining_time": remaining_time,
        }
class InvalidParams(Exception):
    """Raised when Limiter arguments are missing or inconsistent."""

    def __init__(self, param_name: str):
        # NOTE: the message format (including the missing space after the
        # colon) is reproduced exactly; callers may match on it.
        detail = f"Parameters missing or invalid:{param_name}"
        super().__init__(detail)
        self.message = detail
class ImmutableClassProperty(Exception):
    """Raised when code attempts to mutate a read-only class property."""

    def __init__(self, class_instance: Any, prop: str):
        detail = f"{class_instance}.{prop} must not be mutated"
        super().__init__(detail)
        self.message = detail

View File

@ -0,0 +1,132 @@
from __future__ import annotations
import asyncio
from functools import partial, wraps
from inspect import iscoroutinefunction
from logging import getLogger
from time import sleep
from typing import TYPE_CHECKING
from comictalker.comictalker import RLCallBack
from .exceptions import BucketFullException
logger = getLogger("pyrate_limiter")
if TYPE_CHECKING:
from .limiter import Limiter
class LimitContextDecorator:
    """A class that can be used as a:

    * decorator
    * async decorator
    * contextmanager
    * async contextmanager

    Intended to be used via :py:meth:`.Limiter.ratelimit`. Depending on arguments, calls that exceed
    the rate limit will either raise an exception, or sleep until space is available in the bucket.

    Args:
        limiter: Limiter object
        identities: Bucket identities
        delay: Delay until the next request instead of raising an exception
        max_delay: The maximum allowed delay time (in seconds); anything over this will raise
            an exception
        on_rate_limit: Optional callback info invoked when a limit is hit; its
            ``interval`` may cap each individual sleep so the wait can be
            re-evaluated (e.g. to allow cancellation)
    """

    def __init__(
        self,
        limiter: Limiter,
        *identities: str,
        delay: bool = False,
        max_delay: int | float | None = None,
        on_rate_limit: RLCallBack | None = None,
    ):
        self.delay = delay
        # Normalize None to 0; delay_or_reraise treats 0 as "no maximum".
        self.max_delay = max_delay or 0
        # Bind the identities once so wrappers only need a zero-arg call.
        self.try_acquire = partial(limiter.try_acquire, *identities)
        self.on_rate_limit = on_rate_limit

    def __call__(self, func):
        """Allows usage as a decorator for both normal and async functions"""

        @wraps(func)
        def wrapper(*args, **kwargs):
            self.delayed_acquire()
            return func(*args, **kwargs)

        @wraps(func)
        async def async_wrapper(*args, **kwargs):
            await self.async_delayed_acquire()
            return await func(*args, **kwargs)

        # Return either an async or normal wrapper, depending on the type of the wrapped function
        return async_wrapper if iscoroutinefunction(func) else wrapper

    def __enter__(self):
        """Allows usage as a contextmanager"""
        self.delayed_acquire()

    def __exit__(self, *exc):
        # Nothing to release: the bucket item was consumed on entry.
        pass

    async def __aenter__(self):
        """Allows usage as an async contextmanager"""
        await self.async_delayed_acquire()

    async def __aexit__(self, *exc):
        pass

    def delayed_acquire(self):
        """Delay and retry until we can successfully acquire an available bucket item"""
        while True:
            try:
                self.try_acquire()
            except BucketFullException as err:
                # Either returns the remaining wait time or re-raises,
                # depending on self.delay / self.max_delay.
                delay_time = full_delay_time = self.delay_or_reraise(err)
            else:
                break
            if self.on_rate_limit:
                # Cap each sleep at the callback's interval (when positive) so
                # the callback fires periodically during a long wait.
                if self.on_rate_limit.interval > 0 and delay_time > self.on_rate_limit.interval:
                    delay_time = self.on_rate_limit.interval
                self.on_rate_limit.callback(full_delay_time, delay_time)
            logger.warning(
                "Rate limit reached; %.0f seconds remaining before next request. Sleeping for %.0f seconds",
                full_delay_time,
                delay_time,
            )
            sleep(delay_time)

    async def async_delayed_acquire(self):
        """Delay and retry until we can successfully acquire an available bucket item"""
        while True:
            try:
                self.try_acquire()
            except BucketFullException as err:
                delay_time = full_delay_time = self.delay_or_reraise(err)
                if self.on_rate_limit:
                    # Same interval-capping behavior as the sync version.
                    if self.on_rate_limit.interval > 0 and delay_time > self.on_rate_limit.interval:
                        delay_time = self.on_rate_limit.interval
                    self.on_rate_limit.callback(full_delay_time, delay_time)
                logger.warning(
                    "Rate limit reached; %.0f seconds remaining before next request. Sleeping for %.0f seconds",
                    full_delay_time,
                    delay_time,
                )
                # Non-blocking sleep so other tasks can run while we wait.
                await asyncio.sleep(delay_time)
            else:
                break

    def delay_or_reraise(self, err: BucketFullException) -> float:
        """Determine if we should delay after exceeding a rate limit. If so, return the delay time,
        otherwise re-raise the exception.
        """
        delay_time = float(err.meta_info["remaining_time"])
        # max_delay == 0 means "no maximum" (see __init__).
        exceeded_max_delay = bool(self.max_delay) and (delay_time > self.max_delay)
        if self.delay and not exceeded_max_delay:
            return delay_time
        raise err

View File

@ -0,0 +1,163 @@
from __future__ import annotations
from time import monotonic
from typing import Any, Callable
from comictalker.comictalker import RLCallBack
from .bucket import AbstractBucket, MemoryQueueBucket
from .exceptions import BucketFullException, InvalidParams
from .limit_context_decorator import LimitContextDecorator
from .request_rate import RequestRate
class Limiter:
    """Main rate-limiter class

    Args:
        rates: Request rate definitions
        on_rate_limit: Optional callback info; forwarded to :py:meth:`ratelimit`
            so callers are notified when a limit is hit
        bucket_class: Bucket backend to use; may be any subclass of :py:class:`.AbstractBucket`.
            See :py:mod`pyrate_limiter.bucket` for available bucket classes.
        bucket_kwargs: Extra keyword arguments to pass to the bucket class constructor.
        time_function: Time function that returns the current time as a float, in seconds
    """

    def __init__(
        self,
        *rates: RequestRate,
        on_rate_limit: RLCallBack | None = None,
        bucket_class: type[AbstractBucket] = MemoryQueueBucket,
        bucket_kwargs: dict[str, Any] | None = None,
        time_function: Callable[[], float] | None = None,
    ):
        self._validate_rate_list(rates)
        self.on_rate_limit = on_rate_limit
        self._rates = rates
        self._bkclass = bucket_class
        self._bucket_args = bucket_kwargs or {}
        self._validate_bucket()
        # One bucket per identity, created lazily in _init_buckets.
        self.bucket_group: dict[str, AbstractBucket] = {}
        # Default to monotonic time so request ordering is never affected by
        # wall-clock adjustments.
        self.time_function = monotonic
        if time_function is not None:
            self.time_function = time_function
        # Call for time_function to make an anchor if required.
        self.time_function()

    def _validate_rate_list(self, rates):  # pylint: disable=no-self-use
        """Raise exception if rates are incorrectly ordered."""
        if not rates:
            raise InvalidParams("Rate(s) must be provided")
        # Rates must be strictly increasing in both limit and interval
        # (e.g. 5/second, then 100/minute); otherwise a later rate could
        # never be the binding constraint.
        for idx, rate in enumerate(rates[1:]):
            prev_rate = rates[idx]
            invalid = rate.limit <= prev_rate.limit or rate.interval <= prev_rate.interval
            if invalid:
                msg = f"{prev_rate} cannot come before {rate}"
                raise InvalidParams(msg)

    def _validate_bucket(self):
        """Try initialize a bucket to check if ok"""
        bucket = self._bkclass(maxsize=self._rates[-1].limit, identity="_", **self._bucket_args)
        del bucket

    def _init_buckets(self, identities) -> None:
        """Initialize a bucket for each identity, if needed.
        The bucket's maxsize equals the max limit of request-rates.
        """
        # The largest (last) rate bounds how many timestamps must be kept.
        maxsize = self._rates[-1].limit
        # Sorted order gives a consistent lock-acquisition order across
        # callers, which avoids deadlocks with multiple identities.
        for item_id in sorted(identities):
            if not self.bucket_group.get(item_id):
                self.bucket_group[item_id] = self._bkclass(
                    maxsize=maxsize,
                    identity=item_id,
                    **self._bucket_args,
                )
            self.bucket_group[item_id].lock_acquire()

    def _release_buckets(self, identities) -> None:
        """Release locks after bucket transactions, if applicable"""
        for item_id in sorted(identities):
            self.bucket_group[item_id].lock_release()

    def try_acquire(self, *identities: str) -> None:
        """Attempt to acquire an item, or raise an error if a rate limit has been exceeded.

        Args:
            identities: One or more identities to acquire. Typically this is the name of a service
                or resource that is being rate-limited.

        Raises:
            :py:exc:`BucketFullException`: If the bucket is full and the item cannot be acquired
        """
        # Locks every bucket involved; every exit path below must release.
        self._init_buckets(identities)
        now = round(self.time_function(), 3)

        for rate in self._rates:
            for item_id in identities:
                bucket = self.bucket_group[item_id]
                volume = bucket.size()

                # Fewer stored requests than this rate's limit: cannot exceed it.
                if volume < rate.limit:
                    continue

                # Determine rate's starting point, and check requests made during its time window
                item_count, remaining_time = bucket.inspect_expired_items(now - rate.interval)
                if item_count >= rate.limit:
                    self._release_buckets(identities)
                    raise BucketFullException(item_id, rate, remaining_time)

                # Remove expired bucket items beyond the last (maximum) rate limit,
                if rate is self._rates[-1]:
                    bucket.get(volume - item_count)

        # If no buckets are full, add another item to each bucket representing the next request
        for item_id in identities:
            self.bucket_group[item_id].put(now)

        self._release_buckets(identities)

    def ratelimit(
        self,
        *identities: str,
        delay: bool = False,
        max_delay: int | float | None = None,
        on_rate_limit: RLCallBack | None = None,
    ):
        """A decorator and contextmanager that applies rate-limiting, with async support.

        Depending on arguments, calls that exceed the rate limit will either raise an exception, or
        sleep until space is available in the bucket.

        Args:
            identities: One or more identities to acquire. Typically this is the name of a service
                or resource that is being rate-limited.
            delay: Delay until the next request instead of raising an exception
            max_delay: The maximum allowed delay time (in seconds); anything over this will raise
                an exception
            on_rate_limit: Per-call rate-limit callback; the Limiter-level
                callback (if set) takes precedence over this one

        Raises:
            :py:exc:`BucketFullException`: If the rate limit is reached, and ``delay=False`` or the
                delay exceeds ``max_delay``
        """
        return LimitContextDecorator(
            self,
            *identities,
            delay=delay,
            max_delay=max_delay,
            on_rate_limit=self.on_rate_limit or on_rate_limit,
        )

    def get_current_volume(self, identity) -> int:
        """Get current bucket volume for a specific identity"""
        bucket = self.bucket_group[identity]
        return bucket.size()

    def flush_all(self) -> int:
        # Empty every known bucket; returns the number of buckets flushed.
        cnt = 0

        for _, bucket in self.bucket_group.items():
            bucket.flush()
            cnt += 1

        return cnt

View File

View File

@ -0,0 +1,53 @@
"""Initialize this class to define request-rates for limiter
"""
from __future__ import annotations
from enum import Enum
from typing import Any
from .exceptions import ImmutableClassProperty
class ResetTypes(Enum):
    """How a rate window resets: on a fixed schedule, or as a sliding interval."""

    SCHEDULED = 1
    INTERVAL = 2
class RequestRate:
    """A single request-rate definition, e.g. "5 requests per second".

    Args:
        limit: Number of requests allowed within ``interval``
        interval: Time interval, in seconds
        reset: Reset behaviour of the rate window (interval-based by default)
    """

    def __init__(
        self,
        limit: int,
        interval: int,
        reset: ResetTypes = ResetTypes.INTERVAL,
    ):
        self._limit = limit
        self._interval = interval
        self._reset = reset
        self._log: dict[Any, Any] = {}

    @property
    def limit(self) -> int:
        """Request count allowed per interval (read-only)."""
        return self._limit

    @limit.setter
    def limit(self, _):
        # Rate definitions are shared; mutating them would corrupt limiters.
        raise ImmutableClassProperty(self, "limit")

    @property
    def interval(self) -> int:
        """Window length in seconds (read-only)."""
        return self._interval

    @interval.setter
    def interval(self, _):
        raise ImmutableClassProperty(self, "interval")

    def __str__(self):
        return f"{self._limit}/{self._interval}"

View File

@ -135,6 +135,7 @@ description = run the tests with pytest
package = wheel
deps =
pytest>=7
gui,all: pytest-qt
extras =
7z: 7Z
cbr: CBR
@ -150,17 +151,16 @@ description = run the tests with pytest
package = wheel
deps =
pytest>=7
icu,all: pyicu-binary
gui,all: pytest-qt
extras =
7z: 7Z
cbr: CBR
gui: GUI
all: 7Z,CBR,GUI
icu: ICU
all: 7Z,CBR,GUI,ICU
commands =
python -m pytest {tty:--color=yes} {posargs}
[testenv:py3.9-{icu,all}]
base = {env:tox_env:testenv}
icu,all: python -c 'import importlib,platform; importlib.import_module("icu") if platform.system() != "Windows" else ...' # Sanity check for icu
[testenv:format]
labels =
@ -247,7 +247,7 @@ description = Generate pyinstaller executable
labels =
build
release
base = {env:tox_env:testenv}
base = testenv
depends =
clean
deps =
@ -263,7 +263,7 @@ commands =
description = Generate appimage executable
skip_install = true
platform = linux
base = {env:tox_env:testenv}
base = testenv
labels =
release
build
@ -323,6 +323,7 @@ per-file-ignores =
build-tools/generate_settngs.py: T20
build-tools/oidc-exchange.py: T20
tests/*: L
tests/pyqttoast_test.py: E402
[mypy]
exclude = comictaggerlib/graphics/resources.py
@ -344,5 +345,15 @@ disallow_untyped_defs = false
disallow_incomplete_defs = false
check_untyped_defs = false
[mypy-comictaggerlib.ui.pyqttoast.tests.*]
disallow_untyped_defs = false
disallow_incomplete_defs = false
check_untyped_defs = false
[mypy-comictaggerlib.graphics.resources]
ignore_errors = True
ignore_errors = true
follow_imports = skip
[mypy-comictalker.vendor.*]
ignore_errors = true
follow_imports = skip

View File

@ -9,9 +9,12 @@ import testing.comicvine
def test_search_for_series(comicvine_api, comic_cache):
results = comicvine_api.search_for_series("cory doctorows futuristic tales of the here and now")[0]
results = comicvine_api.search_for_series(
"cory doctorows futuristic tales of the here and now", on_rate_limit=None
)[0]
cache_series = comic_cache.get_search_results(
comicvine_api.id, "cory doctorows futuristic tales of the here and now"
comicvine_api.id,
"cory doctorows futuristic tales of the here and now",
)[0][0]
series_results = comicvine_api._format_series(json.loads(cache_series.data))
assert results == series_results
@ -40,7 +43,7 @@ def test_fetch_issues_in_series(comicvine_api, comic_cache):
def test_fetch_issue_data_by_issue_id(comicvine_api):
result = comicvine_api.fetch_comic_data(140529)
result = comicvine_api.fetch_comic_data(140529, on_rate_limit=None)
result.notes = None
assert result == testing.comicvine.cv_md
@ -75,6 +78,6 @@ cv_issue = [
@pytest.mark.parametrize("series_id, issue_number, expected", cv_issue)
def test_fetch_issue_data(comicvine_api, series_id, issue_number, expected):
results = comicvine_api._fetch_issue_data(series_id, issue_number)
results = comicvine_api._fetch_issue_data(series_id, issue_number, on_rate_limit=None)
results.notes = None
assert results == expected

View File

@ -12,7 +12,6 @@ from typing import Any
import pytest
import settngs
from PIL import Image
from pyrate_limiter import Limiter, RequestRate
import comicapi.comicarchive
import comicapi.genericmetadata
@ -22,6 +21,7 @@ import comictalker
import comictalker.comiccacher
import comictalker.talkers.comicvine
from comicapi import utils
from comictalker.vendor.pyrate_limiter import Limiter, RequestRate
from testing import comicvine, filenames
from testing.comicdata import all_seed_imprints, seed_imprints

View File

@ -14,8 +14,17 @@ from comictaggerlib.resulttypes import IssueResult
def test_crop(cbz_double_cover, config, tmp_path, comicvine_api):
config, definitions = config
ii = comictaggerlib.issueidentifier.IssueIdentifier(cbz_double_cover, config, comicvine_api)
iio = comictaggerlib.issueidentifier.IssueIdentifierOptions(
series_match_search_thresh=config.Issue_Identifier__series_match_search_thresh,
series_match_identify_thresh=config.Issue_Identifier__series_match_identify_thresh,
use_publisher_filter=config.Auto_Tag__use_publisher_filter,
publisher_filter=config.Auto_Tag__publisher_filter,
quiet=config.Runtime_Options__quiet,
cache_dir=config.Runtime_Options__config.user_cache_dir,
border_crop_percent=config.Issue_Identifier__border_crop_percent,
talker=comicvine_api,
)
ii = comictaggerlib.issueidentifier.IssueIdentifier(iio, None)
im = Image.open(io.BytesIO(cbz_double_cover.archiver.read_file("double_cover.jpg")))
@ -31,14 +40,34 @@ def test_crop(cbz_double_cover, config, tmp_path, comicvine_api):
@pytest.mark.parametrize("additional_md, expected", testing.comicdata.metadata_keys)
def test_get_search_keys(cbz, config, additional_md, expected, comicvine_api):
config, definitions = config
ii = comictaggerlib.issueidentifier.IssueIdentifier(cbz, config, comicvine_api)
iio = comictaggerlib.issueidentifier.IssueIdentifierOptions(
series_match_search_thresh=config.Issue_Identifier__series_match_search_thresh,
series_match_identify_thresh=config.Issue_Identifier__series_match_identify_thresh,
use_publisher_filter=config.Auto_Tag__use_publisher_filter,
publisher_filter=config.Auto_Tag__publisher_filter,
quiet=config.Runtime_Options__quiet,
cache_dir=config.Runtime_Options__config.user_cache_dir,
border_crop_percent=config.Issue_Identifier__border_crop_percent,
talker=comicvine_api,
)
ii = comictaggerlib.issueidentifier.IssueIdentifier(iio, None)
assert expected == ii._get_search_keys(additional_md)
def test_get_issue_cover_match_score(cbz, config, comicvine_api):
config, definitions = config
ii = comictaggerlib.issueidentifier.IssueIdentifier(cbz, config, comicvine_api)
iio = comictaggerlib.issueidentifier.IssueIdentifierOptions(
series_match_search_thresh=config.Issue_Identifier__series_match_search_thresh,
series_match_identify_thresh=config.Issue_Identifier__series_match_identify_thresh,
use_publisher_filter=config.Auto_Tag__use_publisher_filter,
publisher_filter=config.Auto_Tag__publisher_filter,
quiet=config.Runtime_Options__quiet,
cache_dir=config.Runtime_Options__config.user_cache_dir,
border_crop_percent=config.Issue_Identifier__border_crop_percent,
talker=comicvine_api,
)
ii = comictaggerlib.issueidentifier.IssueIdentifier(iio, None)
score = ii._get_issue_cover_match_score(
"https://comicvine.gamespot.com/a/uploads/scale_large/0/574/585444-109004_20080707014047_large.jpg",
["https://comicvine.gamespot.com/cory-doctorows-futuristic-tales-of-the-here-and-no/4000-140529/"],
@ -56,7 +85,17 @@ def test_get_issue_cover_match_score(cbz, config, comicvine_api):
def test_search(cbz, config, comicvine_api):
config, definitions = config
ii = comictaggerlib.issueidentifier.IssueIdentifier(cbz, config, comicvine_api)
iio = comictaggerlib.issueidentifier.IssueIdentifierOptions(
series_match_search_thresh=config.Issue_Identifier__series_match_search_thresh,
series_match_identify_thresh=config.Issue_Identifier__series_match_identify_thresh,
use_publisher_filter=config.Auto_Tag__use_publisher_filter,
publisher_filter=config.Auto_Tag__publisher_filter,
quiet=config.Runtime_Options__quiet,
cache_dir=config.Runtime_Options__config.user_cache_dir,
border_crop_percent=config.Issue_Identifier__border_crop_percent,
talker=comicvine_api,
)
ii = comictaggerlib.issueidentifier.IssueIdentifier(iio, None)
result, issues = ii.identify(cbz, cbz.read_tags("cr"))
cv_expected = IssueResult(
series=f"{testing.comicvine.cv_volume_result['results']['name']} ({testing.comicvine.cv_volume_result['results']['start_year']})",
@ -80,7 +119,17 @@ def test_search(cbz, config, comicvine_api):
def test_crop_border(cbz, config, comicvine_api):
config, definitions = config
ii = comictaggerlib.issueidentifier.IssueIdentifier(cbz, config, comicvine_api)
iio = comictaggerlib.issueidentifier.IssueIdentifierOptions(
series_match_search_thresh=config.Issue_Identifier__series_match_search_thresh,
series_match_identify_thresh=config.Issue_Identifier__series_match_identify_thresh,
use_publisher_filter=config.Auto_Tag__use_publisher_filter,
publisher_filter=config.Auto_Tag__publisher_filter,
quiet=config.Runtime_Options__quiet,
cache_dir=config.Runtime_Options__config.user_cache_dir,
border_crop_percent=config.Issue_Identifier__border_crop_percent,
talker=comicvine_api,
)
ii = comictaggerlib.issueidentifier.IssueIdentifier(iio, None)
# This creates a white square centered on a black background
bg = Image.new("RGBA", (100, 100), (0, 0, 0, 255))

1174
tests/pyqttoast_test.py Normal file

File diff suppressed because it is too large Load Diff