Code cleanup
parent 23f323f52d
commit 0fe881df59
@@ -135,8 +135,8 @@ class SevenZipArchiver:
         os.close(tmp_fd)

         try:
-            with py7zr.SevenZipFile(self.path, "r") as zip:
-                targets = [f for f in zip.getnames() if f not in exclude_list]
+            with py7zr.SevenZipFile(self.path, "r") as zin:
+                targets = [f for f in zin.getnames() if f not in exclude_list]
             with py7zr.SevenZipFile(self.path, "r") as zin:
                 with py7zr.SevenZipFile(tmp_name, "w") as zout:
                     for fname, bio in zin.read(targets).items():
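Note on this hunk: the rename from zip to zin stops the context-manager variable from shadowing Python's built-in zip(). A minimal standalone sketch of the hazard (illustrative, not comictagger code):

    def broken(path):
        zip = open(path, "rb")             # local name now hides the builtin
        pairs = list(zip([1, 2], [3, 4]))  # TypeError: '_io.BufferedReader' object is not callable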
@@ -89,7 +89,7 @@ class FileNameParser:
             # is the series name followed by issue
             filename = re.sub(r"--.*", self.repl, filename)

-        elif "__" in filename and not re.search(r"\[__\d+__\]", filename):
+        elif "__" in filename and not re.search(r"\[__\d+__]", filename):
            # the pattern seems to be that anything to left of the first "__"
            # is the series name followed by issue
            filename = re.sub(r"__.*", self.repl, filename)
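Note on this hunk: outside a character class, ] needs no backslash, so the two patterns match exactly the same strings; the change only drops a redundant escape. A quick standalone check:

    import re

    # Both accept the same input; ']' is literal outside a character class.
    assert re.search(r"\[__\d+__\]", "Series [__12__]")
    assert re.search(r"\[__\d+__]", "Series [__12__]")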
@@ -21,6 +21,7 @@ possible, however lossy it might be
 # limitations under the License.

 import logging
+from enum import Enum
 from typing import List, TypedDict

 from comicapi import utils
@@ -28,7 +29,7 @@ from comicapi import utils
 logger = logging.getLogger(__name__)


-class PageType:
+class PageType(Enum):

     """
     These page info classes are exactly the same as the CIX scheme, since
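Note on this hunk: with the Enum base (imported in the previous hunk), the page-type constants become enum members rather than plain strings, so any code that compares or serializes them needs .value or a str mixin. A minimal sketch of that caveat, using a hypothetical stand-in class:

    from enum import Enum

    class Kind(Enum):                        # hypothetical stand-in for PageType
        FrontCover = "FrontCover"

    print(Kind.FrontCover.value)             # "FrontCover"
    print(Kind.FrontCover == "FrontCover")   # False: a member is not a str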
@@ -48,7 +49,7 @@ class PageType:
     Deleted = "Deleted"


-class ImageMetadata(TypedDict):
+class ImageMetadata(TypedDict, total=False):
     Type: PageType
     Bookmark: str
     DoublePage: bool
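Note on this hunk: total=False makes every declared key optional, which fits dicts that are built up a key at a time; with the default total=True, a type checker flags any construction that omits a declared key. A minimal sketch under that assumption:

    from typing import TypedDict

    class Page(TypedDict, total=False):  # hypothetical mirror of ImageMetadata
        Image: int
        Type: str

    p: Page = {"Image": 0}      # fine: keys are optional
    p["Type"] = "FrontCover"    # added later, still well-typed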
@@ -213,8 +214,7 @@ class GenericMetadata:
     def set_default_page_list(self, count):
         # generate a default page list, with the first page marked as the cover
         for i in range(count):
-            page_dict = {}
-            page_dict["Image"] = str(i)
+            page_dict = ImageMetadata(Image=i)
             if i == 0:
                 page_dict["Type"] = PageType.FrontCover
             self.pages.append(page_dict)
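Note on this hunk: besides the TypedDict call syntax (ImageMetadata(Image=i) is equivalent at runtime to {"Image": i}), the stored value changes from str(i) to the int i — presumably matching an Image: int declaration in ImageMetadata, though that field sits outside this diff.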
@@ -241,11 +241,7 @@ class GenericMetadata:

     def add_credit(self, person, role, primary=False):

-        credit = {}
-        credit["person"] = person
-        credit["role"] = role
-        if primary:
-            credit["primary"] = primary
+        credit: CreditMetadata = {"person": person, "role": role, "primary": primary}

         # look to see if it's not already there...
         found = False
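Note on this hunk: one small behavior change rides along with the literal. The old code added the "primary" key only when truthy; the new dict always stores it, False included, so a caller testing "primary" in credit sees a different answer:

    old = {"person": "P", "role": "writer"}                    # key absent when primary is False
    new = {"person": "P", "role": "writer", "primary": False}
    print("primary" in old, "primary" in new)                  # False True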
@@ -175,16 +175,15 @@ class ComicVineCacher:
         rows = cur.fetchall()
         # now process the results
         for record in rows:
-            result = {}
-            result["id"] = record[1]
-            result["name"] = record[2]
-            result["start_year"] = record[3]
-            result["publisher"] = {}
-            result["publisher"]["name"] = record[4]
-            result["count_of_issues"] = record[5]
-            result["image"] = {}
-            result["image"]["super_url"] = record[6]
-            result["description"] = record[7]
+            result = {
+                "id": record[1],
+                "name": record[2],
+                "start_year": record[3],
+                "count_of_issues": record[5],
+                "description": record[7],
+                "publisher": {"name": record[4]},
+                "image": {"super_url": record[6]},
+            }

             results.append(result)

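Note on this and the next two hunks: all three apply the same transformation. Each row fetched from the cache is fanned out into one nested dict literal instead of key-by-key assignment, which also removes the two-step result["publisher"] = {} / result["publisher"]["name"] = ... dance. The pattern in isolation, with made-up row data:

    row = (0, 101, "Example Series", 1999, "Example Pub")

    # before: incremental build-up
    result = {}
    result["id"] = row[1]
    result["publisher"] = {}
    result["publisher"]["name"] = row[4]

    # after: one literal with the nested dict inline
    result = {"id": row[1], "publisher": {"name": row[4]}}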
@@ -301,16 +300,15 @@ class ComicVineCacher:
         if row is None:
             return result

-        result = {}
-
         # since ID is primary key, there is only one row
-        result["id"] = row[0]
-        result["name"] = row[1]
-        result["publisher"] = {}
-        result["publisher"]["name"] = row[2]
-        result["count_of_issues"] = row[3]
-        result["start_year"] = row[4]
-        result["issues"] = []
+        result = {
+            "id": row[0],
+            "name": row[1],
+            "count_of_issues": row[3],
+            "start_year": row[4],
+            "issues": [],
+            "publisher": {"name": row[2]},
+        }

         return result

@@ -337,17 +335,15 @@ class ComicVineCacher:

         # now process the results
         for row in rows:
-            record = {}
-
-            record["id"] = row[0]
-            record["name"] = row[1]
-            record["issue_number"] = row[2]
-            record["site_detail_url"] = row[3]
-            record["cover_date"] = row[4]
-            record["image"] = {}
-            record["image"]["super_url"] = row[5]
-            record["image"]["thumb_url"] = row[6]
-            record["description"] = row[7]
+            record = {
+                "id": row[0],
+                "name": row[1],
+                "issue_number": row[2],
+                "site_detail_url": row[3],
+                "cover_date": row[4],
+                "image": {"super_url": row[5], "thumb_url": row[6]},
+                "description": row[7],
+            }

             results.append(record)

@@ -644,16 +644,12 @@ class ComicVineTalker:

         cv_response = self.get_cv_content(issue_url, params)

-        details: SelectDetails = {}
-        details["image_url"] = None
-        details["thumb_image_url"] = None
-        details["cover_date"] = None
-        details["site_detail_url"] = None
-
-        details["image_url"] = cv_response["results"]["image"]["super_url"]
-        details["thumb_image_url"] = cv_response["results"]["image"]["thumb_url"]
-        details["cover_date"] = cv_response["results"]["cover_date"]
-        details["site_detail_url"] = cv_response["results"]["site_detail_url"]
+        details: SelectDetails = {
+            "image_url": cv_response["results"]["image"]["super_url"],
+            "thumb_image_url": cv_response["results"]["image"]["thumb_url"],
+            "cover_date": cv_response["results"]["cover_date"],
+            "site_detail_url": cv_response["results"]["site_detail_url"],
+        }

         if details["image_url"] is not None:
             self.cache_issue_select_details(
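Note on this hunk: the deleted None defaults were dead stores — every key was unconditionally overwritten from cv_response on the very next lines — so the literal preserves behavior, and the is not None guard below still does its job when the response carries null fields.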
@@ -112,8 +112,8 @@ class IssueIdentifier:
     def set_name_length_delta_threshold(self, delta):
         self.length_delta_thresh = delta

-    def set_publisher_filter(self, filter):
-        self.publisher_filter = filter
+    def set_publisher_filter(self, flt):
+        self.publisher_filter = flt

     def set_hasher_algorithm(self, algo):
         self.image_hasher = algo
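Note on this hunk: same motivation as the zip rename in the first hunk — filter is a builtin, and a parameter of that name hides it for the whole method body. A standalone sketch:

    def keep_truthy(flt):               # was: keep_truthy(filter) — shadowed the builtin
        return list(filter(None, flt))  # builtin filter() still reachable

    print(keep_truthy(["a", "", "b"]))  # ['a', 'b']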
@@ -164,12 +164,13 @@ class IssueIdentifier:
     def get_search_keys(self):

         ca = self.comic_archive
-        search_keys: SearchKeys = {}
-        search_keys["series"] = None
-        search_keys["issue_number"] = None
-        search_keys["month"] = None
-        search_keys["year"] = None
-        search_keys["issue_count"] = None
+        search_keys: SearchKeys = {
+            "series": None,
+            "issue_number": None,
+            "month": None,
+            "year": None,
+            "issue_count": None,
+        }

         if ca is None:
             return None
@@ -274,10 +275,8 @@ class IssueIdentifier:
             self.cover_url_callback(url_image_data)

         remote_cover_list = []
-        item = {}
-        item["url"] = primary_img_url
+        item = {"url": primary_img_url, "hash": self.calculate_hash(url_image_data)}

-        item["hash"] = self.calculate_hash(url_image_data)
         remote_cover_list.append(item)

         if self.cancel:
@@ -299,9 +298,7 @@ class IssueIdentifier:
             if self.cover_url_callback is not None:
                 self.cover_url_callback(alt_url_image_data)

-            item = {}
-            item["url"] = alt_url
-            item["hash"] = self.calculate_hash(alt_url_image_data)
+            item = {"url": alt_url, "hash": self.calculate_hash(alt_url_image_data)}
             remote_cover_list.append(item)

         if self.cancel:
@@ -317,10 +314,7 @@ class IssueIdentifier:
         for local_cover_hash in local_cover_hash_list:
             for remote_cover_item in remote_cover_list:
                 score = ImageHasher.hamming_distance(local_cover_hash, remote_cover_item["hash"])
-                score_item = {}
-                score_item["score"] = score
-                score_item["url"] = remote_cover_item["url"]
-                score_item["hash"] = remote_cover_item["hash"]
+                score_item = {"score": score, "url": remote_cover_item["url"], "hash": remote_cover_item["hash"]}
                 score_list.append(score_item)
                 if use_log:
                     self.log_msg(score, False)
@@ -520,24 +514,25 @@ class IssueIdentifier:
                 self.match_list = []
                 return self.match_list

-            match: IssueResult = {}
-            match["series"] = f"{series['name']} ({series['start_year']})"
-            match["distance"] = score_item["score"]
-            match["issue_number"] = keys["issue_number"]
-            match["cv_issue_count"] = series["count_of_issues"]
-            match["url_image_hash"] = score_item["hash"]
-            match["issue_title"] = issue["name"]
-            match["issue_id"] = issue["id"]
-            match["volume_id"] = series["id"]
-            match["month"] = month
-            match["year"] = year
-            match["publisher"] = None
+            match: IssueResult = {
+                "series": f"{series['name']} ({series['start_year']})",
+                "distance": score_item["score"],
+                "issue_number": keys["issue_number"],
+                "cv_issue_count": series["count_of_issues"],
+                "url_image_hash": score_item["hash"],
+                "issue_title": issue["name"],
+                "issue_id": issue["id"],
+                "volume_id": series["id"],
+                "month": month,
+                "year": year,
+                "publisher": None,
+                "image_url": image_url,
+                "thumb_url": thumb_url,
+                "page_url": page_url,
+                "description": issue["description"],
+            }
             if series["publisher"] is not None:
                 match["publisher"] = series["publisher"]["name"]
-            match["image_url"] = image_url
-            match["thumb_url"] = thumb_url
-            match["page_url"] = page_url
-            match["description"] = issue["description"]

             self.match_list.append(match)
@@ -85,7 +85,7 @@ try:
     qt_exception_hook = UncaughtHook()
     from comictaggerlib.taggerwindow import TaggerWindow
 except ImportError as e:
-    logging.debug(e)
+    logger.error(str(e))
     qt_available = False

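Note on this hunk: two upgrades in one line. The call moves from the root logging module to this module's named logger, so per-module handlers and levels apply, and the severity rises from debug to error, which makes a failed Qt import visible at the default WARNING threshold. A minimal sketch (the failing module name is hypothetical):

    import logging

    logger = logging.getLogger(__name__)  # named logger, configurable per module

    try:
        import some_missing_qt_binding    # hypothetical import that may fail
    except ImportError as e:
        logger.error(str(e))              # shown by default; logging.debug(e) would be filtered out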
@@ -124,9 +124,6 @@ class ComicTaggerSettings:
         self.wait_and_retry_on_rate_limit = False

     def __init__(self):
-
         self.settings_file = ""
         self.folder = ""
         # General Settings
         self.rar_exe_path = ""
         self.allow_cbi_in_rar = True
@@ -23,9 +23,9 @@ def _lang_code_mac():
     # - The macOS underlying API:
     #   https://developer.apple.com/documentation/foundation/nsuserdefaults.

-    LANG_DETECT_COMMAND = "defaults read -g AppleLocale"
+    lang_detect_command = "defaults read -g AppleLocale"

-    status, output = subprocess.getstatusoutput(LANG_DETECT_COMMAND)
+    status, output = subprocess.getstatusoutput(lang_detect_command)
     if status == 0:
         # Command was successful.
         lang_code = output
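Note on this hunk: a pure naming fix. UPPER_CASE signals a module-level constant under PEP 8, and this string is a local inside _lang_code_mac(), so lower_snake_case is the conventional spelling; behavior is unchanged. A simplified sketch of the shape, not the project's full function:

    import subprocess

    def _lang_code_mac():
        lang_detect_command = "defaults read -g AppleLocale"  # local: lower_snake_case
        status, output = subprocess.getstatusoutput(lang_detect_command)
        return output if status == 0 else ""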