diff --git a/comictaggerlib/issueidentifier.py b/comictaggerlib/issueidentifier.py index a42d335..d919b1d 100644 --- a/comictaggerlib/issueidentifier.py +++ b/comictaggerlib/issueidentifier.py @@ -239,7 +239,6 @@ class IssueIdentifier: self, issue_id: str, primary_img_url: str, - primary_thumb_url: str, alt_urls: list[str], local_cover_hash_list: list[int], use_remote_alternates: bool = False, @@ -390,17 +389,17 @@ class IssueIdentifier: date_approved = True # remove any series that starts after the issue year - if keys["year"] is not None and item["start_year"] is not None: - if keys["year"] < item["start_year"]: + if keys["year"] is not None and item.start_year is not None: + if keys["year"] < item.start_year: date_approved = False - for name in [item["name"], *item["aliases"]]: + for name in [item.name, *item.aliases]: if utils.titles_match(keys["series"], name, self.series_match_thresh): length_approved = True break # remove any series from publishers on the filter - if item["publisher"] is not None: - publisher = item["publisher"] + if item.publisher is not None: + publisher = item.publisher if publisher is not None and publisher.casefold() in self.publisher_filter: publisher_approved = False @@ -413,9 +412,9 @@ class IssueIdentifier: self.callback(0, len(series_second_round_list)) # now sort the list by name length - series_second_round_list.sort(key=lambda x: len(x["name"]), reverse=False) + series_second_round_list.sort(key=lambda x: len(x.name), reverse=False) - series_by_id = {series["id"]: series for series in series_second_round_list} + series_by_id = {series.id: series for series in series_second_round_list} issue_list = None try: @@ -434,8 +433,8 @@ class IssueIdentifier: # now re-associate the issues and series # is this really needed? 
for issue in issue_list: - if issue["series"]["id"] in series_by_id: - shortlist.append((series_by_id[issue["series"]["id"]], issue)) + if issue.series.id in series_by_id: + shortlist.append((series_by_id[issue.series.id], issue)) if keys["year"] is None: self.log_msg(f"Found {len(shortlist)} series that have an issue #{keys['issue_number']}") @@ -453,12 +452,12 @@ class IssueIdentifier: counter += 1 self.log_msg( - f"Examining covers for ID: {series['id']} {series['name']} ({series['start_year']}) ...", + f"Examining covers for ID: {series.id} {series.name} ({series.start_year}) ...", newline=False, ) # parse out the cover date - _, month, year = utils.parse_date_str(issue["cover_date"]) + _, month, year = utils.parse_date_str(issue.cover_date) # Now check the cover match against the primary image hash_list = [cover_hash] @@ -466,12 +465,11 @@ class IssueIdentifier: hash_list.append(narrow_cover_hash) try: - image_url = issue["image_url"] - thumb_url = issue["image_thumb_url"] - alt_urls = issue["alt_image_urls"] + image_url = issue.image_url + alt_urls = issue.alt_image_urls score_item = self.get_issue_cover_match_score( - issue["id"], image_url, thumb_url, alt_urls, hash_list, use_remote_alternates=False + issue.id, image_url, alt_urls, hash_list, use_remote_alternates=False ) except Exception: logger.exception("Scoring series failed") @@ -479,24 +477,23 @@ class IssueIdentifier: return self.match_list match: IssueResult = { - "series": f"{series['name']} ({series['start_year']})", + "series": f"{series.name} ({series.start_year})", "distance": score_item["score"], "issue_number": keys["issue_number"], - "cv_issue_count": series["count_of_issues"], + "cv_issue_count": series.count_of_issues, "url_image_hash": score_item["hash"], - "issue_title": issue["name"], - "issue_id": issue["id"], - "series_id": series["id"], + "issue_title": issue.name, + "issue_id": issue.id, + "series_id": series.id, "month": month, "year": year, "publisher": None, "image_url": 
image_url, - "thumb_url": thumb_url, "alt_image_urls": alt_urls, - "description": issue["description"], + "description": issue.description, } - if series["publisher"] is not None: - match["publisher"] = series["publisher"] + if series.publisher is not None: + match["publisher"] = series.publisher self.match_list.append(match) @@ -556,7 +553,6 @@ class IssueIdentifier: score_item = self.get_issue_cover_match_score( m["issue_id"], m["image_url"], - m["thumb_url"], m["alt_image_urls"], hash_list, use_remote_alternates=True, diff --git a/comictaggerlib/issueselectionwindow.py b/comictaggerlib/issueselectionwindow.py index e6d9609..fc48fc2 100644 --- a/comictaggerlib/issueselectionwindow.py +++ b/comictaggerlib/issueselectionwindow.py @@ -118,15 +118,15 @@ class IssueSelectionWindow(QtWidgets.QDialog): for record in self.issue_list: self.twList.insertRow(row) - item_text = record["issue_number"] + item_text = record.issue_number item = IssueNumberTableWidgetItem(item_text) item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text) - item.setData(QtCore.Qt.ItemDataRole.UserRole, record["id"]) + item.setData(QtCore.Qt.ItemDataRole.UserRole, record.id) item.setData(QtCore.Qt.ItemDataRole.DisplayRole, item_text) item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled) self.twList.setItem(row, 0, item) - item_text = record["cover_date"] + item_text = record.cover_date if item_text is None: item_text = "" # remove the day of "YYYY-MM-DD" @@ -139,7 +139,7 @@ class IssueSelectionWindow(QtWidgets.QDialog): qtw_item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled) self.twList.setItem(row, 1, qtw_item) - item_text = record["name"] + item_text = record.name if item_text is None: item_text = "" qtw_item = QtWidgets.QTableWidgetItem(item_text) @@ -148,10 +148,10 @@ class IssueSelectionWindow(QtWidgets.QDialog): self.twList.setItem(row, 2, qtw_item) if ( - IssueString(record["issue_number"]).as_string().casefold() + 
IssueString(record.issue_number).as_string().casefold() == IssueString(self.issue_number).as_string().casefold() ): - self.initial_id = record["id"] + self.initial_id = record.id row += 1 @@ -174,12 +174,12 @@ class IssueSelectionWindow(QtWidgets.QDialog): # list selection was changed, update the the issue cover for record in self.issue_list: - if record["id"] == self.issue_id: - self.issue_number = record["issue_number"] - self.coverWidget.set_issue_details(self.issue_id, [record["image_url"], *record["alt_image_urls"]]) - if record["description"] is None: + if record.id == self.issue_id: + self.issue_number = record.issue_number + self.coverWidget.set_issue_details(self.issue_id, [record.image_url, *record.alt_image_urls]) + if record.description is None: self.teDescription.setText("") else: - self.teDescription.setText(record["description"]) + self.teDescription.setText(record.description) break diff --git a/comictaggerlib/resulttypes.py b/comictaggerlib/resulttypes.py index c15c179..5bfe9a6 100644 --- a/comictaggerlib/resulttypes.py +++ b/comictaggerlib/resulttypes.py @@ -9,7 +9,7 @@ class IssueResult(TypedDict): series: str distance: int issue_number: str - cv_issue_count: int + cv_issue_count: int | None url_image_hash: int issue_title: str issue_id: str @@ -18,7 +18,6 @@ class IssueResult(TypedDict): year: int | None publisher: str | None image_url: str - thumb_url: str alt_image_urls: list[str] description: str diff --git a/comictaggerlib/seriesselectionwindow.py b/comictaggerlib/seriesselectionwindow.py index 31fd5f4..7c1c479 100644 --- a/comictaggerlib/seriesselectionwindow.py +++ b/comictaggerlib/seriesselectionwindow.py @@ -303,9 +303,9 @@ class SeriesSelectionWindow(QtWidgets.QDialog): selector = IssueSelectionWindow(self, self.options, self.talker_api, self.series_id, self.issue_number) title = "" for record in self.ct_search_results: - if record["id"] == self.series_id: - title = record["name"] - title += " (" + str(record["start_year"]) + ")" + if 
record.id == self.series_id: + title = record.name + title += " (" + str(record.start_year) + ")" title += " - " break @@ -384,8 +384,7 @@ class SeriesSelectionWindow(QtWidgets.QDialog): # use '' as publisher name if None self.ct_search_results = list( filter( - lambda d: ("" if d["publisher"] is None else str(d["publisher"]).casefold()) - not in publisher_filter, + lambda d: ("" if d.publisher is None else str(d.publisher).casefold()) not in publisher_filter, self.ct_search_results, ) ) @@ -400,7 +399,7 @@ class SeriesSelectionWindow(QtWidgets.QDialog): try: self.ct_search_results = sorted( self.ct_search_results, - key=lambda i: (str(i["start_year"]), str(i["count_of_issues"])), + key=lambda i: (str(i.start_year), str(i.count_of_issues)), reverse=True, ) except Exception: @@ -408,7 +407,7 @@ class SeriesSelectionWindow(QtWidgets.QDialog): else: try: self.ct_search_results = sorted( - self.ct_search_results, key=lambda i: str(i["count_of_issues"]), reverse=True + self.ct_search_results, key=lambda i: str(i.count_of_issues), reverse=True ) except Exception: logger.exception("bad data error sorting results by count_of_issues") @@ -423,11 +422,11 @@ class SeriesSelectionWindow(QtWidgets.QDialog): def categorize(result: ComicSeries) -> int: # We don't remove anything on this one so that we only get exact matches - if utils.sanitize_title(result["name"], True).casefold() == sanitized_no_articles: + if utils.sanitize_title(result.name, True).casefold() == sanitized_no_articles: return 0 # this ensures that 'The Joker' is near the top even if you search 'Joker' - if utils.sanitize_title(result["name"], False).casefold() in sanitized: + if utils.sanitize_title(result.name, False).casefold() in sanitized: return 1 return 2 @@ -448,28 +447,28 @@ class SeriesSelectionWindow(QtWidgets.QDialog): for record in self.ct_search_results: self.twList.insertRow(row) - item_text = record["name"] + item_text = record.name item = QtWidgets.QTableWidgetItem(item_text) 
item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text) - item.setData(QtCore.Qt.ItemDataRole.UserRole, record["id"]) + item.setData(QtCore.Qt.ItemDataRole.UserRole, record.id) item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled) self.twList.setItem(row, 0, item) - item_text = str(record["start_year"]) + item_text = str(record.start_year) item = QtWidgets.QTableWidgetItem(item_text) item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text) item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled) self.twList.setItem(row, 1, item) - item_text = str(record["count_of_issues"]) + item_text = str(record.count_of_issues) item = QtWidgets.QTableWidgetItem(item_text) item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text) - item.setData(QtCore.Qt.ItemDataRole.DisplayRole, record["count_of_issues"]) + item.setData(QtCore.Qt.ItemDataRole.DisplayRole, record.count_of_issues) item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled) self.twList.setItem(row, 2, item) - if record["publisher"] is not None: - item_text = record["publisher"] + if record.publisher is not None: + item_text = record.publisher item.setData(QtCore.Qt.ItemDataRole.ToolTipRole, item_text) item = QtWidgets.QTableWidgetItem(item_text) item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled) @@ -521,10 +520,10 @@ class SeriesSelectionWindow(QtWidgets.QDialog): # list selection was changed, update the info on the series for record in self.ct_search_results: - if record["id"] == self.series_id: - if record["description"] is None: + if record.id == self.series_id: + if record.description is None: self.teDetails.setText("") else: - self.teDetails.setText(record["description"]) - self.imageWidget.set_url(record["image_url"]) + self.teDetails.setText(record.description) + self.imageWidget.set_url(record.image_url) break diff --git a/comictalker/comiccacher.py b/comictalker/comiccacher.py 
index 284f983..14eb13b 100644 --- a/comictalker/comiccacher.py +++ b/comictalker/comiccacher.py @@ -2,7 +2,7 @@ # # Copyright 2012-2014 Anthony Beville # -# Licensed under the Apache License, Version 2.0 (the "License"); +# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # @@ -15,6 +15,7 @@ # limitations under the License. from __future__ import annotations +import dataclasses import datetime import json import logging @@ -23,7 +24,7 @@ import pathlib import sqlite3 as lite from typing import Any -from comictalker.resulttypes import ComicIssue, ComicSeries +from comictalker.resulttypes import ComicIssue, ComicSeries, Credit logger = logging.getLogger(__name__) @@ -141,21 +142,21 @@ class ComicCacher: ( source_name, search_term.casefold(), - record["id"], + record.id, ), ) data = { - "id": record["id"], + "id": record.id, "source_name": source_name, - "name": record["name"], - "publisher": record.get("publisher", ""), - "count_of_issues": record.get("count_of_issues"), - "start_year": record.get("start_year"), - "image_url": record.get("image_url", ""), - "description": record.get("description", ""), + "name": record.name, + "publisher": record.publisher, + "count_of_issues": record.count_of_issues, + "start_year": record.start_year, + "image_url": record.image_url, + "description": record.description, "timestamp": datetime.datetime.now(), - "aliases": "\n".join(record.get("aliases", [])), + "aliases": "\n".join(record.aliases), } self.upsert(cur, "series", data) @@ -201,16 +202,16 @@ class ComicCacher: timestamp = datetime.datetime.now() data = { - "id": series_record["id"], + "id": series_record.id, "source_name": source_name, - "name": series_record["name"], - "publisher": series_record.get("publisher", ""), - "count_of_issues": series_record.get("count_of_issues"), - "start_year": series_record.get("start_year"), - "image_url": 
series_record.get("image_url", ""), - "description": series_record.get("description", ""), + "name": series_record.name, + "publisher": series_record.publisher, + "count_of_issues": series_record.count_of_issues, + "start_year": series_record.start_year, + "image_url": series_record.image_url, + "description": series_record.description, "timestamp": timestamp, - "aliases": "\n".join(series_record.get("aliases", [])), + "aliases": "\n".join(series_record.aliases), } self.upsert(cur, "series", data) @@ -226,25 +227,24 @@ class ComicCacher: for issue in series_issues: data = { - "id": issue["id"], - "series_id": issue["series"]["id"], + "id": issue.id, + "series_id": issue.series.id, "source_name": source_name, - "name": issue["name"], - "issue_number": issue["issue_number"], - "site_detail_url": issue.get("site_detail_url"), - "cover_date": issue.get("cover_date"), - "image_url": issue.get("image_url", ""), - "thumb_url": issue.get("image_thumb_url", ""), - "description": issue.get("description", ""), + "name": issue.name, + "issue_number": issue.issue_number, + "site_detail_url": issue.site_detail_url, + "cover_date": issue.cover_date, + "image_url": issue.image_url, + "description": issue.description, "timestamp": timestamp, - "aliases": "\n".join(issue.get("aliases", [])), - "alt_image_urls": "\n".join(issue.get("alt_image_urls", [])), - "characters": "\n".join(issue.get("characters", [])), - "locations": "\n".join(issue.get("locations", [])), - "teams": "\n".join(issue.get("teams", [])), - "story_arcs": "\n".join(issue.get("story_arcs", [])), - "credits": json.dumps(issue.get("credits")), - "complete": issue["complete"], + "aliases": "\n".join(issue.aliases), + "alt_image_urls": "\n".join(issue.alt_image_urls), + "characters": "\n".join(issue.characters), + "locations": "\n".join(issue.locations), + "teams": "\n".join(issue.teams), + "story_arcs": "\n".join(issue.story_arcs), + "credits": json.dumps([dataclasses.asdict(x) for x in issue.credits]), + "complete": 
issue.complete, } self.upsert(cur, "issues", data) @@ -288,7 +288,16 @@ class ComicCacher: def get_series_issues_info(self, series_id: str, source_name: str) -> list[ComicIssue]: # get_series_info should only fail if someone is doing something weird - series = self.get_series_info(series_id, source_name, False) or ComicSeries(id=series_id, name="") + series = self.get_series_info(series_id, source_name, False) or ComicSeries( + id=series_id, + name="", + description="", + image_url="", + publisher="", + start_year=None, + aliases=[], + count_of_issues=None, + ) con = lite.connect(self.db_file) with con: cur = con.cursor() @@ -313,6 +322,12 @@ class ComicCacher: # now process the results for row in rows: + credits = [] + try: + for credit in json.loads(row[13]): + credits.append(Credit(**credit)) + except Exception: + logger.exception("credits failed") record = ComicIssue( id=row[1], name=row[2], @@ -326,7 +341,7 @@ alt_image_urls=row[10].strip().splitlines(), characters=row[11].strip().splitlines(), locations=row[12].strip().splitlines(), - credits=json.loads(row[13]), + credits=credits, teams=row[14].strip().splitlines(), story_arcs=row[15].strip().splitlines(), complete=bool(row[16]), @@ -349,7 +364,7 @@ cur.execute( ( - "SELECT source_name,id,name,issue_number,site_detail_url,cover_date,image_url,thumb_url,description,aliases,series_id,alt_image_urls,characters,locations,credits,teams,story_arcs,complete" + "SELECT source_name,id,name,issue_number,site_detail_url,cover_date,image_url,description,aliases,series_id,alt_image_urls,characters,locations,credits,teams,story_arcs,complete" " FROM Issues WHERE id=? AND source_name=?" 
), [issue_id, source_name], ) @@ -360,7 +375,16 @@ if row: # get_series_info should only fail if someone is doing something weird - series = self.get_series_info(row[10], source_name, False) or ComicSeries(id=row[10], name="") + series = self.get_series_info(row[9], source_name, False) or ComicSeries( + id=row[9], + name="", + description="", + image_url="", + publisher="", + start_year=None, + aliases=[], + count_of_issues=None, + ) # now process the results @@ -371,17 +395,16 @@ site_detail_url=row[4], cover_date=row[5], image_url=row[6], - image_thumb_url=row[7], - description=row[8], + description=row[7], series=series, - aliases=row[9].strip().splitlines(), - alt_image_urls=row[11].strip().splitlines(), - characters=row[12].strip().splitlines(), - locations=row[13].strip().splitlines(), - credits=json.loads(row[14]), - teams=row[15].strip().splitlines(), - story_arcs=row[16].strip().splitlines(), - complete=bool(row[17]), + aliases=row[8].strip().splitlines(), + alt_image_urls=row[10].strip().splitlines(), + characters=row[11].strip().splitlines(), + locations=row[12].strip().splitlines(), + credits=json.loads(row[13]), + teams=row[14].strip().splitlines(), + story_arcs=row[15].strip().splitlines(), + complete=bool(row[16]), ) return record diff --git a/comictalker/resulttypes.py b/comictalker/resulttypes.py index 4139170..42f4fce 100644 --- a/comictalker/resulttypes.py +++ b/comictalker/resulttypes.py @@ -1,39 +1,48 @@ from __future__ import annotations -from typing_extensions import Required, TypedDict +import copy +import dataclasses -class Credits(TypedDict): +@dataclasses.dataclass +class Credit: name: str role: str -class ComicSeries(TypedDict, total=False): +@dataclasses.dataclass +class ComicSeries: aliases: list[str] - count_of_issues: int + count_of_issues: int | None description: str - id: Required[str] + id: str image_url: str - name: Required[str] + name: str publisher: str start_year: int | None + def 
copy(self) -> ComicSeries: + return copy.deepcopy(self) -class ComicIssue(TypedDict, total=False): + +@dataclasses.dataclass +class ComicIssue: aliases: list[str] cover_date: str description: str id: str image_url: str - image_thumb_url: str - issue_number: Required[str] - name: Required[str] + issue_number: str + name: str site_detail_url: str series: ComicSeries alt_image_urls: list[str] characters: list[str] locations: list[str] - credits: list[Credits] + credits: list[Credit] teams: list[str] story_arcs: list[str] complete: bool # Is this a complete ComicIssue? or is there more data to fetch + + def copy(self) -> ComicIssue: + return copy.deepcopy(self) diff --git a/comictalker/talker_utils.py b/comictalker/talker_utils.py index 108aec3..26efecb 100644 --- a/comictalker/talker_utils.py +++ b/comictalker/talker_utils.py @@ -35,47 +35,45 @@ def map_comic_issue_to_metadata( metadata = GenericMetadata() metadata.is_empty = False - # Is this best way to go about checking? - if issue_results["series"].get("name"): - metadata.series = utils.xlate(issue_results["series"]["name"]) - if issue_results.get("issue_number"): - metadata.issue = IssueString(issue_results["issue_number"]).as_string() - if issue_results.get("name"): - metadata.title = utils.xlate(issue_results["name"]) - if issue_results.get("image_url"): - metadata.cover_image = issue_results["image_url"] + metadata.series = utils.xlate(issue_results.series.name) + metadata.issue = IssueString(issue_results.issue_number).as_string() - if issue_results["series"].get("publisher"): - metadata.publisher = utils.xlate(issue_results["series"]["publisher"]) + if issue_results.name: + metadata.title = utils.xlate(issue_results.name) + if issue_results.image_url: + metadata.cover_image = issue_results.image_url - if issue_results.get("cover_date"): - metadata.day, metadata.month, metadata.year = utils.parse_date_str(issue_results["cover_date"]) - elif issue_results["series"].get("start_year"): - metadata.year = 
utils.xlate(issue_results["series"]["start_year"], True) + if issue_results.series.publisher: + metadata.publisher = utils.xlate(issue_results.series.publisher) - metadata.comments = cleanup_html(issue_results["description"], remove_html_tables) + if issue_results.cover_date: + metadata.day, metadata.month, metadata.year = utils.parse_date_str(issue_results.cover_date) + elif issue_results.series.start_year: + metadata.year = utils.xlate(issue_results.series.start_year, True) + + metadata.comments = cleanup_html(issue_results.description, remove_html_tables) if use_year_volume: - metadata.volume = issue_results["series"]["start_year"] + metadata.volume = issue_results.series.start_year metadata.tag_origin = source - metadata.issue_id = issue_results["id"] - metadata.web_link = issue_results["site_detail_url"] + metadata.issue_id = issue_results.id + metadata.web_link = issue_results.site_detail_url - for person in issue_results["credits"]: - if "role" in person: - roles = person["role"].split(",") + for person in issue_results.credits: + if person.role: + roles = person.role.split(",") for role in roles: # can we determine 'primary' from CV?? 
- metadata.add_credit(person["name"], role.title().strip(), False) + metadata.add_credit(person.name, role.title().strip(), False) - if issue_results.get("characters"): - metadata.characters = ", ".join(issue_results["characters"]) - if issue_results.get("teams"): - metadata.teams = ", ".join(issue_results["teams"]) - if issue_results.get("locations"): - metadata.locations = ", ".join(issue_results["locations"]) - if issue_results.get("story_arcs"): - metadata.story_arc = ", ".join(issue_results["story_arcs"]) + if issue_results.characters: + metadata.characters = ", ".join(issue_results.characters) + if issue_results.teams: + metadata.teams = ", ".join(issue_results.teams) + if issue_results.locations: + metadata.locations = ", ".join(issue_results.locations) + if issue_results.story_arcs: + metadata.story_arc = ", ".join(issue_results.story_arcs) return metadata diff --git a/comictalker/talkers/comicvine.py b/comictalker/talkers/comicvine.py index 781c239..3b264f3 100644 --- a/comictalker/talkers/comicvine.py +++ b/comictalker/talkers/comicvine.py @@ -31,7 +31,7 @@ from comicapi import utils from comicapi.genericmetadata import GenericMetadata from comicapi.issuestring import IssueString from comictalker.comiccacher import ComicCacher -from comictalker.resulttypes import ComicIssue, ComicSeries, Credits +from comictalker.resulttypes import ComicIssue, ComicSeries, Credit from comictalker.talkerbase import ComicTalker, SourceDetails, SourceStaticOptions, TalkerDataError, TalkerNetworkError logger = logging.getLogger(__name__) @@ -311,10 +311,8 @@ class ComicVineTalker(ComicTalker): # Extract image super and thumb to name only if record.get("image") is None: image_url = "" - image_thumb_url = "" else: image_url = record["image"].get("super_url", "") - image_thumb_url = record["image"].get("thumb_url", "") alt_images_list = [] for alt in record["associated_images"]: @@ -343,7 +341,9 @@ class ComicVineTalker(ComicTalker): persons_list = [] if 
record.get("person_credits"): for person in record["person_credits"]: - persons_list.append(Credits(name=person["name"], role=person["role"])) + persons_list.append(Credit(name=person["name"], role=person["role"])) + + series = self.fetch_series_data(record["volume"]["id"]) formatted_results.append( ComicIssue( @@ -352,11 +352,10 @@ class ComicVineTalker(ComicTalker): description=record.get("description", ""), id=str(record["id"]), image_url=image_url, - image_thumb_url=image_thumb_url, issue_number=record["issue_number"], name=record["name"], site_detail_url=record.get("site_detail_url", ""), - series=self.format_search_results([record["volume"]])[0], # CV uses volume to mean series + series=series, # CV uses volume to mean series alt_image_urls=alt_images_list, characters=character_list, locations=location_list, @@ -505,7 +504,7 @@ class ComicVineTalker(ComicTalker): series_data = self.fetch_series_data(int(series_id)) - if len(cached_series_issues_result) == series_data["count_of_issues"]: + if len(cached_series_issues_result) == series_data.count_of_issues: return cached_series_issues_result params = { # CV uses volume to mean series @@ -594,20 +593,20 @@ class ComicVineTalker(ComicTalker): if not IssueString(issue_number).as_string(): issue_number = "1" if ( - IssueString(record["issue_number"]).as_string().casefold() + IssueString(record.issue_number).as_string().casefold() == IssueString(issue_number).as_string().casefold() ): f_record = record break - if f_record and f_record["complete"]: + if f_record and f_record.complete: # Cache had full record return talker_utils.map_comic_issue_to_metadata( f_record, self.source_name_friendly, self.remove_html_tables, self.use_series_start_as_volume ) if f_record is not None: - return self.fetch_issue_data_by_issue_id(f_record["id"]) + return self.fetch_issue_data_by_issue_id(f_record.id) return GenericMetadata() def fetch_issue_data_by_issue_id(self, issue_id: str) -> GenericMetadata: @@ -615,7 +614,7 @@ class 
ComicVineTalker(ComicTalker): cvc = ComicCacher(self.cache_folder, self.version) cached_issues_result = cvc.get_issue_info(int(issue_id), self.source_name) - if cached_issues_result and cached_issues_result["complete"]: + if cached_issues_result and cached_issues_result.complete: return talker_utils.map_comic_issue_to_metadata( cached_issues_result, self.source_name_friendly, @@ -633,7 +632,7 @@ class ComicVineTalker(ComicTalker): cv_issues = self.format_issue_results([issue_results], True) # Due to issue not returning publisher, fetch the series. - cv_issues[0]["series"] = self.fetch_series_data(int(cv_issues[0]["series"]["id"])) + cv_issues[0].series = self.fetch_series_data(int(cv_issues[0].series.id)) cvc.add_series_issues_info(self.source_name, cv_issues) diff --git a/testing/comicvine.py b/testing/comicvine.py index 0508d1f..716abc1 100644 --- a/testing/comicvine.py +++ b/testing/comicvine.py @@ -164,7 +164,6 @@ comic_issue_result = ComicIssue( description=cv_issue_result["results"]["description"], id=str(cv_issue_result["results"]["id"]), image_url=cv_issue_result["results"]["image"]["super_url"], - image_thumb_url=cv_issue_result["results"]["image"]["thumb_url"], issue_number=cv_issue_result["results"]["issue_number"], name=cv_issue_result["results"]["name"], site_detail_url=cv_issue_result["results"]["site_detail_url"], @@ -172,11 +171,11 @@ comic_issue_result = ComicIssue( id=str(cv_issue_result["results"]["volume"]["id"]), name=cv_issue_result["results"]["volume"]["name"], aliases=[], - count_of_issues=0, - description="", - image_url="", - publisher="", - start_year=None, + count_of_issues=cv_volume_result["results"]["count_of_issues"], + description=cv_volume_result["results"]["description"], + image_url=cv_volume_result["results"]["image"]["super_url"], + publisher=cv_volume_result["results"]["publisher"]["name"], + start_year=int(cv_volume_result["results"]["start_year"]), ), characters=[], alt_image_urls=[], diff --git a/tests/comiccacher_test.py 
b/tests/comiccacher_test.py index 0247999..b98277f 100644 --- a/tests/comiccacher_test.py +++ b/tests/comiccacher_test.py @@ -25,9 +25,9 @@ def test_search_results(comic_cache): def test_series_info(comic_cache, series_info): comic_cache.add_series_info(series_record=series_info, source_name="test") vi = series_info.copy() - del vi["description"] - del vi["image_url"] - cache_result = comic_cache.get_series_info(series_id=series_info["id"], source_name="test") - del cache_result["description"] - del cache_result["image_url"] + # del vi["description"] + # del vi["image_url"] + cache_result = comic_cache.get_series_info(series_id=series_info.id, source_name="test") + # del cache_result["description"] + # del cache_result["image_url"] assert vi == cache_result diff --git a/tests/comicvinetalker_test.py b/tests/comicvinetalker_test.py index 4b63bec..5621189 100644 --- a/tests/comicvinetalker_test.py +++ b/tests/comicvinetalker_test.py @@ -1,5 +1,7 @@ from __future__ import annotations +import dataclasses + import pytest import comicapi.genericmetadata @@ -16,31 +18,18 @@ def test_search_for_series(comicvine_api, comic_cache): def test_fetch_series_data(comicvine_api, comic_cache): result = comicvine_api.fetch_series_data(23437) - del result["description"] - del result["image_url"] + # del result["description"] + # del result["image_url"] cache_result = comic_cache.get_series_info(23437, comicvine_api.source_name) - del cache_result["description"] - del cache_result["image_url"] + # del cache_result["description"] + # del cache_result["image_url"] assert result == cache_result def test_fetch_issues_by_series(comicvine_api, comic_cache): results = comicvine_api.fetch_issues_by_series(23437) cache_issues = comic_cache.get_series_issues_info(23437, comicvine_api.source_name) - for r in results: - del r["series"] - del r["image_thumb_url"] - del r["characters"] - del r["locations"] - del r["story_arcs"] - del r["teams"] - for c in cache_issues: - del c["series"] - del 
c["characters"] - del c["locations"] - del c["story_arcs"] - del c["teams"] - assert results == cache_issues + assert dataclasses.asdict(results[0])["series"] == dataclasses.asdict(cache_issues[0])["series"] def test_fetch_issue_data_by_issue_id(comicvine_api): @@ -54,7 +43,7 @@ def test_fetch_issues_by_series_issue_num_and_year(comicvine_api): cv_expected = testing.comicvine.comic_issue_result.copy() for r, e in zip(results, [cv_expected]): - assert r["series"] == e["series"] + assert r.series == e.series assert r == e diff --git a/tests/issueidentifier_test.py b/tests/issueidentifier_test.py index eee3b87..9a62d02 100644 --- a/tests/issueidentifier_test.py +++ b/tests/issueidentifier_test.py @@ -40,7 +40,6 @@ def test_get_issue_cover_match_score(cbz, options, comicvine_api): ).as_float() ), "https://comicvine.gamespot.com/a/uploads/scale_large/0/574/585444-109004_20080707014047_large.jpg", - "https://comicvine.gamespot.com/a/uploads/scale_avatar/0/574/585444-109004_20080707014047_large.jpg", "https://comicvine.gamespot.com/cory-doctorows-futuristic-tales-of-the-here-and-no/4000-140529/", [ii.calculate_hash(cbz.get_page(0))], ) @@ -69,7 +68,6 @@ def test_search(cbz, options, comicvine_api): "year": testing.comicvine.date[2], "publisher": testing.comicvine.cv_volume_result["results"]["publisher"]["name"], "image_url": testing.comicvine.cv_issue_result["results"]["image"]["super_url"], - "thumb_url": testing.comicvine.cv_issue_result["results"]["image"]["thumb_url"], "description": testing.comicvine.cv_issue_result["results"]["description"], } for r, e in zip(results, [cv_expected]):