Rename super_url to image_url in comiccacher. Merge fetch_issue_data_by_issue_id into fetch_comic_data. Fill comic volume info in comiccacher:get_volume_issues_info

This commit is contained in:
Mizaki 2022-10-19 19:33:51 +01:00
parent 8a8dea8aa4
commit e1ab72ec2a
6 changed files with 51 additions and 40 deletions

View File

@@ -382,7 +382,7 @@ def process_file_cli(
if opts.issue_id is not None:
# we were given the actual issue ID to search with
try:
ct_md = talker_api.fetch_issue_data_by_issue_id(opts.issue_id)
ct_md = talker_api.fetch_comic_data(0, "", opts.issue_id)
except TalkerError as e:
logger.exception(f"Error retrieving issue details. Save aborted.\n{e}")
match_results.fetch_data_failures.append(str(ca.path.absolute()))

View File

@@ -117,7 +117,7 @@ class ComicCacher:
+ "volume_id INT,"
+ "name TEXT,"
+ "issue_number TEXT,"
+ "super_url TEXT,"
+ "image_url TEXT,"
+ "thumb_url TEXT,"
+ "cover_date TEXT,"
+ "site_detail_url TEXT,"
@@ -162,22 +162,34 @@ class ComicCacher:
),
)
def get_search_results(self, source_name: str, search_term: str) -> list[ComicVolume]:
def get_search_results(
self, source_name: str, search_term: str, volume_id: int = 0, purge: bool = True
) -> list[ComicVolume]:
results = []
con = lite.connect(self.db_file)
with con:
con.text_factory = str
cur = con.cursor()
# purge stale search results
a_day_ago = datetime.datetime.today() - datetime.timedelta(days=1)
cur.execute("DELETE FROM VolumeSearchCache WHERE timestamp < ?", [str(a_day_ago)])
# DELETE requires an unlocked DB. get_volume_issues_info call will mean it is locked
if purge:
# purge stale search results
a_day_ago = datetime.datetime.today() - datetime.timedelta(days=1)
cur.execute("DELETE FROM VolumeSearchCache WHERE timestamp < ?", [str(a_day_ago)])
if not volume_id:
# fetch by name
cur.execute(
"SELECT * FROM VolumeSearchCache WHERE search_term=? AND source_name=?",
[search_term.casefold(), source_name],
)
else:
# fetch by id (for get_volume_issues_info)
cur.execute(
"SELECT * FROM VolumeSearchCache WHERE id=? AND source_name=?",
[volume_id, source_name],
)
# fetch
cur.execute(
"SELECT * FROM VolumeSearchCache WHERE search_term=? AND source_name=?",
[search_term.casefold(), source_name],
)
rows = cur.fetchall()
# now process the results
for record in rows:
@@ -277,7 +289,7 @@ class ComicCacher:
"issue_number": issue["issue_number"],
"site_detail_url": issue["site_detail_url"],
"cover_date": issue["cover_date"],
"super_url": issue["image"],
"image_url": issue["image"],
"thumb_url": issue["image_thumb"],
"description": issue["description"],
"timestamp": timestamp,
@@ -337,7 +349,7 @@ class ComicCacher:
cur.execute(
(
"SELECT source_name,id,name,issue_number,site_detail_url,cover_date,super_url,thumb_url,description,aliases"
"SELECT source_name,id,name,issue_number,site_detail_url,cover_date,image_url,thumb_url,description,aliases"
" FROM Issues WHERE volume_id=? AND source_name=?"
),
[volume_id, source_name],
@@ -346,6 +358,7 @@ class ComicCacher:
# now process the results
for row in rows:
volume_info = self.get_search_results(source_name, "", volume_id, False)
record = ComicIssue(
id=row[1],
name=row[2],
@@ -354,7 +367,16 @@ class ComicCacher:
cover_date=row[5],
image=row[6],
description=row[8],
volume={"id": volume_id, "name": ""}, # Name isn't recorded
volume=ComicVolume(
aliases=volume_info[0]["aliases"],
count_of_issues=volume_info[0]["count_of_issues"],
id=volume_id,
name=volume_info[0]["name"],
description=volume_info[0]["description"],
image=volume_info[0]["image"],
publisher=volume_info[0]["publisher"],
start_year=volume_info[0]["start_year"],
),
aliases=row[9],
)
@@ -382,7 +404,7 @@ class ComicCacher:
data = {
"id": issue_id,
"source_name": source_name,
"super_url": image_url,
"image_url": image_url,
"thumb_url": thumb_image_url,
"cover_date": cover_date,
"site_detail_url": site_detail_url,
@@ -398,7 +420,7 @@ class ComicCacher:
con.text_factory = str
cur.execute(
"SELECT super_url,thumb_url,cover_date,site_detail_url FROM Issues WHERE id=? AND source_name=?",
"SELECT image_url,thumb_url,cover_date,site_detail_url FROM Issues WHERE id=? AND source_name=?",
[issue_id, source_name],
)
row = cur.fetchone()

View File

@@ -92,10 +92,14 @@ class ComicTalker:
"The source has not implemented: 'search_for_series'",
)
# Get issue or volume information
def fetch_comic_data(self, series_id: int, issue_number: str = "") -> GenericMetadata:
# Get issue or volume information. issue_id is used by CLI
def fetch_comic_data(self, series_id: int = 0, issue_number: str = "", issue_id: int = 0) -> GenericMetadata:
"""This function is expected to handle a few possibilities:
1. Only series_id. Retrieve the SERIES/VOLUME information only.
2. series_id and issue_number. Retrieve the ISSUE information.
3. Only issue_id. Used solely by the CLI to retrieve the ISSUE information."""
try:
comic_data = self.talker.fetch_comic_data(series_id, issue_number)
comic_data = self.talker.fetch_comic_data(series_id, issue_number, issue_id)
return comic_data
except NotImplementedError:
logger.warning(f"{self.talker.source_details.name} has not implemented: 'fetch_comic_data'")
@@ -118,19 +122,6 @@ class ComicTalker:
"The source has not implemented: 'fetch_issues_by_volume'",
)
# For CLI
def fetch_issue_data_by_issue_id(self, issue_id: int) -> GenericMetadata:
try:
issue_result = self.talker.fetch_issue_data_by_issue_id(issue_id)
return issue_result
except NotImplementedError:
logger.warning(f"{self.talker.source_details.name} has not implemented: 'fetch_issue_data_by_issue_id'")
raise TalkerError(
self.talker.source_details.name,
4,
"The source has not implemented: 'fetch_issue_data_by_issue_id'",
)
# For issueidentifer
def fetch_alternate_cover_urls(self, issue_id: int) -> list[str]:
try:

View File

@@ -66,7 +66,7 @@ class SourceSettingsOptions(TypedDict):
text: Required[str] # Display text i.e "Remove HTML tables"
help_text: str # Tooltip text i.e "Enabling this will remove HTML tables from the description."
hidden: Required[bool] # To hide an option from the settings menu.
type: Required[type[bool] | type[int] | type[str]]
type: Required[type[bool] | type[int] | type[str] | type[float]]
value: Any
@@ -195,10 +195,6 @@ class TalkerBase:
def fetch_comic_data(self, series_id: int, issue_number: str = "") -> GenericMetadata:
raise NotImplementedError
# For CLI
def fetch_issue_data_by_issue_id(self, issue_id: int) -> GenericMetadata:
raise NotImplementedError
def fetch_alternate_cover_urls(self, issue_id: int) -> list[str]:
raise NotImplementedError

View File

@@ -559,11 +559,13 @@ class ComicVineTalker(TalkerBase):
return self.map_cv_volume_data_to_metadata(volume_results)
# Get issue or volume information
def fetch_comic_data(self, series_id: int, issue_number: str = "") -> GenericMetadata:
def fetch_comic_data(self, series_id: int, issue_number: str = "", issue_id: int = 0) -> GenericMetadata:
comic_data = GenericMetadata()
# TODO remove has_issues check? Enables testing. Possibly add source option to only get volume info?
if self.source_details.static_options.has_issues and issue_number:
if self.source_details.static_options.has_issues and issue_number and series_id:
comic_data = self.fetch_issue_data(series_id, issue_number)
elif issue_id:
comic_data = self.fetch_issue_data_by_issue_id(issue_id)
else:
# Only retrieve the volume data
comic_data = self.fetch_volume_data(series_id)

View File

@@ -37,7 +37,7 @@ def test_fetch_issues_by_volume(comicvine_api, comic_cache):
def test_fetch_issue_data_by_issue_id(comicvine_api, settings, mock_now, mock_version):
ct = comictalker.talkers.comicvine.ComicVineTalker()
result = ct.fetch_issue_data_by_issue_id(140529)
result = ct.fetch_comic_data(0, "", 140529)
assert result == testing.comicvine.cv_md