Change the ComicVolume and ComicIssue image fields to image_url and image_thumb_url. Change the search/volume DB layout to remove duplication of data. Fix up some tests.

This commit is contained in:
Mizaki 2022-10-23 22:40:15 +01:00
parent c9cd58fecb
commit d23258f359
11 changed files with 82 additions and 148 deletions

View File

@ -430,8 +430,8 @@ class IssueIdentifier:
hash_list.append(narrow_cover_hash)
try:
image_url = series["image"]
thumb_url = series["image"]
image_url = series["image_url"]
thumb_url = series["image_url"]
page_url = ""
score_item = self.get_issue_cover_match_score(
@ -525,8 +525,8 @@ class IssueIdentifier:
hash_list.append(narrow_cover_hash)
try:
image_url = issue["image"]
thumb_url = issue["image_thumb"]
image_url = issue["image_url"]
thumb_url = issue["image_thumb_url"]
page_url = issue["site_detail_url"]
score_item = self.get_issue_cover_match_score(

View File

@ -180,7 +180,7 @@ class IssueSelectionWindow(QtWidgets.QDialog):
for record in self.issue_list:
if record["id"] == self.issue_id:
self.issue_number = record["issue_number"]
self.coverWidget.set_issue_details(self.issue_id, record["site_detail_url"], record["image"])
self.coverWidget.set_issue_details(self.issue_id, record["site_detail_url"], record["image_url"])
if record["description"] is None:
self.teDescription.setText("")
else:

View File

@ -149,7 +149,9 @@ class MatchSelectionWindow(QtWidgets.QDialog):
if prev is not None and prev.row() == curr.row():
return
self.altCoverWidget.set_issue_id(self.current_match()["page_url"], self.current_match()["image_url"])
self.altCoverWidget.set_issue_details(
self.current_match()["issue_id"], self.current_match()["page_url"], self.current_match()["image_url"]
)
if self.current_match()["description"] is None:
self.teDescription.setText("")
else:

View File

@ -516,5 +516,5 @@ class VolumeSelectionWindow(QtWidgets.QDialog):
self.teDetails.setText("")
else:
self.teDetails.setText(record["description"])
self.imageWidget.set_url(record["image"])
self.imageWidget.set_url(record["image_url"])
break

View File

@ -77,15 +77,8 @@ class ComicCacher:
"CREATE TABLE VolumeSearchCache("
+ "search_term TEXT,"
+ "id INT NOT NULL,"
+ "name TEXT,"
+ "start_year INT,"
+ "publisher TEXT,"
+ "count_of_issues INT,"
+ "image_url TEXT,"
+ "description TEXT,"
+ "timestamp DATE DEFAULT (datetime('now','localtime')),"
+ "source_name TEXT NOT NULL,"
+ "aliases TEXT)" # Newline separated
+ "source_name TEXT NOT NULL)"
)
cur.execute(
@ -95,9 +88,11 @@ class ComicCacher:
+ "publisher TEXT,"
+ "count_of_issues INT,"
+ "start_year INT,"
+ "image_url TEXT,"
+ "aliases TEXT," # Newline separated
+ "description TEXT,"
+ "timestamp DATE DEFAULT (datetime('now','localtime')), "
+ "source_name TEXT NOT NULL,"
+ "aliases TEXT," # Newline separated
+ "PRIMARY KEY (id, source_name))"
)
@ -143,65 +138,48 @@ class ComicCacher:
# now add in new results
for record in ct_search_results:
cur.execute(
"INSERT INTO VolumeSearchCache "
+ "(source_name, search_term, id, name, start_year, publisher, count_of_issues, image_url, description, aliases) "
+ "VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
"INSERT INTO VolumeSearchCache " + "(source_name, search_term, id) " + "VALUES(?, ?, ?)",
(
source_name,
search_term.casefold(),
record["id"],
record["name"],
record["start_year"],
record["publisher"],
record["count_of_issues"],
record["image"],
record["description"],
record["aliases"],
),
)
def get_search_results(
self, source_name: str, search_term: str, volume_id: int = 0, purge: bool = True
) -> list[ComicVolume]:
data = {
"source_name": source_name,
"timestamp": datetime.datetime.now(),
}
data.update(record)
self.upsert(cur, "volumes", data)
def get_search_results(self, source_name: str, search_term: str) -> list[ComicVolume]:
results = []
con = lite.connect(self.db_file)
with con:
con.text_factory = str
cur = con.cursor()
# DELETE requires an unlocked DB. get_volume_issues_info call will mean it is locked
if purge:
# purge stale search results
a_day_ago = datetime.datetime.today() - datetime.timedelta(days=1)
cur.execute("DELETE FROM VolumeSearchCache WHERE timestamp < ?", [str(a_day_ago)])
if not volume_id:
# fetch by name
cur.execute(
"SELECT * FROM VolumeSearchCache WHERE search_term=? AND source_name=?",
[search_term.casefold(), source_name],
)
else:
# fetch by id (for get_volume_issues_info)
cur.execute(
"SELECT * FROM VolumeSearchCache WHERE id=? AND source_name=?",
[volume_id, source_name],
)
cur.execute(
"SELECT * FROM VolumeSearchCache INNER JOIN Volumes on"
" VolumeSearchCache.id=Volumes.id AND VolumeSearchCache.source_name=Volumes.source_name"
" WHERE search_term=? AND VolumeSearchCache.source_name=?",
[search_term.casefold(), source_name],
)
rows = cur.fetchall()
# now process the results
for record in rows:
result = ComicVolume(
id=record[1],
name=record[2],
start_year=record[3],
count_of_issues=record[5],
description=record[7],
publisher=record[4],
image=record[6],
id=record[4],
name=record[5],
publisher=record[6],
count_of_issues=record[7],
start_year=record[8],
image_url=record[9],
aliases=record[10],
description=record[11],
)
results.append(result)
@ -289,8 +267,8 @@ class ComicCacher:
"issue_number": issue["issue_number"],
"site_detail_url": issue["site_detail_url"],
"cover_date": issue["cover_date"],
"image_url": issue["image"],
"thumb_url": issue["image_thumb"],
"image_url": issue["image_url"],
"thumb_url": issue["image_thumb_url"],
"description": issue["description"],
"timestamp": timestamp,
"aliases": issue["aliases"],
@ -311,8 +289,7 @@ class ComicCacher:
# fetch
cur.execute(
"SELECT source_name,id,name,publisher,count_of_issues,start_year,aliases FROM Volumes"
" WHERE id=? AND source_name=?",
"SELECT * FROM Volumes" " WHERE id=? AND source_name=?",
[volume_id, source_name],
)
@ -323,17 +300,21 @@ class ComicCacher:
# since ID is primary key, there is only one row
result = ComicVolume(
id=row[1],
name=row[2],
count_of_issues=row[4],
start_year=row[5],
publisher=row[3],
id=row[0],
name=row[1],
publisher=row[2],
count_of_issues=row[3],
start_year=row[4],
image_url=row[5],
aliases=row[6],
description=row[7],
)
return result
def get_volume_issues_info(self, volume_id: int, source_name: str) -> list[ComicIssue]:
# get_volume_info should only fail if someone is doing something weird
volume = self.get_volume_info(volume_id, source_name) or ComicVolume(id=volume_id, name="")
con = lite.connect(self.db_file)
with con:
cur = con.cursor()
@ -358,33 +339,15 @@ class ComicCacher:
# now process the results
for row in rows:
volume_info = self.get_search_results(source_name, "", volume_id, False)
# Cover if it comes back empty
if not volume_info:
volume_info = [
ComicVolume(
id=volume_id,
name="",
)
]
record = ComicIssue(
id=row[1],
name=row[2],
issue_number=row[3],
site_detail_url=row[4],
cover_date=row[5],
image=row[6],
image_url=row[6],
description=row[8],
volume=ComicVolume(
aliases=volume_info[0].get("aliases", ""),
count_of_issues=volume_info[0].get("count_of_issues", 0),
id=volume_id,
name=volume_info[0].get("name", ""),
description=volume_info[0].get("description", ""),
image=volume_info[0].get("image", ""),
publisher=volume_info[0].get("publisher", ""),
start_year=volume_info[0].get("start_year", 0),
),
volume=volume,
aliases=row[9],
)

View File

@ -8,7 +8,7 @@ class ComicVolume(TypedDict, total=False):
count_of_issues: int
description: str
id: Required[int]
image: str
image_url: str
name: Required[str]
publisher: str
start_year: int
@ -19,8 +19,8 @@ class ComicIssue(TypedDict, total=False):
cover_date: str
description: str
id: int
image: str
image_thumb: str
image_url: str
image_thumb_url: str
issue_number: Required[str]
name: Required[str]
site_detail_url: str

View File

@ -392,12 +392,12 @@ class ComicVineTalker(TalkerBase):
pub_name = record["publisher"]["name"]
if record.get("image") is None:
image = ""
image_url = ""
else:
if record["image"].get("super_url") is None:
image = ""
image_url = ""
else:
image = record["image"]["super_url"]
image_url = record["image"]["super_url"]
if record.get("start_year") is None:
start_year = 0
@ -410,7 +410,7 @@ class ComicVineTalker(TalkerBase):
count_of_issues=record.get("count_of_issues", 0),
description=record.get("description", ""),
id=record["id"],
image=image,
image_url=image_url,
name=record["name"],
publisher=pub_name,
start_year=start_year,
@ -424,11 +424,11 @@ class ComicVineTalker(TalkerBase):
for record in issue_results:
# Extract image super and thumb to name only
if record.get("image") is None:
image = ""
image_thumb = ""
image_url = ""
image_thumb_url = ""
else:
image = record["image"].get("super_url", "")
image_thumb = record["image"].get("thumb_url", "")
image_url = record["image"].get("super_url", "")
image_thumb_url = record["image"].get("thumb_url", "")
formatted_results.append(
ComicIssue(
@ -436,8 +436,8 @@ class ComicVineTalker(TalkerBase):
cover_date=record.get("cover_date", ""),
description=record.get("description", ""),
id=record["id"],
image=image,
image_thumb=image_thumb,
image_url=image_url,
image_thumb_url=image_thumb_url,
issue_number=record["issue_number"],
name=record["name"],
site_detail_url=record.get("site_detail_url", ""),
@ -1033,7 +1033,7 @@ class ComicVineTalker(TalkerBase):
ComicTalker.alt_url_list_fetch_complete(alt_cover_url_list)
def repair_urls(self, issue_list: list[CVIssuesResults] | list[ComicIssue] | list[CVIssueDetailResults]) -> None:
def repair_urls(self, issue_list: list[CVIssuesResults] | list[CVIssueDetailResults]) -> None:
# make sure there are URLs for the image fields
for issue in issue_list:
if issue["image"] is None:

View File

@ -9,7 +9,7 @@ search_results = [
count_of_issues=1,
description="this is a description",
id=1,
image="https://test.org/image/1",
image_url="https://test.org/image/1",
name="test",
publisher="test",
start_year=0,
@ -18,8 +18,8 @@ search_results = [
comictalker.resulttypes.ComicVolume(
count_of_issues=1,
description="this is a description",
id=1,
image="https://test.org/image/2",
id=2,
image_url="https://test.org/image/2",
name="test 2",
publisher="test",
start_year=0,

View File

@ -162,8 +162,8 @@ comic_issue_result: dict[str, Any] = {
"cover_date": cv_issue_result["results"]["cover_date"],
"description": cv_issue_result["results"]["description"],
"id": cv_issue_result["results"]["id"],
"image": cv_issue_result["results"]["image"]["super_url"],
"image_thumb": cv_issue_result["results"]["image"]["thumb_url"],
"image_url": cv_issue_result["results"]["image"]["super_url"],
"image_thumb_url": cv_issue_result["results"]["image"]["thumb_url"],
"issue_number": cv_issue_result["results"]["issue_number"],
"name": cv_issue_result["results"]["name"],
"site_detail_url": cv_issue_result["results"]["site_detail_url"],
@ -224,7 +224,7 @@ cv_md = comicapi.genericmetadata.GenericMetadata(
rights=None,
identifier=None,
last_mark=None,
cover_image=None,
cover_image=cv_issue_result["results"]["image"]["super_url"],
)

View File

@ -31,13 +31,8 @@ def test_volume_info(comic_cache, volume_info):
comic_cache.add_volume_info(volume_record=volume_info, source_name="test")
vi = volume_info.copy()
del vi["description"]
del vi["image"]
assert vi == comic_cache.get_volume_info(volume_id=volume_info["id"], source_name="test")
@pytest.mark.parametrize("details", select_details)
def test_issue_select_details(comic_cache, details):
comic_cache.add_issue_select_details(**details, source_name="test")
det = details.copy()
del det["issue_id"]
assert det == comic_cache.get_issue_select_details(details["issue_id"], "test")
del vi["image_url"]
cache_result = comic_cache.get_volume_info(volume_id=volume_info["id"], source_name="test")
del cache_result["description"]
del cache_result["image_url"]
assert vi == cache_result

View File

@ -19,8 +19,11 @@ def test_fetch_volume_data(comicvine_api, comic_cache):
ct = comictalker.talkers.comicvine.ComicVineTalker()
result = ct.fetch_partial_volume_data(23437)
del result["description"]
del result["image"]
assert result == comic_cache.get_volume_info(23437, ct.source_name)
del result["image_url"]
cache_result = comic_cache.get_volume_info(23437, ct.source_name)
del cache_result["description"]
del cache_result["image_url"]
assert result == cache_result
def test_fetch_issues_by_volume(comicvine_api, comic_cache):
@ -29,7 +32,7 @@ def test_fetch_issues_by_volume(comicvine_api, comic_cache):
cache_issues = comic_cache.get_volume_issues_info(23437, ct.source_name)
for r in results:
del r["volume"]
del r["image_thumb"]
del r["image_thumb_url"]
for c in cache_issues:
del c["volume"]
assert results == cache_issues
@ -50,7 +53,8 @@ def test_fetch_issues_by_volume_issue_num_and_year(comicvine_api):
{"params": {"field_list": "id,volume,issue_number,name,image,cover_date,site_detail_url,description,aliases"}},
)
for r, e in zip(results, [cv_expected]):
del r["image_thumb"]
del r["image_thumb_url"]
del r["image_url"]
assert r == e
@ -66,33 +70,3 @@ def test_fetch_issue_data(comicvine_api, settings, mock_now, mock_version, volum
ct = comictalker.talkers.comicvine.ComicVineTalker()
results = ct.fetch_issue_data(volume_id, issue_number)
assert results == expected
def test_fetch_issue_select_details(comicvine_api, mock_now, mock_version):
ct = comictalker.talkers.comicvine.ComicVineTalker()
result = ct.fetch_issue_select_details(140529)
expected = {
"cover_date": testing.comicvine.cv_issue_result["results"]["cover_date"],
"site_detail_url": testing.comicvine.cv_issue_result["results"]["site_detail_url"],
"image_url": testing.comicvine.cv_issue_result["results"]["image"]["super_url"],
"thumb_image_url": testing.comicvine.cv_issue_result["results"]["image"]["thumb_url"],
}
assert result == expected
@pytest.mark.parametrize("details", select_details)
def test_issue_select_details(comic_cache, details):
expected = details.copy()
del expected["issue_id"]
ct = comictalker.talkers.comicvine.ComicVineTalker()
ct.cache_issue_select_details(
issue_id=details["issue_id"],
image_url=details["image_url"],
thumb_url=details["thumb_image_url"],
cover_date=details["cover_date"],
page_url=details["site_detail_url"],
)
result = comic_cache.get_issue_select_details(details["issue_id"], ct.source_name)
assert result == expected