Add source field to cache DB.

Add source to cache db.

Rename comicvinecacher to comiccacher and update refs.

Fix comment spacing.

Move source_name to end to reduce changes.

Move source_name to end to reduce changes. Fixed.

Fix syntax.

Fix various issues with DB changes.

Move new source_name to bottom.

Remove source_name from CV_.

Revert id to volume_id
This commit is contained in:
Mizaki 2022-06-28 15:21:35 +01:00 committed by Timmy Welch
parent 3ddfacd89e
commit b72fcaa9a9
3 changed files with 83 additions and 76 deletions

View File

@ -28,10 +28,10 @@ from comictaggerlib.settings import ComicTaggerSettings
logger = logging.getLogger(__name__)
class ComicVineCacher:
class ComicCacher:
def __init__(self) -> None:
self.settings_folder = ComicTaggerSettings.get_settings_folder()
self.db_file = os.path.join(self.settings_folder, "cv_cache.db")
self.db_file = os.path.join(self.settings_folder, "comic_cache.db")
self.version_file = os.path.join(self.settings_folder, "cache_version.txt")
# verify that cache is from same version as this one
@ -72,42 +72,45 @@ class ComicVineCacher:
# create tables
with con:
cur = con.cursor()
# name,id,start_year,publisher,image,description,count_of_issues
# source_name,name,id,start_year,publisher,image,description,count_of_issues
cur.execute(
"CREATE TABLE VolumeSearchCache("
+ "search_term TEXT,"
+ "id INT,"
+ "id INT NOT NULL,"
+ "name TEXT,"
+ "start_year INT,"
+ "publisher TEXT,"
+ "count_of_issues INT,"
+ "image_url TEXT,"
+ "description TEXT,"
+ "timestamp DATE DEFAULT (datetime('now','localtime'))) "
+ "timestamp DATE DEFAULT (datetime('now','localtime')),"
+ "source_name TEXT NOT NULL)"
)
cur.execute(
"CREATE TABLE Volumes("
+ "id INT,"
+ "id INT NOT NULL,"
+ "name TEXT,"
+ "publisher TEXT,"
+ "count_of_issues INT,"
+ "start_year INT,"
+ "timestamp DATE DEFAULT (datetime('now','localtime')), "
+ "PRIMARY KEY (id))"
+ "source_name TEXT NOT NULL,"
+ "PRIMARY KEY (id, source_name))"
)
cur.execute(
"CREATE TABLE AltCovers("
+ "issue_id INT,"
+ "issue_id INT NOT NULL,"
+ "url_list TEXT,"
+ "timestamp DATE DEFAULT (datetime('now','localtime')), "
+ "PRIMARY KEY (issue_id))"
+ "source_name TEXT NOT NULL,"
+ "PRIMARY KEY (issue_id, source_name))"
)
cur.execute(
"CREATE TABLE Issues("
+ "id INT,"
+ "id INT NOT NULL,"
+ "volume_id INT,"
+ "name TEXT,"
+ "issue_number TEXT,"
@ -117,10 +120,11 @@ class ComicVineCacher:
+ "site_detail_url TEXT,"
+ "description TEXT,"
+ "timestamp DATE DEFAULT (datetime('now','localtime')), "
+ "PRIMARY KEY (id))"
+ "source_name TEXT NOT NULL,"
+ "PRIMARY KEY (id, source_name))"
)
def add_search_results(self, search_term: str, cv_search_results: list[CVVolumeResults]) -> None:
def add_search_results(self, source_name: str, search_term: str, cv_search_results: list[CVVolumeResults]) -> None:
con = lite.connect(self.db_file)
@ -146,9 +150,10 @@ class ComicVineCacher:
cur.execute(
"INSERT INTO VolumeSearchCache "
+ "(search_term, id, name, start_year, publisher, count_of_issues, image_url, description) "
+ "VALUES(?, ?, ?, ?, ?, ?, ?, ?)",
+ "(source_name, search_term, id, name, start_year, publisher, count_of_issues, image_url, description) "
+ "VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)",
(
source_name,
search_term.casefold(),
record["id"],
record["name"],
@ -160,7 +165,7 @@ class ComicVineCacher:
),
)
def get_search_results(self, search_term: str) -> list[CVVolumeResults]:
def get_search_results(self, source_name: str, search_term: str) -> list[CVVolumeResults]:
results = []
con = lite.connect(self.db_file)
@ -173,7 +178,7 @@ class ComicVineCacher:
cur.execute("DELETE FROM VolumeSearchCache WHERE timestamp < ?", [str(a_day_ago)])
# fetch
cur.execute("SELECT * FROM VolumeSearchCache WHERE search_term=?", [search_term.casefold()])
cur.execute("SELECT * FROM VolumeSearchCache WHERE search_term=? AND source_name=?", [search_term.casefold(), source_name])
rows = cur.fetchall()
# now process the results
for record in rows:
@ -186,6 +191,7 @@ class ComicVineCacher:
"description": record[7],
"publisher": {"name": record[4]},
"image": {"super_url": record[6]},
"source": record[9],
}
)
@ -193,7 +199,7 @@ class ComicVineCacher:
return results
def add_alt_covers(self, issue_id: int, url_list: list[str]) -> None:
def add_alt_covers(self, source_name: str, issue_id: int, url_list: list[str]) -> None:
con = lite.connect(self.db_file)
@ -202,13 +208,13 @@ class ComicVineCacher:
cur = con.cursor()
# remove all previous entries with this search term
cur.execute("DELETE FROM AltCovers WHERE issue_id = ?", [issue_id])
cur.execute("DELETE FROM AltCovers WHERE issue_id=? AND source_name=?", [issue_id, source_name])
url_list_str = ", ".join(url_list)
# now add in new record
cur.execute("INSERT INTO AltCovers (issue_id, url_list) VALUES(?, ?)", (issue_id, url_list_str))
cur.execute("INSERT INTO AltCovers (source_name, issue_id, url_list) VALUES(?, ?, ?)", (source_name, issue_id, url_list_str))
def get_alt_covers(self, issue_id: int) -> list[str]:
def get_alt_covers(self, source_name: str, issue_id: int) -> list[str]:
con = lite.connect(self.db_file)
with con:
@ -220,7 +226,7 @@ class ComicVineCacher:
a_month_ago = datetime.datetime.today() - datetime.timedelta(days=30)
cur.execute("DELETE FROM AltCovers WHERE timestamp < ?", [str(a_month_ago)])
cur.execute("SELECT url_list FROM AltCovers WHERE issue_id=?", [issue_id])
cur.execute("SELECT url_list FROM AltCovers WHERE issue_id=? AND source_name=?", [issue_id, source_name])
row = cur.fetchone()
if row is None:
return []
@ -234,7 +240,7 @@ class ComicVineCacher:
url_list.append(str(item).strip())
return url_list
def add_volume_info(self, cv_volume_record: CVVolumeResults) -> None:
def add_volume_info(self, source_name: str, cv_volume_record: CVVolumeResults) -> None:
con = lite.connect(self.db_file)
@ -250,15 +256,17 @@ class ComicVineCacher:
pub_name = cv_volume_record["publisher"]["name"]
data = {
"id": cv_volume_record["id"],
"source_name": source_name,
"name": cv_volume_record["name"],
"publisher": pub_name,
"count_of_issues": cv_volume_record["count_of_issues"],
"start_year": cv_volume_record["start_year"],
"timestamp": timestamp,
}
self.upsert(cur, "volumes", "id", cv_volume_record["id"], data)
self.upsert(cur, "volumes", data)
def add_volume_issues_info(self, volume_id: int, cv_volume_issues: list[CVIssuesResults]) -> None:
def add_volume_issues_info(self, source_name: str, volume_id: int, cv_volume_issues: list[CVIssuesResults]) -> None:
con = lite.connect(self.db_file)
@ -271,7 +279,9 @@ class ComicVineCacher:
for issue in cv_volume_issues:
data = {
"id": issue["id"],
"volume_id": volume_id,
"source_name": source_name,
"name": issue["name"],
"issue_number": issue["issue_number"],
"site_detail_url": issue["site_detail_url"],
@ -281,9 +291,9 @@ class ComicVineCacher:
"description": issue["description"],
"timestamp": timestamp,
}
self.upsert(cur, "issues", "id", issue["id"], data)
self.upsert(cur, "issues", data)
def get_volume_info(self, volume_id: int) -> CVVolumeResults | None:
def get_volume_info(self, volume_id: int, source_name: str) -> CVVolumeResults | None:
result: CVVolumeResults | None = None
@ -297,7 +307,7 @@ class ComicVineCacher:
cur.execute("DELETE FROM Volumes WHERE timestamp < ?", [str(a_week_ago)])
# fetch
cur.execute("SELECT id,name,publisher,count_of_issues,start_year FROM Volumes WHERE id = ?", [volume_id])
cur.execute("SELECT source_name,id,name,publisher,count_of_issues,start_year FROM Volumes WHERE id=? AND source_name=?", [volume_id, source_name])
row = cur.fetchone()
@ -307,17 +317,18 @@ class ComicVineCacher:
# since (id, source_name) is the primary key, there is only one row
result = CVVolumeResults(
{
"id": row[0],
"name": row[1],
"count_of_issues": row[3],
"start_year": row[4],
"publisher": {"name": row[2]},
"source_name": row[0],
"id": row[1],
"name": row[2],
"count_of_issues": row[4],
"start_year": row[5],
"publisher": {"name": row[3]},
}
)
return result
def get_volume_issues_info(self, volume_id: int) -> list[CVIssuesResults]:
def get_volume_issues_info(self, volume_id: int, source_name: str) -> list[CVIssuesResults]:
con = lite.connect(self.db_file)
with con:
@ -334,10 +345,10 @@ class ComicVineCacher:
cur.execute(
(
"SELECT id,name,issue_number,site_detail_url,cover_date,super_url,thumb_url,description"
" FROM Issues WHERE volume_id = ?"
"SELECT source_name,id,name,issue_number,site_detail_url,cover_date,super_url,thumb_url,description"
" FROM Issues WHERE volume_id=? AND source_name=?"
),
[volume_id],
[volume_id, source_name]
)
rows = cur.fetchall()
@ -345,13 +356,14 @@ class ComicVineCacher:
for row in rows:
record = CVIssuesResults(
{
"id": row[0],
"name": row[1],
"issue_number": row[2],
"site_detail_url": row[3],
"cover_date": row[4],
"image": {"super_url": row[5], "thumb_url": row[6]},
"description": row[7],
"id": row[1],
"name": row[2],
"issue_number": row[3],
"site_detail_url": row[4],
"cover_date": row[5],
"image": {"super_url": row[6], "thumb_url": row[7]},
"description": row[8],
"source_name": row[0],
}
)
@ -371,22 +383,23 @@ class ComicVineCacher:
timestamp = datetime.datetime.now()
data = {
"id": issue_id,
"super_url": image_url,
"thumb_url": thumb_image_url,
"cover_date": cover_date,
"site_detail_url": site_detail_url,
"timestamp": timestamp,
}
self.upsert(cur, "issues", "id", issue_id, data)
self.upsert(cur, "issues", data)
def get_issue_select_details(self, issue_id: int) -> SelectDetails:
def get_issue_select_details(self, issue_id: int, source_name: str) -> SelectDetails:
con = lite.connect(self.db_file)
with con:
cur = con.cursor()
con.text_factory = str
cur.execute("SELECT super_url,thumb_url,cover_date,site_detail_url FROM Issues WHERE id=?", [issue_id])
cur.execute("SELECT super_url,thumb_url,cover_date,site_detail_url FROM Issues WHERE id=? AND source_name=?", [issue_id, source_name])
row = cur.fetchone()
details = SelectDetails(
@ -405,11 +418,10 @@ class ComicVineCacher:
return details
def upsert(self, cur: lite.Cursor, tablename: str, pkname: str, pkval: Any, data: dict[str, Any]) -> None:
def upsert(self, cur: lite.Cursor, tablename: str, data: dict[str, Any]) -> None:
"""This does an insert if the given PK doesn't exist, and an
update if it does
TODO: look into checking if UPDATE is needed
TODO: should the cursor be created here, and not up the stack?
"""
@ -432,13 +444,5 @@ class ComicVineCacher:
ins_slots += "?"
set_slots += key + " = ?"
keys += ", " + pkname
vals.append(pkval)
ins_slots += ", ?"
condition = pkname + " = ?"
sql_ins = f"INSERT OR IGNORE INTO {tablename} ({keys}) VALUES ({ins_slots})"
sql_ins = f"INSERT OR REPLACE INTO {tablename} ({keys}) VALUES ({ins_slots})"
cur.execute(sql_ins, vals)
sql_upd = f"UPDATE {tablename} SET {set_slots} WHERE {condition}"
cur.execute(sql_upd, vals)

View File

@ -29,7 +29,7 @@ from comicapi import utils
from comicapi.genericmetadata import GenericMetadata
from comicapi.issuestring import IssueString
from comictaggerlib import ctversion
from comictaggerlib.comicvinecacher import ComicVineCacher
from comictaggerlib.comiccacher import ComicCacher
from comictaggerlib.resulttypes import CVIssueDetailResults, CVIssuesResults, CVResult, CVVolumeResults, SelectDetails
from comictaggerlib.settings import ComicTaggerSettings
@ -92,6 +92,9 @@ class ComicVineTalker:
return "Comic Vine rate limit exceeded. Please wait a bit."
def __init__(self) -> None:
# Identity name for the information source
self.source_name = 'comicvine'
self.wait_for_rate_limit = False
# key that is registered to comictagger
@ -212,9 +215,9 @@ class ComicVineTalker:
# Before we search online, look in our cache, since we might have done this same search recently
# For literal searches always retrieve from online
cvc = ComicVineCacher()
cvc = ComicCacher()
if not refresh_cache and not literal:
cached_search_results = cvc.get_search_results(series_name)
cached_search_results = cvc.get_search_results(self.source_name, series_name)
if len(cached_search_results) > 0:
return cached_search_results
@ -307,15 +310,15 @@ class ComicVineTalker:
# Cache these search results, even if it's literal we cache the results
# The most it will cause is extra processing time
cvc.add_search_results(series_name, search_results)
cvc.add_search_results(self.source_name, series_name, search_results)
return search_results
def fetch_volume_data(self, series_id: int) -> CVVolumeResults:
# before we search online, look in our cache, since we might already have this info
cvc = ComicVineCacher()
cached_volume_result = cvc.get_volume_info(series_id)
cvc = ComicCacher()
cached_volume_result = cvc.get_volume_info(series_id, self.source_name)
if cached_volume_result is not None:
return cached_volume_result
@ -332,14 +335,14 @@ class ComicVineTalker:
volume_results = cast(CVVolumeResults, cv_response["results"])
if volume_results:
cvc.add_volume_info(volume_results)
cvc.add_volume_info(self.source_name, volume_results)
return volume_results
def fetch_issues_by_volume(self, series_id: int) -> list[CVIssuesResults]:
# before we search online, look in our cache, since we might already have this info
cvc = ComicVineCacher()
cached_volume_issues_result = cvc.get_volume_issues_info(series_id)
cvc = ComicCacher()
cached_volume_issues_result = cvc.get_volume_issues_info(series_id, self.source_name)
if cached_volume_issues_result:
return cached_volume_issues_result
@ -373,7 +376,7 @@ class ComicVineTalker:
self.repair_urls(volume_issues_result)
cvc.add_volume_issues_info(series_id, volume_issues_result)
cvc.add_volume_issues_info(self.source_name, series_id, volume_issues_result)
return volume_issues_result
@ -667,13 +670,13 @@ class ComicVineTalker:
def fetch_cached_issue_select_details(self, issue_id: int) -> SelectDetails:
# before we search online, look in our cache, since we might already have this info
cvc = ComicVineCacher()
return cvc.get_issue_select_details(issue_id)
cvc = ComicCacher()
return cvc.get_issue_select_details(issue_id, self.source_name)
def cache_issue_select_details(
self, issue_id: int, image_url: str, thumb_url: str, cover_date: str, page_url: str
) -> None:
cvc = ComicVineCacher()
cvc = ComicCacher()
cvc.add_issue_select_details(issue_id, image_url, thumb_url, cover_date, page_url)
def fetch_alternate_cover_urls(self, issue_id: int, issue_page_url: str) -> list[str]:
@ -717,14 +720,14 @@ class ComicVineTalker:
def fetch_cached_alternate_cover_urls(self, issue_id: int) -> list[str]:
# before we search online, look in our cache, since we might already have this info
cvc = ComicVineCacher()
url_list = cvc.get_alt_covers(issue_id)
cvc = ComicCacher()
url_list = cvc.get_alt_covers(self.source_name, issue_id)
return url_list
def cache_alternate_cover_urls(self, issue_id: int, url_list: list[str]) -> None:
cvc = ComicVineCacher()
cvc.add_alt_covers(issue_id, url_list)
cvc = ComicCacher()
cvc.add_alt_covers(self.source_name, issue_id, url_list)
def async_fetch_issue_cover_urls(self, issue_id: int) -> None:

View File

@ -24,7 +24,7 @@ from PyQt5 import QtCore, QtGui, QtWidgets, uic
from comicapi import utils
from comicapi.genericmetadata import md_test
from comictaggerlib.comicvinecacher import ComicVineCacher
from comictaggerlib.comiccacher import ComicCacher
from comictaggerlib.comicvinetalker import ComicVineTalker
from comictaggerlib.filerenamer import FileRenamer
from comictaggerlib.imagefetcher import ImageFetcher
@ -334,7 +334,7 @@ class SettingsWindow(QtWidgets.QDialog):
def clear_cache(self) -> None:
ImageFetcher().clear_cache()
ComicVineCacher().clear_cache()
ComicCacher().clear_cache()
QtWidgets.QMessageBox.information(self, self.name, "Cache has been cleared.")
def test_api_key(self) -> None: