Merge branch 'mizaki-additional_comic_fields' into develop

Timmy Welch 2023-06-03 10:37:44 -07:00
commit a912c7392b
7 changed files with 166 additions and 50 deletions

View File

@@ -239,6 +239,7 @@ def get_language_from_iso(iso: str | None) -> str | None:
def get_language_iso(string: str | None) -> str | None:
if string is None:
return None
# Return current string if all else fails
lang = string.casefold()
try:
@@ -248,6 +249,10 @@ def get_language_iso(string: str | None) -> str | None:
return lang
def get_country_from_iso(iso: str | None) -> str | None:
return countries[iso]
def get_publisher(publisher: str) -> tuple[str, str]:
imprint = ""

View File

@@ -2,7 +2,7 @@
#
# Copyright 2012-2014 ComicTagger Authors
#
# Licensed under the Apache License, Version 2.0 (the "License;
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
@@ -88,10 +88,14 @@ class ComicCacher:
+ "name TEXT,"
+ "publisher TEXT,"
+ "count_of_issues INT,"
+ "count_of_volumes INT,"
+ "volume TEXT,"
+ "start_year INT,"
+ "image_url TEXT,"
+ "aliases TEXT," # Newline separated
+ "description TEXT,"
+ "genres TEXT," # Newline separated. For filtering etc.
+ "format TEXT,"
+ "timestamp DATE DEFAULT (datetime('now','localtime')), "
+ "source_name TEXT NOT NULL,"
+ "PRIMARY KEY (id, source_name))"
@@ -117,6 +121,13 @@ class ComicCacher:
+ "credits TEXT," # JSON: "{"name": "Bob Shakespeare", "role": "Writer"}"
+ "teams TEXT," # Newline separated
+ "story_arcs TEXT," # Newline separated
+ "genres TEXT," # Newline separated
+ "tags TEXT," # Newline separated
+ "critical_rating FLOAT,"
+ "manga TEXT," # Yes/YesAndRightToLeft/No
+ "maturity_rating TEXT,"
+ "language TEXT,"
+ "country TEXT,"
+ "complete BOOL," # Is the data complete? Includes characters, locations, credits.
+ "PRIMARY KEY (id, source_name))"
)
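
Both tables keep list-valued fields in the newline-separated convention the comments note. A minimal sketch of that round trip (variable names are illustrative only):

genres = ["Fantasy", "Horror"]
stored = "\n".join(genres)              # what lands in the TEXT column
restored = stored.strip().splitlines()  # what the read path recovers
assert restored == genres               # an empty column likewise comes back as []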
@@ -147,9 +158,13 @@ class ComicCacher:
"name": record.name,
"publisher": record.publisher,
"count_of_issues": record.count_of_issues,
"count_of_volumes": record.count_of_volumes,
"volume": record.volume,
"start_year": record.start_year,
"image_url": record.image_url,
"description": record.description,
"genres": "\n".join(record.genres),
"format": record.format,
"timestamp": datetime.datetime.now(),
"aliases": "\n".join(record.aliases),
}
@@ -177,10 +192,14 @@ class ComicCacher:
name=record[5],
publisher=record[6],
count_of_issues=record[7],
start_year=record[8],
image_url=record[9],
aliases=record[10].strip().splitlines(),
description=record[11],
count_of_volumes=record[8],
volume=record[9],
start_year=record[10],
image_url=record[11],
aliases=record[12].strip().splitlines(),
description=record[13],
genres=record[14].strip().splitlines(),
format=record[15],
)
results.append(result)
@@ -201,9 +220,13 @@ class ComicCacher:
"name": series_record.name,
"publisher": series_record.publisher,
"count_of_issues": series_record.count_of_issues,
"count_of_volumes": series_record.count_of_volumes,
"volume": series_record.volume,
"start_year": series_record.start_year,
"image_url": series_record.image_url,
"description": series_record.description,
"genres": "\n".join(series_record.genres),
"format": series_record.format,
"timestamp": timestamp,
"aliases": "\n".join(series_record.aliases),
}
@@ -237,6 +260,13 @@ class ComicCacher:
"locations": "\n".join(issue.locations),
"teams": "\n".join(issue.teams),
"story_arcs": "\n".join(issue.story_arcs),
"genres": "\n".join(issue.genres),
"tags": "\n".join(issue.tags),
"critical_rating": issue.critical_rating,
"manga": issue.manga,
"maturity_rating": issue.maturity_rating,
"language": issue.language,
"country": issue.country,
"credits": json.dumps([dataclasses.asdict(x) for x in issue.credits]),
"complete": issue.complete,
}
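
credits is the one list field that goes through JSON rather than newlines. The round trip this dict relies on, sketched with the Credit shape implied by the schema comment (the real Credit dataclass may carry more fields):

import dataclasses
import json

@dataclasses.dataclass
class Credit:
    name: str
    role: str

credits = [Credit("Bob Shakespeare", "Writer")]
blob = json.dumps([dataclasses.asdict(c) for c in credits])  # write side
back = [Credit(**d) for d in json.loads(blob)]               # read side
assert back == credits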
@@ -269,10 +299,14 @@ class ComicCacher:
name=row[1],
publisher=row[2],
count_of_issues=row[3],
start_year=row[4],
image_url=row[5],
aliases=row[6].strip().splitlines(),
description=row[7],
count_of_volumes=row[4],
volume=row[5],
start_year=row[6],
image_url=row[7],
aliases=row[8].strip().splitlines(),
description=row[9],
genres=row[10].strip().splitlines(),
format=row[11],
)
return result
@@ -283,11 +317,15 @@ class ComicCacher:
id=series_id,
name="",
description="",
genres=[],
image_url="",
publisher="",
start_year=None,
aliases=[],
count_of_issues=None,
count_of_volumes=None,
volume=None,
format=None,
)
con = lite.connect(self.db_file)
with con:
@@ -302,40 +340,41 @@ class ComicCacher:
# fetch
results: list[ComicIssue] = []
cur.execute(
(
"SELECT source_name,id,name,issue_number,site_detail_url,cover_date,image_url,thumb_url,description,aliases,alt_image_urls,characters,locations,credits,teams,story_arcs,complete"
" FROM Issues WHERE series_id=? AND source_name=?"
),
[series_id, source_name],
)
cur.execute("SELECT * FROM Issues WHERE series_id=? AND source_name=?", [series_id, source_name])
rows = cur.fetchall()
# now process the results
for row in rows:
credits = []
try:
for credit in json.loads(row[13]):
for credit in json.loads(row[15]):
credits.append(Credit(**credit))
except Exception:
logger.exception("credits failed")
record = ComicIssue(
id=row[1],
id=row[0],
name=row[2],
issue_number=row[3],
site_detail_url=row[4],
cover_date=row[5],
image_url=row[6],
site_detail_url=row[7],
cover_date=row[6],
image_url=row[4],
description=row[8],
series=series,
aliases=row[9].strip().splitlines(),
alt_image_urls=row[10].strip().splitlines(),
characters=row[11].strip().splitlines(),
locations=row[12].strip().splitlines(),
aliases=row[11].strip().splitlines(),
alt_image_urls=row[12].strip().splitlines(),
characters=row[13].strip().splitlines(),
locations=row[14].strip().splitlines(),
credits=credits,
teams=row[14].strip().splitlines(),
story_arcs=row[15].strip().splitlines(),
complete=bool(row[16]),
teams=row[16].strip().splitlines(),
story_arcs=row[17].strip().splitlines(),
genres=row[18].strip().splitlines(),
tags=row[19].strip().splitlines(),
critical_rating=row[20],
manga=row[21],
maturity_rating=row[22],
language=row[23],
country=row[24],
complete=bool(row[25]),
)
results.append(record)
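
A side effect of the switch to SELECT * is that every row[n] index above is coupled to the CREATE TABLE column order. A hedged alternative, not what this commit does, would be sqlite3.Row, which keys columns by name:

import sqlite3

con = sqlite3.connect("comic_cache.db")   # hypothetical path
con.row_factory = sqlite3.Row             # rows behave like read-only mappings
cur = con.cursor()
cur.execute("SELECT * FROM Issues WHERE series_id=? AND source_name=?", ["1", "comicvine"])
for row in cur.fetchall():
    name = row["name"]                    # survives column reordering
    genres = (row["genres"] or "").strip().splitlines()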
@@ -353,54 +392,59 @@ class ComicCacher:
a_week_ago = datetime.datetime.today() - datetime.timedelta(days=7)
cur.execute("DELETE FROM Issues WHERE timestamp < ?", [str(a_week_ago)])
cur.execute(
(
"SELECT source_name,id,name,issue_number,site_detail_url,cover_date,image_url,description,aliases,series_id,alt_image_urls,characters,locations,credits,teams,story_arcs,complete"
" FROM Issues WHERE id=? AND source_name=?"
),
[issue_id, source_name],
)
cur.execute("SELECT * FROM Issues WHERE id=? AND source_name=?", [issue_id, source_name])
row = cur.fetchone()
record = None
if row:
# get_series_info should only fail if someone is doing something weird
series = self.get_series_info(row[10], source_name, False) or ComicSeries(
id=row[10],
series = self.get_series_info(row[1], source_name, False) or ComicSeries(
id=row[1],
name="",
description="",
genres=[],
image_url="",
publisher="",
start_year=None,
aliases=[],
count_of_issues=None,
count_of_volumes=None,
volume=None,
format=None,
)
# now process the results
credits = []
try:
for credit in json.loads(row[13]):
for credit in json.loads(row[15]):
credits.append(Credit(**credit))
except Exception:
logger.exception("credits failed")
record = ComicIssue(
id=row[1],
id=row[0],
name=row[2],
issue_number=row[3],
site_detail_url=row[4],
cover_date=row[5],
image_url=row[6],
description=row[7],
site_detail_url=row[7],
cover_date=row[6],
image_url=row[4],
description=row[8],
series=series,
aliases=row[8].strip().splitlines(),
alt_image_urls=row[10].strip().splitlines(),
characters=row[11].strip().splitlines(),
locations=row[12].strip().splitlines(),
aliases=row[11].strip().splitlines(),
alt_image_urls=row[12].strip().splitlines(),
characters=row[13].strip().splitlines(),
locations=row[14].strip().splitlines(),
credits=credits,
teams=row[14].strip().splitlines(),
story_arcs=row[15].strip().splitlines(),
complete=bool(row[16]),
teams=row[16].strip().splitlines(),
story_arcs=row[17].strip().splitlines(),
genres=row[18].strip().splitlines(),
tags=row[19].strip().splitlines(),
critical_rating=row[20],
manga=row[21],
maturity_rating=row[22],
language=row[23],
country=row[24],
complete=bool(row[25]),
)
return record

View File

@@ -14,12 +14,16 @@ class Credit:
class ComicSeries:
aliases: list[str]
count_of_issues: int | None
count_of_volumes: int | None
volume: str | None
description: str
id: str
image_url: str
name: str
publisher: str
start_year: int | None
genres: list[str]
format: str | None
def copy(self) -> ComicSeries:
return copy.deepcopy(self)
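
Since aliases and genres are lists, the deepcopy in copy() is what keeps copies independent; a sketch assuming exactly the field list above:

a = ComicSeries(aliases=[], count_of_issues=None, count_of_volumes=None, volume=None,
                description="", id="1", image_url="", name="Series", publisher="",
                start_year=None, genres=["Fantasy"], format=None)
b = a.copy()
b.genres.append("Horror")
assert a.genres == ["Fantasy"]   # the copy's lists are independent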
@@ -33,7 +37,14 @@ class ComicIssue:
id: str
image_url: str
issue_number: str
critical_rating: float
maturity_rating: str
manga: str
genres: list[str]
tags: list[str]
name: str
language: str
country: str
site_detail_url: str
series: ComicSeries
alt_image_urls: list[str]

View File

@@ -45,6 +45,15 @@ def map_comic_issue_to_metadata(
metadata.series = utils.xlate(issue_results.series.name)
metadata.issue = IssueString(issue_results.issue_number).as_string()
# Rely on comic talker to validate this number
metadata.issue_count = utils.xlate_int(issue_results.series.count_of_issues)
if issue_results.series.format:
metadata.format = issue_results.series.format
metadata.volume = utils.xlate_int(issue_results.series.volume)
metadata.volume_count = utils.xlate_int(issue_results.series.count_of_volumes)
if issue_results.name:
metadata.title = utils.xlate(issue_results.name)
if issue_results.image_url:
@@ -81,6 +90,23 @@ def map_comic_issue_to_metadata(
metadata.locations = ", ".join(issue_results.locations)
if issue_results.story_arcs:
metadata.story_arc = ", ".join(issue_results.story_arcs)
if issue_results.genres:
metadata.genre = ", ".join(issue_results.genres)
if issue_results.tags:
metadata.tags = set(issue_results.tags)
if issue_results.manga:
metadata.manga = issue_results.manga
if issue_results.critical_rating:
metadata.critical_rating = utils.xlate_float(issue_results.critical_rating)
if issue_results.language:
metadata.language = issue_results.language
if issue_results.country:
metadata.country = issue_results.country
return metadata
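
A hypothetical call site, to illustrate the conversions added above (the function's full signature is not shown in this hunk):

md = map_comic_issue_to_metadata(issue_results, source)
# genres flatten to one comma-separated string:
#   ["Fantasy", "Horror"]    -> md.genre == "Fantasy, Horror"
# tags become a set, so duplicates collapse:
#   ["betrayal", "betrayal"] -> md.tags == {"betrayal"}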

View File

@@ -509,12 +509,16 @@ class ComicVineTalker(ComicTalker):
ComicSeries(
aliases=aliases.splitlines(),
count_of_issues=record.get("count_of_issues", 0),
count_of_volumes=None,
volume=None,
description=record.get("description", ""),
id=str(record["id"]),
image_url=image_url,
name=record["name"],
publisher=pub_name,
start_year=start_year,
genres=[],
format=None,
)
)
@@ -576,6 +580,13 @@ class ComicVineTalker(ComicTalker):
locations=location_list,
teams=teams_list,
story_arcs=story_list,
critical_rating=0,
maturity_rating="",
manga="",
language="",
country="",
genres=[],
tags=[],
credits=persons_list,
complete=complete,
)
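
The Comic Vine talker pins neutral defaults here, since its responses evidently do not supply these fields; a source that does carry them would fill the same slots, e.g. with hypothetical values such as:

richer_fields = dict(
    critical_rating=8.2,          # FLOAT column in the cache
    maturity_rating="Teen",
    manga="YesAndRightToLeft",    # Yes / YesAndRightToLeft / No, per the cache comment
    language="ja",
    country="Japan",
    genres=["Action", "Fantasy"],
    tags=["shonen"],
)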

View File

@@ -7,6 +7,8 @@ from comicapi import utils
search_results = [
comictalker.resulttypes.ComicSeries(
count_of_issues=1,
count_of_volumes=1,
volume="1",
description="this is a description",
id="1",
image_url="https://test.org/image/1",
@@ -14,9 +16,13 @@ search_results = [
publisher="test",
start_year=0,
aliases=[],
genres=[],
format=None,
),
comictalker.resulttypes.ComicSeries(
count_of_issues=1,
count_of_volumes=1,
volume="1",
description="this is a description",
id="2",
image_url="https://test.org/image/2",
@@ -24,6 +30,8 @@ search_results = [
publisher="test",
start_year=0,
aliases=[],
genres=[],
format=None,
),
]

View File

@@ -172,10 +172,14 @@ comic_issue_result = ComicIssue(
name=cv_issue_result["results"]["volume"]["name"],
aliases=[],
count_of_issues=cv_volume_result["results"]["count_of_issues"],
count_of_volumes=None,
volume=None,
description=cv_volume_result["results"]["description"],
image_url=cv_volume_result["results"]["image"]["super_url"],
publisher=cv_volume_result["results"]["publisher"]["name"],
start_year=int(cv_volume_result["results"]["start_year"]),
genres=[],
format=None,
),
characters=[],
alt_image_urls=[],
@@ -183,6 +187,13 @@ comic_issue_result = ComicIssue(
credits=[],
locations=[],
story_arcs=[],
critical_rating=0,
maturity_rating="",
manga="",
language="",
country="",
genres=[],
tags=[],
teams=[],
)
date = utils.parse_date_str(cv_issue_result["results"]["cover_date"])