Code cleanup

Remove no longer used google scripts
Remove convenience files from comicataggerlib and import comicapi directly
Add type-hints to facilitate auto-complete tools
Make PyQt5 code more compatible with PyQt6

Implement automatic tooling
isort and black for code formatting
Line length has been set to 120
flake8 for code standards with exceptions:
E203 - Whitespace before ':'  - format compatibility with black
E501 - Line too long          - flake8 line limit cannot be set
E722 - Do not use bare except - fixing bare except statements is a
                                lot of overhead and there are already
                                many in the codebase

These changes, along with some manual fixes, create much more readable code.
See examples below:

diff --git a/comicapi/comet.py b/comicapi/comet.py
index d1741c5..52dc195 100644
--- a/comicapi/comet.py
+++ b/comicapi/comet.py
@@ -166,7 +166,2 @@ class CoMet:

-            if credit['role'].lower() in set(self.editor_synonyms):
-                ET.SubElement(
-                    root,
-                    'editor').text = "{0}".format(
-                    credit['person'])

@@ -174,2 +169,4 @@ class CoMet:
         self.indent(root)
+            if credit["role"].lower() in set(self.editor_synonyms):
+                ET.SubElement(root, "editor").text = str(credit["person"])

diff --git a/comictaggerlib/autotagmatchwindow.py b/comictaggerlib/autotagmatchwindow.py
index 4338176..9219f01 100644
--- a/comictaggerlib/autotagmatchwindow.py
+++ b/comictaggerlib/autotagmatchwindow.py
@@ -63,4 +63,3 @@ class AutoTagMatchWindow(QtWidgets.QDialog):
             self.skipButton, QtWidgets.QDialogButtonBox.ActionRole)
-        self.buttonBox.button(QtWidgets.QDialogButtonBox.Ok).setText(
-            "Accept and Write Tags")
+        self.buttonBox.button(QtWidgets.QDialogButtonBox.StandardButton.Ok).setText("Accept and Write Tags")

diff --git a/comictaggerlib/cli.py b/comictaggerlib/cli.py
index 688907d..dbd0c2e 100644
--- a/comictaggerlib/cli.py
+++ b/comictaggerlib/cli.py
@@ -293,7 +293,3 @@ def process_file_cli(filename, opts, settings, match_results):
                 if opts.raw:
-                    print((
-                        "{0}".format(
-                            str(
-                                ca.readRawCIX(),
-                                errors='ignore'))))
+                    print(ca.read_raw_cix())
                 else:
This commit is contained in:
Timmy Welch 2022-04-01 16:50:46 -07:00
parent 6f4de04a00
commit e96d1d5c97
9 changed files with 1006 additions and 1517 deletions

View File

@ -1 +1 @@
__author__ = 'dromanin'
__author__ = "dromanin"

View File

@ -15,175 +15,121 @@
# limitations under the License.
import xml.etree.ElementTree as ET
#from datetime import datetime
#from pprint import pprint
#import zipfile
from .genericmetadata import GenericMetadata
from . import utils
from comicapi import utils
from comicapi.genericmetadata import GenericMetadata
class CoMet:
writer_synonyms = ['writer', 'plotter', 'scripter']
penciller_synonyms = ['artist', 'penciller', 'penciler', 'breakdowns']
inker_synonyms = ['inker', 'artist', 'finishes']
colorist_synonyms = ['colorist', 'colourist', 'colorer', 'colourer']
letterer_synonyms = ['letterer']
cover_synonyms = ['cover', 'covers', 'coverartist', 'cover artist']
editor_synonyms = ['editor']
writer_synonyms = ["writer", "plotter", "scripter"]
penciller_synonyms = ["artist", "penciller", "penciler", "breakdowns"]
inker_synonyms = ["inker", "artist", "finishes"]
colorist_synonyms = ["colorist", "colourist", "colorer", "colourer"]
letterer_synonyms = ["letterer"]
cover_synonyms = ["cover", "covers", "coverartist", "cover artist"]
editor_synonyms = ["editor"]
def metadataFromString(self, string):
def metadata_from_string(self, string):
tree = ET.ElementTree(ET.fromstring(string))
return self.convertXMLToMetadata(tree)
return self.convert_xml_to_metadata(tree)
def stringFromMetadata(self, metadata):
def string_from_metadata(self, metadata):
header = '<?xml version="1.0" encoding="UTF-8"?>\n'
tree = self.convertMetadataToXML(self, metadata)
tree = self.convert_metadata_to_xml(metadata)
return header + ET.tostring(tree.getroot())
def indent(self, elem, level=0):
# for making the XML output readable
i = "\n" + level * " "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
self.indent(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def convertMetadataToXML(self, filename, metadata):
def convert_metadata_to_xml(self, metadata):
# shorthand for the metadata
md = metadata
# build a tree structure
root = ET.Element("comet")
root.attrib['xmlns:comet'] = "http://www.denvog.com/comet/"
root.attrib['xmlns:xsi'] = "http://www.w3.org/2001/XMLSchema-instance"
root.attrib[
'xsi:schemaLocation'] = "http://www.denvog.com http://www.denvog.com/comet/comet.xsd"
root.attrib["xmlns:comet"] = "http://www.denvog.com/comet/"
root.attrib["xmlns:xsi"] = "http://www.w3.org/2001/XMLSchema-instance"
root.attrib["xsi:schemaLocation"] = "http://www.denvog.com http://www.denvog.com/comet/comet.xsd"
# helper func
def assign(comet_entry, md_entry):
if md_entry is not None:
ET.SubElement(root, comet_entry).text = "{0}".format(md_entry)
ET.SubElement(root, comet_entry).text = str(md_entry)
# title is manditory
if md.title is None:
md.title = ""
assign('title', md.title)
assign('series', md.series)
assign('issue', md.issue) # must be int??
assign('volume', md.volume)
assign('description', md.comments)
assign('publisher', md.publisher)
assign('pages', md.pageCount)
assign('format', md.format)
assign('language', md.language)
assign('rating', md.maturityRating)
assign('price', md.price)
assign('isVersionOf', md.isVersionOf)
assign('rights', md.rights)
assign('identifier', md.identifier)
assign('lastMark', md.lastMark)
assign('genre', md.genre) # TODO repeatable
assign("title", md.title)
assign("series", md.series)
assign("issue", md.issue) # must be int??
assign("volume", md.volume)
assign("description", md.comments)
assign("publisher", md.publisher)
assign("pages", md.page_count)
assign("format", md.format)
assign("language", md.language)
assign("rating", md.maturity_rating)
assign("price", md.price)
assign("isVersionOf", md.is_version_of)
assign("rights", md.rights)
assign("identifier", md.identifier)
assign("lastMark", md.last_mark)
assign("genre", md.genre) # TODO repeatable
if md.characters is not None:
char_list = [c.strip() for c in md.characters.split(',')]
char_list = [c.strip() for c in md.characters.split(",")]
for c in char_list:
assign('character', c)
assign("character", c)
if md.manga is not None and md.manga == "YesAndRightToLeft":
assign('readingDirection', "rtl")
assign("readingDirection", "rtl")
date_str = ""
if md.year is not None:
date_str = str(md.year).zfill(4)
if md.month is not None:
date_str += "-" + str(md.month).zfill(2)
assign('date', date_str)
assign("date", date_str)
assign('coverImage', md.coverImage)
# need to specially process the credits, since they are structured
# differently than CIX
credit_writer_list = list()
credit_penciller_list = list()
credit_inker_list = list()
credit_colorist_list = list()
credit_letterer_list = list()
credit_cover_list = list()
credit_editor_list = list()
assign("coverImage", md.cover_image)
# loop thru credits, and build a list for each role that CoMet supports
for credit in metadata.credits:
if credit['role'].lower() in set(self.writer_synonyms):
ET.SubElement(
root,
'writer').text = "{0}".format(
credit['person'])
if credit["role"].lower() in set(self.writer_synonyms):
ET.SubElement(root, "writer").text = str(credit["person"])
if credit['role'].lower() in set(self.penciller_synonyms):
ET.SubElement(
root,
'penciller').text = "{0}".format(
credit['person'])
if credit["role"].lower() in set(self.penciller_synonyms):
ET.SubElement(root, "penciller").text = str(credit["person"])
if credit['role'].lower() in set(self.inker_synonyms):
ET.SubElement(
root,
'inker').text = "{0}".format(
credit['person'])
if credit["role"].lower() in set(self.inker_synonyms):
ET.SubElement(root, "inker").text = str(credit["person"])
if credit['role'].lower() in set(self.colorist_synonyms):
ET.SubElement(
root,
'colorist').text = "{0}".format(
credit['person'])
if credit["role"].lower() in set(self.colorist_synonyms):
ET.SubElement(root, "colorist").text = str(credit["person"])
if credit['role'].lower() in set(self.letterer_synonyms):
ET.SubElement(
root,
'letterer').text = "{0}".format(
credit['person'])
if credit["role"].lower() in set(self.letterer_synonyms):
ET.SubElement(root, "letterer").text = str(credit["person"])
if credit['role'].lower() in set(self.cover_synonyms):
ET.SubElement(
root,
'coverDesigner').text = "{0}".format(
credit['person'])
if credit["role"].lower() in set(self.cover_synonyms):
ET.SubElement(root, "coverDesigner").text = str(credit["person"])
if credit['role'].lower() in set(self.editor_synonyms):
ET.SubElement(
root,
'editor').text = "{0}".format(
credit['person'])
if credit["role"].lower() in set(self.editor_synonyms):
ET.SubElement(root, "editor").text = str(credit["person"])
# self pretty-print
self.indent(root)
utils.indent(root)
# wrap it in an ElementTree instance, and save as XML
tree = ET.ElementTree(root)
return tree
def convertXMLToMetadata(self, tree):
def convert_xml_to_metadata(self, tree):
root = tree.getroot()
if root.tag != 'comet':
raise 1
return None
if root.tag != "comet":
raise "1"
metadata = GenericMetadata()
md = metadata
@ -193,84 +139,85 @@ class CoMet:
node = root.find(tag)
if node is not None:
return node.text
else:
return None
md.series = xlate('series')
md.title = xlate('title')
md.issue = xlate('issue')
md.volume = xlate('volume')
md.comments = xlate('description')
md.publisher = xlate('publisher')
md.language = xlate('language')
md.format = xlate('format')
md.pageCount = xlate('pages')
md.maturityRating = xlate('rating')
md.price = xlate('price')
md.isVersionOf = xlate('isVersionOf')
md.rights = xlate('rights')
md.identifier = xlate('identifier')
md.lastMark = xlate('lastMark')
md.genre = xlate('genre') # TODO - repeatable field
md.series = xlate("series")
md.title = xlate("title")
md.issue = xlate("issue")
md.volume = xlate("volume")
md.comments = xlate("description")
md.publisher = xlate("publisher")
md.language = xlate("language")
md.format = xlate("format")
md.page_count = xlate("pages")
md.maturity_rating = xlate("rating")
md.price = xlate("price")
md.is_version_of = xlate("isVersionOf")
md.rights = xlate("rights")
md.identifier = xlate("identifier")
md.last_mark = xlate("lastMark")
md.genre = xlate("genre") # TODO - repeatable field
date = xlate('date')
date = xlate("date")
if date is not None:
parts = date.split('-')
parts = date.split("-")
if len(parts) > 0:
md.year = parts[0]
if len(parts) > 1:
md.month = parts[1]
md.coverImage = xlate('coverImage')
md.cover_image = xlate("coverImage")
readingDirection = xlate('readingDirection')
if readingDirection is not None and readingDirection == "rtl":
reading_direction = xlate("readingDirection")
if reading_direction is not None and reading_direction == "rtl":
md.manga = "YesAndRightToLeft"
# loop for character tags
char_list = []
for n in root:
if n.tag == 'character':
if n.tag == "character":
char_list.append(n.text.strip())
md.characters = utils.listToString(char_list)
md.characters = utils.list_to_string(char_list)
# Now extract the credit info
for n in root:
if (n.tag == 'writer' or
n.tag == 'penciller' or
n.tag == 'inker' or
n.tag == 'colorist' or
n.tag == 'letterer' or
n.tag == 'editor'
if any(
[
n.tag == "writer",
n.tag == "penciller",
n.tag == "inker",
n.tag == "colorist",
n.tag == "letterer",
n.tag == "editor",
]
):
metadata.addCredit(n.text.strip(), n.tag.title())
metadata.add_credit(n.text.strip(), n.tag.title())
if n.tag == 'coverDesigner':
metadata.addCredit(n.text.strip(), "Cover")
if n.tag == "coverDesigner":
metadata.add_credit(n.text.strip(), "Cover")
metadata.isEmpty = False
metadata.is_empty = False
return metadata
# verify that the string actually contains CoMet data in XML format
def validateString(self, string):
def validate_string(self, string):
try:
tree = ET.ElementTree(ET.fromstring(string))
root = tree.getroot()
if root.tag != 'comet':
if root.tag != "comet":
raise Exception
except:
return False
return True
def writeToExternalFile(self, filename, metadata):
def write_to_external_file(self, filename, metadata):
tree = self.convertMetadataToXML(self, metadata)
# ET.dump(tree)
tree.write(filename, encoding='utf-8')
tree = self.convert_metadata_to_xml(metadata)
tree.write(filename, encoding="utf-8")
def readFromExternalFile(self, filename):
def read_from_external_file(self, filename):
tree = ET.parse(filename)
return self.convertXMLToMetadata(tree)
return self.convert_xml_to_metadata(tree)

File diff suppressed because it is too large Load Diff

View File

@ -15,42 +15,39 @@
# limitations under the License.
import json
from collections import defaultdict
from datetime import datetime
#import zipfile
from .genericmetadata import GenericMetadata
from . import utils
#import ctversion
from comicapi import utils
from comicapi.genericmetadata import GenericMetadata
class ComicBookInfo:
def metadataFromString(self, string):
class Default(dict):
def __missing__(self, key):
return None
cbi_container = json.loads(str(string, 'utf-8'))
def metadata_from_string(self, string):
cbi_container = json.loads(str(string, "utf-8"))
metadata = GenericMetadata()
cbi = Default(cbi_container['ComicBookInfo/1.0'])
cbi = defaultdict(lambda: None, cbi_container["ComicBookInfo/1.0"])
metadata.series = utils.xlate(cbi['series'])
metadata.title = utils.xlate(cbi['title'])
metadata.issue = utils.xlate(cbi['issue'])
metadata.publisher = utils.xlate(cbi['publisher'])
metadata.month = utils.xlate(cbi['publicationMonth'], True)
metadata.year = utils.xlate(cbi['publicationYear'], True)
metadata.issueCount = utils.xlate(cbi['numberOfIssues'], True)
metadata.comments = utils.xlate(cbi['comments'])
metadata.genre = utils.xlate(cbi['genre'])
metadata.volume = utils.xlate(cbi['volume'], True)
metadata.volumeCount = utils.xlate(cbi['numberOfVolumes'], True)
metadata.language = utils.xlate(cbi['language'])
metadata.country = utils.xlate(cbi['country'])
metadata.criticalRating = utils.xlate(cbi['rating'])
metadata.series = utils.xlate(cbi["series"])
metadata.title = utils.xlate(cbi["title"])
metadata.issue = utils.xlate(cbi["issue"])
metadata.publisher = utils.xlate(cbi["publisher"])
metadata.month = utils.xlate(cbi["publicationMonth"], True)
metadata.year = utils.xlate(cbi["publicationYear"], True)
metadata.issue_count = utils.xlate(cbi["numberOfIssues"], True)
metadata.comments = utils.xlate(cbi["comments"])
metadata.genre = utils.xlate(cbi["genre"])
metadata.volume = utils.xlate(cbi["volume"], True)
metadata.volume_count = utils.xlate(cbi["numberOfVolumes"], True)
metadata.language = utils.xlate(cbi["language"])
metadata.country = utils.xlate(cbi["country"])
metadata.critical_rating = utils.xlate(cbi["rating"])
metadata.credits = cbi['credits']
metadata.tags = cbi['tags']
metadata.credits = cbi["credits"]
metadata.tags = cbi["tags"]
# make sure credits and tags are at least empty lists and not None
if metadata.credits is None:
@ -58,26 +55,20 @@ class ComicBookInfo:
if metadata.tags is None:
metadata.tags = []
# need to massage the language string to be ISO
# need the language string to be ISO
if metadata.language is not None:
# reverse look-up
pattern = metadata.language
metadata.language = None
for key in utils.getLanguageDict():
if utils.getLanguageDict()[key] == pattern.encode('utf-8'):
metadata.language = key
break
metadata.language = utils.get_language(metadata.language)
metadata.isEmpty = False
metadata.is_empty = False
return metadata
def stringFromMetadata(self, metadata):
def string_from_metadata(self, metadata):
cbi_container = self.createJSONDictionary(metadata)
cbi_container = self.create_json_dictionary(metadata)
return json.dumps(cbi_container)
def validateString(self, string):
def validate_string(self, string):
"""Verify that the string actually contains CBI data in JSON format"""
try:
@ -85,44 +76,45 @@ class ComicBookInfo:
except:
return False
return ('ComicBookInfo/1.0' in cbi_container)
return "ComicBookInfo/1.0" in cbi_container
def createJSONDictionary(self, metadata):
def create_json_dictionary(self, metadata):
"""Create the dictionary that we will convert to JSON text"""
cbi = dict()
cbi_container = {'appID': 'ComicTagger/' + '1.0.0', # ctversion.version,
'lastModified': str(datetime.now()),
'ComicBookInfo/1.0': cbi}
cbi = {}
cbi_container = {
"appID": "ComicTagger/" + "1.0.0",
"lastModified": str(datetime.now()),
"ComicBookInfo/1.0": cbi,
} # TODO: ctversion.version,
# helper func
def assign(cbi_entry, md_entry):
if md_entry is not None or isinstance(md_entry, str) and md_entry != "":
cbi[cbi_entry] = md_entry
assign('series', utils.xlate(metadata.series))
assign('title', utils.xlate(metadata.title))
assign('issue', utils.xlate(metadata.issue))
assign('publisher', utils.xlate(metadata.publisher))
assign('publicationMonth', utils.xlate(metadata.month, True))
assign('publicationYear', utils.xlate(metadata.year, True))
assign('numberOfIssues', utils.xlate(metadata.issueCount, True))
assign('comments', utils.xlate(metadata.comments))
assign('genre', utils.xlate(metadata.genre))
assign('volume', utils.xlate(metadata.volume, True))
assign('numberOfVolumes', utils.xlate(metadata.volumeCount, True))
assign('language', utils.xlate(utils.getLanguageFromISO(metadata.language)))
assign('country', utils.xlate(metadata.country))
assign('rating', utils.xlate(metadata.criticalRating))
assign('credits', metadata.credits)
assign('tags', metadata.tags)
assign("series", utils.xlate(metadata.series))
assign("title", utils.xlate(metadata.title))
assign("issue", utils.xlate(metadata.issue))
assign("publisher", utils.xlate(metadata.publisher))
assign("publicationMonth", utils.xlate(metadata.month, True))
assign("publicationYear", utils.xlate(metadata.year, True))
assign("numberOfIssues", utils.xlate(metadata.issue_count, True))
assign("comments", utils.xlate(metadata.comments))
assign("genre", utils.xlate(metadata.genre))
assign("volume", utils.xlate(metadata.volume, True))
assign("numberOfVolumes", utils.xlate(metadata.volume_count, True))
assign("language", utils.xlate(utils.get_language_from_iso(metadata.language)))
assign("country", utils.xlate(metadata.country))
assign("rating", utils.xlate(metadata.critical_rating))
assign("credits", metadata.credits)
assign("tags", metadata.tags)
return cbi_container
def writeToExternalFile(self, filename, metadata):
def write_to_external_file(self, filename, metadata):
cbi_container = self.createJSONDictionary(metadata)
cbi_container = self.create_json_dictionary(metadata)
f = open(filename, 'w')
with open(filename, "w") as f:
f.write(json.dumps(cbi_container, indent=4))
f.close

View File

@ -15,26 +15,23 @@
# limitations under the License.
import xml.etree.ElementTree as ET
#from datetime import datetime
#from pprint import pprint
#import zipfile
from .genericmetadata import GenericMetadata
from .issuestring import IssueString
from . import utils
from comicapi import utils
from comicapi.genericmetadata import GenericMetadata
from comicapi.issuestring import IssueString
class ComicInfoXml:
writer_synonyms = ['writer', 'plotter', 'scripter']
penciller_synonyms = ['artist', 'penciller', 'penciler', 'breakdowns']
inker_synonyms = ['inker', 'artist', 'finishes']
colorist_synonyms = ['colorist', 'colourist', 'colorer', 'colourer']
letterer_synonyms = ['letterer']
cover_synonyms = ['cover', 'covers', 'coverartist', 'cover artist']
editor_synonyms = ['editor']
writer_synonyms = ["writer", "plotter", "scripter"]
penciller_synonyms = ["artist", "penciller", "penciler", "breakdowns"]
inker_synonyms = ["inker", "artist", "finishes"]
colorist_synonyms = ["colorist", "colourist", "colorer", "colourer"]
letterer_synonyms = ["letterer"]
cover_synonyms = ["cover", "covers", "coverartist", "cover artist"]
editor_synonyms = ["editor"]
def getParseableCredits(self):
def get_parseable_credits(self):
parsable_credits = []
parsable_credits.extend(self.writer_synonyms)
parsable_credits.extend(self.penciller_synonyms)
@ -45,33 +42,17 @@ class ComicInfoXml:
parsable_credits.extend(self.editor_synonyms)
return parsable_credits
def metadataFromString(self, string):
def metadata_from_string(self, string):
tree = ET.ElementTree(ET.fromstring(string))
return self.convertXMLToMetadata(tree)
return self.convert_xml_to_metadata(tree)
def stringFromMetadata(self, metadata, xml=None):
tree = self.convertMetadataToXML(self, metadata, xml)
def string_from_metadata(self, metadata, xml=None):
tree = self.convert_metadata_to_xml(self, metadata, xml)
tree_str = ET.tostring(tree.getroot(), encoding="utf-8", xml_declaration=True).decode()
return tree_str
def indent(self, elem, level=0):
# for making the XML output readable
i = "\n" + level * " "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
self.indent(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def convertMetadataToXML(self, filename, metadata, xml=None):
def convert_metadata_to_xml(self, filename, metadata, xml=None):
# shorthand for the metadata
md = metadata
@ -81,125 +62,123 @@ class ComicInfoXml:
else:
# build a tree structure
root = ET.Element("ComicInfo")
root.attrib['xmlns:xsi'] = "http://www.w3.org/2001/XMLSchema-instance"
root.attrib['xmlns:xsd'] = "http://www.w3.org/2001/XMLSchema"
root.attrib["xmlns:xsi"] = "http://www.w3.org/2001/XMLSchema-instance"
root.attrib["xmlns:xsd"] = "http://www.w3.org/2001/XMLSchema"
# helper func
def assign(cix_entry, md_entry):
if md_entry is not None:
et_entry = root.find(cix_entry)
if et_entry is not None:
et_entry.text = "{0}".format(md_entry)
et_entry.text = str(md_entry)
else:
ET.SubElement(root, cix_entry).text = "{0}".format(md_entry)
ET.SubElement(root, cix_entry).text = str(md_entry)
assign('Title', md.title)
assign('Series', md.series)
assign('Number', md.issue)
assign('Count', md.issueCount)
assign('Volume', md.volume)
assign('AlternateSeries', md.alternateSeries)
assign('AlternateNumber', md.alternateNumber)
assign('StoryArc', md.storyArc)
assign('SeriesGroup', md.seriesGroup)
assign('AlternateCount', md.alternateCount)
assign('Summary', md.comments)
assign('Notes', md.notes)
assign('Year', md.year)
assign('Month', md.month)
assign('Day', md.day)
assign("Title", md.title)
assign("Series", md.series)
assign("Number", md.issue)
assign("Count", md.issue_count)
assign("Volume", md.volume)
assign("AlternateSeries", md.alternate_series)
assign("AlternateNumber", md.alternate_number)
assign("StoryArc", md.story_arc)
assign("SeriesGroup", md.series_group)
assign("AlternateCount", md.alternate_count)
assign("Summary", md.comments)
assign("Notes", md.notes)
assign("Year", md.year)
assign("Month", md.month)
assign("Day", md.day)
# need to specially process the credits, since they are structured
# differently than CIX
credit_writer_list = list()
credit_penciller_list = list()
credit_inker_list = list()
credit_colorist_list = list()
credit_letterer_list = list()
credit_cover_list = list()
credit_editor_list = list()
credit_writer_list = []
credit_penciller_list = []
credit_inker_list = []
credit_colorist_list = []
credit_letterer_list = []
credit_cover_list = []
credit_editor_list = []
# first, loop thru credits, and build a list for each role that CIX
# supports
for credit in metadata.credits:
if credit['role'].lower() in set(self.writer_synonyms):
credit_writer_list.append(credit['person'].replace(",", ""))
if credit["role"].lower() in set(self.writer_synonyms):
credit_writer_list.append(credit["person"].replace(",", ""))
if credit['role'].lower() in set(self.penciller_synonyms):
credit_penciller_list.append(credit['person'].replace(",", ""))
if credit["role"].lower() in set(self.penciller_synonyms):
credit_penciller_list.append(credit["person"].replace(",", ""))
if credit['role'].lower() in set(self.inker_synonyms):
credit_inker_list.append(credit['person'].replace(",", ""))
if credit["role"].lower() in set(self.inker_synonyms):
credit_inker_list.append(credit["person"].replace(",", ""))
if credit['role'].lower() in set(self.colorist_synonyms):
credit_colorist_list.append(credit['person'].replace(",", ""))
if credit["role"].lower() in set(self.colorist_synonyms):
credit_colorist_list.append(credit["person"].replace(",", ""))
if credit['role'].lower() in set(self.letterer_synonyms):
credit_letterer_list.append(credit['person'].replace(",", ""))
if credit["role"].lower() in set(self.letterer_synonyms):
credit_letterer_list.append(credit["person"].replace(",", ""))
if credit['role'].lower() in set(self.cover_synonyms):
credit_cover_list.append(credit['person'].replace(",", ""))
if credit["role"].lower() in set(self.cover_synonyms):
credit_cover_list.append(credit["person"].replace(",", ""))
if credit['role'].lower() in set(self.editor_synonyms):
credit_editor_list.append(credit['person'].replace(",", ""))
if credit["role"].lower() in set(self.editor_synonyms):
credit_editor_list.append(credit["person"].replace(",", ""))
# second, convert each list to string, and add to XML struct
assign('Writer', utils.listToString(credit_writer_list))
assign("Writer", utils.list_to_string(credit_writer_list))
assign('Penciller', utils.listToString(credit_penciller_list))
assign("Penciller", utils.list_to_string(credit_penciller_list))
assign('Inker', utils.listToString(credit_inker_list))
assign("Inker", utils.list_to_string(credit_inker_list))
assign('Colorist', utils.listToString(credit_colorist_list))
assign("Colorist", utils.list_to_string(credit_colorist_list))
assign('Letterer', utils.listToString(credit_letterer_list))
assign("Letterer", utils.list_to_string(credit_letterer_list))
assign('CoverArtist', utils.listToString(credit_cover_list))
assign("CoverArtist", utils.list_to_string(credit_cover_list))
assign('Editor', utils.listToString(credit_editor_list))
assign("Editor", utils.list_to_string(credit_editor_list))
assign('Publisher', md.publisher)
assign('Imprint', md.imprint)
assign('Genre', md.genre)
assign('Web', md.webLink)
assign('PageCount', md.pageCount)
assign('LanguageISO', md.language)
assign('Format', md.format)
assign('AgeRating', md.maturityRating)
if md.blackAndWhite is not None and md.blackAndWhite:
assign('BlackAndWhite', "Yes")
assign('Manga', md.manga)
assign('Characters', md.characters)
assign('Teams', md.teams)
assign('Locations', md.locations)
assign('ScanInformation', md.scanInfo)
assign("Publisher", md.publisher)
assign("Imprint", md.imprint)
assign("Genre", md.genre)
assign("Web", md.web_link)
assign("PageCount", md.page_count)
assign("LanguageISO", md.language)
assign("Format", md.format)
assign("AgeRating", md.maturity_rating)
if md.black_and_white is not None and md.black_and_white:
ET.SubElement(root, "BlackAndWhite").text = "Yes"
assign("Manga", md.manga)
assign("Characters", md.characters)
assign("Teams", md.teams)
assign("Locations", md.locations)
assign("ScanInformation", md.scan_info)
# loop and add the page entries under pages node
pages_node = root.find('Pages')
pages_node = root.find("Pages")
if pages_node is not None:
pages_node.clear()
else:
pages_node = ET.SubElement(root, 'Pages')
pages_node = ET.SubElement(root, "Pages")
for page_dict in md.pages:
page_node = ET.SubElement(pages_node, 'Page')
page_node = ET.SubElement(pages_node, "Page")
page_node.attrib = page_dict
# self pretty-print
self.indent(root)
utils.indent(root)
# wrap it in an ElementTree instance, and save as XML
tree = ET.ElementTree(root)
return tree
def convertXMLToMetadata(self, tree):
def convert_xml_to_metadata(self, tree):
root = tree.getroot()
if root.tag != 'ComicInfo':
raise 1
return None
if root.tag != "ComicInfo":
raise "1"
def get(name):
tag = root.find(name)
@ -209,74 +188,75 @@ class ComicInfoXml:
md = GenericMetadata()
md.series = utils.xlate(get('Series'))
md.title = utils.xlate(get('Title'))
md.issue = IssueString(utils.xlate(get('Number'))).asString()
md.issueCount = utils.xlate(get('Count'), True)
md.volume = utils.xlate(get('Volume'), True)
md.alternateSeries = utils.xlate(get('AlternateSeries'))
md.alternateNumber = IssueString(utils.xlate(get('AlternateNumber'))).asString()
md.alternateCount = utils.xlate(get('AlternateCount'), True)
md.comments = utils.xlate(get('Summary'))
md.notes = utils.xlate(get('Notes'))
md.year = utils.xlate(get('Year'), True)
md.month = utils.xlate(get('Month'), True)
md.day = utils.xlate(get('Day'), True)
md.publisher = utils.xlate(get('Publisher'))
md.imprint = utils.xlate(get('Imprint'))
md.genre = utils.xlate(get('Genre'))
md.webLink = utils.xlate(get('Web'))
md.language = utils.xlate(get('LanguageISO'))
md.format = utils.xlate(get('Format'))
md.manga = utils.xlate(get('Manga'))
md.characters = utils.xlate(get('Characters'))
md.teams = utils.xlate(get('Teams'))
md.locations = utils.xlate(get('Locations'))
md.pageCount = utils.xlate(get('PageCount'), True)
md.scanInfo = utils.xlate(get('ScanInformation'))
md.storyArc = utils.xlate(get('StoryArc'))
md.seriesGroup = utils.xlate(get('SeriesGroup'))
md.maturityRating = utils.xlate(get('AgeRating'))
md.series = utils.xlate(get("Series"))
md.title = utils.xlate(get("Title"))
md.issue = IssueString(utils.xlate(get("Number"))).as_string()
md.issue_count = utils.xlate(get("Count"), True)
md.volume = utils.xlate(get("Volume"), True)
md.alternate_series = utils.xlate(get("AlternateSeries"))
md.alternate_number = IssueString(utils.xlate(get("AlternateNumber"))).as_string()
md.alternate_count = utils.xlate(get("AlternateCount"), True)
md.comments = utils.xlate(get("Summary"))
md.notes = utils.xlate(get("Notes"))
md.year = utils.xlate(get("Year"), True)
md.month = utils.xlate(get("Month"), True)
md.day = utils.xlate(get("Day"), True)
md.publisher = utils.xlate(get("Publisher"))
md.imprint = utils.xlate(get("Imprint"))
md.genre = utils.xlate(get("Genre"))
md.web_link = utils.xlate(get("Web"))
md.language = utils.xlate(get("LanguageISO"))
md.format = utils.xlate(get("Format"))
md.manga = utils.xlate(get("Manga"))
md.characters = utils.xlate(get("Characters"))
md.teams = utils.xlate(get("Teams"))
md.locations = utils.xlate(get("Locations"))
md.page_count = utils.xlate(get("PageCount"), True)
md.scan_info = utils.xlate(get("ScanInformation"))
md.story_arc = utils.xlate(get("StoryArc"))
md.series_group = utils.xlate(get("SeriesGroup"))
md.maturity_rating = utils.xlate(get("AgeRating"))
tmp = utils.xlate(get('BlackAndWhite'))
tmp = utils.xlate(get("BlackAndWhite"))
if tmp is not None and tmp.lower() in ["yes", "true", "1"]:
md.blackAndWhite = True
md.black_and_white = True
# Now extract the credit info
for n in root:
if (n.tag == 'Writer' or
n.tag == 'Penciller' or
n.tag == 'Inker' or
n.tag == 'Colorist' or
n.tag == 'Letterer' or
n.tag == 'Editor'
if any(
[
n.tag == "Writer",
n.tag == "Penciller",
n.tag == "Inker",
n.tag == "Colorist",
n.tag == "Letterer",
n.tag == "Editor",
]
):
if n.text is not None:
for name in n.text.split(','):
md.addCredit(name.strip(), n.tag)
for name in n.text.split(","):
md.add_credit(name.strip(), n.tag)
if n.tag == 'CoverArtist':
if n.tag == "CoverArtist":
if n.text is not None:
for name in n.text.split(','):
md.addCredit(name.strip(), "Cover")
for name in n.text.split(","):
md.add_credit(name.strip(), "Cover")
# parse page data now
pages_node = root.find("Pages")
if pages_node is not None:
for page in pages_node:
md.pages.append(page.attrib)
# print page.attrib
md.isEmpty = False
md.is_empty = False
return md
def writeToExternalFile(self, filename, metadata, xml=None):
def write_to_external_file(self, filename, metadata, xml=None):
tree = self.convertMetadataToXML(self, metadata, xml)
# ET.dump(tree)
tree = self.convert_metadata_to_xml(self, metadata, xml)
tree.write(filename, encoding="utf-8", xml_declaration=True)
def readFromExternalFile(self, filename):
def read_from_external_file(self, filename):
tree = ET.parse(filename)
return self.convertXMLToMetadata(tree)
return self.convert_xml_to_metadata(tree)

View File

@ -20,56 +20,62 @@ This should probably be re-written, but, well, it mostly works!
# Some portions of this code were modified from pyComicMetaThis project
# http://code.google.com/p/pycomicmetathis/
import re
import os
import re
from urllib.parse import unquote
class FileNameParser:
def __init__(self):
self.series = ""
self.volume = ""
self.year = ""
self.issue_count = ""
self.remainder = ""
self.issue = ""
def repl(self, m):
return ' ' * len(m.group())
return " " * len(m.group())
def fixSpaces(self, string, remove_dashes=True):
def fix_spaces(self, string, remove_dashes=True):
if remove_dashes:
placeholders = ['[-_]', ' +']
placeholders = [r"[-_]", r" +"]
else:
placeholders = ['[_]', ' +']
placeholders = [r"[_]", r" +"]
for ph in placeholders:
string = re.sub(ph, self.repl, string)
return string # .strip()
def getIssueCount(self, filename, issue_end):
def get_issue_count(self, filename, issue_end):
count = ""
filename = filename[issue_end:]
# replace any name separators with spaces
tmpstr = self.fixSpaces(filename)
tmpstr = self.fix_spaces(filename)
found = False
match = re.search('(?<=\sof\s)\d+(?=\s)', tmpstr, re.IGNORECASE)
match = re.search(r"(?<=\sof\s)\d+(?=\s)", tmpstr, re.IGNORECASE)
if match:
count = match.group()
found = True
if not found:
match = re.search('(?<=\(of\s)\d+(?=\))', tmpstr, re.IGNORECASE)
match = re.search(r"(?<=\(of\s)\d+(?=\))", tmpstr, re.IGNORECASE)
if match:
count = match.group()
found = True
count = count.lstrip("0")
return count
def getIssueNumber(self, filename):
def get_issue_number(self, filename):
"""Returns a tuple of issue number string, and start and end indexes in the filename
(The indexes will be used to split the string up for further parsing)
"""
found = False
issue = ''
issue = ""
start = 0
end = 0
@ -78,25 +84,25 @@ class FileNameParser:
if "--" in filename:
# the pattern seems to be that anything to left of the first "--"
# is the series name followed by issue
filename = re.sub("--.*", self.repl, filename)
filename = re.sub(r"--.*", self.repl, filename)
elif "__" in filename:
# the pattern seems to be that anything to left of the first "__"
# is the series name followed by issue
filename = re.sub("__.*", self.repl, filename)
filename = re.sub(r"__.*", self.repl, filename)
filename = filename.replace("+", " ")
# replace parenthetical phrases with spaces
filename = re.sub("\(.*?\)", self.repl, filename)
filename = re.sub("\[.*?\]", self.repl, filename)
filename = re.sub(r"\(.*?\)", self.repl, filename)
filename = re.sub(r"\[.*?]", self.repl, filename)
# replace any name separators with spaces
filename = self.fixSpaces(filename)
filename = self.fix_spaces(filename)
# remove any "of NN" phrase with spaces (problem: this could break on
# some titles)
filename = re.sub("of [\d]+", self.repl, filename)
filename = re.sub(r"of [\d]+", self.repl, filename)
# print u"[{0}]".format(filename)
@ -104,8 +110,8 @@ class FileNameParser:
# the same positions as original filename
# make a list of each word and its position
word_list = list()
for m in re.finditer("\S+", filename):
word_list = []
for m in re.finditer(r"\S+", filename):
word_list.append((m.group(0), m.start(), m.end()))
# remove the first word, since it can't be the issue number
@ -120,7 +126,7 @@ class FileNameParser:
# first look for a word with "#" followed by digits with optional suffix
# this is almost certainly the issue number
for w in reversed(word_list):
if re.match("#[-]?(([0-9]*\.[0-9]+|[0-9]+)(\w*))", w[0]):
if re.match(r"#[-]?(([0-9]*\.[0-9]+|[0-9]+)(\w*))", w[0]):
found = True
break
@ -128,13 +134,13 @@ class FileNameParser:
# list
if not found:
w = word_list[-1]
if re.match("[-]?(([0-9]*\.[0-9]+|[0-9]+)(\w*))", w[0]):
if re.match(r"[-]?(([0-9]*\.[0-9]+|[0-9]+)(\w*))", w[0]):
found = True
# now try to look for a # followed by any characters
if not found:
for w in reversed(word_list):
if re.match("#\S+", w[0]):
if re.match(r"#\S+", w[0]):
found = True
break
@ -142,12 +148,12 @@ class FileNameParser:
issue = w[0]
start = w[1]
end = w[2]
if issue[0] == '#':
if issue[0] == "#":
issue = issue[1:]
return issue, start, end
def getSeriesName(self, filename, issue_start):
def get_series_name(self, filename, issue_start):
"""Use the issue number string index to split the filename string"""
if issue_start != 0:
@ -157,15 +163,15 @@ class FileNameParser:
if "--" in filename:
# the pattern seems to be that anything to left of the first "--"
# is the series name followed by issue
filename = re.sub("--.*", self.repl, filename)
filename = re.sub(r"--.*", self.repl, filename)
elif "__" in filename:
# the pattern seems to be that anything to left of the first "__"
# is the series name followed by issue
filename = re.sub("__.*", self.repl, filename)
filename = re.sub(r"__.*", self.repl, filename)
filename = filename.replace("+", " ")
tmpstr = self.fixSpaces(filename, remove_dashes=False)
tmpstr = self.fix_spaces(filename, remove_dashes=False)
series = tmpstr
volume = ""
@ -177,10 +183,10 @@ class FileNameParser:
last_word = ""
# remove any parenthetical phrases
series = re.sub("\(.*?\)", "", series)
series = re.sub(r"\(.*?\)", "", series)
# search for volume number
match = re.search('(.+)([vV]|[Vv][oO][Ll]\.?\s?)(\d+)\s*$', series)
match = re.search(r"(.+)([vV]|[Vv][oO][Ll]\.?\s?)(\d+)\s*$", series)
if match:
series = match.group(1)
volume = match.group(3)
@ -189,7 +195,7 @@ class FileNameParser:
# since that's a common way to designate the volume
if volume == "":
# match either (YEAR), (YEAR-), or (YEAR-YEAR2)
match = re.search("(\()(\d{4})(-(\d{4}|)|)(\))", last_word)
match = re.search(r"(\()(\d{4})(-(\d{4}|)|)(\))", last_word)
if match:
volume = match.group(2)
@ -203,26 +209,26 @@ class FileNameParser:
try:
last_word = series.split()[-1]
if last_word.lower() in one_shot_words:
series = series.rsplit(' ', 1)[0]
series = series.rsplit(" ", 1)[0]
except:
pass
return series, volume.strip()
def getYear(self, filename, issue_end):
def get_year(self, filename, issue_end):
filename = filename[issue_end:]
year = ""
# look for four digit number with "(" ")" or "--" around it
match = re.search('(\(\d\d\d\d\))|(--\d\d\d\d--)', filename)
match = re.search(r"(\(\d\d\d\d\))|(--\d\d\d\d--)", filename)
if match:
year = match.group()
# remove non-digits
year = re.sub("[^0-9]", "", year)
year = re.sub(r"[^0-9]", "", year)
return year
def getRemainder(self, filename, year, count, volume, issue_end):
def get_remainder(self, filename, year, count, volume, issue_end):
"""Make a guess at where the the non-interesting stuff begins"""
remainder = ""
@ -234,7 +240,7 @@ class FileNameParser:
elif issue_end != 0:
remainder = filename[issue_end:]
remainder = self.fixSpaces(remainder, remove_dashes=False)
remainder = self.fix_spaces(remainder, remove_dashes=False)
if volume != "":
remainder = remainder.replace("Vol." + volume, "", 1)
if year != "":
@ -243,13 +249,11 @@ class FileNameParser:
remainder = remainder.replace("of " + count, "", 1)
remainder = remainder.replace("()", "")
remainder = remainder.replace(
" ",
" ") # cleans some whitespace mess
remainder = remainder.replace(" ", " ") # cleans some whitespace mess
return remainder.strip()
def parseFilename(self, filename):
def parse_filename(self, filename):
# remove the path
filename = os.path.basename(filename)
@ -267,21 +271,16 @@ class FileNameParser:
filename = filename.replace("_28", "(")
filename = filename.replace("_29", ")")
self.issue, issue_start, issue_end = self.getIssueNumber(filename)
self.series, self.volume = self.getSeriesName(filename, issue_start)
self.issue, issue_start, issue_end = self.get_issue_number(filename)
self.series, self.volume = self.get_series_name(filename, issue_start)
# provides proper value when the filename doesn't have a issue number
if issue_end == 0:
issue_end = len(self.series)
self.year = self.getYear(filename, issue_end)
self.issue_count = self.getIssueCount(filename, issue_end)
self.remainder = self.getRemainder(
filename,
self.year,
self.issue_count,
self.volume,
issue_end)
self.year = self.get_year(filename, issue_end)
self.issue_count = self.get_issue_count(filename, issue_end)
self.remainder = self.get_remainder(filename, self.year, self.issue_count, self.volume, issue_end)
if self.issue != "":
# strip off leading zeros

View File

@ -20,7 +20,9 @@ possible, however lossy it might be
# See the License for the specific language governing permissions and
# limitations under the License.
from . import utils
from typing import List, TypedDict
from comicapi import utils
class PageType:
@ -42,24 +44,34 @@ class PageType:
Other = "Other"
Deleted = "Deleted"
"""
class PageInfo:
Image = 0
Type = PageType.Story
DoublePage = False
ImageSize = 0
Key = ""
ImageWidth = 0
ImageHeight = 0
"""
class ImageMetadata(TypedDict):
Type: PageType
Image: int
ImageSize: str
ImageHeight: str
ImageWidth: str
class CreditMetadata(TypedDict):
person: str
role: str
primary: bool
class GenericMetadata:
writer_synonyms = ["writer", "plotter", "scripter"]
penciller_synonyms = ["artist", "penciller", "penciler", "breakdowns"]
inker_synonyms = ["inker", "artist", "finishes"]
colorist_synonyms = ["colorist", "colourist", "colorer", "colourer"]
letterer_synonyms = ["letterer"]
cover_synonyms = ["cover", "covers", "coverartist", "cover artist"]
editor_synonyms = ["editor"]
def __init__(self):
self.isEmpty = True
self.tagOrigin = None
self.is_empty = True
self.tag_origin = None
self.series = None
self.issue = None
@ -68,47 +80,47 @@ class GenericMetadata:
self.month = None
self.year = None
self.day = None
self.issueCount = None
self.issue_count = None
self.volume = None
self.genre = None
self.language = None # 2 letter iso code
self.comments = None # use same way as Summary in CIX
self.volumeCount = None
self.criticalRating = None
self.volume_count = None
self.critical_rating = None
self.country = None
self.alternateSeries = None
self.alternateNumber = None
self.alternateCount = None
self.alternate_series = None
self.alternate_number = None
self.alternate_count = None
self.imprint = None
self.notes = None
self.webLink = None
self.web_link = None
self.format = None
self.manga = None
self.blackAndWhite = None
self.pageCount = None
self.maturityRating = None
self.black_and_white = None
self.page_count = None
self.maturity_rating = None
self.storyArc = None
self.seriesGroup = None
self.scanInfo = None
self.story_arc = None
self.series_group = None
self.scan_info = None
self.characters = None
self.teams = None
self.locations = None
self.credits = list()
self.tags = list()
self.pages = list()
self.credits: List[CreditMetadata] = []
self.tags: List[str] = []
self.pages: List[ImageMetadata] = []
# Some CoMet-only items
self.price = None
self.isVersionOf = None
self.is_version_of = None
self.rights = None
self.identifier = None
self.lastMark = None
self.coverImage = None
self.last_mark = None
self.cover_image = None
def overlay(self, new_md):
"""Overlay a metadata object on this one
@ -124,35 +136,36 @@ class GenericMetadata:
else:
setattr(self, cur, new)
if not new_md.isEmpty:
self.isEmpty = False
new_md: GenericMetadata
if not new_md.is_empty:
self.is_empty = False
assign('series', new_md.series)
assign("series", new_md.series)
assign("issue", new_md.issue)
assign("issueCount", new_md.issueCount)
assign("issue_count", new_md.issue_count)
assign("title", new_md.title)
assign("publisher", new_md.publisher)
assign("day", new_md.day)
assign("month", new_md.month)
assign("year", new_md.year)
assign("volume", new_md.volume)
assign("volumeCount", new_md.volumeCount)
assign("volume_count", new_md.volume_count)
assign("genre", new_md.genre)
assign("language", new_md.language)
assign("country", new_md.country)
assign("criticalRating", new_md.criticalRating)
assign("alternateSeries", new_md.alternateSeries)
assign("alternateNumber", new_md.alternateNumber)
assign("alternateCount", new_md.alternateCount)
assign("critical_rating", new_md.critical_rating)
assign("alternate_series", new_md.alternate_series)
assign("alternate_number", new_md.alternate_number)
assign("alternate_count", new_md.alternate_count)
assign("imprint", new_md.imprint)
assign("webLink", new_md.webLink)
assign("web_link", new_md.web_link)
assign("format", new_md.format)
assign("manga", new_md.manga)
assign("blackAndWhite", new_md.blackAndWhite)
assign("maturityRating", new_md.maturityRating)
assign("storyArc", new_md.storyArc)
assign("seriesGroup", new_md.seriesGroup)
assign("scanInfo", new_md.scanInfo)
assign("black_and_white", new_md.black_and_white)
assign("maturity_rating", new_md.maturity_rating)
assign("story_arc", new_md.story_arc)
assign("series_group", new_md.series_group)
assign("scan_info", new_md.scan_info)
assign("characters", new_md.characters)
assign("teams", new_md.teams)
assign("locations", new_md.locations)
@ -160,12 +173,12 @@ class GenericMetadata:
assign("notes", new_md.notes)
assign("price", new_md.price)
assign("isVersionOf", new_md.isVersionOf)
assign("is_version_of", new_md.is_version_of)
assign("rights", new_md.rights)
assign("identifier", new_md.identifier)
assign("lastMark", new_md.lastMark)
assign("last_mark", new_md.last_mark)
self.overlayCredits(new_md.credits)
self.overlay_credits(new_md.credits)
# TODO
# not sure if the tags and pages should broken down, or treated
@ -179,66 +192,62 @@ class GenericMetadata:
if len(new_md.pages) > 0:
assign("pages", new_md.pages)
def overlayCredits(self, new_credits):
def overlay_credits(self, new_credits):
for c in new_credits:
if 'primary' in c and c['primary']:
primary = True
else:
primary = False
primary = bool("primary" in c and c["primary"])
# Remove credit role if person is blank
if c['person'] == "":
if c["person"] == "":
for r in reversed(self.credits):
if r['role'].lower() == c['role'].lower():
if r["role"].lower() == c["role"].lower():
self.credits.remove(r)
# otherwise, add it!
else:
self.addCredit(c['person'], c['role'], primary)
self.add_credit(c["person"], c["role"], primary)
def setDefaultPageList(self, count):
def set_default_page_list(self, count):
# generate a default page list, with the first page marked as the cover
for i in range(count):
page_dict = dict()
page_dict['Image'] = str(i)
page_dict = {}
page_dict["Image"] = str(i)
if i == 0:
page_dict['Type'] = PageType.FrontCover
page_dict["Type"] = PageType.FrontCover
self.pages.append(page_dict)
def getArchivePageIndex(self, pagenum):
def get_archive_page_index(self, pagenum):
# convert the displayed page number to the page index of the file in
# the archive
if pagenum < len(self.pages):
return int(self.pages[pagenum]['Image'])
else:
return int(self.pages[pagenum]["Image"])
return 0
def getCoverPageIndexList(self):
def get_cover_page_index_list(self):
# return a list of archive page indices of cover pages
coverlist = []
for p in self.pages:
if 'Type' in p and p['Type'] == PageType.FrontCover:
coverlist.append(int(p['Image']))
if "Type" in p and p["Type"] == PageType.FrontCover:
coverlist.append(int(p["Image"]))
if len(coverlist) == 0:
coverlist.append(0)
return coverlist
def addCredit(self, person, role, primary=False):
def add_credit(self, person, role, primary=False):
credit = dict()
credit['person'] = person
credit['role'] = role
credit = {}
credit["person"] = person
credit["role"] = role
if primary:
credit['primary'] = primary
credit["primary"] = primary
# look to see if it's not already there...
found = False
for c in self.credits:
if (c['person'].lower() == person.lower() and
c['role'].lower() == role.lower()):
if c["person"].lower() == person.lower() and c["role"].lower() == role.lower():
# no need to add it. just adjust the "primary" flag as needed
c['primary'] = primary
c["primary"] = primary
found = True
break
@ -247,64 +256,63 @@ class GenericMetadata:
def __str__(self):
vals = []
if self.isEmpty:
if self.is_empty:
return "No metadata"
def add_string(tag, val):
if val is not None and "{0}".format(val) != "":
if val is not None and str(val) != "":
vals.append((tag, val))
def add_attr_string(tag):
val = getattr(self, tag)
add_string(tag, getattr(self, tag))
add_attr_string("series")
add_attr_string("issue")
add_attr_string("issueCount")
add_attr_string("issue_count")
add_attr_string("title")
add_attr_string("publisher")
add_attr_string("year")
add_attr_string("month")
add_attr_string("day")
add_attr_string("volume")
add_attr_string("volumeCount")
add_attr_string("volume_count")
add_attr_string("genre")
add_attr_string("language")
add_attr_string("country")
add_attr_string("criticalRating")
add_attr_string("alternateSeries")
add_attr_string("alternateNumber")
add_attr_string("alternateCount")
add_attr_string("critical_rating")
add_attr_string("alternate_series")
add_attr_string("alternate_number")
add_attr_string("alternate_count")
add_attr_string("imprint")
add_attr_string("webLink")
add_attr_string("web_link")
add_attr_string("format")
add_attr_string("manga")
add_attr_string("price")
add_attr_string("isVersionOf")
add_attr_string("is_version_of")
add_attr_string("rights")
add_attr_string("identifier")
add_attr_string("lastMark")
add_attr_string("last_mark")
if self.blackAndWhite:
add_attr_string("blackAndWhite")
add_attr_string("maturityRating")
add_attr_string("storyArc")
add_attr_string("seriesGroup")
add_attr_string("scanInfo")
if self.black_and_white:
add_attr_string("black_and_white")
add_attr_string("maturity_rating")
add_attr_string("story_arc")
add_attr_string("series_group")
add_attr_string("scan_info")
add_attr_string("characters")
add_attr_string("teams")
add_attr_string("locations")
add_attr_string("comments")
add_attr_string("notes")
add_string("tags", utils.listToString(self.tags))
add_string("tags", utils.list_to_string(self.tags))
for c in self.credits:
primary = ""
if 'primary' in c and c['primary']:
if "primary" in c and c["primary"]:
primary = " [P]"
add_string("credit", c['role'] + ": " + c['person'] + primary)
add_string("credit", c["role"] + ": " + c["person"] + primary)
# find the longest field name
flen = 0

View File

@ -1,4 +1,3 @@
# coding=utf-8
"""Support for mixed digit/string type Issue field
Class for handling the odd permutations of an 'issue number' that the
@ -20,13 +19,8 @@ comics industry throws at us.
# See the License for the specific language governing permissions and
# limitations under the License.
#import utils
#import math
#import re
class IssueString:
def __init__(self, text):
# break up the issue number string into 2 parts: the numeric and suffix string.
@ -43,10 +37,8 @@ class IssueString:
if len(text) == 0:
return
text = str(text)
# skip the minus sign if it's first
if text[0] == '-':
if text[0] == "-":
start = 1
else:
start = 0
@ -78,7 +70,7 @@ class IssueString:
idx = 0
part1 = text[0:idx]
part2 = text[idx:len(text)]
part2 = text[idx : len(text)]
if part1 != "":
self.num = float(part1)
@ -86,9 +78,7 @@ class IssueString:
else:
self.suffix = text
# print "num: {0} suf: {1}".format(self.num, self.suffix)
def asString(self, pad=0):
def as_string(self, pad=0):
# return the float, left side zero-padded, with suffix attached
if self.num is None:
return self.suffix
@ -106,9 +96,9 @@ class IssueString:
# create padding
padding = ""
l = len(str(num_int))
if l < pad:
padding = "0" * (pad - l)
length = len(str(num_int))
if length < pad:
padding = "0" * (pad - length)
num_s = padding + num_s
if negative:
@ -116,16 +106,16 @@ class IssueString:
return num_s
def asFloat(self):
def as_float(self):
# return the float, with no suffix
if self.suffix == "½":
if self.num is not None:
return self.num + .5
else:
return .5
return self.num + 0.5
return 0.5
return self.num
def asInt(self):
def as_int(self):
# return the int version of the float
if self.num is None:
return None

View File

@ -1,4 +1,3 @@
# coding=utf-8
"""Some generic utilities"""
# Copyright 2012-2014 Anthony Beville
@ -15,19 +14,39 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import re
import platform
import locale
import codecs
import locale
import os
import platform
import re
import sys
import unicodedata
from collections import defaultdict
import pycountry
class UtilsVars:
already_fixed_encoding = False
def indent(elem, level=0):
# for making the XML output readable
i = "\n" + level * " "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for ele in elem:
indent(ele, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def get_actual_preferred_encoding():
preferred_encoding = locale.getpreferredencoding()
if platform.system() == "Darwin":
@ -50,26 +69,17 @@ def fix_output_encoding():
def get_recursive_filelist(pathlist):
"""Get a recursive list of of all files under all path items in the list"""
filename_encoding = sys.getfilesystemencoding()
filelist = []
for p in pathlist:
# if path is a folder, walk it recursively, and all files underneath
if isinstance(p, str):
# make sure string is unicode
#p = p.decode(filename_encoding) # , 'replace')
pass
elif not isinstance(p, str):
if not isinstance(p, str):
# it's probably a QString
p = str(p)
if os.path.isdir(p):
for root, dirs, files in os.walk(p):
for root, _, files in os.walk(p):
for f in files:
if isinstance(f, str):
# make sure string is unicode
#f = f.decode(filename_encoding, 'replace')
pass
elif not isinstance(f, str):
if not isinstance(f, str):
# it's probably a QString
f = str(f)
filelist.append(os.path.join(root, f))
@ -79,28 +89,26 @@ def get_recursive_filelist(pathlist):
return filelist
def listToString(l):
def list_to_string(lst):
string = ""
if l is not None:
for item in l:
if lst is not None:
for item in lst:
if len(string) > 0:
string += ", "
string += item
return string
def addtopath(dirname):
def add_to_path(dirname):
if dirname is not None and dirname != "":
# verify that path doesn't already contain the given dirname
tmpdirname = re.escape(dirname)
pattern = r"{sep}{dir}$|^{dir}{sep}|{sep}{dir}{sep}|^{dir}$".format(
dir=tmpdirname,
sep=os.pathsep)
pattern = r"(^|{sep}){dir}({sep}|$)".format(dir=tmpdirname, sep=os.pathsep)
match = re.search(pattern, os.environ['PATH'])
match = re.search(pattern, os.environ["PATH"])
if not match:
os.environ['PATH'] = dirname + os.pathsep + os.environ['PATH']
os.environ["PATH"] = dirname + os.pathsep + os.environ["PATH"]
def which(program):
@ -109,7 +117,7 @@ def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
fpath, _ = os.path.split(program)
if fpath:
if is_exe(program):
return program
@ -122,496 +130,109 @@ def which(program):
return None
def xlate(data, isInt=False):
class Default(dict):
def __missing__(self, key):
return None
def xlate(data, is_int=False):
if data is None or data == "":
return None
if isInt:
i = str(data).translate(Default(zip((ord(c) for c in "1234567890"),"1234567890")))
if is_int:
i = str(data).translate(defaultdict(lambda: None, zip((ord(c) for c in "1234567890"), "1234567890")))
if i == "0":
return "0"
if i is "":
if i == "":
return None
return int(i)
else:
return str(data)
def removearticles(text):
def remove_articles(text):
text = text.lower()
articles = ['and', 'a', '&', 'issue', 'the']
newText = ''
for word in text.split(' '):
articles = [
"&",
"a",
"am",
"an",
"and",
"as",
"at",
"be",
"but",
"by",
"for",
"if",
"is",
"issue",
"it",
"it's",
"its",
"itself",
"of",
"or",
"so",
"the",
"the",
"with",
]
new_text = ""
for word in text.split(" "):
if word not in articles:
newText += word + ' '
new_text += word + " "
newText = newText[:-1]
new_text = new_text[:-1]
return newText
return new_text
def sanitize_title(text):
# normalize unicode and convert to ascii. Does not work for everything eg ½ to 12 not 1/2
# this will probably cause issues with titles in other character sets e.g. chinese, japanese
text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('ascii')
text = unicodedata.normalize("NFKD", text).encode("ascii", "ignore").decode("ascii")
# comicvine keeps apostrophes a part of the word
text = text.replace("'", "")
text = text.replace("\"", "")
text = text.replace('"', "")
# comicvine ignores punctuation and accents
text = re.sub(r'[^A-Za-z0-9]+',' ', text)
text = re.sub(r"[^A-Za-z0-9]+", " ", text)
# remove extra space and articles and all lower case
text = removearticles(text).lower().strip()
text = remove_articles(text).lower().strip()
return text
def unique_file(file_name):
counter = 1
# returns ('/path/file', '.ext')
file_name_parts = os.path.splitext(file_name)
while True:
if not os.path.lexists(file_name):
return file_name
file_name = file_name_parts[
0] + ' (' + str(counter) + ')' + file_name_parts[1]
file_name = file_name_parts[0] + " (" + str(counter) + ")" + file_name_parts[1]
counter += 1
# -o- coding: utf-8 -o-
# ISO639 python dict
# official list in http://www.loc.gov/standards/iso639-2/php/code_list.php
languages = defaultdict(lambda: None)
lang_dict = {
'ab': 'Abkhaz',
'aa': 'Afar',
'af': 'Afrikaans',
'ak': 'Akan',
'sq': 'Albanian',
'am': 'Amharic',
'ar': 'Arabic',
'an': 'Aragonese',
'hy': 'Armenian',
'as': 'Assamese',
'av': 'Avaric',
'ae': 'Avestan',
'ay': 'Aymara',
'az': 'Azerbaijani',
'bm': 'Bambara',
'ba': 'Bashkir',
'eu': 'Basque',
'be': 'Belarusian',
'bn': 'Bengali',
'bh': 'Bihari',
'bi': 'Bislama',
'bs': 'Bosnian',
'br': 'Breton',
'bg': 'Bulgarian',
'my': 'Burmese',
'ca': 'Catalan; Valencian',
'ch': 'Chamorro',
'ce': 'Chechen',
'ny': 'Chichewa; Chewa; Nyanja',
'zh': 'Chinese',
'cv': 'Chuvash',
'kw': 'Cornish',
'co': 'Corsican',
'cr': 'Cree',
'hr': 'Croatian',
'cs': 'Czech',
'da': 'Danish',
'dv': 'Divehi; Maldivian;',
'nl': 'Dutch',
'dz': 'Dzongkha',
'en': 'English',
'eo': 'Esperanto',
'et': 'Estonian',
'ee': 'Ewe',
'fo': 'Faroese',
'fj': 'Fijian',
'fi': 'Finnish',
'fr': 'French',
'ff': 'Fula',
'gl': 'Galician',
'ka': 'Georgian',
'de': 'German',
'el': 'Greek, Modern',
'gn': 'Guaraní',
'gu': 'Gujarati',
'ht': 'Haitian',
'ha': 'Hausa',
'he': 'Hebrew (modern)',
'hz': 'Herero',
'hi': 'Hindi',
'ho': 'Hiri Motu',
'hu': 'Hungarian',
'ia': 'Interlingua',
'id': 'Indonesian',
'ie': 'Interlingue',
'ga': 'Irish',
'ig': 'Igbo',
'ik': 'Inupiaq',
'io': 'Ido',
'is': 'Icelandic',
'it': 'Italian',
'iu': 'Inuktitut',
'ja': 'Japanese',
'jv': 'Javanese',
'kl': 'Kalaallisut',
'kn': 'Kannada',
'kr': 'Kanuri',
'ks': 'Kashmiri',
'kk': 'Kazakh',
'km': 'Khmer',
'ki': 'Kikuyu, Gikuyu',
'rw': 'Kinyarwanda',
'ky': 'Kirghiz, Kyrgyz',
'kv': 'Komi',
'kg': 'Kongo',
'ko': 'Korean',
'ku': 'Kurdish',
'kj': 'Kwanyama, Kuanyama',
'la': 'Latin',
'lb': 'Luxembourgish',
'lg': 'Luganda',
'li': 'Limburgish',
'ln': 'Lingala',
'lo': 'Lao',
'lt': 'Lithuanian',
'lu': 'Luba-Katanga',
'lv': 'Latvian',
'gv': 'Manx',
'mk': 'Macedonian',
'mg': 'Malagasy',
'ms': 'Malay',
'ml': 'Malayalam',
'mt': 'Maltese',
'mi': 'Māori',
'mr': 'Marathi (Marāṭhī)',
'mh': 'Marshallese',
'mn': 'Mongolian',
'na': 'Nauru',
'nv': 'Navajo, Navaho',
'nb': 'Norwegian Bokmål',
'nd': 'North Ndebele',
'ne': 'Nepali',
'ng': 'Ndonga',
'nn': 'Norwegian Nynorsk',
'no': 'Norwegian',
'ii': 'Nuosu',
'nr': 'South Ndebele',
'oc': 'Occitan',
'oj': 'Ojibwe, Ojibwa',
'cu': 'Old Church Slavonic',
'om': 'Oromo',
'or': 'Oriya',
'os': 'Ossetian, Ossetic',
'pa': 'Panjabi, Punjabi',
'pi': 'Pāli',
'fa': 'Persian',
'pl': 'Polish',
'ps': 'Pashto, Pushto',
'pt': 'Portuguese',
'qu': 'Quechua',
'rm': 'Romansh',
'rn': 'Kirundi',
'ro': 'Romanian, Moldavan',
'ru': 'Russian',
'sa': 'Sanskrit (Saṁskṛta)',
'sc': 'Sardinian',
'sd': 'Sindhi',
'se': 'Northern Sami',
'sm': 'Samoan',
'sg': 'Sango',
'sr': 'Serbian',
'gd': 'Scottish Gaelic',
'sn': 'Shona',
'si': 'Sinhala, Sinhalese',
'sk': 'Slovak',
'sl': 'Slovene',
'so': 'Somali',
'st': 'Southern Sotho',
'es': 'Spanish; Castilian',
'su': 'Sundanese',
'sw': 'Swahili',
'ss': 'Swati',
'sv': 'Swedish',
'ta': 'Tamil',
'te': 'Telugu',
'tg': 'Tajik',
'th': 'Thai',
'ti': 'Tigrinya',
'bo': 'Tibetan',
'tk': 'Turkmen',
'tl': 'Tagalog',
'tn': 'Tswana',
'to': 'Tonga',
'tr': 'Turkish',
'ts': 'Tsonga',
'tt': 'Tatar',
'tw': 'Twi',
'ty': 'Tahitian',
'ug': 'Uighur, Uyghur',
'uk': 'Ukrainian',
'ur': 'Urdu',
'uz': 'Uzbek',
've': 'Venda',
'vi': 'Vietnamese',
'vo': 'Volapük',
'wa': 'Walloon',
'cy': 'Welsh',
'wo': 'Wolof',
'fy': 'Western Frisian',
'xh': 'Xhosa',
'yi': 'Yiddish',
'yo': 'Yoruba',
'za': 'Zhuang, Chuang',
'zu': 'Zulu',
}
countries = defaultdict(lambda: None)
for c in pycountry.countries:
if "alpha_2" in c._fields:
countries[c.alpha_2] = c.name
for lng in pycountry.languages:
if "alpha_2" in lng._fields:
languages[lng.alpha_2] = lng.name
countries = [
('AF', 'Afghanistan'),
('AL', 'Albania'),
('DZ', 'Algeria'),
('AS', 'American Samoa'),
('AD', 'Andorra'),
('AO', 'Angola'),
('AI', 'Anguilla'),
('AQ', 'Antarctica'),
('AG', 'Antigua And Barbuda'),
('AR', 'Argentina'),
('AM', 'Armenia'),
('AW', 'Aruba'),
('AU', 'Australia'),
('AT', 'Austria'),
('AZ', 'Azerbaijan'),
('BS', 'Bahamas'),
('BH', 'Bahrain'),
('BD', 'Bangladesh'),
('BB', 'Barbados'),
('BY', 'Belarus'),
('BE', 'Belgium'),
('BZ', 'Belize'),
('BJ', 'Benin'),
('BM', 'Bermuda'),
('BT', 'Bhutan'),
('BO', 'Bolivia'),
('BA', 'Bosnia And Herzegowina'),
('BW', 'Botswana'),
('BV', 'Bouvet Island'),
('BR', 'Brazil'),
('BN', 'Brunei Darussalam'),
('BG', 'Bulgaria'),
('BF', 'Burkina Faso'),
('BI', 'Burundi'),
('KH', 'Cambodia'),
('CM', 'Cameroon'),
('CA', 'Canada'),
('CV', 'Cape Verde'),
('KY', 'Cayman Islands'),
('CF', 'Central African Rep'),
('TD', 'Chad'),
('CL', 'Chile'),
('CN', 'China'),
('CX', 'Christmas Island'),
('CC', 'Cocos Islands'),
('CO', 'Colombia'),
('KM', 'Comoros'),
('CG', 'Congo'),
('CK', 'Cook Islands'),
('CR', 'Costa Rica'),
('CI', 'Cote D`ivoire'),
('HR', 'Croatia'),
('CU', 'Cuba'),
('CY', 'Cyprus'),
('CZ', 'Czech Republic'),
('DK', 'Denmark'),
('DJ', 'Djibouti'),
('DM', 'Dominica'),
('DO', 'Dominican Republic'),
('TP', 'East Timor'),
('EC', 'Ecuador'),
('EG', 'Egypt'),
('SV', 'El Salvador'),
('GQ', 'Equatorial Guinea'),
('ER', 'Eritrea'),
('EE', 'Estonia'),
('ET', 'Ethiopia'),
('FK', 'Falkland Islands (Malvinas)'),
('FO', 'Faroe Islands'),
('FJ', 'Fiji'),
('FI', 'Finland'),
('FR', 'France'),
('GF', 'French Guiana'),
('PF', 'French Polynesia'),
('TF', 'French S. Territories'),
('GA', 'Gabon'),
('GM', 'Gambia'),
('GE', 'Georgia'),
('DE', 'Germany'),
('GH', 'Ghana'),
('GI', 'Gibraltar'),
('GR', 'Greece'),
('GL', 'Greenland'),
('GD', 'Grenada'),
('GP', 'Guadeloupe'),
('GU', 'Guam'),
('GT', 'Guatemala'),
('GN', 'Guinea'),
('GW', 'Guinea-bissau'),
('GY', 'Guyana'),
('HT', 'Haiti'),
('HN', 'Honduras'),
('HK', 'Hong Kong'),
('HU', 'Hungary'),
('IS', 'Iceland'),
('IN', 'India'),
('ID', 'Indonesia'),
('IR', 'Iran'),
('IQ', 'Iraq'),
('IE', 'Ireland'),
('IL', 'Israel'),
('IT', 'Italy'),
('JM', 'Jamaica'),
('JP', 'Japan'),
('JO', 'Jordan'),
('KZ', 'Kazakhstan'),
('KE', 'Kenya'),
('KI', 'Kiribati'),
('KP', 'Korea (North)'),
('KR', 'Korea (South)'),
('KW', 'Kuwait'),
('KG', 'Kyrgyzstan'),
('LA', 'Laos'),
('LV', 'Latvia'),
('LB', 'Lebanon'),
('LS', 'Lesotho'),
('LR', 'Liberia'),
('LY', 'Libya'),
('LI', 'Liechtenstein'),
('LT', 'Lithuania'),
('LU', 'Luxembourg'),
('MO', 'Macau'),
('MK', 'Macedonia'),
('MG', 'Madagascar'),
('MW', 'Malawi'),
('MY', 'Malaysia'),
('MV', 'Maldives'),
('ML', 'Mali'),
('MT', 'Malta'),
('MH', 'Marshall Islands'),
('MQ', 'Martinique'),
('MR', 'Mauritania'),
('MU', 'Mauritius'),
('YT', 'Mayotte'),
('MX', 'Mexico'),
('FM', 'Micronesia'),
('MD', 'Moldova'),
('MC', 'Monaco'),
('MN', 'Mongolia'),
('MS', 'Montserrat'),
('MA', 'Morocco'),
('MZ', 'Mozambique'),
('MM', 'Myanmar'),
('NA', 'Namibia'),
('NR', 'Nauru'),
('NP', 'Nepal'),
('NL', 'Netherlands'),
('AN', 'Netherlands Antilles'),
('NC', 'New Caledonia'),
('NZ', 'New Zealand'),
('NI', 'Nicaragua'),
('NE', 'Niger'),
('NG', 'Nigeria'),
('NU', 'Niue'),
('NF', 'Norfolk Island'),
('MP', 'Northern Mariana Islands'),
('NO', 'Norway'),
('OM', 'Oman'),
('PK', 'Pakistan'),
('PW', 'Palau'),
('PA', 'Panama'),
('PG', 'Papua New Guinea'),
('PY', 'Paraguay'),
('PE', 'Peru'),
('PH', 'Philippines'),
('PN', 'Pitcairn'),
('PL', 'Poland'),
('PT', 'Portugal'),
('PR', 'Puerto Rico'),
('QA', 'Qatar'),
('RE', 'Reunion'),
('RO', 'Romania'),
('RU', 'Russian Federation'),
('RW', 'Rwanda'),
('KN', 'Saint Kitts And Nevis'),
('LC', 'Saint Lucia'),
('VC', 'St Vincent/Grenadines'),
('WS', 'Samoa'),
('SM', 'San Marino'),
('ST', 'Sao Tome'),
('SA', 'Saudi Arabia'),
('SN', 'Senegal'),
('SC', 'Seychelles'),
('SL', 'Sierra Leone'),
('SG', 'Singapore'),
('SK', 'Slovakia'),
('SI', 'Slovenia'),
('SB', 'Solomon Islands'),
('SO', 'Somalia'),
('ZA', 'South Africa'),
('ES', 'Spain'),
('LK', 'Sri Lanka'),
('SH', 'St. Helena'),
('PM', 'St.Pierre'),
('SD', 'Sudan'),
('SR', 'Suriname'),
('SZ', 'Swaziland'),
('SE', 'Sweden'),
('CH', 'Switzerland'),
('SY', 'Syrian Arab Republic'),
('TW', 'Taiwan'),
('TJ', 'Tajikistan'),
('TZ', 'Tanzania'),
('TH', 'Thailand'),
('TG', 'Togo'),
('TK', 'Tokelau'),
('TO', 'Tonga'),
('TT', 'Trinidad And Tobago'),
('TN', 'Tunisia'),
('TR', 'Turkey'),
('TM', 'Turkmenistan'),
('TV', 'Tuvalu'),
('UG', 'Uganda'),
('UA', 'Ukraine'),
('AE', 'United Arab Emirates'),
('UK', 'United Kingdom'),
('US', 'United States'),
('UY', 'Uruguay'),
('UZ', 'Uzbekistan'),
('VU', 'Vanuatu'),
('VA', 'Vatican City State'),
('VE', 'Venezuela'),
('VN', 'Viet Nam'),
('VG', 'Virgin Islands (British)'),
('VI', 'Virgin Islands (U.S.)'),
('EH', 'Western Sahara'),
('YE', 'Yemen'),
('YU', 'Yugoslavia'),
('ZR', 'Zaire'),
('ZM', 'Zambia'),
('ZW', 'Zimbabwe')
]
def get_language_from_iso(iso: str):
return languages[iso]
def getLanguageDict():
return lang_dict
def getLanguageFromISO(iso):
if iso is None:
def get_language(string):
if string is None:
return None
else:
return lang_dict[iso]
lang = get_language_from_iso(string)
if lang is None:
try:
return pycountry.languages.lookup(string).name
except:
return None
return lang