Compare commits

...

10 Commits

Author SHA1 Message Date
Timmy Welch
3f4b89629d Support Python 3.9 2024-03-03 22:13:12 -08:00
AJ Slater
0812d67919
update deps. bump version (#5) 2024-02-28 13:17:47 -08:00
AJ Slater
624b64d6ca
Merge pull request #4 from ajslater/develop
v0.2.0
2024-02-28 13:13:33 -08:00
AJ Slater
1aa1a6d6b7
Merge pull request #3 from ajslater/pre-release
v0.2.0a0
2024-02-28 13:09:44 -08:00
AJ Slater
16d362da8a exclude captain marvel from publisher detector 2024-02-25 01:56:28 -08:00
AJ Slater
32f8cb0f22 lint and add type notations 2024-02-24 19:40:33 -08:00
AJ Slater
0a17bbc0d9 fix test for title and not remainders 2024-02-24 19:04:45 -08:00
AJ Slater
7694a3e2fd enforce title position limits. reduce parse_series_and_title complexity. add type hints. 2024-02-24 18:21:07 -08:00
AJ Slater
d3b11d6361 cast date and remainder parts as strings 2024-02-23 22:16:51 -08:00
AJ Slater
29e6068db2 restore ORIGINAL_FORMAT_RE 2024-02-23 19:40:39 -08:00
10 changed files with 212 additions and 155 deletions

View File

@ -12,6 +12,8 @@
- ComicFilenameParser and ComicFilenameSerializer classes are available as well - ComicFilenameParser and ComicFilenameSerializer classes are available as well
as the old function API. as the old function API.
- New test cases thanks to @lordwelch & @bpepple - New test cases thanks to @lordwelch & @bpepple
- Titles must come after series and one other token, but before format and scan
info.
## v0.1.4 ## v0.1.4

View File

@ -3,10 +3,11 @@
from argparse import ArgumentParser from argparse import ArgumentParser
from pathlib import Path from pathlib import Path
from pprint import pprint from pprint import pprint
from comicfn2dict.parse import ComicFilenameParser from comicfn2dict.parse import ComicFilenameParser
def main(): def main() -> None:
"""Test parser.""" """Test parser."""
description = "Comic book archive read/write tool." description = "Comic book archive read/write tool."
parser = ArgumentParser(description=description) parser = ArgumentParser(description=description)
@ -23,7 +24,7 @@ def main():
cfnparser = ComicFilenameParser(name, verbose=args.verbose) cfnparser = ComicFilenameParser(name, verbose=args.verbose)
metadata = cfnparser.parse() metadata = cfnparser.parse()
if args.verbose: if args.verbose:
print("=" * 80) print("=" * 80) # noqa:T201
pprint(metadata) # noqa:T203 pprint(metadata) # noqa:T203

View File

@ -6,4 +6,4 @@ def print_log_header(label: str) -> None:
prefix = "-" * 3 + label prefix = "-" * 3 + label
suffix_len = 80 - len(prefix) suffix_len = 80 - len(prefix)
suffix = "-" * suffix_len suffix = "-" * suffix_len
print(prefix + suffix) print(prefix + suffix) # noqa: T201

View File

@ -1,10 +1,13 @@
"""Parse comic book archive names using the simple 'parse' parser.""" """Parse comic book archive names using the simple 'parse' parser."""
from pprint import pformat from __future__ import annotations
from calendar import month_abbr from calendar import month_abbr
from copy import copy from copy import copy
from pathlib import Path from pathlib import Path
from pprint import pformat
from re import Match, Pattern from re import Match, Pattern
from typing import Any from sys import maxsize
from comicfn2dict.log import print_log_header from comicfn2dict.log import print_log_header
from comicfn2dict.regex import ( from comicfn2dict.regex import (
ALPHA_MONTH_RANGE_RE, ALPHA_MONTH_RANGE_RE,
@ -18,8 +21,8 @@ from comicfn2dict.regex import (
ORIGINAL_FORMAT_SCAN_INFO_RE, ORIGINAL_FORMAT_SCAN_INFO_RE,
ORIGINAL_FORMAT_SCAN_INFO_SEPARATE_RE, ORIGINAL_FORMAT_SCAN_INFO_SEPARATE_RE,
PUBLISHER_AMBIGUOUS_RE, PUBLISHER_AMBIGUOUS_RE,
PUBLISHER_UNAMBIGUOUS_RE,
PUBLISHER_AMBIGUOUS_TOKEN_RE, PUBLISHER_AMBIGUOUS_TOKEN_RE,
PUBLISHER_UNAMBIGUOUS_RE,
PUBLISHER_UNAMBIGUOUS_TOKEN_RE, PUBLISHER_UNAMBIGUOUS_TOKEN_RE,
REGEX_SUBS, REGEX_SUBS,
REMAINING_GROUP_RE, REMAINING_GROUP_RE,
@ -32,42 +35,40 @@ from comicfn2dict.regex import (
YEAR_TOKEN_RE, YEAR_TOKEN_RE,
) )
_REMAINING_GROUP_KEYS = ("series", "title")
_TITLE_PRECEDING_KEYS = ("issue", "year", "volume")
_DATE_KEYS = frozenset({"year", "month", "day"}) _DATE_KEYS = frozenset({"year", "month", "day"})
_REMAINING_GROUP_KEYS = ("series", "title")
# Ordered by commonness.
_TITLE_PRECEDING_KEYS = ("issue", "year", "volume", "month")
class ComicFilenameParser: class ComicFilenameParser:
"""Parse a filename metadata into a dict.""" """Parse a filename metadata into a dict."""
def path_index(self, key: str): def path_index(self, key: str, default: int = -1) -> int:
"""Lazily retrieve and memoize the key's location in the path.""" """Lazily retrieve and memoize the key's location in the path."""
if key == "remainders": if key == "remainders":
return -1 return default
value: str = self.metadata.get(key, "") # type: ignore value: str = self.metadata.get(key, "") # type: ignore
if not value: if not value:
return -1 return default
if value not in self._path_indexes: if value not in self._path_indexes:
# XXX This is fragile, but it's difficult to calculate the original # XXX This is fragile, but it's difficult to calculate the original
# position at match time from the ever changing _unparsed_path. # position at match time from the ever changing _unparsed_path.
if key == "ext": index = self.path.rfind(value) if key == "ext" else self.path.find(value)
index = self.path.rfind(value)
else:
index = self.path.find(value)
self._path_indexes[value] = index self._path_indexes[value] = index
return self._path_indexes[value] return self._path_indexes[value]
def _log(self, label): def _log(self, label: str) -> None:
if not self._debug: if not self._debug:
return return
print_log_header(label) print_log_header(label)
combined = {} combined = {}
for key in self.metadata: for key in self.metadata:
combined[key] = (self.metadata.get(key), self.path_index(key)) combined[key] = (self.metadata.get(key), self.path_index(key))
print(" " + self._unparsed_path) print(" " + self._unparsed_path) # noqa: T201
print(" " + pformat(combined)) print(" " + pformat(combined)) # noqa: T201
def _parse_ext(self): def _parse_ext(self) -> None:
"""Pop the extension from the pathname.""" """Pop the extension from the pathname."""
path = Path(self._unparsed_path) path = Path(self._unparsed_path)
suffix = path.suffix suffix = path.suffix
@ -79,7 +80,7 @@ class ComicFilenameParser:
self.metadata["ext"] = ext self.metadata["ext"] = ext
self._unparsed_path = data self._unparsed_path = data
def _clean_dividers(self): def _clean_dividers(self) -> None:
"""Replace non space dividers and clean extra spaces out of string.""" """Replace non space dividers and clean extra spaces out of string."""
data = self._unparsed_path data = self._unparsed_path
@ -120,7 +121,7 @@ class ComicFilenameParser:
parts.append(token) parts.append(token)
self._unparsed_path = TOKEN_DELIMETER.join(parts) self._unparsed_path = TOKEN_DELIMETER.join(parts)
def _parse_items( def _parse_items( # noqa: PLR0913
self, self,
regex: Pattern, regex: Pattern,
require_all: bool = False, require_all: bool = False,
@ -142,21 +143,21 @@ class ComicFilenameParser:
if pop: if pop:
self._parse_items_pop_tokens(regex, first_only) self._parse_items_pop_tokens(regex, first_only)
def _parse_issue(self): def _parse_issue(self) -> None:
"""Parse Issue.""" """Parse Issue."""
self._parse_items(ISSUE_NUMBER_RE) self._parse_items(ISSUE_NUMBER_RE)
if "issue" not in self.metadata: if "issue" not in self.metadata:
self._parse_items(ISSUE_WITH_COUNT_RE) self._parse_items(ISSUE_WITH_COUNT_RE)
self._log("After Issue") self._log("After Issue")
def _parse_volume(self): def _parse_volume(self) -> None:
"""Parse Volume.""" """Parse Volume."""
self._parse_items(VOLUME_RE) self._parse_items(VOLUME_RE)
if "volume" not in self.metadata: if "volume" not in self.metadata:
self._parse_items(VOLUME_WITH_COUNT_RE) self._parse_items(VOLUME_WITH_COUNT_RE)
self._log("After Volume") self._log("After Volume")
def _alpha_month_to_numeric(self): def _alpha_month_to_numeric(self) -> None:
"""Translate alpha_month to numeric month.""" """Translate alpha_month to numeric month."""
if alpha_month := self.metadata.pop("alpha_month", ""): if alpha_month := self.metadata.pop("alpha_month", ""):
alpha_month = alpha_month.capitalize() # type: ignore alpha_month = alpha_month.capitalize() # type: ignore
@ -166,7 +167,7 @@ class ComicFilenameParser:
self.metadata["month"] = month self.metadata["month"] = month
break break
def _parse_dates(self): def _parse_dates(self) -> None:
"""Parse date schemes.""" """Parse date schemes."""
# Discard second month of alpha month ranges. # Discard second month of alpha month ranges.
self._unparsed_path = ALPHA_MONTH_RANGE_RE.sub(r"\1", self._unparsed_path) self._unparsed_path = ALPHA_MONTH_RANGE_RE.sub(r"\1", self._unparsed_path)
@ -192,9 +193,8 @@ class ComicFilenameParser:
self.metadata["volume"] = volume self.metadata["volume"] = volume
self._log("After Date") self._log("After Date")
def _parse_format_and_scan_info(self): def _parse_format_and_scan_info(self) -> None:
# Format & Scan Info """Format & Scan Info."""
#
self._parse_items( self._parse_items(
ORIGINAL_FORMAT_SCAN_INFO_RE, ORIGINAL_FORMAT_SCAN_INFO_RE,
require_all=True, require_all=True,
@ -231,7 +231,7 @@ class ComicFilenameParser:
self._parse_items(ISSUE_BEGIN_RE) self._parse_items(ISSUE_BEGIN_RE)
self._log("After Issue on ends of tokens") self._log("After Issue on ends of tokens")
def _parse_publisher(self): def _parse_publisher(self) -> None:
"""Parse Publisher.""" """Parse Publisher."""
# Pop single tokens so they don't end up titles. # Pop single tokens so they don't end up titles.
self._parse_items(PUBLISHER_UNAMBIGUOUS_TOKEN_RE, first_only=True) self._parse_items(PUBLISHER_UNAMBIGUOUS_TOKEN_RE, first_only=True)
@ -243,15 +243,19 @@ class ComicFilenameParser:
self._parse_items(PUBLISHER_AMBIGUOUS_RE, pop=False, first_only=True) self._parse_items(PUBLISHER_AMBIGUOUS_RE, pop=False, first_only=True)
self._log("After publisher") self._log("After publisher")
def _is_title_in_position(self, value): def _is_at_title_position(self, value: str) -> bool:
"""Does the title come after series and one other token if they exist.""" """Title is in correct position."""
title_index = self.path.find(value) title_index = self.path.find(value)
# Does a series come first. # Titles must come after series but before format and scan_info
if title_index < self.path_index("series"): if (
title_index < self.path_index("series")
or title_index > self.path_index("original_format", maxsize)
or title_index > self.path_index("scan_info", maxsize)
):
return False return False
# If other tokens exist then they much precede the title. # Titles must be after the series and one other token.
title_ok = False title_ok = False
other_tokens_exist = False other_tokens_exist = False
for preceding_key in _TITLE_PRECEDING_KEYS: for preceding_key in _TITLE_PRECEDING_KEYS:
@ -270,7 +274,27 @@ class ComicFilenameParser:
value = value.strip("'").strip() value = value.strip("'").strip()
return value.strip('"').strip() return value.strip('"').strip()
def _parse_series_and_title(self): def _parse_series_and_title_token(
self, remaining_key_index: int, tokens: list[str]
) -> str:
"""Parse one series or title token."""
key = _REMAINING_GROUP_KEYS[remaining_key_index]
if key in self.metadata:
return ""
token = tokens.pop(0)
match = REMAINING_GROUP_RE.search(token)
if not match:
return token
value = match.group()
if key == "title" and not self._is_at_title_position(value):
return token
value = NON_NUMBER_DOT_RE.sub(r"\1 \2", value)
value = self._grouping_operators_strip(value)
if value:
self.metadata[key] = value
return ""
def _parse_series_and_title(self) -> None:
"""Assign series and title.""" """Assign series and title."""
if not self._unparsed_path: if not self._unparsed_path:
return return
@ -279,28 +303,17 @@ class ComicFilenameParser:
unused_tokens = [] unused_tokens = []
tokens = self._unparsed_path.split(TOKEN_DELIMETER) tokens = self._unparsed_path.split(TOKEN_DELIMETER)
while tokens and remaining_key_index < len(_REMAINING_GROUP_KEYS): while tokens and remaining_key_index < len(_REMAINING_GROUP_KEYS):
key = _REMAINING_GROUP_KEYS[remaining_key_index] unused_token = self._parse_series_and_title_token(
if key in self.metadata: remaining_key_index, tokens
continue )
token = tokens.pop(0) if unused_token:
match = REMAINING_GROUP_RE.search(token) unused_tokens.append(unused_token)
if match:
value = match.group()
if key == "title" and not self._is_title_in_position(value):
unused_tokens.append(token)
continue
value = self._grouping_operators_strip(value)
value = NON_NUMBER_DOT_RE.sub(r"\1 \2", value)
self.metadata[key] = value
remaining_key_index += 1 remaining_key_index += 1
else:
unused_tokens.append(token)
self._unparsed_path = " ".join(unused_tokens) if unused_tokens else "" self._unparsed_path = " ".join(unused_tokens) if unused_tokens else ""
self._log("After Series & Title") self._log("After Series & Title")
def _add_remainders(self): def _add_remainders(self) -> None:
"""Add Remainders.""" """Add Remainders."""
remainders = [] remainders = []
for token in self._unparsed_path.split(TOKEN_DELIMETER): for token in self._unparsed_path.split(TOKEN_DELIMETER):
@ -310,7 +323,7 @@ class ComicFilenameParser:
if remainders: if remainders:
self.metadata["remainders"] = tuple(remainders) self.metadata["remainders"] = tuple(remainders)
def parse(self) -> dict[str, Any]: def parse(self) -> dict[str, str | tuple[str, ...]]:
"""Parse the filename with a hierarchy of regexes.""" """Parse the filename with a hierarchy of regexes."""
self._log("Init") self._log("Init")
self._parse_ext() self._parse_ext()
@ -345,7 +358,9 @@ class ComicFilenameParser:
self._path_indexes: dict[str, int] = {} self._path_indexes: dict[str, int] = {}
def comicfn2dict(path: str | Path, verbose: int = 0): def comicfn2dict(
"""Simple API.""" path: str | Path, verbose: int = 0
) -> dict[str, str | tuple[str, ...]]:
"""Simplfily the API."""
parser = ComicFilenameParser(path, verbose=verbose) parser = ComicFilenameParser(path, verbose=verbose)
return parser.parse() return parser.parse()

View File

@ -1,16 +1,8 @@
"""Parsing regexes.""" """Parsing regexes."""
import re from re import IGNORECASE, Pattern, compile
from types import MappingProxyType from types import MappingProxyType
PUBLISHERS_UNAMBIGUOUS: tuple[str, ...] = (
def re_compile(exp, parenthify=False):
"""Compile regex with options."""
if parenthify:
exp = r"\(" + exp + r"\)"
return re.compile(exp, flags=re.IGNORECASE)
PUBLISHERS_UNAMBIGUOUS = (
r"Abrams ComicArts", r"Abrams ComicArts",
r"BOOM! Studios", r"BOOM! Studios",
r"DC(\sComics)?", r"DC(\sComics)?",
@ -26,15 +18,15 @@ PUBLISHERS_UNAMBIGUOUS = (
r"SelfMadeHero", r"SelfMadeHero",
r"Titan Comics", r"Titan Comics",
) )
PUBLISHERS_AMBIGUOUS = ( PUBLISHERS_AMBIGUOUS: tuple[str, ...] = (
r"Marvel", r"(?<!Capt\.\s)(?<!Capt\s)(?<!Captain\s)Marvel",
r"Heavy Metal", r"Heavy Metal",
r"Epic", r"Epic",
r"Image", r"Image",
r"Mirage", r"Mirage",
) )
ORIGINAL_FORMAT_PATTERNS = ( ORIGINAL_FORMAT_PATTERNS: tuple[str, ...] = (
r"Anthology", r"Anthology",
r"(One|1)[-\s]Shot", r"(One|1)[-\s]Shot",
r"Annual", r"Annual",
@ -63,7 +55,7 @@ ORIGINAL_FORMAT_PATTERNS = (
r"Web([-\s]?(Comic|Rip))?", r"Web([-\s]?(Comic|Rip))?",
) )
MONTHS = ( MONTHS: tuple[str, ...] = (
r"Jan(uary)?", r"Jan(uary)?",
r"Feb(ruary)?", r"Feb(ruary)?",
r"Mar(ch)?", r"Mar(ch)?",
@ -78,7 +70,15 @@ MONTHS = (
r"Dec(ember)?", r"Dec(ember)?",
) )
TOKEN_DELIMETER = r"/" TOKEN_DELIMETER: str = r"/"
def re_compile(exp: str, parenthify: bool = False) -> Pattern:
"""Compile regex with options."""
if parenthify:
exp = r"\(" + exp + r"\)"
return compile(exp, flags=IGNORECASE)
# CLEAN # CLEAN
_TOKEN_DIVIDERS_RE = re_compile(r":") _TOKEN_DIVIDERS_RE = re_compile(r":")
@ -87,7 +87,7 @@ _EXTRA_SPACES_RE = re_compile(r"\s\s+")
_LEFT_PAREN_EQUIVALENT_RE = re_compile(r"\[") _LEFT_PAREN_EQUIVALENT_RE = re_compile(r"\[")
_RIGHT_PAREN_EQUIVALENT_RE = re_compile(r"\]") _RIGHT_PAREN_EQUIVALENT_RE = re_compile(r"\]")
_DOUBLE_UNDERSCORE_RE = re_compile(r"__(.*)__") _DOUBLE_UNDERSCORE_RE = re_compile(r"__(.*)__")
REGEX_SUBS: MappingProxyType[re.Pattern, tuple[str, int]] = MappingProxyType( REGEX_SUBS: MappingProxyType[Pattern, tuple[str, int]] = MappingProxyType(
{ {
_DOUBLE_UNDERSCORE_RE: (r"(\1)", 0), _DOUBLE_UNDERSCORE_RE: (r"(\1)", 0),
_TOKEN_DIVIDERS_RE: (TOKEN_DELIMETER, 1), _TOKEN_DIVIDERS_RE: (TOKEN_DELIMETER, 1),
@ -104,7 +104,7 @@ _MONTH_ALPHA_RE_EXP = r"(" + "(?P<alpha_month>" + r"|".join(MONTHS) + r")\.?" r"
_MONTH_NUMERIC_RE_EXP = r"(?P<month>0?\d|1[0-2]?)" _MONTH_NUMERIC_RE_EXP = r"(?P<month>0?\d|1[0-2]?)"
_MONTH_RE_EXP = r"(" + _MONTH_ALPHA_RE_EXP + r"|" + _MONTH_NUMERIC_RE_EXP + r")" _MONTH_RE_EXP = r"(" + _MONTH_ALPHA_RE_EXP + r"|" + _MONTH_NUMERIC_RE_EXP + r")"
_ALPHA_MONTH_RANGE = ( _ALPHA_MONTH_RANGE = (
r"\b" r"\b" # noqa: ISC003
+ r"(" + r"("
+ r"|".join(MONTHS) + r"|".join(MONTHS)
+ r")" + r")"
@ -115,7 +115,7 @@ _ALPHA_MONTH_RANGE = (
+ r")" + r")"
+ r")\b" + r")\b"
) )
ALPHA_MONTH_RANGE_RE = re_compile(_ALPHA_MONTH_RANGE) ALPHA_MONTH_RANGE_RE: Pattern = re_compile(_ALPHA_MONTH_RANGE)
_DAY_RE_EXP = r"(?P<day>([0-2]?\d|(3)[0-1]))" _DAY_RE_EXP = r"(?P<day>([0-2]?\d|(3)[0-1]))"
_DATE_DELIM = r"[-\s]+" _DATE_DELIM = r"[-\s]+"
@ -144,10 +144,10 @@ _YEAR_FIRST_DATE_RE_EXP = (
+ r"\b\)?)" + r"\b\)?)"
) )
MONTH_FIRST_DATE_RE = re_compile(_MONTH_FIRST_DATE_RE_EXP) MONTH_FIRST_DATE_RE: Pattern = re_compile(_MONTH_FIRST_DATE_RE_EXP)
YEAR_FIRST_DATE_RE = re_compile(_YEAR_FIRST_DATE_RE_EXP) YEAR_FIRST_DATE_RE: Pattern = re_compile(_YEAR_FIRST_DATE_RE_EXP)
YEAR_TOKEN_RE = re_compile(_YEAR_RE_EXP, parenthify=True) YEAR_TOKEN_RE: Pattern = re_compile(_YEAR_RE_EXP, parenthify=True)
YEAR_END_RE = re_compile(_YEAR_RE_EXP + r"\/|$") YEAR_END_RE: Pattern = re_compile(_YEAR_RE_EXP + r"\/|$")
# PAREN GROUPS # PAREN GROUPS
_OF_PATTERNS = r"|".join(ORIGINAL_FORMAT_PATTERNS) _OF_PATTERNS = r"|".join(ORIGINAL_FORMAT_PATTERNS)
@ -156,37 +156,39 @@ _SCAN_INFO_RE_EXP = r"(?P<scan_info>[^()]*)"
_ORIGINAL_FORMAT_SCAN_INFO_RE_EXP = ( _ORIGINAL_FORMAT_SCAN_INFO_RE_EXP = (
_ORIGINAL_FORMAT_RE_EXP + r"\s*[\(:-]" + _SCAN_INFO_RE_EXP # + r")?" _ORIGINAL_FORMAT_RE_EXP + r"\s*[\(:-]" + _SCAN_INFO_RE_EXP # + r")?"
) )
ORIGINAL_FORMAT_SCAN_INFO_RE = re_compile( # Keep this even though comicfn2dict doesn't use it directly
ORIGINAL_FORMAT_RE: Pattern = re_compile(_ORIGINAL_FORMAT_RE_EXP, parenthify=True)
ORIGINAL_FORMAT_SCAN_INFO_RE: Pattern = re_compile(
_ORIGINAL_FORMAT_SCAN_INFO_RE_EXP, parenthify=True _ORIGINAL_FORMAT_SCAN_INFO_RE_EXP, parenthify=True
) )
ORIGINAL_FORMAT_SCAN_INFO_SEPARATE_RE = re_compile( ORIGINAL_FORMAT_SCAN_INFO_SEPARATE_RE: Pattern = re_compile(
r"\(" + _ORIGINAL_FORMAT_RE_EXP + r"\).*\(" + _SCAN_INFO_RE_EXP + r"\)" r"\(" + _ORIGINAL_FORMAT_RE_EXP + r"\).*\(" + _SCAN_INFO_RE_EXP + r"\)"
) )
SCAN_INFO_SECONDARY_RE = re_compile(r"\b(?P<secondary_scan_info>c2c)\b") SCAN_INFO_SECONDARY_RE: Pattern = re_compile(r"\b(?P<secondary_scan_info>c2c)\b")
# ISSUE # ISSUE
_ISSUE_RE_EXP = r"(?P<issue>\w*(½|\d+)[\.\d+]*\w*)" _ISSUE_RE_EXP = r"(?P<issue>\w*(½|\d+)[\.\d+]*\w*)"
_ISSUE_COUNT_RE_EXP = r"\(of\s*(?P<issue_count>\d+)\)" _ISSUE_COUNT_RE_EXP = r"\(of\s*(?P<issue_count>\d+)\)"
ISSUE_NUMBER_RE = re_compile( ISSUE_NUMBER_RE: Pattern = re_compile(
r"(\(?#" + _ISSUE_RE_EXP + r"\)?)" + r"(\W*" + _ISSUE_COUNT_RE_EXP + r")?" r"(\(?#" + _ISSUE_RE_EXP + r"\)?)" + r"(\W*" + _ISSUE_COUNT_RE_EXP + r")?"
) )
ISSUE_WITH_COUNT_RE = re_compile( ISSUE_WITH_COUNT_RE: Pattern = re_compile(
r"(\(?" + _ISSUE_RE_EXP + r"\)?" + r"\W*" + _ISSUE_COUNT_RE_EXP + r")" r"(\(?" + _ISSUE_RE_EXP + r"\)?" + r"\W*" + _ISSUE_COUNT_RE_EXP + r")"
) )
ISSUE_END_RE = re_compile(r"([\/\s]\(?" + _ISSUE_RE_EXP + r"\)?(\/|$))") ISSUE_END_RE: Pattern = re_compile(r"([\/\s]\(?" + _ISSUE_RE_EXP + r"\)?(\/|$))")
ISSUE_BEGIN_RE = re_compile(r"((^|\/)\(?" + _ISSUE_RE_EXP + r"\)?[\/|\s])") ISSUE_BEGIN_RE: Pattern = re_compile(r"((^|\/)\(?" + _ISSUE_RE_EXP + r"\)?[\/|\s])")
# Volume # Volume
_VOLUME_COUNT_RE_EXP = r"\(of\s*(?P<volume_count>\d+)\)" _VOLUME_COUNT_RE_EXP = r"\(of\s*(?P<volume_count>\d+)\)"
VOLUME_RE = re_compile( VOLUME_RE: Pattern = re_compile(
r"(" + r"(?:v(?:ol(?:ume)?)?\.?)\s*(?P<volume>\d+)" r"(" + r"(?:v(?:ol(?:ume)?)?\.?)\s*(?P<volume>\d+)" # noqa: ISC003
r"(\W*" + _VOLUME_COUNT_RE_EXP + r")?" + r")" r"(\W*" + _VOLUME_COUNT_RE_EXP + r")?" + r")"
) )
VOLUME_WITH_COUNT_RE = re_compile( VOLUME_WITH_COUNT_RE: Pattern = re_compile(
r"(\(?" + r"(?P<volume>\d+)" + r"\)?" + r"\W*" + _VOLUME_COUNT_RE_EXP + r")" r"(\(?" + r"(?P<volume>\d+)" + r"\)?" + r"\W*" + _VOLUME_COUNT_RE_EXP + r")"
) )
BOOK_VOLUME_RE = re_compile(r"(?P<title>" + r"book\s*(?P<volume>\d+)" + r")") BOOK_VOLUME_RE: Pattern = re_compile(r"(?P<title>" + r"book\s*(?P<volume>\d+)" + r")")
# Publisher # Publisher
_PUBLISHER_UNAMBIGUOUS_RE_EXP = ( _PUBLISHER_UNAMBIGUOUS_RE_EXP = (
@ -195,15 +197,15 @@ _PUBLISHER_UNAMBIGUOUS_RE_EXP = (
_PUBLISHER_AMBIGUOUS_RE_EXP = ( _PUBLISHER_AMBIGUOUS_RE_EXP = (
r"(\b(?P<publisher>" + r"|".join(PUBLISHERS_AMBIGUOUS) + r")\b)" r"(\b(?P<publisher>" + r"|".join(PUBLISHERS_AMBIGUOUS) + r")\b)"
) )
PUBLISHER_UNAMBIGUOUS_TOKEN_RE = re_compile( PUBLISHER_UNAMBIGUOUS_TOKEN_RE: Pattern = re_compile(
r"(^|\/)" + _PUBLISHER_UNAMBIGUOUS_RE_EXP + r"($|\/)" r"(^|\/)" + _PUBLISHER_UNAMBIGUOUS_RE_EXP + r"($|\/)"
) )
PUBLISHER_AMBIGUOUS_TOKEN_RE = re_compile( PUBLISHER_AMBIGUOUS_TOKEN_RE: Pattern = re_compile(
r"(^|\/)" + _PUBLISHER_AMBIGUOUS_RE_EXP + r"($|\/)" r"(^|\/)" + _PUBLISHER_AMBIGUOUS_RE_EXP + r"($|\/)"
) )
PUBLISHER_UNAMBIGUOUS_RE = re_compile(_PUBLISHER_UNAMBIGUOUS_RE_EXP) PUBLISHER_UNAMBIGUOUS_RE: Pattern = re_compile(_PUBLISHER_UNAMBIGUOUS_RE_EXP)
PUBLISHER_AMBIGUOUS_RE = re_compile(_PUBLISHER_AMBIGUOUS_RE_EXP) PUBLISHER_AMBIGUOUS_RE = re_compile(_PUBLISHER_AMBIGUOUS_RE_EXP)
# LONG STRINGS # LONG STRINGS
REMAINING_GROUP_RE = re_compile(r"^[^\(].*[^\)]") REMAINING_GROUP_RE: Pattern = re_compile(r"^[^\(].*[^\)]")
NON_NUMBER_DOT_RE = re_compile(r"(\D)\.(\D)") NON_NUMBER_DOT_RE: Pattern = re_compile(r"(\D)\.(\D)")

View File

@ -1,8 +1,11 @@
"""Unparse comic filenames.""" """Unparse comic filenames."""
from __future__ import annotations
from calendar import month_abbr
from collections.abc import Callable, Mapping, Sequence from collections.abc import Callable, Mapping, Sequence
from contextlib import suppress from contextlib import suppress
from calendar import month_abbr
from types import MappingProxyType from types import MappingProxyType
from comicfn2dict.log import print_log_header from comicfn2dict.log import print_log_header
@ -39,12 +42,12 @@ _DATE_KEYS = ("year", "month", "day")
class ComicFilenameSerializer: class ComicFilenameSerializer:
"""Serialize Comic Filenames from dict.""" """Serialize Comic Filenames from dict."""
def _log(self, label, fn): def _log(self, label: str, fn: str) -> None:
"""Log progress.""" """Log progress."""
if not self._debug: if not self._debug:
return return
print_log_header(label) print_log_header(label)
print(fn) print(fn) # noqa: T201
def _add_date(self) -> None: def _add_date(self) -> None:
"""Construct date from Y-m-D if they exist.""" """Construct date from Y-m-D if they exist."""
@ -62,6 +65,7 @@ class ComicFilenameSerializer:
# noop if only day. # noop if only day.
break break
if parts: if parts:
parts = (str(part) for part in parts)
date = "-".join(parts) date = "-".join(parts)
self._log("After date", date) self._log("After date", date)
self.metadata = MappingProxyType({**self.metadata, "date": date}) self.metadata = MappingProxyType({**self.metadata, "date": date})
@ -72,13 +76,13 @@ class ComicFilenameSerializer:
if val in _EMPTY_VALUES: if val in _EMPTY_VALUES:
return "" return ""
final_fmt = fmt(val) if isinstance(fmt, Callable) else fmt final_fmt = fmt(val) if isinstance(fmt, Callable) else fmt
token = final_fmt.format(val).strip() return final_fmt.format(val).strip()
return token
def _add_remainder(self) -> str: def _add_remainder(self) -> str:
"""Add the remainders specially.""" """Add the remainders specially."""
if remainders := self.metadata.get("remainders"): if remainders := self.metadata.get("remainders"):
if isinstance(remainders, Sequence): if isinstance(remainders, Sequence):
remainders = (str(remainder) for remainder in remainders)
remainder = " ".join(remainders) remainder = " ".join(remainders)
else: else:
remainder = str(remainders) remainder = str(remainders)
@ -93,7 +97,7 @@ class ComicFilenameSerializer:
for tag, fmt in _FILENAME_FORMAT_TAGS: for tag, fmt in _FILENAME_FORMAT_TAGS:
if token := self._tokenize_tag(tag, fmt): if token := self._tokenize_tag(tag, fmt):
tokens.append(token) tokens.append(token)
self._log(f"After {tag}", tokens) self._log(f"After {tag}", str(tokens))
fn = " ".join(tokens) fn = " ".join(tokens)
fn += self._add_remainder() fn += self._add_remainder()
@ -107,12 +111,13 @@ class ComicFilenameSerializer:
return fn return fn
def __init__(self, metadata: Mapping, ext: bool = True, verbose: int = 0): def __init__(self, metadata: Mapping, ext: bool = True, verbose: int = 0):
"""Initialize."""
self.metadata: Mapping = metadata self.metadata: Mapping = metadata
self._ext: bool = ext self._ext: bool = ext
self._debug: bool = bool(verbose) self._debug: bool = bool(verbose)
def dict2comicfn(md: Mapping, ext: bool = True, verbose: int = 0) -> str: def dict2comicfn(md: Mapping, ext: bool = True, verbose: int = 0) -> str:
"""Simple API.""" """Simplify API."""
serializer = ComicFilenameSerializer(md, ext=ext, verbose=verbose) serializer = ComicFilenameSerializer(md, ext=ext, verbose=verbose)
return serializer.serialize() return serializer.serialize()

83
package-lock.json generated
View File

@ -303,9 +303,9 @@
} }
}, },
"node_modules/@babel/parser": { "node_modules/@babel/parser": {
"version": "7.23.9", "version": "7.24.0",
"resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.9.tgz", "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.24.0.tgz",
"integrity": "sha512-9tcKgqKbs3xGJ+NtKF2ndOBBLVwPjl1SHxPQkd36r3Dlirw3xWUeGaTbqr7uGZcTaxkVNwc+03SVP7aCdWrTlA==", "integrity": "sha512-QuP/FxEAzMSjXygs8v4N9dvdXzEHN4W1oF3PxuWAtPo08UdM17u89RDMgjLn/mlc56iM0HlLmVkO/wgR+rDgHg==",
"dev": true, "dev": true,
"bin": { "bin": {
"parser": "bin/babel-parser.js" "parser": "bin/babel-parser.js"
@ -315,23 +315,23 @@
} }
}, },
"node_modules/@babel/template": { "node_modules/@babel/template": {
"version": "7.23.9", "version": "7.24.0",
"resolved": "https://registry.npmjs.org/@babel/template/-/template-7.23.9.tgz", "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.24.0.tgz",
"integrity": "sha512-+xrD2BWLpvHKNmX2QbpdpsBaWnRxahMwJjO+KZk2JOElj5nSmKezyS1B4u+QbHMTX69t4ukm6hh9lsYQ7GHCKA==", "integrity": "sha512-Bkf2q8lMB0AFpX0NFEqSbx1OkTHf0f+0j82mkw+ZpzBnkk7e9Ql0891vlfgi+kHwOk8tQjiQHpqh4LaSa0fKEA==",
"dev": true, "dev": true,
"dependencies": { "dependencies": {
"@babel/code-frame": "^7.23.5", "@babel/code-frame": "^7.23.5",
"@babel/parser": "^7.23.9", "@babel/parser": "^7.24.0",
"@babel/types": "^7.23.9" "@babel/types": "^7.24.0"
}, },
"engines": { "engines": {
"node": ">=6.9.0" "node": ">=6.9.0"
} }
}, },
"node_modules/@babel/traverse": { "node_modules/@babel/traverse": {
"version": "7.23.9", "version": "7.24.0",
"resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.9.tgz", "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.24.0.tgz",
"integrity": "sha512-I/4UJ9vs90OkBtY6iiiTORVMyIhJ4kAVmsKo9KFc8UOxMeUfi2hvtIBsET5u9GizXE6/GFSuKCTNfgCswuEjRg==", "integrity": "sha512-HfuJlI8qq3dEDmNU5ChzzpZRWq+oxCZQyMzIMEqLho+AQnhMnKQUzH6ydo3RBl/YjPCuk68Y6s0Gx0AeyULiWw==",
"dev": true, "dev": true,
"dependencies": { "dependencies": {
"@babel/code-frame": "^7.23.5", "@babel/code-frame": "^7.23.5",
@ -340,8 +340,8 @@
"@babel/helper-function-name": "^7.23.0", "@babel/helper-function-name": "^7.23.0",
"@babel/helper-hoist-variables": "^7.22.5", "@babel/helper-hoist-variables": "^7.22.5",
"@babel/helper-split-export-declaration": "^7.22.6", "@babel/helper-split-export-declaration": "^7.22.6",
"@babel/parser": "^7.23.9", "@babel/parser": "^7.24.0",
"@babel/types": "^7.23.9", "@babel/types": "^7.24.0",
"debug": "^4.3.1", "debug": "^4.3.1",
"globals": "^11.1.0" "globals": "^11.1.0"
}, },
@ -359,9 +359,9 @@
} }
}, },
"node_modules/@babel/types": { "node_modules/@babel/types": {
"version": "7.23.9", "version": "7.24.0",
"resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.9.tgz", "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.24.0.tgz",
"integrity": "sha512-dQjSq/7HaSjRM43FFGnv5keM2HsxpmyV1PfaSVm0nzzjwwTmjOe6J4bC8e3+pTEIgHaHj+1ZlLThRJ2auc/w1Q==", "integrity": "sha512-+j7a5c253RfKh8iABBhywc8NSfP5LURe7Uh4qpsh6jc+aLJguvmIUBdjSdEMQv2bENrCR5MfRdjGo7vzS/ob7w==",
"dev": true, "dev": true,
"dependencies": { "dependencies": {
"@babel/helper-string-parser": "^7.23.4", "@babel/helper-string-parser": "^7.23.4",
@ -590,9 +590,9 @@
} }
}, },
"node_modules/@jridgewell/gen-mapping": { "node_modules/@jridgewell/gen-mapping": {
"version": "0.3.3", "version": "0.3.4",
"resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.4.tgz",
"integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==", "integrity": "sha512-Oud2QPM5dHviZNn4y/WhhYKSXksv+1xLEIsNrAbGcFzUN3ubqWRFT5gwPchNc5NuzILOU4tPBDTZ4VwhL8Y7cw==",
"dev": true, "dev": true,
"dependencies": { "dependencies": {
"@jridgewell/set-array": "^1.0.1", "@jridgewell/set-array": "^1.0.1",
@ -628,9 +628,9 @@
"dev": true "dev": true
}, },
"node_modules/@jridgewell/trace-mapping": { "node_modules/@jridgewell/trace-mapping": {
"version": "0.3.22", "version": "0.3.23",
"resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.22.tgz", "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.23.tgz",
"integrity": "sha512-Wf963MzWtA2sjrNt+g18IAln9lKnlRp+K2eH4jjIoF1wYeq3aMREpG09xhlhdzS0EjwU7qmUJYangWa+151vZw==", "integrity": "sha512-9/4foRoUKp8s96tSkh8DlAAc5A0Ty8vLXld+l9gjKKY6ckwI8G15f0hskGmuLZu78ZlGa1vtsfOa+lnB4vG6Jg==",
"dev": true, "dev": true,
"dependencies": { "dependencies": {
"@jridgewell/resolve-uri": "^3.1.0", "@jridgewell/resolve-uri": "^3.1.0",
@ -673,9 +673,9 @@
} }
}, },
"node_modules/@npmcli/config": { "node_modules/@npmcli/config": {
"version": "8.1.0", "version": "8.2.0",
"resolved": "https://registry.npmjs.org/@npmcli/config/-/config-8.1.0.tgz", "resolved": "https://registry.npmjs.org/@npmcli/config/-/config-8.2.0.tgz",
"integrity": "sha512-61LNEybTFaa9Z/f8y6X9s2Blc75aijZK67LxqC5xicBcfkw8M/88nYrRXGXxAUKm6GRlxTZ216dp1UK2+TbaYw==", "integrity": "sha512-YoEYZFg0hRSRP/Chmq+J4FvULFvji6SORUYWQc10FiJ+ReAnViXcDCENg6kM6dID04bAoKNUygrby798+gYBbQ==",
"dev": true, "dev": true,
"dependencies": { "dependencies": {
"@npmcli/map-workspaces": "^3.0.2", "@npmcli/map-workspaces": "^3.0.2",
@ -856,9 +856,9 @@
"dev": true "dev": true
}, },
"node_modules/@types/node": { "node_modules/@types/node": {
"version": "20.11.20", "version": "20.11.22",
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.20.tgz", "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.22.tgz",
"integrity": "sha512-7/rR21OS+fq8IyHTgtLkDK949uzsa6n8BkziAKtPVpugIkO6D+/ooXMvzXxDnZrmtXVfjb1bKQafYpb8s89LOg==", "integrity": "sha512-/G+IxWxma6V3E+pqK1tSl2Fo1kl41pK1yeCyDsgkF9WlVAme4j5ISYM2zR11bgLFJGLN5sVK40T4RJNuiZbEjA==",
"dev": true, "dev": true,
"dependencies": { "dependencies": {
"undici-types": "~5.26.4" "undici-types": "~5.26.4"
@ -1364,9 +1364,9 @@
} }
}, },
"node_modules/caniuse-lite": { "node_modules/caniuse-lite": {
"version": "1.0.30001589", "version": "1.0.30001591",
"resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001589.tgz", "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001591.tgz",
"integrity": "sha512-vNQWS6kI+q6sBlHbh71IIeC+sRwK2N3EDySc/updIGhIee2x5z00J4c1242/5/d6EpEMdOnk/m+6tuk4/tcsqg==", "integrity": "sha512-PCzRMei/vXjJyL5mJtzNiUCKP59dm8Apqc3PH8gJkMnMXZGox93RbE76jHsmLwmIo6/3nsYIpJtx0O7u5PqFuQ==",
"dev": true, "dev": true,
"funding": [ "funding": [
{ {
@ -1780,9 +1780,9 @@
"dev": true "dev": true
}, },
"node_modules/electron-to-chromium": { "node_modules/electron-to-chromium": {
"version": "1.4.681", "version": "1.4.686",
"resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.681.tgz", "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.686.tgz",
"integrity": "sha512-1PpuqJUFWoXZ1E54m8bsLPVYwIVCRzvaL+n5cjigGga4z854abDnFRc+cTa2th4S79kyGqya/1xoR7h+Y5G5lg==", "integrity": "sha512-3avY1B+vUzNxEgkBDpKOP8WarvUAEwpRaiCL0He5OKWEFxzaOFiq4WoZEZe7qh0ReS7DiWoHMnYoQCKxNZNzSg==",
"dev": true "dev": true
}, },
"node_modules/emoji-regex": { "node_modules/emoji-regex": {
@ -2061,9 +2061,9 @@
} }
}, },
"node_modules/eslint-module-utils": { "node_modules/eslint-module-utils": {
"version": "2.8.0", "version": "2.8.1",
"resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.8.0.tgz", "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.8.1.tgz",
"integrity": "sha512-aWajIYfsqCKRDgUfjEXNN/JlrzauMuSEy5sbd7WXbtW3EH6A6MpwEh42c7qD+MqQo9QMJ6fWLAeIJynx0g6OAw==", "integrity": "sha512-rXDXR3h7cs7dy9RNpUlQf80nX31XWJEyGq1tRMo+6GsO5VmTe4UTwtmonAD4ZkAsrfMVDA2wlGJ3790Ys+D49Q==",
"dev": true, "dev": true,
"dependencies": { "dependencies": {
"debug": "^3.2.7" "debug": "^3.2.7"
@ -12663,10 +12663,13 @@
"dev": true "dev": true
}, },
"node_modules/yaml": { "node_modules/yaml": {
"version": "2.3.4", "version": "2.4.0",
"resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.4.tgz", "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.4.0.tgz",
"integrity": "sha512-8aAvwVUSHpfEqTQ4w/KMlf3HcRdt50E5ODIQJBw1fQ5RL34xabzxtUlzTXVqc4rkZsPbvrXKWnABCD7kWSmocA==", "integrity": "sha512-j9iR8g+/t0lArF4V6NE/QCfT+CO7iLqrXAHZbJdo+LfjqP1vR8Fg5bSiaq6Q2lOD1AUEVrEVIgABvBFYojJVYQ==",
"dev": true, "dev": true,
"bin": {
"yaml": "bin.mjs"
},
"engines": { "engines": {
"node": ">= 14" "node": ">= 14"
} }

10
poetry.lock generated
View File

@ -539,13 +539,13 @@ dev = ["twine (>=3.4.1)"]
[[package]] [[package]]
name = "pytest" name = "pytest"
version = "8.0.1" version = "8.0.2"
description = "pytest: simple powerful testing with Python" description = "pytest: simple powerful testing with Python"
optional = false optional = false
python-versions = ">=3.8" python-versions = ">=3.8"
files = [ files = [
{ file = "pytest-8.0.1-py3-none-any.whl", hash = "sha256:3e4f16fe1c0a9dc9d9389161c127c3edc5d810c38d6793042fb81d9f48a59fca" }, { file = "pytest-8.0.2-py3-none-any.whl", hash = "sha256:edfaaef32ce5172d5466b5127b42e0d6d35ebbe4453f0e3505d96afd93f6b096" },
{ file = "pytest-8.0.1.tar.gz", hash = "sha256:267f6563751877d772019b13aacbe4e860d73fe8f651f28112e9ac37de7513ae" }, { file = "pytest-8.0.2.tar.gz", hash = "sha256:d4051d623a2e0b7e51960ba963193b09ce6daeb9759a451844a21e4ddedfc1bd" },
] ]
[package.dependencies] [package.dependencies]
@ -963,5 +963,5 @@ test = ["pytest (>=6.0.0)", "setuptools (>=65)"]
[metadata] [metadata]
lock-version = "2.0" lock-version = "2.0"
python-versions = "^3.10" python-versions = "^3.9"
content-hash = "ad7bc225fd2048867bce6d5b96c739554d4b7a16bd035a60e4d7d2d82ecd7811" content-hash = "39af5e6f01d257e457a710d8b126cbc467e520d7e2ad5942d3610fb503d5ce3a"

View File

@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry] [tool.poetry]
name = "comicfn2dict" name = "comicfn2dict"
version = "0.2.0a0" version = "0.2.0"
description = "Parse common comic filenames and return a dict of metadata attributes. Includes a cli." description = "Parse common comic filenames and return a dict of metadata attributes. Includes a cli."
license = "GPL-3.0-only" license = "GPL-3.0-only"
authors = ["AJ Slater <aj@slater.net>"] authors = ["AJ Slater <aj@slater.net>"]
@ -28,7 +28,7 @@ exclude = ["*/**/*~"]
include = [] include = []
[tool.poetry.dependencies] [tool.poetry.dependencies]
python = "^3.10" python = "^3.9"
[tool.poetry.group.dev.dependencies] [tool.poetry.group.dev.dependencies]
neovim = "^0.3.1" neovim = "^0.3.1"
@ -125,7 +125,7 @@ exclude = "*~,.git/*,.mypy_cache/*,.pytest_cache/*,.venv*,__pycache__/*,cache/*,
extend-exclude = ["typings"] extend-exclude = ["typings"]
target-version = "py310" target-version = "py310"
[tool.lint.ruff] [tool.ruff.lint]
extend-ignore = [ extend-ignore = [
"S101", "S101",
"D203", "D203",

View File

@ -2,7 +2,6 @@
from types import MappingProxyType from types import MappingProxyType
TEST_COMIC_FIELDS = { TEST_COMIC_FIELDS = {
"series": "Long Series Name", "series": "Long Series Name",
"issue": "001", "issue": "001",
@ -56,11 +55,6 @@ FNS = {
"Long Series Name #001 (2000) Title (TPB) (Releaser).cbz": TEST_COMIC_FIELDS, "Long Series Name #001 (2000) Title (TPB) (Releaser).cbz": TEST_COMIC_FIELDS,
"Long Series Name (2000) 001 Title (TPB) (Releaser).cbz": TEST_COMIC_FIELDS, "Long Series Name (2000) 001 Title (TPB) (Releaser).cbz": TEST_COMIC_FIELDS,
"Long Series Name (2000) #001 Title (TPB) (Releaser).cbz": TEST_COMIC_FIELDS, "Long Series Name (2000) #001 Title (TPB) (Releaser).cbz": TEST_COMIC_FIELDS,
"Long Series Name v1 (2000) #001 "
"Title (TPB) (Releaser).cbz": TEST_COMIC_FIELDS_VOL,
"Long Series Name 001 (2000) (TPB-Releaser) Title.cbz": TEST_COMIC_FIELDS,
"Long Series Name Vol 1 "
"(2000) (TPB) (Releaser & Releaser-Releaser) Title.cbr": TEST_COMIC_VOL_ONLY,
"Ultimate Craziness (2019) (Digital) (Friends-of-Bill).cbr": { "Ultimate Craziness (2019) (Digital) (Friends-of-Bill).cbr": {
"series": "Ultimate Craziness", "series": "Ultimate Craziness",
"year": "2019", "year": "2019",
@ -443,6 +437,41 @@ FNS.update(
"restored) (Shadowcat-Empire)", "restored) (Shadowcat-Empire)",
), ),
}, },
"Captain Science #001 (1950) The Beginning - nothing.cbz": {
"ext": "cbz",
"issue": "001",
"title": "The Beginning - nothing",
"series": "Captain Science",
"year": "1950",
},
"Captain Science #001-cix-cbi.cbr": {
"ext": "cbr",
"issue": "001",
"series": "Captain Science",
"title": "cix-cbi",
},
"Long Series Name v1 (2000) #001 "
"Title (TPB) (Releaser).cbz": TEST_COMIC_FIELDS_VOL,
"Long Series Name 001 (2000) (TPB-Releaser) Title.cbz": {
"series": "Long Series Name",
"issue": "001",
"year": "2000",
"original_format": "TPB",
"scan_info": "Releaser",
"remainders": ("Title",),
"ext": "cbz",
},
"Long Series Name Vol 1 "
"(2000) (TPB) (Releaser & Releaser-Releaser) Title.cbr": {
"series": "Long Series Name",
"volume": "1",
"issue": "1",
"remainders": ("Title",),
"original_format": "TPB",
"year": "2000",
"scan_info": "Releaser & Releaser-Releaser",
"ext": "cbr",
},
} }
) )