Merge pull request #4 from ajslater/develop

v0.2.0

AJ Slater authored 2024-02-28 13:13:33 -08:00, committed by GitHub
commit 624b64d6ca
GPG Key ID: B5690EEEBB952194
26 changed files with 3566 additions and 5457 deletions

.circleci/config.yml (new file, 60 lines)

@@ -0,0 +1,60 @@
jobs:
build:
machine:
image: ubuntu-2204:current
environment:
DOCKER_CLI_EXPERIMENTAL: enabled
DOCKER_BUILDKIT: 1
steps:
- checkout
- run:
command: docker compose build comicfn2dict-builder
name: Build Builder
- run:
command: ./bin/docker-compose-exit.sh comicfn2dict-lint
name: comicfn2dict Lint
- run:
command: ./bin/docker-compose-exit.sh comicfn2dict-test
name: comicfn2dict Test
- store_test_results:
path: test-results/pytest
- store_artifacts:
path: test-results/coverage
- run:
command: ./bin/docker-compose-exit.sh comicfn2dict-build
name: Build comicfn2dict Dist
- persist_to_workspace:
paths:
- ./README.md
- ./bin
- ./dist
- ./pyproject.toml
root: .
deploy:
docker:
- image: cimg/python:3.12.1
steps:
- attach_workspace:
at: .
- run:
command: ./bin/publish-pypi.sh
version: 2.1
workflows:
main:
jobs:
- build:
filters:
branches:
only:
- develop
- pre-release
- main
- deploy:
filters:
branches:
only:
- pre-release
- main
requires:
- build
version: 2.1
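These jobs can be reproduced locally with the same compose services; for example, `./bin/docker-compose-exit.sh comicfn2dict-lint` runs the lint service exactly as the CI step above does (see bin/docker-compose-exit.sh and docker-compose.yaml below).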

.eslintignore (new file, 13 lines)

@@ -0,0 +1,13 @@
!.circleci
**/__pycache__
*test-results*
*~
.git
.mypy_cache
.pytest_cache
.ruff_cache
.venv
dist
node_modules
package-lock.json
typings

.eslintrc.cjs (deleted file, 101 lines)

@@ -1,101 +0,0 @@
module.exports = {
root: true,
env: {
browser: true,
es2022: true,
node: true,
},
extends: [
"eslint:recommended",
// LANGS
"plugin:json/recommended",
"plugin:mdx/recommended",
"plugin:yaml/recommended",
// CODE QUALITY
"plugin:sonarjs/recommended",
"plugin:unicorn/all",
// PRACTICES
"plugin:array-func/recommended",
"plugin:eslint-comments/recommended",
"plugin:no-use-extend-native/recommended",
"plugin:optimize-regex/all",
"plugin:promise/recommended",
"plugin:import/recommended",
"plugin:switch-case/recommended",
// PRETTIER
"plugin:prettier/recommended",
"prettier", // prettier-config
// SECURITY
"plugin:no-unsanitized/DOM",
"plugin:security/recommended-legacy",
],
overrides: [
{
files: ["*.md"],
rules: {
"prettier/prettier": ["warn", { parser: "markdown" }],
},
},
],
parserOptions: {
ecmaVersion: "latest",
ecmaFeatures: {
impliedStrict: true,
},
},
plugins: [
"array-func",
"eslint-comments",
"json",
"import",
"no-constructor-bind",
"no-secrets",
"no-unsanitized",
"no-use-extend-native",
"optimize-regex",
"prettier",
"promise",
"simple-import-sort",
"switch-case",
"security",
"sonarjs",
"unicorn",
"yaml",
],
rules: {
"array-func/prefer-array-from": "off", // for modern browsers the spread operator, as preferred by unicorn, works fine.
"max-params": ["warn", 4],
"no-console": process.env.NODE_ENV === "production" ? "warn" : "off",
"no-debugger": process.env.NODE_ENV === "production" ? "warn" : "off",
"no-constructor-bind/no-constructor-bind": "error",
"no-constructor-bind/no-constructor-state": "error",
"no-secrets/no-secrets": "error",
"eslint-comments/no-unused-disable": 1,
"prettier/prettier": "warn",
"security/detect-object-injection": "off",
"simple-import-sort/exports": "warn",
"simple-import-sort/imports": "warn",
"space-before-function-paren": "off",
"switch-case/newline-between-switch-case": "off", // Malfunctioning
"unicorn/switch-case-braces": ["warn", "avoid"],
"unicorn/prefer-node-protocol": 0,
"unicorn/prevent-abbreviations": "off",
"unicorn/filename-case": [
"error",
{ case: "kebabCase", ignore: [".*.md"] },
],
},
ignorePatterns: [
"*~",
"**/__pycache__",
".git",
"!.circleci",
".mypy_cache",
".pytest_cache",
".venv*",
"dist",
"package-lock.json",
"test-results",
"typings",
],
};

Dockerfile (new file, 20 lines)

@@ -0,0 +1,20 @@
FROM python:3.12.1-bookworm
LABEL maintainer="AJ Slater <aj@slater.net>"
COPY debian.sources /etc/apt/sources.list.d/
# hadolint ignore=DL3008
RUN apt-get clean \
&& apt-get update \
&& apt-get install --no-install-recommends -y \
bash \
npm \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app
COPY bin ./bin
COPY package.json package-lock.json pyproject.toml poetry.lock Makefile ./
RUN make install-all
COPY . .
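Note on the layer order above: copying bin and the dependency manifests before the final `COPY . .` lets Docker cache the `make install-all` layer, so source-only changes do not reinstall dependencies.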

Makefile

@@ -1,28 +1,28 @@
.PHONY: install-deps
## Update pip and install poetry
## @category Install
install-deps:
pip install --upgrade pip
pip install --upgrade poetry
npm install
.PHONY: install-prod
## Install for production
## @category Install
install-prod: install-deps
poetry install --no-root --only-root
npm install
.PHONY: install-dev
## Install dev requirements
## @category Install
install-dev: install-deps
poetry install --no-root --only-root --with dev
npm install
.PHONY: install-all
## Install with all extras
## @category Install
install-all: install-deps
poetry install --no-root --all-extras
npm install
.PHONY: clean
## Clean pycaches

NEWS.md (19 lines changed)

@@ -1,5 +1,24 @@
# 📰 comicfn2dict News
## v0.2.0
- Titles are now parsed only if they occur after the series token AND after
either issue, year or volume.
- A more sophisticated date parser.
- Issue numbers that lead with a '#' character may start with alphabetical
characters.
- If volume is parsed, but issue number is not, the issue number is copied from
the volume number.
- ComicFilenameParser and ComicFilenameSerializer classes are available as well
as the old function API.
- New test cases thanks to @lordwelch & @bpepple
- Titles must come after series and one other token, but before format and scan
info.
## v0.1.4
- Require Python 3.10
## v0.1.3
- Fix README
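A minimal sketch of the new class API and the volume-to-issue copy described above (hypothetical filename; assumes comicfn2dict v0.2.0):

```python
from comicfn2dict import ComicFilenameParser, ComicFilenameSerializer

# The class API, new in v0.2.0 (the old function API still works).
metadata = ComicFilenameParser("Comic Series v2 (2024).cbz", verbose=0).parse()

# No issue token was present, so the issue should be copied from the volume:
# expected to include {"volume": "2", "issue": "2", "year": "2024", ...}
print(metadata)

# Serialize back to a preferred filename.
print(ComicFilenameSerializer(metadata).serialize())
```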

README.md

@@ -4,16 +4,30 @@ An API and CLI for extracting structured comic metadata from filenames.
## Install
<!-- eslint-skip -->
```sh
pip install comicfn2dict
```
## API
look at `comicfn2dict/comicfn2dict.py`
<!-- eslint-skip -->
```python
from comicfn2dict import comicfn2dict, dict2comicfn
path = "Comic Series #001 Title (2024).cbz"
metadata: dict[str, str | tuple[str, ...]] = comicfn2dict(path, verbose=0)
filename: str = dict2comicfn(metadata, ext=True, verbose=0)
```
## CLI
<!-- eslint-skip -->
```sh
comicfn2dict "Series Name #01 - Title (2023).cbz"
{'ext': 'cbz',

bin/docker-compose-exit.sh (new executable file, 6 lines)

@@ -0,0 +1,6 @@
#!/bin/bash
# Run a docker compose service and return its exit code
set -euo pipefail
SERVICE=$1
# docker compose up --exit-code-from returns the named service's exit status
docker compose up --exit-code-from "$SERVICE" "$SERVICE"
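Usage, as in the CI config above: `./bin/docker-compose-exit.sh comicfn2dict-test`. Because `--exit-code-from` makes `docker compose up` exit with the named service's status, the CI step fails when lint or tests fail.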

bin/publish-pypi.sh (new executable file, 7 lines)

@@ -0,0 +1,7 @@
#!/bin/bash
# Publish the created package
set -euo pipefail
cd "$(dirname "$0")/.."
pip3 install --upgrade pip
pip3 install --upgrade poetry
poetry publish -u "$PYPI_USER" -p "$PYPI_PASS"
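Note: `PYPI_USER` and `PYPI_PASS` must already be in the environment; in this setup they would presumably be supplied by the CircleCI deploy job's context.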

comicfn2dict/__init__.py

@@ -1,3 +1,3 @@
"""Comic Filename to Dict parser and unparser."""
from .parse import comicfn2dict # noqa: F401
from .unparse import dict2comicfn # noqa: F401
from .parse import ComicFilenameParser, comicfn2dict # noqa: F401
from .unparse import ComicFilenameSerializer, dict2comicfn # noqa: F401

comicfn2dict/cli.py

@@ -4,17 +4,27 @@ from argparse import ArgumentParser
from pathlib import Path
from pprint import pprint
from comicfn2dict.parse import comicfn2dict
from comicfn2dict.parse import ComicFilenameParser
def main():
def main() -> None:
"""Test parser."""
description = "Comic book archive read/write tool."
parser = ArgumentParser(description=description)
parser.add_argument("path", help="Path of comic filename to parse", type=Path)
parser.add_argument(
"-v",
"--verbose",
default=0,
action="count",
help="Display intermediate parsing steps. Good for debugging.",
)
args = parser.parse_args()
name = args.path.name
metadata = comicfn2dict(name)
cfnparser = ComicFilenameParser(name, verbose=args.verbose)
metadata = cfnparser.parse()
if args.verbose:
print("=" * 80) # noqa:T201
pprint(metadata) # noqa:T203

comicfn2dict/comicfn2dict.py (deleted file, 3 lines)

@@ -1,3 +0,0 @@
"""API import source."""
from comicfn2dict.parse import comicfn2dict # noqa: F401
from comicfn2dict.unparse import dict2comicfn # noqa: F401

comicfn2dict/log.py (new file, 9 lines)

@@ -0,0 +1,9 @@
"""Print log header."""
def print_log_header(label: str) -> None:
"""Print log header."""
prefix = "-" * 3 + label
suffix_len = 80 - len(prefix)
suffix = "-" * suffix_len
print(prefix + suffix) # noqa: T201
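For illustration, a sketch of the header this helper prints:

```python
from comicfn2dict.log import print_log_header

print_log_header("After Issue")
# prints "---After Issue" followed by dashes out to 80 columns
```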

comicfn2dict/parse.py

@@ -1,226 +1,364 @@
"""Parse comic book archive names using the simple 'parse' parser."""
import re
from calendar import month_abbr
from copy import copy
from pathlib import Path
from typing import Union
from pprint import pformat
from re import Match, Pattern
from sys import maxsize
from comicfn2dict.log import print_log_header
from comicfn2dict.regex import (
DASH_SPLIT_RE,
EXTRA_SPACES_RE,
ISSUE_ANYWHERE_RE,
ALPHA_MONTH_RANGE_RE,
BOOK_VOLUME_RE,
ISSUE_BEGIN_RE,
ISSUE_COUNT_RE,
ISSUE_END_RE,
ISSUE_NUMBER_RE,
ISSUE_TOKEN_RE,
NON_SPACE_DIVIDER_RE,
ORIGINAL_FORMAT_RE,
ISSUE_WITH_COUNT_RE,
MONTH_FIRST_DATE_RE,
NON_NUMBER_DOT_RE,
ORIGINAL_FORMAT_SCAN_INFO_RE,
ORIGINAL_FORMAT_SCAN_INFO_SEPARATE_RE,
PUBLISHER_AMBIGUOUS_RE,
PUBLISHER_AMBIGUOUS_TOKEN_RE,
PUBLISHER_UNAMBIGUOUS_RE,
PUBLISHER_UNAMBIGUOUS_TOKEN_RE,
REGEX_SUBS,
REMAINING_GROUP_RE,
SCAN_INFO_RE,
SCAN_INFO_SECONDARY_RE,
TOKEN_DELIMETER,
VOLUME_RE,
YEAR_BEGIN_RE,
VOLUME_WITH_COUNT_RE,
YEAR_END_RE,
YEAR_FIRST_DATE_RE,
YEAR_TOKEN_RE,
)
_DATE_KEYS = frozenset({"year", "month", "day"})
_REMAINING_GROUP_KEYS = ("series", "title")
# Ordered by commonness.
_TITLE_PRECEDING_KEYS = ("issue", "year", "volume", "month")
def _parse_ext(name, suffix, metadata):
class ComicFilenameParser:
"""Parse a filename metadata into a dict."""
def path_index(self, key: str, default: int = -1) -> int:
"""Lazily retrieve and memoize the key's location in the path."""
if key == "remainders":
return default
value: str = self.metadata.get(key, "") # type: ignore
if not value:
return default
if value not in self._path_indexes:
# XXX This is fragile, but it's difficult to calculate the original
# position at match time from the ever changing _unparsed_path.
index = self.path.rfind(value) if key == "ext" else self.path.find(value)
self._path_indexes[value] = index
return self._path_indexes[value]
def _log(self, label: str) -> None:
if not self._debug:
return
print_log_header(label)
combined = {}
for key in self.metadata:
combined[key] = (self.metadata.get(key), self.path_index(key))
print(" " + self._unparsed_path) # noqa: T201
print(" " + pformat(combined)) # noqa: T201
def _parse_ext(self) -> None:
"""Pop the extension from the pathname."""
data = name.removesuffix(suffix)
path = Path(self._unparsed_path)
suffix = path.suffix
if not suffix:
return
data = path.name.removesuffix(suffix)
ext = suffix.lstrip(".")
if ext:
metadata["ext"] = ext
return data
self.metadata["ext"] = ext
self._unparsed_path = data
def _clean_dividers(data):
def _clean_dividers(self) -> None:
"""Replace non space dividers and clean extra spaces out of string."""
data = NON_SPACE_DIVIDER_RE.sub(" ", data)
return EXTRA_SPACES_RE.sub(" ", data)
data = self._unparsed_path
# Simple substitutions
for regex, pair in REGEX_SUBS.items():
replacement, count = pair
data = regex.sub(replacement, data, count=count).strip()
self._unparsed_path = data.strip()
self._log("After Clean Path")
def _get_data_list(path, metadata):
"""Prepare data list from a path or string."""
def _parse_items_update_metadata(
self, matches: Match, exclude: str, require_all: bool, first_only: bool
) -> bool:
"""Update Metadata."""
matched_metadata = {}
for key, value in matches.groupdict().items():
if value == exclude:
continue
if not value:
if require_all:
return False
continue
matched_metadata[key] = value
if first_only:
break
if not matched_metadata:
return False
self.metadata.update(matched_metadata)
return True
def _parse_items_pop_tokens(self, regex: Pattern, first_only: bool) -> None:
"""Pop tokens from unparsed path."""
count = 1 if first_only else 0
marked_str = regex.sub(TOKEN_DELIMETER, self._unparsed_path, count=count)
parts = []
for part in marked_str.split(TOKEN_DELIMETER):
if token := part.strip():
parts.append(token)
self._unparsed_path = TOKEN_DELIMETER.join(parts)
def _parse_items( # noqa: PLR0913
self,
regex: Pattern,
require_all: bool = False,
exclude: str = "",
first_only: bool = False,
pop: bool = True,
) -> None:
"""Parse a value from the data list into metadata and alter the data list."""
# Match
matches = regex.search(self._unparsed_path)
if not matches:
return
if not self._parse_items_update_metadata(
matches, exclude, require_all, first_only
):
return
if pop:
self._parse_items_pop_tokens(regex, first_only)
def _parse_issue(self) -> None:
"""Parse Issue."""
self._parse_items(ISSUE_NUMBER_RE)
if "issue" not in self.metadata:
self._parse_items(ISSUE_WITH_COUNT_RE)
self._log("After Issue")
def _parse_volume(self) -> None:
"""Parse Volume."""
self._parse_items(VOLUME_RE)
if "volume" not in self.metadata:
self._parse_items(VOLUME_WITH_COUNT_RE)
self._log("After Volume")
def _alpha_month_to_numeric(self) -> None:
"""Translate alpha_month to numeric month."""
if alpha_month := self.metadata.pop("alpha_month", ""):
alpha_month = alpha_month.capitalize() # type: ignore
for index, abbr in enumerate(month_abbr):
if abbr and alpha_month.startswith(abbr):
month = f"{index:02d}"
self.metadata["month"] = month
break
def _parse_dates(self) -> None:
"""Parse date schemes."""
# Discard second month of alpha month ranges.
self._unparsed_path = ALPHA_MONTH_RANGE_RE.sub(r"\1", self._unparsed_path)
# Month first date
self._parse_items(MONTH_FIRST_DATE_RE)
self._alpha_month_to_numeric()
# Year first date
if _DATE_KEYS - self.metadata.keys():
self._parse_items(YEAR_FIRST_DATE_RE)
self._alpha_month_to_numeric()
if "year" not in self.metadata:
self._parse_items(YEAR_TOKEN_RE, first_only=True)
if "volume" in self.metadata:
return
# A second year will be the real year.
# Move the first year to volume
if volume := self.metadata.get("year", ""):
self._parse_items(YEAR_TOKEN_RE)
if self.metadata.get("year", "") != volume:
self.metadata["volume"] = volume
self._log("After Date")
def _parse_format_and_scan_info(self) -> None:
"""Format & Scan Info."""
self._parse_items(
ORIGINAL_FORMAT_SCAN_INFO_RE,
require_all=True,
)
if "original_format" not in self.metadata:
self._parse_items(
ORIGINAL_FORMAT_SCAN_INFO_SEPARATE_RE,
)
self._parse_items(SCAN_INFO_SECONDARY_RE)
if (
scan_info_secondary := self.metadata.pop("secondary_scan_info", "")
) and "scan_info" not in self.metadata:
self.metadata["scan_info"] = scan_info_secondary # type: ignore
self._log("After original_format & scan_info")
def _parse_ends_of_remaining_tokens(self):
# Volume left on the end of string tokens
if "volume" not in self.metadata:
self._parse_items(BOOK_VOLUME_RE)
self._log("After original_format & scan_info")
# Years left on the end of string tokens
year_end_matched = False
if "year" not in self.metadata:
self._parse_items(YEAR_END_RE, pop=False)
year_end_matched = "year" in self.metadata
self._log("After Year on end of token")
# Issue left on the end of string tokens
if "issue" not in self.metadata and not year_end_matched:
exclude: str = self.metadata.get("year", "") # type: ignore
self._parse_items(ISSUE_END_RE, exclude=exclude)
if "issue" not in self.metadata:
self._parse_items(ISSUE_BEGIN_RE)
self._log("After Issue on ends of tokens")
def _parse_publisher(self) -> None:
"""Parse Publisher."""
# Pop single tokens so they don't end up titles.
self._parse_items(PUBLISHER_UNAMBIGUOUS_TOKEN_RE, first_only=True)
if "publisher" not in self.metadata:
self._parse_items(PUBLISHER_AMBIGUOUS_TOKEN_RE, first_only=True)
if "publisher" not in self.metadata:
self._parse_items(PUBLISHER_UNAMBIGUOUS_RE, pop=False, first_only=True)
if "publisher" not in self.metadata:
self._parse_items(PUBLISHER_AMBIGUOUS_RE, pop=False, first_only=True)
self._log("After publisher")
def _is_at_title_position(self, value: str) -> bool:
"""Title is in correct position."""
title_index = self.path.find(value)
# Titles must come after series but before format and scan_info
if (
title_index < self.path_index("series")
or title_index > self.path_index("original_format", maxsize)
or title_index > self.path_index("scan_info", maxsize)
):
return False
# Titles must be after the series and one other token.
title_ok = False
other_tokens_exist = False
for preceding_key in _TITLE_PRECEDING_KEYS:
other_tokens_exist = True
if title_index > self.path_index(preceding_key):
title_ok = True
break
return title_ok or not other_tokens_exist
def _grouping_operators_strip(self, value: str) -> str:
"""Strip spaces and parens."""
value = value.strip()
value = value.strip("()").strip()
value = value.strip("-").strip()
value = value.strip(",").strip()
value = value.strip("'").strip()
return value.strip('"').strip()
def _parse_series_and_title_token(
self, remaining_key_index: int, tokens: list[str]
) -> str:
"""Parse one series or title token."""
key = _REMAINING_GROUP_KEYS[remaining_key_index]
if key in self.metadata:
return ""
token = tokens.pop(0)
match = REMAINING_GROUP_RE.search(token)
if not match:
return token
value = match.group()
if key == "title" and not self._is_at_title_position(value):
return token
value = NON_NUMBER_DOT_RE.sub(r"\1 \2", value)
value = self._grouping_operators_strip(value)
if value:
self.metadata[key] = value
return ""
def _parse_series_and_title(self) -> None:
"""Assign series and title."""
if not self._unparsed_path:
return
remaining_key_index = 0
unused_tokens = []
tokens = self._unparsed_path.split(TOKEN_DELIMETER)
while tokens and remaining_key_index < len(_REMAINING_GROUP_KEYS):
unused_token = self._parse_series_and_title_token(
remaining_key_index, tokens
)
if unused_token:
unused_tokens.append(unused_token)
remaining_key_index += 1
self._unparsed_path = " ".join(unused_tokens) if unused_tokens else ""
self._log("After Series & Title")
def _add_remainders(self) -> None:
"""Add Remainders."""
remainders = []
for token in self._unparsed_path.split(TOKEN_DELIMETER):
if remainder := token.strip():
remainders.append(remainder)
if remainders:
self.metadata["remainders"] = tuple(remainders)
def parse(self) -> dict[str, str | tuple[str, ...]]:
"""Parse the filename with a hierarchy of regexes."""
self._log("Init")
self._parse_ext()
self._clean_dividers()
self._parse_issue()
self._parse_volume()
self._parse_dates()
self._parse_format_and_scan_info()
self._parse_ends_of_remaining_tokens()
self._parse_publisher()
self._parse_series_and_title()
# Copy volume into issue if it's all we have.
if "issue" not in self.metadata and "volume" in self.metadata:
self.metadata["issue"] = self.metadata["volume"]
self._log("After issue can be volume")
self._add_remainders()
return self.metadata
def __init__(self, path: str | Path, verbose: int = 0):
"""Initialize."""
self._debug: bool = verbose > 0
# munge path
if isinstance(path, str):
path = path.strip()
path = Path(path)
data = _parse_ext(path.name, path.suffix, metadata)
data = _clean_dividers(data)
return DASH_SPLIT_RE.split(data)
p_path = Path(path)
self.path = str(p_path.name).strip()
self.metadata: dict[str, str | tuple[str, ...]] = {}
self._unparsed_path = copy(self.path)
self._path_indexes: dict[str, int] = {}
def _paren_strip(value: str):
"""Strip spaces and parens."""
return value.strip().strip("()").strip()
def _splicey_dicey(data_list, index, match, match_group: Union[int, str] = 0):
"""Replace a string token from a list with two strings and the value removed.
And return the value.
"""
value = match.group(match_group)
data = data_list.pop(index)
data_ends = []
if data_before := data[: match.start()].strip():
data_ends.append(data_before)
if data_after := data[match.end() :].strip():
data_ends.append(data_after)
data_list[index:index] = data_ends
return _paren_strip(value)
def _parse_original_format_and_scan_info(data_list, metadata):
"""Parse (ORIGINAL_FORMAT-SCAN_INFO)."""
original_format = None
scan_info = None
index = 0
match = None
for data in data_list:
match = ORIGINAL_FORMAT_SCAN_INFO_RE.search(data)
if match:
original_format = match.group("original_format")
try:
scan_info = match.group("scan_info")
except IndexError:
scan_info = None
break
index += 1
if original_format:
metadata["original_format"] = _paren_strip(original_format)
match_group = 1
if scan_info:
metadata["scan_info"] = _paren_strip(scan_info)
match_group = 0
_splicey_dicey(data_list, index, match, match_group=match_group)
else:
index = 0
return index
def _pop_value_from_token(
data_list: list,
metadata: dict,
regex: re.Pattern,
key: str,
index: int = 0,
):
"""Search token for value, splice and assign to metadata."""
data = data_list[index]
match = regex.search(data)
if match:
value = _splicey_dicey(data_list, index, match, key)
metadata[key] = value
return match
def _parse_item(
data_list,
metadata,
regex,
key,
start_index: int = 0,
):
"""Parse a value from the data list into metadata and alter the data list."""
index = start_index
dl_len = end_index = len(data_list)
if index >= end_index:
index = 0
while index < end_index:
match = _pop_value_from_token(data_list, metadata, regex, key, index)
if match:
break
index += 1
if index > dl_len and start_index > 0:
index = 0
end_index = start_index
return index
def _pop_issue_from_text_fields(data_list, metadata, index):
"""Search issue from ends of text fields."""
if "issue" not in metadata:
_pop_value_from_token(data_list, metadata, ISSUE_END_RE, "issue", index=index)
if "issue" not in metadata:
_pop_value_from_token(data_list, metadata, ISSUE_BEGIN_RE, "issue", index=index)
return data_list.pop(index)
def _assign_remaining_groups(data_list, metadata):
"""Assign series and title."""
index = 0
for key in _REMAINING_GROUP_KEYS:
try:
data = data_list[index]
except (IndexError, TypeError):
break
match = REMAINING_GROUP_RE.search(data) if data else None
if match:
value = _pop_issue_from_text_fields(data_list, metadata, index)
value = _paren_strip(value)
if value:
metadata[key] = value
else:
index += 1
def _pickup_issue(remainders, metadata):
"""Get issue from remaining tokens or anywhere in a pinch."""
if "issue" in metadata:
return
_parse_item(remainders, metadata, ISSUE_TOKEN_RE, "issue")
if "issue" in metadata:
return
_parse_item(remainders, metadata, ISSUE_ANYWHERE_RE, "issue")
def comicfn2dict(path):
"""Parse the filename with a hierarchy of regexes."""
metadata = {}
data_list = _get_data_list(path, metadata)
# Parse paren tokens
_parse_item(data_list, metadata, ISSUE_COUNT_RE, "issue_count")
_parse_item(data_list, metadata, YEAR_TOKEN_RE, "year")
of_index = _parse_original_format_and_scan_info(data_list, metadata)
if "original_format" not in metadata:
of_index = _parse_item(
data_list, metadata, ORIGINAL_FORMAT_RE, "original_format"
)
if "scan_info" not in metadata:
# Start searching for scan_info after original format.
_parse_item(
data_list,
metadata,
SCAN_INFO_RE,
"scan_info",
start_index=of_index + 1,
)
# Parse regular tokens
_parse_item(data_list, metadata, VOLUME_RE, "volume")
_parse_item(data_list, metadata, ISSUE_NUMBER_RE, "issue")
# Pickup year if not gotten.
if "year" not in metadata:
_parse_item(data_list, metadata, YEAR_BEGIN_RE, "year")
if "year" not in metadata:
_parse_item(data_list, metadata, YEAR_END_RE, "year")
# Pickup issue if it's a standalone token
if "issue" not in metadata:
_parse_item(data_list, metadata, ISSUE_TOKEN_RE, "issue")
# Series and Title. Also looks for issue.
_assign_remaining_groups(data_list, metadata)
# Final try for issue number.
_pickup_issue(data_list, metadata)
# Add Remainders
if data_list:
metadata["remainders"] = tuple(data_list)
return metadata
def comicfn2dict(
path: str | Path, verbose: int = 0
) -> dict[str, str | tuple[str, ...]]:
"""Simplfily the API."""
parser = ComicFilenameParser(path, verbose=verbose)
return parser.parse()
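To illustrate the "second year will be the real year" rule in _parse_dates above, a minimal sketch (hypothetical filename; the values are what the code above implies, not verified output):

```python
from comicfn2dict import ComicFilenameParser

# Two bare year tokens: the first is reassigned to volume, the second kept as year.
md = ComicFilenameParser("Series (2020) (2021).cbz").parse()
print(md.get("volume"), md.get("year"))  # expected: 2020 2021
```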

comicfn2dict/regex.py

@@ -1,15 +1,32 @@
"""Parsing regexes."""
import re
from re import IGNORECASE, Pattern, compile
from types import MappingProxyType
PUBLISHERS_UNAMBIGUOUS: tuple[str, ...] = (
r"Abrams ComicArts",
r"BOOM! Studios",
r"DC(\sComics)?",
r"Dark Horse Comics",
r"Drawn & Quarterly",
r"Dynamite Entertainment",
r"IDW Publishing",
r"Icon Comics",
r"Kodansha",
r"Oni Press",
r"Pantheon Books",
r"SLG Publishing",
r"SelfMadeHero",
r"Titan Comics",
)
PUBLISHERS_AMBIGUOUS: tuple[str, ...] = (
r"(?<!Capt\.\s)(?<!Capt\s)(?<!Captain\s)Marvel",
r"Heavy Metal",
r"Epic",
r"Image",
r"Mirage",
)
def re_compile(exp, parenthify=False):
"""Compile regex with options."""
if parenthify:
exp = r"\(" + exp + r"\)"
return re.compile(exp, flags=re.IGNORECASE)
ORIGINAL_FORMAT_PATTERNS = (
ORIGINAL_FORMAT_PATTERNS: tuple[str, ...] = (
r"Anthology",
r"(One|1)[-\s]Shot",
r"Annual",
@@ -35,41 +52,160 @@ ORIGINAL_FORMAT_PATTERNS = (
r"Sketch",
r"TPB",
r"Trade[-\s]Paper[-\s]?Back",
r"Web([-\s]?Comic)?",
r"Web([-\s]?(Comic|Rip))?",
)
MONTHS: tuple[str, ...] = (
r"Jan(uary)?",
r"Feb(ruary)?",
r"Mar(ch)?",
r"Apr(il)?",
r"May",
r"Jun(e)?",
r"Jul(y)?",
r"Aug(ust)?",
r"Sep(tember)?",
r"Oct(ober)?",
r"Nov(ember)?",
r"Dec(ember)?",
)
TOKEN_DELIMETER: str = r"/"
def re_compile(exp: str, parenthify: bool = False) -> Pattern:
"""Compile regex with options."""
if parenthify:
exp = r"\(" + exp + r"\)"
return compile(exp, flags=IGNORECASE)
# CLEAN
NON_SPACE_DIVIDER_RE = re_compile(r"[_\+]")
DASH_SPLIT_RE = re_compile(r"\s-\s")
EXTRA_SPACES_RE = re_compile(r"\s\s+")
_TOKEN_DIVIDERS_RE = re_compile(r":")
_SPACE_EQUIVALENT_RE = re_compile(r"_")
_EXTRA_SPACES_RE = re_compile(r"\s\s+")
_LEFT_PAREN_EQUIVALENT_RE = re_compile(r"\[")
_RIGHT_PAREN_EQUIVALENT_RE = re_compile(r"\]")
_DOUBLE_UNDERSCORE_RE = re_compile(r"__(.*)__")
REGEX_SUBS: MappingProxyType[Pattern, tuple[str, int]] = MappingProxyType(
{
_DOUBLE_UNDERSCORE_RE: (r"(\1)", 0),
_TOKEN_DIVIDERS_RE: (TOKEN_DELIMETER, 1),
_SPACE_EQUIVALENT_RE: (r" ", 0),
_EXTRA_SPACES_RE: (r" ", 0),
_LEFT_PAREN_EQUIVALENT_RE: (r"(", 0),
_RIGHT_PAREN_EQUIVALENT_RE: (r")", 0),
}
)
### DATES
_YEAR_RE_EXP = r"(?P<year>[12]\d{3})"
_MONTH_ALPHA_RE_EXP = r"(" + "(?P<alpha_month>" + r"|".join(MONTHS) + r")\.?" r")"
_MONTH_NUMERIC_RE_EXP = r"(?P<month>0?\d|1[0-2]?)"
_MONTH_RE_EXP = r"(" + _MONTH_ALPHA_RE_EXP + r"|" + _MONTH_NUMERIC_RE_EXP + r")"
_ALPHA_MONTH_RANGE = (
r"\b" # noqa: ISC003
+ r"("
+ r"|".join(MONTHS)
+ r")"
+ r"("
+ r"\.?-"
+ r"("
+ r"|".join(MONTHS)
+ r")"
+ r")\b"
)
ALPHA_MONTH_RANGE_RE: Pattern = re_compile(_ALPHA_MONTH_RANGE)
_DAY_RE_EXP = r"(?P<day>([0-2]?\d|(3)[0-1]))"
_DATE_DELIM = r"[-\s]+"
_MONTH_FIRST_DATE_RE_EXP = (
r"((\b|\(?)"
# Month
+ _MONTH_RE_EXP
# Day
+ r"("
+ _DATE_DELIM
+ _DAY_RE_EXP
+ r")?"
# Year
+ r"[,]?"
+ _DATE_DELIM
+ _YEAR_RE_EXP
+ r"(\)?|\b))"
)
_YEAR_FIRST_DATE_RE_EXP = (
r"(\b\(?"
+ _YEAR_RE_EXP
+ _DATE_DELIM
+ _MONTH_RE_EXP
+ _DATE_DELIM
+ _DAY_RE_EXP
+ r"\b\)?)"
)
MONTH_FIRST_DATE_RE: Pattern = re_compile(_MONTH_FIRST_DATE_RE_EXP)
YEAR_FIRST_DATE_RE: Pattern = re_compile(_YEAR_FIRST_DATE_RE_EXP)
YEAR_TOKEN_RE: Pattern = re_compile(_YEAR_RE_EXP, parenthify=True)
YEAR_END_RE: Pattern = re_compile(_YEAR_RE_EXP + r"\/|$")
# PAREN GROUPS
ISSUE_COUNT_RE = re_compile(r"of\s*(?P<issue_count>\d+)", parenthify=True)
_YEAR_RE_EXP = r"(?P<year>[12]\d{3})"
YEAR_TOKEN_RE = re_compile(_YEAR_RE_EXP, parenthify=True)
YEAR_BEGIN_RE = re_compile(r"^" + _YEAR_RE_EXP + r"\b")
YEAR_END_RE = re_compile(r"\b" + _YEAR_RE_EXP + r"$")
_OF_PATTERNS = r"|".join(ORIGINAL_FORMAT_PATTERNS)
_ORIGINAL_FORMAT_RE_EXP = r"(?P<original_format>" + _OF_PATTERNS + r")"
ORIGINAL_FORMAT_RE = re_compile(_ORIGINAL_FORMAT_RE_EXP, parenthify=True)
_SCAN_INFO_RE_EXP = r"(?P<scan_info>[^()]+?)"
SCAN_INFO_RE = re_compile(_SCAN_INFO_RE_EXP, parenthify=True)
_SCAN_INFO_RE_EXP = r"(?P<scan_info>[^()]*)"
_ORIGINAL_FORMAT_SCAN_INFO_RE_EXP = (
_ORIGINAL_FORMAT_RE_EXP + r"(?:-" + _SCAN_INFO_RE_EXP + r")?"
_ORIGINAL_FORMAT_RE_EXP + r"\s*[\(:-]" + _SCAN_INFO_RE_EXP # + r")?"
)
ORIGINAL_FORMAT_SCAN_INFO_RE = re_compile(
# Keep this even though comicfn2dict doesn't use it directly
ORIGINAL_FORMAT_RE: Pattern = re_compile(_ORIGINAL_FORMAT_RE_EXP, parenthify=True)
ORIGINAL_FORMAT_SCAN_INFO_RE: Pattern = re_compile(
_ORIGINAL_FORMAT_SCAN_INFO_RE_EXP, parenthify=True
)
ORIGINAL_FORMAT_SCAN_INFO_SEPARATE_RE: Pattern = re_compile(
r"\(" + _ORIGINAL_FORMAT_RE_EXP + r"\).*\(" + _SCAN_INFO_RE_EXP + r"\)"
)
# REGULAR TOKENS
VOLUME_RE = re_compile(r"((?:v(?:ol(?:ume)?)?\.?)\s*(?P<volume>\d+))")
_ISSUE_RE_EXP = r"(?P<issue>[\d½]+\.?\d*\w*)"
ISSUE_NUMBER_RE = re_compile(r"(#" + _ISSUE_RE_EXP + r")")
ISSUE_TOKEN_RE = re_compile(r"^(" + _ISSUE_RE_EXP + r")$")
ISSUE_END_RE = re_compile(r"\b(" + _ISSUE_RE_EXP + r")$")
ISSUE_BEGIN_RE = re_compile(r"^(" + _ISSUE_RE_EXP + r")\b")
ISSUE_ANYWHERE_RE = re_compile(r"\b(" + _ISSUE_RE_EXP + r")\b")
SCAN_INFO_SECONDARY_RE: Pattern = re_compile(r"\b(?P<secondary_scan_info>c2c)\b")
# ISSUE
_ISSUE_RE_EXP = r"(?P<issue>\w*(½|\d+)[\.\d+]*\w*)"
_ISSUE_COUNT_RE_EXP = r"\(of\s*(?P<issue_count>\d+)\)"
ISSUE_NUMBER_RE: Pattern = re_compile(
r"(\(?#" + _ISSUE_RE_EXP + r"\)?)" + r"(\W*" + _ISSUE_COUNT_RE_EXP + r")?"
)
ISSUE_WITH_COUNT_RE: Pattern = re_compile(
r"(\(?" + _ISSUE_RE_EXP + r"\)?" + r"\W*" + _ISSUE_COUNT_RE_EXP + r")"
)
ISSUE_END_RE: Pattern = re_compile(r"([\/\s]\(?" + _ISSUE_RE_EXP + r"\)?(\/|$))")
ISSUE_BEGIN_RE: Pattern = re_compile(r"((^|\/)\(?" + _ISSUE_RE_EXP + r"\)?[\/|\s])")
# Volume
_VOLUME_COUNT_RE_EXP = r"\(of\s*(?P<volume_count>\d+)\)"
VOLUME_RE: Pattern = re_compile(
r"(" + r"(?:v(?:ol(?:ume)?)?\.?)\s*(?P<volume>\d+)" # noqa: ISC003
r"(\W*" + _VOLUME_COUNT_RE_EXP + r")?" + r")"
)
VOLUME_WITH_COUNT_RE: Pattern = re_compile(
r"(\(?" + r"(?P<volume>\d+)" + r"\)?" + r"\W*" + _VOLUME_COUNT_RE_EXP + r")"
)
BOOK_VOLUME_RE: Pattern = re_compile(r"(?P<title>" + r"book\s*(?P<volume>\d+)" + r")")
# Publisher
_PUBLISHER_UNAMBIGUOUS_RE_EXP = (
r"(\b(?P<publisher>" + r"|".join(PUBLISHERS_UNAMBIGUOUS) + r")\b)"
)
_PUBLISHER_AMBIGUOUS_RE_EXP = (
r"(\b(?P<publisher>" + r"|".join(PUBLISHERS_AMBIGUOUS) + r")\b)"
)
PUBLISHER_UNAMBIGUOUS_TOKEN_RE: Pattern = re_compile(
r"(^|\/)" + _PUBLISHER_UNAMBIGUOUS_RE_EXP + r"($|\/)"
)
PUBLISHER_AMBIGUOUS_TOKEN_RE: Pattern = re_compile(
r"(^|\/)" + _PUBLISHER_AMBIGUOUS_RE_EXP + r"($|\/)"
)
PUBLISHER_UNAMBIGUOUS_RE: Pattern = re_compile(_PUBLISHER_UNAMBIGUOUS_RE_EXP)
PUBLISHER_AMBIGUOUS_RE = re_compile(_PUBLISHER_AMBIGUOUS_RE_EXP)
# LONG STRINGS
REMAINING_GROUP_RE = re_compile(r"^[\w].*[^\)]")
REMAINING_GROUP_RE: Pattern = re_compile(r"^[^\(].*[^\)]")
NON_NUMBER_DOT_RE: Pattern = re_compile(r"(\D)\.(\D)")
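A small usage sketch of the re_compile helper defined above:

```python
from comicfn2dict.regex import re_compile

# parenthify=True wraps the expression in literal parens before compiling.
year_re = re_compile(r"(?P<year>[12]\d{3})", parenthify=True)
match = year_re.search("Series (2024).cbz")
print(match.group("year") if match else None)  # 2024
```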

comicfn2dict/unparse.py

@@ -1,8 +1,13 @@
"""Unparse comic filenames."""
from typing import Callable
from calendar import month_abbr
from collections.abc import Callable, Mapping, Sequence
from contextlib import suppress
from types import MappingProxyType
from comicfn2dict.log import print_log_header
def issue_formatter(issue):
def issue_formatter(issue: str) -> str:
"""Formatter to zero pad issues."""
i = 0
issue = issue.lstrip("0")
@@ -14,37 +19,103 @@ def issue_formatter(issue):
return "#{:0>" + str(pad) + "}"
_PAREN_FMT = "({})"
_FILENAME_FORMAT_TAGS = (
_PAREN_FMT: str = "({})"
_FILENAME_FORMAT_TAGS: tuple[tuple[str, str | Callable], ...] = (
("series", "{}"),
("volume", "v{}"),
("volume_count", "(of {:03})"),
("issue", issue_formatter),
("issue_count", "(of {:03})"),
("year", _PAREN_FMT),
("date", _PAREN_FMT),
("title", "{}"),
("publisher", _PAREN_FMT),
("original_format", _PAREN_FMT),
("scan_info", _PAREN_FMT),
)
_EMPTY_VALUES = (None, "")
_EMPTY_VALUES: tuple[None, str] = (None, "")
_DEFAULT_EXT = "cbz"
_DATE_KEYS = ("year", "month", "day")
def dict2comicfn(md, ext=True):
class ComicFilenameSerializer:
"""Serialize Comic Filenames from dict."""
def _log(self, label: str, fn: str) -> None:
"""Log progress."""
if not self._debug:
return
print_log_header(label)
print(fn) # noqa: T201
def _add_date(self) -> None:
"""Construct date from Y-m-D if they exist."""
if "date" in self.metadata:
return
parts = []
for key in _DATE_KEYS:
if part := self.metadata.get(key):
if key == "month" and not parts:
with suppress(TypeError):
part = month_abbr[int(part)]
parts.append(part)
if key == "month" and not parts:
# noop if only day.
break
if parts:
parts = (str(part) for part in parts)
date = "-".join(parts)
self._log("After date", date)
self.metadata = MappingProxyType({**self.metadata, "date": date})
def _tokenize_tag(self, tag: str, fmt: str | Callable) -> str:
"""Add tags to the string."""
val = self.metadata.get(tag)
if val in _EMPTY_VALUES:
return ""
final_fmt = fmt(val) if isinstance(fmt, Callable) else fmt
return final_fmt.format(val).strip()
def _add_remainder(self) -> str:
"""Add the remainders specially."""
if remainders := self.metadata.get("remainders"):
if isinstance(remainders, Sequence):
remainders = (str(remainder) for remainder in remainders)
remainder = " ".join(remainders)
else:
remainder = str(remainders)
return f"[{remainder}]"
return ""
def serialize(self) -> str:
"""Get our preferred basename from a metadata dict."""
if not md:
return None
self._add_date()
tokens = []
for tag, fmt in _FILENAME_FORMAT_TAGS:
val = md.get(tag)
if val in _EMPTY_VALUES:
continue
final_fmt = fmt(val) if isinstance(fmt, Callable) else fmt
token = final_fmt.format(val).strip()
if token:
if token := self._tokenize_tag(tag, fmt):
tokens.append(token)
self._log(f"After {tag}", str(tokens))
fn = " ".join(tokens)
if remainders := md.get("remainders"):
remainder = " ".join(remainders)
fn += f" - {remainder}"
if ext:
fn += "." + md.get("ext", "cbz")
fn += self._add_remainder()
self._log("After remainder", fn)
if self._ext:
ext = self.metadata.get("ext", _DEFAULT_EXT)
fn += f".{ext}"
self._log("After ext", fn)
return fn
def __init__(self, metadata: Mapping, ext: bool = True, verbose: int = 0):
"""Initialize."""
self.metadata: Mapping = metadata
self._ext: bool = ext
self._debug: bool = bool(verbose)
def dict2comicfn(md: Mapping, ext: bool = True, verbose: int = 0) -> str:
"""Simplify API."""
serializer = ComicFilenameSerializer(md, ext=ext, verbose=verbose)
return serializer.serialize()
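A minimal serialization sketch based on the tag table above (hypothetical metadata dict; padding and tag order follow issue_formatter and _FILENAME_FORMAT_TAGS):

```python
from comicfn2dict import dict2comicfn

# Issue numbers are zero-padded by issue_formatter; year is folded into "date".
md = {"series": "Comic Series", "issue": "1", "year": "2024", "title": "Title"}
print(dict2comicfn(md))  # expected something like: Comic Series #001 (2024) Title.cbz
```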

debian.sources (new file, 11 lines)

@@ -0,0 +1,11 @@
Types: deb
URIs: http://deb.debian.org/debian
Suites: bookworm bookworm-updates
Components: main contrib non-free
Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg
Types: deb
URIs: http://deb.debian.org/debian-security
Suites: bookworm-security
Components: main contrib non-free
Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg

docker-compose.yaml (new file, 21 lines)

@@ -0,0 +1,21 @@
services:
comicfn2dict-builder:
build: .
image: comicfn2dict-builder
container_name: comicfn2dict-builder
comicfn2dict-lint:
image: comicfn2dict-builder
container_name: comicfn2dict-lint
command: make lint
comicfn2dict-test:
image: comicfn2dict-builder
container_name: comicfn2dict-test
command: make test
volumes:
- ./test-results/:/app/test-results/
comicfn2dict-build:
image: comicfn2dict-builder
container_name: comicfn2dict-build
volumes:
- ./dist/:/app/dist/
command: poetry build
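The bind mounts line up with the CI steps above: comicfn2dict-test writes into ./test-results/ (collected by store_test_results and store_artifacts) and comicfn2dict-build writes ./dist/ (persisted to the workspace for the deploy job).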

eslint.config.js (new file, 186 lines)

@@ -0,0 +1,186 @@
import { FlatCompat } from "@eslint/eslintrc";
import js from "@eslint/js";
import arrayFunc from "eslint-plugin-array-func";
// import plugin broken for flag config
// https://github.com/import-js/eslint-plugin-import/issues/2556
// import importPlugin from "eslint-plugin-import";
import eslintPluginPrettierRecommended from "eslint-plugin-prettier/recommended";
import pluginSecurity from "eslint-plugin-security";
import eslintPluginUnicorn from "eslint-plugin-unicorn";
import globals from "globals";
const compat = new FlatCompat();
export default [
{
languageOptions: {
globals: {
...globals.node,
...globals.browser,
},
},
linterOptions: {
reportUnusedDisableDirectives: "warn",
},
plugins: {
// import: importPlugin,
unicorn: eslintPluginUnicorn,
},
rules: {
"array-func/prefer-array-from": "off", // for modern browsers the spread operator, as preferred by unicorn, works fine.
"max-params": ["warn", 4],
"no-console": "warn",
"no-debugger": "warn",
"no-constructor-bind/no-constructor-bind": "error",
"no-constructor-bind/no-constructor-state": "error",
"no-secrets/no-secrets": "error",
"prettier/prettier": "warn",
"security/detect-object-injection": "off",
"space-before-function-paren": "off",
"unicorn/switch-case-braces": ["warn", "avoid"],
"unicorn/prefer-node-protocol": 0,
"unicorn/prevent-abbreviations": "off",
"unicorn/filename-case": [
"error",
{ case: "kebabCase", ignore: [".*.md"] },
],
/*
...importPlugin.configs["recommended"].rules,
"import/no-unresolved": [
"error",
{
ignore: ["^[@]"],
},
],
*/
},
/*
settings: {
"import/parsers": {
espree: [".js", ".cjs", ".mjs", ".jsx"],
"@typescript-eslint/parser": [".ts"],
},
"import/resolver": {
typescript: true,
node: true,
},
},
*/
},
js.configs.recommended,
arrayFunc.configs.all,
pluginSecurity.configs.recommended,
eslintPluginPrettierRecommended,
...compat.config({
root: true,
env: {
browser: true,
es2024: true,
node: true,
},
extends: [
// LANGS
"plugin:jsonc/recommended-with-jsonc",
"plugin:markdown/recommended",
"plugin:toml/recommended",
"plugin:yml/standard",
"plugin:yml/prettier",
// CODE QUALITY
"plugin:sonarjs/recommended",
// PRACTICES
"plugin:eslint-comments/recommended",
// "plugin:import/recommended",
"plugin:no-use-extend-native/recommended",
"plugin:optimize-regex/all",
"plugin:promise/recommended",
"plugin:switch-case/recommended",
// SECURITY
"plugin:no-unsanitized/DOM",
],
overrides: [
{
files: ["**/*.md"],
processor: "markdown/markdown",
rules: {
"prettier/prettier": ["warn", { parser: "markdown" }],
},
},
{
files: ["**/*.md/*.js"], // Will match js code inside *.md files
rules: {
"no-unused-vars": "off",
"no-undef": "off",
},
},
{
files: ["**/*.md/*.sh"],
rules: {
"prettier/prettier": ["error", { parser: "sh" }],
},
},
{
files: ["*.yaml", "*.yml"],
//parser: "yaml-eslint-parser",
rules: {
"unicorn/filename-case": "off",
},
},
{
files: ["*.toml"],
//parser: "toml-eslint-parser",
rules: {
"prettier/prettier": ["error", { parser: "toml" }],
},
},
{
files: ["*.json", "*.json5", "*.jsonc"],
//parser: "jsonc-eslint-parser",
},
],
parserOptions: {
ecmaFeatures: {
impliedStrict: true,
},
ecmaVersion: "latest",
},
plugins: [
"eslint-comments",
//"import",
"markdown",
"no-constructor-bind",
"no-secrets",
"no-unsanitized",
"no-use-extend-native",
"optimize-regex",
"promise",
"simple-import-sort",
"sonarjs",
"switch-case",
"unicorn",
],
rules: {
"no-constructor-bind/no-constructor-bind": "error",
"no-constructor-bind/no-constructor-state": "error",
"no-secrets/no-secrets": "error",
"eslint-comments/no-unused-disable": 1,
"simple-import-sort/exports": "warn",
"simple-import-sort/imports": "warn",
"switch-case/newline-between-switch-case": "off", // Malfunctioning
},
ignorePatterns: [
"*~",
"**/__pycache__",
".git",
"!.circleci",
".mypy_cache",
".ruff_cache",
".pytest_cache",
".venv*",
"dist",
"node_modules",
"package-lock.json",
"test-results",
"typings",
],
}),
];

package-lock.json (generated, 6335 lines changed; diff too large, suppressed)

package.json

@@ -1,10 +1,10 @@
{
"version": "0.1.0",
"description": "linting",
"version": "0.2.0",
"description": "comicfn2dict linting",
"type": "module",
"scripts": {
"fix": "eslint_d --cache --fix --ignore-pattern frontend --ext .cjs,.mjs,.js,.json,.yaml,.md . && prettier --write .",
"lint": "eslint_d --cache --ignore-pattern frontend --ext .cjs,.mjs,.js,.json,.yaml,.md . && prettier --check .",
"fix": "eslint --cache --fix . && prettier --write .",
"lint": "eslint --cache . && prettier --check .",
"remark-check": "remark .",
"remark-fix": "remark . --output"
},
@@ -13,12 +13,13 @@
"@prettier/plugin-xml",
"prettier-plugin-nginx",
"prettier-plugin-packagejson",
"prettier-plugin-sh"
"prettier-plugin-sh",
"prettier-plugin-toml"
],
"overrides": [
{
"files": [
"*.md"
"**/*.md"
],
"options": {
"proseWrap": "always"
@@ -28,6 +29,7 @@
},
"remarkConfig": {
"plugins": [
"gfm",
"preset-lint-consistent",
"preset-lint-recommended",
"preset-lint-markdown-style-guide",
@@ -42,36 +44,37 @@
"@prettier/plugin-xml": "^3.0.0",
"eslint": "^8.34.0",
"eslint-config-prettier": "^9.0.0",
"eslint-plugin-array-func": "^4.0.0",
"eslint-plugin-array-func": "^5.0.1",
"eslint-plugin-eslint-comments": "^3.2.0",
"eslint-plugin-import": "^2.25.4",
"eslint-plugin-json": "^3.1.0",
"eslint-plugin-mdx": "^3.0.0",
"eslint-plugin-jsonc": "^2.13.0",
"eslint-plugin-markdown": "^3.0.0",
"eslint-plugin-no-constructor-bind": "^2.0.4",
"eslint-plugin-no-secrets": "^0.8.9",
"eslint-plugin-no-unsanitized": "^4.0.0",
"eslint-plugin-no-use-extend-native": "^0.5.0",
"eslint-plugin-only-warn": "^1.0.2",
"eslint-plugin-optimize-regex": "^1.2.0",
"eslint-plugin-prettier": "^5.0.0-alpha.2",
"eslint-plugin-promise": "^6.0.0",
"eslint-plugin-scanjs-rules": "^0.2.1",
"eslint-plugin-security": "^2.1.0",
"eslint-plugin-simple-import-sort": "^10.0.0",
"eslint-plugin-sonarjs": "^0.23.0",
"eslint-plugin-simple-import-sort": "^12.0.0",
"eslint-plugin-sonarjs": "^0.24.0",
"eslint-plugin-switch-case": "^1.1.2",
"eslint-plugin-unicorn": "^50.0.1",
"eslint-plugin-yaml": "^0.5.0",
"eslint-plugin-toml": "^0.9.2",
"eslint-plugin-unicorn": "^51.0.1",
"eslint-plugin-yml": "^1.12.2",
"eslint_d": "^13.0.0",
"prettier": "^3.0.0",
"prettier-plugin-nginx": "^1.0.3",
"prettier-plugin-packagejson": "^2.4.4",
"prettier-plugin-sh": "^0.13.0",
"prettier-plugin-sh": "^0.14.0",
"prettier-plugin-toml": "^2.0.1",
"remark-cli": "^12.0.0",
"remark-gfm": "^4.0.0",
"remark-preset-lint-consistent": "^5.1.1",
"remark-preset-lint-markdown-style-guide": "^5.1.2",
"remark-preset-lint-recommended": "^6.1.2",
"remark-preset-prettier": "^2.0.1",
"toml": "^3.0.0"
"remark-preset-prettier": "^2.0.1"
}
}

poetry.lock (generated, 318 lines changed)

@@ -26,10 +26,27 @@ files = [
]
[package.extras]
dev = ["Pygments", "build", "chardet", "pre-commit", "pytest", "pytest-cov", "pytest-dependency", "ruff", "tomli", "twine"]
dev = [
"Pygments",
"build",
"chardet",
"pre-commit",
"pytest",
"pytest-cov",
"pytest-dependency",
"ruff",
"tomli",
"twine",
]
hard-encoding-detection = ["chardet"]
toml = ["tomli"]
types = ["chardet (>=5.1.0)", "mypy", "pytest", "pytest-cov", "pytest-dependency"]
types = [
"chardet (>=5.1.0)",
"mypy",
"pytest",
"pytest-cov",
"pytest-dependency",
]
[[package]]
name = "colorama"
@@ -44,63 +61,63 @@ files = [
[[package]]
name = "coverage"
version = "7.4.0"
version = "7.4.3"
description = "Code coverage measurement for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "coverage-7.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:36b0ea8ab20d6a7564e89cb6135920bc9188fb5f1f7152e94e8300b7b189441a"},
{file = "coverage-7.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0676cd0ba581e514b7f726495ea75aba3eb20899d824636c6f59b0ed2f88c471"},
{file = "coverage-7.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0ca5c71a5a1765a0f8f88022c52b6b8be740e512980362f7fdbb03725a0d6b9"},
{file = "coverage-7.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7c97726520f784239f6c62506bc70e48d01ae71e9da128259d61ca5e9788516"},
{file = "coverage-7.4.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:815ac2d0f3398a14286dc2cea223a6f338109f9ecf39a71160cd1628786bc6f5"},
{file = "coverage-7.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:80b5ee39b7f0131ebec7968baa9b2309eddb35b8403d1869e08f024efd883566"},
{file = "coverage-7.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5b2ccb7548a0b65974860a78c9ffe1173cfb5877460e5a229238d985565574ae"},
{file = "coverage-7.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:995ea5c48c4ebfd898eacb098164b3cc826ba273b3049e4a889658548e321b43"},
{file = "coverage-7.4.0-cp310-cp310-win32.whl", hash = "sha256:79287fd95585ed36e83182794a57a46aeae0b64ca53929d1176db56aacc83451"},
{file = "coverage-7.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:5b14b4f8760006bfdb6e08667af7bc2d8d9bfdb648351915315ea17645347137"},
{file = "coverage-7.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:04387a4a6ecb330c1878907ce0dc04078ea72a869263e53c72a1ba5bbdf380ca"},
{file = "coverage-7.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea81d8f9691bb53f4fb4db603203029643caffc82bf998ab5b59ca05560f4c06"},
{file = "coverage-7.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74775198b702868ec2d058cb92720a3c5a9177296f75bd97317c787daf711505"},
{file = "coverage-7.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:76f03940f9973bfaee8cfba70ac991825611b9aac047e5c80d499a44079ec0bc"},
{file = "coverage-7.4.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:485e9f897cf4856a65a57c7f6ea3dc0d4e6c076c87311d4bc003f82cfe199d25"},
{file = "coverage-7.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6ae8c9d301207e6856865867d762a4b6fd379c714fcc0607a84b92ee63feff70"},
{file = "coverage-7.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:bf477c355274a72435ceb140dc42de0dc1e1e0bf6e97195be30487d8eaaf1a09"},
{file = "coverage-7.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:83c2dda2666fe32332f8e87481eed056c8b4d163fe18ecc690b02802d36a4d26"},
{file = "coverage-7.4.0-cp311-cp311-win32.whl", hash = "sha256:697d1317e5290a313ef0d369650cfee1a114abb6021fa239ca12b4849ebbd614"},
{file = "coverage-7.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:26776ff6c711d9d835557ee453082025d871e30b3fd6c27fcef14733f67f0590"},
{file = "coverage-7.4.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:13eaf476ec3e883fe3e5fe3707caeb88268a06284484a3daf8250259ef1ba143"},
{file = "coverage-7.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846f52f46e212affb5bcf131c952fb4075b55aae6b61adc9856222df89cbe3e2"},
{file = "coverage-7.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26f66da8695719ccf90e794ed567a1549bb2644a706b41e9f6eae6816b398c4a"},
{file = "coverage-7.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:164fdcc3246c69a6526a59b744b62e303039a81e42cfbbdc171c91a8cc2f9446"},
{file = "coverage-7.4.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:316543f71025a6565677d84bc4df2114e9b6a615aa39fb165d697dba06a54af9"},
{file = "coverage-7.4.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bb1de682da0b824411e00a0d4da5a784ec6496b6850fdf8c865c1d68c0e318dd"},
{file = "coverage-7.4.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:0e8d06778e8fbffccfe96331a3946237f87b1e1d359d7fbe8b06b96c95a5407a"},
{file = "coverage-7.4.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a56de34db7b7ff77056a37aedded01b2b98b508227d2d0979d373a9b5d353daa"},
{file = "coverage-7.4.0-cp312-cp312-win32.whl", hash = "sha256:51456e6fa099a8d9d91497202d9563a320513fcf59f33991b0661a4a6f2ad450"},
{file = "coverage-7.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:cd3c1e4cb2ff0083758f09be0f77402e1bdf704adb7f89108007300a6da587d0"},
{file = "coverage-7.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e9d1bf53c4c8de58d22e0e956a79a5b37f754ed1ffdbf1a260d9dcfa2d8a325e"},
{file = "coverage-7.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:109f5985182b6b81fe33323ab4707011875198c41964f014579cf82cebf2bb85"},
{file = "coverage-7.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cc9d4bc55de8003663ec94c2f215d12d42ceea128da8f0f4036235a119c88ac"},
{file = "coverage-7.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cc6d65b21c219ec2072c1293c505cf36e4e913a3f936d80028993dd73c7906b1"},
{file = "coverage-7.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a10a4920def78bbfff4eff8a05c51be03e42f1c3735be42d851f199144897ba"},
{file = "coverage-7.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b8e99f06160602bc64da35158bb76c73522a4010f0649be44a4e167ff8555952"},
{file = "coverage-7.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:7d360587e64d006402b7116623cebf9d48893329ef035278969fa3bbf75b697e"},
{file = "coverage-7.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:29f3abe810930311c0b5d1a7140f6395369c3db1be68345638c33eec07535105"},
{file = "coverage-7.4.0-cp38-cp38-win32.whl", hash = "sha256:5040148f4ec43644702e7b16ca864c5314ccb8ee0751ef617d49aa0e2d6bf4f2"},
{file = "coverage-7.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:9864463c1c2f9cb3b5db2cf1ff475eed2f0b4285c2aaf4d357b69959941aa555"},
{file = "coverage-7.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:936d38794044b26c99d3dd004d8af0035ac535b92090f7f2bb5aa9c8e2f5cd42"},
{file = "coverage-7.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:799c8f873794a08cdf216aa5d0531c6a3747793b70c53f70e98259720a6fe2d7"},
{file = "coverage-7.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7defbb9737274023e2d7af02cac77043c86ce88a907c58f42b580a97d5bcca9"},
{file = "coverage-7.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a1526d265743fb49363974b7aa8d5899ff64ee07df47dd8d3e37dcc0818f09ed"},
{file = "coverage-7.4.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf635a52fc1ea401baf88843ae8708591aa4adff875e5c23220de43b1ccf575c"},
{file = "coverage-7.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:756ded44f47f330666843b5781be126ab57bb57c22adbb07d83f6b519783b870"},
{file = "coverage-7.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:0eb3c2f32dabe3a4aaf6441dde94f35687224dfd7eb2a7f47f3fd9428e421058"},
{file = "coverage-7.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bfd5db349d15c08311702611f3dccbef4b4e2ec148fcc636cf8739519b4a5c0f"},
{file = "coverage-7.4.0-cp39-cp39-win32.whl", hash = "sha256:53d7d9158ee03956e0eadac38dfa1ec8068431ef8058fe6447043db1fb40d932"},
{file = "coverage-7.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:cfd2a8b6b0d8e66e944d47cdec2f47c48fef2ba2f2dff5a9a75757f64172857e"},
{file = "coverage-7.4.0-pp38.pp39.pp310-none-any.whl", hash = "sha256:c530833afc4707fe48524a44844493f36d8727f04dcce91fb978c414a8556cc6"},
{file = "coverage-7.4.0.tar.gz", hash = "sha256:707c0f58cb1712b8809ece32b68996ee1e609f71bd14615bd8f87a1293cb610e"},
{ file = "coverage-7.4.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8580b827d4746d47294c0e0b92854c85a92c2227927433998f0d3320ae8a71b6" },
{ file = "coverage-7.4.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:718187eeb9849fc6cc23e0d9b092bc2348821c5e1a901c9f8975df0bc785bfd4" },
{ file = "coverage-7.4.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:767b35c3a246bcb55b8044fd3a43b8cd553dd1f9f2c1eeb87a302b1f8daa0524" },
{ file = "coverage-7.4.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae7f19afe0cce50039e2c782bff379c7e347cba335429678450b8fe81c4ef96d" },
{ file = "coverage-7.4.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba3a8aaed13770e970b3df46980cb068d1c24af1a1968b7818b69af8c4347efb" },
{ file = "coverage-7.4.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ee866acc0861caebb4f2ab79f0b94dbfbdbfadc19f82e6e9c93930f74e11d7a0" },
{ file = "coverage-7.4.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:506edb1dd49e13a2d4cac6a5173317b82a23c9d6e8df63efb4f0380de0fbccbc" },
{ file = "coverage-7.4.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd6545d97c98a192c5ac995d21c894b581f1fd14cf389be90724d21808b657e2" },
{ file = "coverage-7.4.3-cp310-cp310-win32.whl", hash = "sha256:f6a09b360d67e589236a44f0c39218a8efba2593b6abdccc300a8862cffc2f94" },
{ file = "coverage-7.4.3-cp310-cp310-win_amd64.whl", hash = "sha256:18d90523ce7553dd0b7e23cbb28865db23cddfd683a38fb224115f7826de78d0" },
{ file = "coverage-7.4.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cbbe5e739d45a52f3200a771c6d2c7acf89eb2524890a4a3aa1a7fa0695d2a47" },
{ file = "coverage-7.4.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:489763b2d037b164846ebac0cbd368b8a4ca56385c4090807ff9fad817de4113" },
{ file = "coverage-7.4.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:451f433ad901b3bb00184d83fd83d135fb682d780b38af7944c9faeecb1e0bfe" },
{ file = "coverage-7.4.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fcc66e222cf4c719fe7722a403888b1f5e1682d1679bd780e2b26c18bb648cdc" },
{ file = "coverage-7.4.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3ec74cfef2d985e145baae90d9b1b32f85e1741b04cd967aaf9cfa84c1334f3" },
{ file = "coverage-7.4.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:abbbd8093c5229c72d4c2926afaee0e6e3140de69d5dcd918b2921f2f0c8baba" },
{ file = "coverage-7.4.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:35eb581efdacf7b7422af677b92170da4ef34500467381e805944a3201df2079" },
{ file = "coverage-7.4.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8249b1c7334be8f8c3abcaaa996e1e4927b0e5a23b65f5bf6cfe3180d8ca7840" },
{ file = "coverage-7.4.3-cp311-cp311-win32.whl", hash = "sha256:cf30900aa1ba595312ae41978b95e256e419d8a823af79ce670835409fc02ad3" },
{ file = "coverage-7.4.3-cp311-cp311-win_amd64.whl", hash = "sha256:18c7320695c949de11a351742ee001849912fd57e62a706d83dfc1581897fa2e" },
{ file = "coverage-7.4.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b51bfc348925e92a9bd9b2e48dad13431b57011fd1038f08316e6bf1df107d10" },
{ file = "coverage-7.4.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d6cdecaedea1ea9e033d8adf6a0ab11107b49571bbb9737175444cea6eb72328" },
{ file = "coverage-7.4.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b2eccb883368f9e972e216c7b4c7c06cabda925b5f06dde0650281cb7666a30" },
{ file = "coverage-7.4.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c00cdc8fa4e50e1cc1f941a7f2e3e0f26cb2a1233c9696f26963ff58445bac7" },
{ file = "coverage-7.4.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9a4a8dd3dcf4cbd3165737358e4d7dfbd9d59902ad11e3b15eebb6393b0446e" },
{ file = "coverage-7.4.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:062b0a75d9261e2f9c6d071753f7eef0fc9caf3a2c82d36d76667ba7b6470003" },
{ file = "coverage-7.4.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:ebe7c9e67a2d15fa97b77ea6571ce5e1e1f6b0db71d1d5e96f8d2bf134303c1d" },
{ file = "coverage-7.4.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c0a120238dd71c68484f02562f6d446d736adcc6ca0993712289b102705a9a3a" },
{ file = "coverage-7.4.3-cp312-cp312-win32.whl", hash = "sha256:37389611ba54fd6d278fde86eb2c013c8e50232e38f5c68235d09d0a3f8aa352" },
{ file = "coverage-7.4.3-cp312-cp312-win_amd64.whl", hash = "sha256:d25b937a5d9ffa857d41be042b4238dd61db888533b53bc76dc082cb5a15e914" },
{ file = "coverage-7.4.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:28ca2098939eabab044ad68850aac8f8db6bf0b29bc7f2887d05889b17346454" },
{ file = "coverage-7.4.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:280459f0a03cecbe8800786cdc23067a8fc64c0bd51dc614008d9c36e1659d7e" },
{ file = "coverage-7.4.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c0cdedd3500e0511eac1517bf560149764b7d8e65cb800d8bf1c63ebf39edd2" },
{ file = "coverage-7.4.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a9babb9466fe1da12417a4aed923e90124a534736de6201794a3aea9d98484e" },
{ file = "coverage-7.4.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dec9de46a33cf2dd87a5254af095a409ea3bf952d85ad339751e7de6d962cde6" },
{ file = "coverage-7.4.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:16bae383a9cc5abab9bb05c10a3e5a52e0a788325dc9ba8499e821885928968c" },
{ file = "coverage-7.4.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:2c854ce44e1ee31bda4e318af1dbcfc929026d12c5ed030095ad98197eeeaed0" },
{ file = "coverage-7.4.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ce8c50520f57ec57aa21a63ea4f325c7b657386b3f02ccaedeccf9ebe27686e1" },
{ file = "coverage-7.4.3-cp38-cp38-win32.whl", hash = "sha256:708a3369dcf055c00ddeeaa2b20f0dd1ce664eeabde6623e516c5228b753654f" },
{ file = "coverage-7.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:1bf25fbca0c8d121a3e92a2a0555c7e5bc981aee5c3fdaf4bb7809f410f696b9" },
{ file = "coverage-7.4.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3b253094dbe1b431d3a4ac2f053b6d7ede2664ac559705a704f621742e034f1f" },
{ file = "coverage-7.4.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77fbfc5720cceac9c200054b9fab50cb2a7d79660609200ab83f5db96162d20c" },
{ file = "coverage-7.4.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6679060424faa9c11808598504c3ab472de4531c571ab2befa32f4971835788e" },
{ file = "coverage-7.4.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4af154d617c875b52651dd8dd17a31270c495082f3d55f6128e7629658d63765" },
{ file = "coverage-7.4.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8640f1fde5e1b8e3439fe482cdc2b0bb6c329f4bb161927c28d2e8879c6029ee" },
{ file = "coverage-7.4.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:69b9f6f66c0af29642e73a520b6fed25ff9fd69a25975ebe6acb297234eda501" },
{ file = "coverage-7.4.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:0842571634f39016a6c03e9d4aba502be652a6e4455fadb73cd3a3a49173e38f" },
{ file = "coverage-7.4.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a78ed23b08e8ab524551f52953a8a05d61c3a760781762aac49f8de6eede8c45" },
{ file = "coverage-7.4.3-cp39-cp39-win32.whl", hash = "sha256:c0524de3ff096e15fcbfe8f056fdb4ea0bf497d584454f344d59fce069d3e6e9" },
{ file = "coverage-7.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:0209a6369ccce576b43bb227dc8322d8ef9e323d089c6f3f26a597b09cb4d2aa" },
{ file = "coverage-7.4.3-pp38.pp39.pp310-none-any.whl", hash = "sha256:7cbde573904625509a3f37b6fecea974e363460b556a627c60dc2f47e2fffa51" },
{ file = "coverage-7.4.3.tar.gz", hash = "sha256:276f6077a5c61447a48d133ed13e759c09e62aff0dc84274a68dc18660104d52" },
]
[package.dependencies]
@ -111,12 +128,12 @@ toml = ["tomli"]
[[package]]
name = "cssbeautifier"
version = "1.14.11"
version = "1.15.1"
description = "CSS unobfuscator and beautifier."
optional = false
python-versions = "*"
files = [
{file = "cssbeautifier-1.14.11.tar.gz", hash = "sha256:40544c2b62bbcb64caa5e7f37a02df95654e5ce1bcacadac4ca1f3dc89c31513"},
{ file = "cssbeautifier-1.15.1.tar.gz", hash = "sha256:9f7064362aedd559c55eeecf6b6bed65e05f33488dcbe39044f0403c26e1c006" },
]
[package.dependencies]
@ -169,13 +186,12 @@ tqdm = ">=4.62.2,<5.0.0"
[[package]]
name = "editorconfig"
version = "0.12.3"
version = "0.12.4"
description = "EditorConfig File Locator and Interpreter for Python"
optional = false
python-versions = "*"
files = [
{file = "EditorConfig-0.12.3-py3-none-any.whl", hash = "sha256:6b0851425aa875b08b16789ee0eeadbd4ab59666e9ebe728e526314c4a2e52c1"},
{file = "EditorConfig-0.12.3.tar.gz", hash = "sha256:57f8ce78afcba15c8b18d46b5170848c88d56fd38f05c2ec60dbbfcb8996e89e"},
{ file = "EditorConfig-0.12.4.tar.gz", hash = "sha256:24857fa1793917dd9ccf0c7810a07e05404ce9b823521c7dce22a4fb5d125f80" },
]
[[package]]
@ -298,12 +314,12 @@ files = [
[[package]]
name = "jsbeautifier"
version = "1.14.11"
version = "1.15.1"
description = "JavaScript unobfuscator and beautifier."
optional = false
python-versions = "*"
files = [
{file = "jsbeautifier-1.14.11.tar.gz", hash = "sha256:6b632581ea60dd1c133cd25a48ad187b4b91f526623c4b0fb5443ef805250505"},
{ file = "jsbeautifier-1.15.1.tar.gz", hash = "sha256:ebd733b560704c602d744eafc839db60a1ee9326e30a2a80c4adb8718adc1b24" },
]
[package.dependencies]
@ -312,13 +328,13 @@ six = ">=1.13.0"
[[package]]
name = "json5"
version = "0.9.14"
version = "0.9.17"
description = "A Python implementation of the JSON5 data format."
optional = false
python-versions = "*"
python-versions = ">=3.8"
files = [
{file = "json5-0.9.14-py2.py3-none-any.whl", hash = "sha256:740c7f1b9e584a468dbb2939d8d458db3427f2c93ae2139d05f47e453eae964f"},
{file = "json5-0.9.14.tar.gz", hash = "sha256:9ed66c3a6ca3510a976a9ef9b8c0787de24802724ab1860bc0153c7fdd589b02"},
{ file = "json5-0.9.17-py2.py3-none-any.whl", hash = "sha256:f8ec1ecf985951d70f780f6f877c4baca6a47b6e61e02c4cd190138d10a7805a" },
{ file = "json5-0.9.17.tar.gz", hash = "sha256:717d99d657fa71b7094877b1d921b1cce40ab444389f6d770302563bb7dfd9ae" },
]
[package.extras]
@ -471,13 +487,13 @@ files = [
[[package]]
name = "pluggy"
version = "1.3.0"
version = "1.4.0"
description = "plugin and hook calling mechanisms for python"
optional = false
python-versions = ">=3.8"
files = [
{file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"},
{file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"},
{ file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981" },
{ file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be" },
]
[package.extras]
@ -505,13 +521,13 @@ test = ["pytest"]
[[package]]
name = "pyright"
version = "1.1.347"
version = "1.1.351"
description = "Command line wrapper for pyright"
optional = false
python-versions = ">=3.7"
files = [
{file = "pyright-1.1.347-py3-none-any.whl", hash = "sha256:14dd31b594aa3ec464894f66b8a2d206ebef1501e52789eb88cf2a79b0907fbe"},
{file = "pyright-1.1.347.tar.gz", hash = "sha256:17ea09322f60080f82abc4e622e43d1a5ebaa407ba86963b15b2bc01cca256e0"},
{ file = "pyright-1.1.351-py3-none-any.whl", hash = "sha256:83b44b25396ae20661fc5f133c3fce30928ff1296d4f2e5ff0bca5fcf03eb89d" },
{ file = "pyright-1.1.351.tar.gz", hash = "sha256:01124099714eebd7f6525d8cbfa350626b56dfaf771cfcd55c03e69f0f1efbbd" },
]
[package.dependencies]
@ -523,13 +539,13 @@ dev = ["twine (>=3.4.1)"]
[[package]]
name = "pytest"
version = "7.4.4"
version = "8.0.1"
description = "pytest: simple powerful testing with Python"
optional = false
python-versions = ">=3.7"
python-versions = ">=3.8"
files = [
{file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"},
{file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"},
{ file = "pytest-8.0.1-py3-none-any.whl", hash = "sha256:3e4f16fe1c0a9dc9d9389161c127c3edc5d810c38d6793042fb81d9f48a59fca" },
{ file = "pytest-8.0.1.tar.gz", hash = "sha256:267f6563751877d772019b13aacbe4e860d73fe8f651f28112e9ac37de7513ae" },
]
[package.dependencies]
@ -537,11 +553,21 @@ colorama = {version = "*", markers = "sys_platform == \"win32\""}
exceptiongroup = { version = ">=1.0.0rc8", markers = "python_version < \"3.11\"" }
iniconfig = "*"
packaging = "*"
pluggy = ">=0.12,<2.0"
pluggy = ">=1.3.0,<2.0"
tomli = { version = ">=1.0.0", markers = "python_version < \"3.11\"" }
[package.extras]
testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
testing = [
"argcomplete",
"attrs (>=19.2.0)",
"hypothesis (>=3.56)",
"mock",
"nose",
"pygments (>=2.7.2)",
"requests",
"setuptools",
"xmlschema",
]
[[package]]
name = "pytest-cov"
@ -559,7 +585,14 @@ coverage = {version = ">=5.2.1", extras = ["toml"]}
pytest = ">=4.6"
[package.extras]
testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"]
testing = [
"fields",
"hunter",
"process-tests",
"pytest-xdist",
"six",
"virtualenv",
]
[[package]]
name = "pytest-gitignore"
@ -600,6 +633,7 @@ files = [
{ file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34" },
{ file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28" },
{ file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9" },
{ file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef" },
{ file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0" },
{ file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4" },
{ file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54" },
@ -757,45 +791,94 @@ files = [
[[package]]
name = "ruff"
version = "0.1.13"
version = "0.2.2"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
files = [
{file = "ruff-0.1.13-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:e3fd36e0d48aeac672aa850045e784673449ce619afc12823ea7868fcc41d8ba"},
{file = "ruff-0.1.13-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:9fb6b3b86450d4ec6a6732f9f60c4406061b6851c4b29f944f8c9d91c3611c7a"},
{file = "ruff-0.1.13-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b13ba5d7156daaf3fd08b6b993360a96060500aca7e307d95ecbc5bb47a69296"},
{file = "ruff-0.1.13-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9ebb40442f7b531e136d334ef0851412410061e65d61ca8ce90d894a094feb22"},
{file = "ruff-0.1.13-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:226b517f42d59a543d6383cfe03cccf0091e3e0ed1b856c6824be03d2a75d3b6"},
{file = "ruff-0.1.13-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:5f0312ba1061e9b8c724e9a702d3c8621e3c6e6c2c9bd862550ab2951ac75c16"},
{file = "ruff-0.1.13-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2f59bcf5217c661254bd6bc42d65a6fd1a8b80c48763cb5c2293295babd945dd"},
{file = "ruff-0.1.13-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e6894b00495e00c27b6ba61af1fc666f17de6140345e5ef27dd6e08fb987259d"},
{file = "ruff-0.1.13-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a1600942485c6e66119da294c6294856b5c86fd6df591ce293e4a4cc8e72989"},
{file = "ruff-0.1.13-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ee3febce7863e231a467f90e681d3d89210b900d49ce88723ce052c8761be8c7"},
{file = "ruff-0.1.13-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:dcaab50e278ff497ee4d1fe69b29ca0a9a47cd954bb17963628fa417933c6eb1"},
{file = "ruff-0.1.13-py3-none-musllinux_1_2_i686.whl", hash = "sha256:f57de973de4edef3ad3044d6a50c02ad9fc2dff0d88587f25f1a48e3f72edf5e"},
{file = "ruff-0.1.13-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:7a36fa90eb12208272a858475ec43ac811ac37e91ef868759770b71bdabe27b6"},
{file = "ruff-0.1.13-py3-none-win32.whl", hash = "sha256:a623349a505ff768dad6bd57087e2461be8db58305ebd5577bd0e98631f9ae69"},
{file = "ruff-0.1.13-py3-none-win_amd64.whl", hash = "sha256:f988746e3c3982bea7f824c8fa318ce7f538c4dfefec99cd09c8770bd33e6539"},
{file = "ruff-0.1.13-py3-none-win_arm64.whl", hash = "sha256:6bbbc3042075871ec17f28864808540a26f0f79a4478c357d3e3d2284e832998"},
{file = "ruff-0.1.13.tar.gz", hash = "sha256:e261f1baed6291f434ffb1d5c6bd8051d1c2a26958072d38dfbec39b3dda7352"},
{ file = "ruff-0.2.2-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:0a9efb032855ffb3c21f6405751d5e147b0c6b631e3ca3f6b20f917572b97eb6" },
{ file = "ruff-0.2.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:d450b7fbff85913f866a5384d8912710936e2b96da74541c82c1b458472ddb39" },
{ file = "ruff-0.2.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ecd46e3106850a5c26aee114e562c329f9a1fbe9e4821b008c4404f64ff9ce73" },
{ file = "ruff-0.2.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e22676a5b875bd72acd3d11d5fa9075d3a5f53b877fe7b4793e4673499318ba" },
{ file = "ruff-0.2.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1695700d1e25a99d28f7a1636d85bafcc5030bba9d0578c0781ba1790dbcf51c" },
{ file = "ruff-0.2.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:b0c232af3d0bd8f521806223723456ffebf8e323bd1e4e82b0befb20ba18388e" },
{ file = "ruff-0.2.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f63d96494eeec2fc70d909393bcd76c69f35334cdbd9e20d089fb3f0640216ca" },
{ file = "ruff-0.2.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6a61ea0ff048e06de273b2e45bd72629f470f5da8f71daf09fe481278b175001" },
{ file = "ruff-0.2.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e1439c8f407e4f356470e54cdecdca1bd5439a0673792dbe34a2b0a551a2fe3" },
{ file = "ruff-0.2.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:940de32dc8853eba0f67f7198b3e79bc6ba95c2edbfdfac2144c8235114d6726" },
{ file = "ruff-0.2.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:0c126da55c38dd917621552ab430213bdb3273bb10ddb67bc4b761989210eb6e" },
{ file = "ruff-0.2.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:3b65494f7e4bed2e74110dac1f0d17dc8e1f42faaa784e7c58a98e335ec83d7e" },
{ file = "ruff-0.2.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:1ec49be4fe6ddac0503833f3ed8930528e26d1e60ad35c2446da372d16651ce9" },
{ file = "ruff-0.2.2-py3-none-win32.whl", hash = "sha256:d920499b576f6c68295bc04e7b17b6544d9d05f196bb3aac4358792ef6f34325" },
{ file = "ruff-0.2.2-py3-none-win_amd64.whl", hash = "sha256:cc9a91ae137d687f43a44c900e5d95e9617cb37d4c989e462980ba27039d239d" },
{ file = "ruff-0.2.2-py3-none-win_arm64.whl", hash = "sha256:c9d15fc41e6054bfc7200478720570078f0b41c9ae4f010bcc16bd6f4d1aacdd" },
{ file = "ruff-0.2.2.tar.gz", hash = "sha256:e62ed7f36b3068a30ba39193a14274cd706bc486fad521276458022f7bccb31d" },
]
[[package]]
name = "setuptools"
version = "69.0.3"
version = "69.1.1"
description = "Easily download, build, install, upgrade, and uninstall Python packages"
optional = false
python-versions = ">=3.8"
files = [
{file = "setuptools-69.0.3-py3-none-any.whl", hash = "sha256:385eb4edd9c9d5c17540511303e39a147ce2fc04bc55289c322b9e5904fe2c05"},
{file = "setuptools-69.0.3.tar.gz", hash = "sha256:be1af57fc409f93647f2e8e4573a142ed38724b8cdd389706a867bb4efcf1e78"},
{ file = "setuptools-69.1.1-py3-none-any.whl", hash = "sha256:02fa291a0471b3a18b2b2481ed902af520c69e8ae0919c13da936542754b4c56" },
{ file = "setuptools-69.1.1.tar.gz", hash = "sha256:5c0806c7d9af348e6dd3777b4f4dbb42c7ad85b190104837488eab9a7c945cf8" },
]
[package.extras]
docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.1)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"]
docs = [
"furo",
"jaraco.packaging (>=9.3)",
"jaraco.tidelift (>=1.4)",
"pygments-github-lexers (==0.0.5)",
"rst.linker (>=1.9)",
"sphinx (<7.2.5)",
"sphinx (>=3.5)",
"sphinx-favicon",
"sphinx-inline-tabs",
"sphinx-lint",
"sphinx-notfound-page (>=1,<2)",
"sphinx-reredirects",
"sphinxcontrib-towncrier",
]
testing = [
"build[virtualenv]",
"filelock (>=3.4.0)",
"flake8-2020",
"ini2toml[lite] (>=0.9)",
"jaraco.develop (>=7.21)",
"jaraco.envs (>=2.2)",
"jaraco.path (>=3.2.0)",
"packaging (>=23.2)",
"pip (>=19.1)",
"pytest (>=6)",
"pytest-checkdocs (>=2.4)",
"pytest-cov",
"pytest-enabler (>=2.2)",
"pytest-home (>=0.5)",
"pytest-mypy (>=0.9.1)",
"pytest-perf",
"pytest-ruff (>=0.2.1)",
"pytest-timeout",
"pytest-xdist",
"tomli-w (>=1.0.0)",
"virtualenv (>=13.0.0)",
"wheel",
]
testing-integration = [
"build[virtualenv] (>=1.0.3)",
"filelock (>=3.4.0)",
"jaraco.envs (>=2.2)",
"jaraco.path (>=3.2.0)",
"packaging (>=23.2)",
"pytest",
"pytest-enabler",
"pytest-xdist",
"tomli",
"virtualenv (>=13.0.0)",
"wheel",
]
[[package]]
name = "six"
@ -808,17 +891,6 @@ files = [
{ file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926" },
]
[[package]]
name = "toml"
version = "0.10.2"
description = "Python Library for Tom's Obvious, Minimal Language"
optional = false
python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
files = [
{file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"},
{file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"},
]
[[package]]
name = "tomli"
version = "2.0.1"
@ -832,13 +904,13 @@ files = [
[[package]]
name = "tqdm"
version = "4.66.1"
version = "4.66.2"
description = "Fast, Extensible Progress Meter"
optional = false
python-versions = ">=3.7"
files = [
{file = "tqdm-4.66.1-py3-none-any.whl", hash = "sha256:d302b3c5b53d47bce91fea46679d9c3c6508cf6332229aa1e7d8653723793386"},
{file = "tqdm-4.66.1.tar.gz", hash = "sha256:d88e651f9db8d8551a62556d3cff9e3034274ca5d66e93197cf2490e2dcb69c7"},
{ file = "tqdm-4.66.2-py3-none-any.whl", hash = "sha256:1ee4f8a893eb9bef51c6e35730cebf234d5d0b6bd112b0271e10ed7c24a02bd9" },
{ file = "tqdm-4.66.2.tar.gz", hash = "sha256:6cd52cdf0fef0e0f543299cfc96fec90d7b8a7e88745f411ec33eb44d5ed3531" },
]
[package.dependencies]
@ -863,17 +935,17 @@ files = [
[[package]]
name = "vulture"
version = "2.10"
version = "2.11"
description = "Find dead code"
optional = false
python-versions = ">=3.8"
files = [
{file = "vulture-2.10-py2.py3-none-any.whl", hash = "sha256:568a4176db7468d0157817ae3bb1847a19f1ddc629849af487f9d3b279bff77d"},
{file = "vulture-2.10.tar.gz", hash = "sha256:2a5c3160bffba77595b6e6dfcc412016bd2a09cd4b66cdf7fbba913684899f6f"},
{ file = "vulture-2.11-py2.py3-none-any.whl", hash = "sha256:12d745f7710ffbf6aeb8279ba9068a24d4e52e8ed333b8b044035c9d6b823aba" },
{ file = "vulture-2.11.tar.gz", hash = "sha256:f0fbb60bce6511aad87ee0736c502456737490a82d919a44e6d92262cb35f1c2" },
]
[package.dependencies]
toml = "*"
tomli = { version = ">=1.1.0", markers = "python_version < \"3.11\"" }
[[package]]
name = "wheel"
@ -891,5 +963,5 @@ test = ["pytest (>=6.0.0)", "setuptools (>=65)"]
[metadata]
lock-version = "2.0"
python-versions = "^3.9"
content-hash = "8bec070b355fa8b409f8d9dfb9dae82433b11c3f65fc17dd14652e9e25e62f7a"
python-versions = "^3.10"
content-hash = "ad7bc225fd2048867bce6d5b96c739554d4b7a16bd035a60e4d7d2d82ecd7811"

View File

@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "comicfn2dict"
version = "0.1.3"
version = "0.2.0a4"
description = "Parse common comic filenames and return a dict of metadata attributes. Includes a cli."
license = "GPL-3.0-only"
authors = ["AJ Slater <aj@slater.net>"]
@ -20,12 +20,15 @@ classifiers = [
"Operating System :: OS Independent",
"Programming Language :: Python :: 3 :: Only",
]
packages = [{ include = "comicfn2dict" }, { include = "tests", format = "sdist" }]
packages = [
{ include = "comicfn2dict" },
{ include = "tests", format = "sdist" },
]
exclude = ["*/**/*~"]
include = []
[tool.poetry.dependencies]
python = "^3.9"
python = "^3.10"
[tool.poetry.group.dev.dependencies]
neovim = "^0.3.1"
@ -42,7 +45,7 @@ pytest-gitignore = "^1.3"
codespell = "^2.1.0"
pyright = "^1.1.232"
radon = { version = "^6.0.1", extras = ["toml"] }
ruff = "^0.1.2"
ruff = "^0.2.1"
types-python-dateutil = "^2.8.19"
vulture = "^2.3"
@ -77,7 +80,7 @@ omit = [
"dist/*",
"node_modules/*",
"test-results/*",
"typings/*"
"typings/*",
]
[tool.pyright]
@ -98,12 +101,10 @@ exclude = [
useLibraryCodeForTypes = true
reportMissingImports = true
reportImportCycles = true
pythonVersion = "3.9"
pythonVersion = "3.10"
pythonPlatform = "All"
[tool.pytest.ini_options]
junit_family = "xunit2"
# --black
addopts = """
--junit-xml=test-results/pytest/results.xml
-ra
@ -113,21 +114,38 @@ addopts = """
--cov-append
--cov-report=html
--cov-report=term
--ignore=.git
--ignore=cache
--ignore=frontend
--ignore=typings
"""
junit_family = "xunit2"
testpaths = "tests"
[tool.radon]
exclude = "*~,.git/*,.mypy_cache/*,.pytest_cache/*,.venv*,__pycache__/*,cache/*,dist/*,node_modules/*,test-results/*,typings/*"
[tool.ruff]
extend-exclude = ["typings"]
extend-ignore = ["S101", "D203", "D213",
target-version = "py310"
[tool.ruff.lint]
extend-ignore = [
"S101",
"D203",
"D213",
# Format ignores
"W191", "E501", "E111", "E114", "E117", "D206", "D300", "Q000", "Q001",
"Q002", "Q003", "COM812", "COM819", "ISC001", "ISC002"
"W191",
"E501",
"E111",
"E114",
"E117",
"D206",
"D300",
"Q000",
"Q001",
"Q002",
"Q003",
"COM812",
"COM819",
"ISC001",
"ISC002",
]
extend-select = [
"A",
@ -168,19 +186,16 @@ extend-select = [
"TRY",
"UP",
"W",
"YTT"
"YTT",
# "ANN", "ERA", "COM"
]
external = ["V101"]
# format = "grouped"
# show-source = true
target-version = "py39"
task-tags = ["TODO", "FIXME", "XXX", "http", "HACK"]
[tool.ruff.per-file-ignores]
[tool.ruff.lint.per-file-ignores]
"tests/*" = ["SLF001", "T201", "T203"]
[tool.ruff.pycodestyle]
[tool.ruff.lint.pycodestyle]
ignore-overlong-task-comments = true
[tool.vulture]

View File

@ -1,5 +1,7 @@
"""Test filenames with human parsed correct results."""
from types import MappingProxyType
TEST_COMIC_FIELDS = {
"series": "Long Series Name",
"issue": "001",
@ -22,6 +24,7 @@ TEST_COMIC_FIELDS_VOL = {
TEST_COMIC_VOL_ONLY = {
"series": "Long Series Name",
"volume": "1",
"issue": "1",
"title": "Title",
"original_format": "TPB",
"year": "2000",
@ -29,6 +32,7 @@ TEST_COMIC_VOL_ONLY = {
"ext": "cbr",
}
# Tests for 0.1.0
FNS = {
"Night of 1000 Wolves 001 (2013).cbz": {
"series": "Night of 1000 Wolves",
@ -51,11 +55,6 @@ FNS = {
"Long Series Name #001 (2000) Title (TPB) (Releaser).cbz": TEST_COMIC_FIELDS,
"Long Series Name (2000) 001 Title (TPB) (Releaser).cbz": TEST_COMIC_FIELDS,
"Long Series Name (2000) #001 Title (TPB) (Releaser).cbz": TEST_COMIC_FIELDS,
"Long Series Name v1 (2000) #001 "
"Title (TPB) (Releaser).cbz": TEST_COMIC_FIELDS_VOL,
"Long Series Name 001 (2000) (TPB-Releaser) Title.cbz": TEST_COMIC_FIELDS,
"Long Series Name Vol 1 "
"(2000) (TPB) (Releaser & Releaser-Releaser) Title.cbr": TEST_COMIC_VOL_ONLY,
"Ultimate Craziness (2019) (Digital) (Friends-of-Bill).cbr": {
"series": "Ultimate Craziness",
"year": "2019",
@ -73,26 +72,17 @@ FNS = {
"Arkenstone Vol. 01 - The Smell of Burnt Toast (2020) (digital) (My-brother).cbr": {
"series": "Arkenstone",
"volume": "01",
"issue": "01",
"year": "2020",
"ext": "cbr",
"scan_info": "My-brother",
"title": "The Smell of Burnt Toast",
"original_format": "digital",
},
"Bardude - The Last Thing I Remember.cbz": {
"series": "Bardude",
"title": "The Last Thing I Remember",
"ext": "cbz",
},
"Drunkguy - The Man Without Fear - 01.cbz": {
"series": "Drunkguy",
"title": "The Man Without Fear",
"issue": "01",
"ext": "cbz",
},
"The_Arkenstone_v03_(2002)_(Digital)_(DR_&amp;_Quenya-Elves).cbr": {
"series": "The Arkenstone",
"volume": "03",
"issue": "03",
"year": "2002",
"ext": "cbr",
"scan_info": "DR &amp; Quenya-Elves",
@ -111,6 +101,7 @@ FNS = {
"Kartalk Library Edition v01 (1992) (digital) (Son of Ultron-Empire).cbr": {
"series": "Kartalk Library Edition",
"volume": "01",
"issue": "01",
"year": "1992",
"ext": "cbr",
"original_format": "digital",
@ -119,15 +110,15 @@ FNS = {
"Kind of Deadly v02 - Last Bullet (2006) (Digital) (Zone-Empire).cbr": {
"series": "Kind of Deadly",
"volume": "02",
"issue": "02",
"year": "2006",
"ext": "cbr",
"original_format": "Digital",
"scan_info": "Zone-Empire",
"title": "Last Bullet",
},
"Jeremy John - A Big Long Title (2017) (digital-Minutement).cbz": {
"series": "Jeremy John",
"title": "A Big Long Title",
"Jeremy John - Not A Title (2017) (digital-Minutement).cbz": {
"series": "Jeremy John - Not A Title",
"year": "2017",
"ext": "cbz",
"original_format": "digital",
@ -139,8 +130,7 @@ FNS = {
"year": "2006",
"ext": "cbz",
"scan_info": "Minutemen-Faessla",
# "original_format": "digital",
"remainders": ("(digital",),
"original_format": "digital",
},
"Jeremy John 003 (2007) (4 covers) (digital) (Minutemen-Faessla).cbz": {
"series": "Jeremy John",
@ -154,6 +144,7 @@ FNS = {
"Jeremy John v01 - Uninterested! (2007) (Digital) (Asgard-Empire).cbr": {
"series": "Jeremy John",
"volume": "01",
"issue": "01",
"year": "2007",
"ext": "cbr",
"original_format": "Digital",
@ -180,6 +171,7 @@ FNS = {
"Darkwad by Carlos Zemo v01 - Knuckle Fight (2009) (Digital) (Zone-Empire).cbr": {
"series": "Darkwad by Carlos Zemo",
"volume": "01",
"issue": "01",
"year": "2009",
"ext": "cbr",
"title": "Knuckle Fight",
@ -243,3 +235,273 @@ FNS = {
"ext": "cbz",
},
}
# Tests for 0.2.0
FNS.update(
{
# Philosophy change regarding dashes.
"Bardude - The Last Thing I Remember.cbz": {
"series": "Bardude - The Last Thing I Remember",
"ext": "cbz",
},
"Drunkguy - The Man Without Fear - 01.cbz": {
"series": "Drunkguy - The Man Without Fear",
"issue": "01",
"ext": "cbz",
},
# BIG change: title after token, more stripping.
"'Batman - Superman - World's Finest 022 (2024) (Webrip) (The Last Kryptonian-DCP).cbz": {
"ext": "cbz",
"issue": "022",
"original_format": "Webrip",
"series": "Batman - Superman - World's Finest",
"scan_info": "The Last Kryptonian-DCP",
"year": "2024",
},
# Issue number starting with a letter, as requested in https://github.com/comictagger/comictagger/issues/543
# Word characters are now allowed to lead issue numbers only if preceded by a '#' marker.
"batman #B01 title.cbz": {
"ext": "cbz",
"issue": "B01",
"series": "batman",
"title": "title",
},
"Monster_Island_v1_#2__repaired__c2c.cbz": {
"ext": "cbz",
"issue": "2",
"series": "Monster Island",
"volume": "1",
"scan_info": "c2c",
"remainders": ("(repaired)",),
},
# Extra - in the series
" X-Men-V1-#067.cbr": {
"ext": "cbr",
"issue": "067",
"series": "X-Men",
"volume": "1",
"remainders": ("-",),
},
"Aquaman - Green Arrow - Deep Target #01 (of 07) (2021).cbr": {
"ext": "cbr",
"issue": "01",
"series": "Aquaman - Green Arrow - Deep Target",
"year": "2021",
"issue_count": "07",
},
# CT only separates this into a title if the '-' is attached to the previous word, e.g. 'aquaman- Green Arrow'. @bpepple already opened a ticket for this: https://github.com/ajslater/comicfn2dict/issues/1
"Batman_-_Superman_#020_(2021).cbr": {
"ext": "cbr",
"issue": "020",
"series": "Batman - Superman",
"year": "2021",
},
# Publishers like to re-print some of their annuals using this format for the year
"Batman '89 (2021) .cbr": {
"ext": "cbr",
"series": "Batman '89",
"year": "2021",
},
# This made the parser in CT much more complicated. It's understandable that this isn't parsed in the first few iterations of this project.
"Star Wars - War of the Bounty Hunters - IG-88 (2021).cbz": {
"ext": "cbz",
"series": "Star Wars - War of the Bounty Hunters - IG-88",
"year": "2021",
},
# The addition of the '#1' turns this into the same as 'Aquaman - Green Arrow - Deep Target' above.
"Star Wars - War of the Bounty Hunters - IG-88 #1 (2021).cbz": {
"ext": "cbz",
"issue": "1",
"series": "Star Wars - War of the Bounty Hunters - IG-88",
"year": "2021",
},
"Free Comic Book Day - Avengers.Hulk (2021).cbz": {
"ext": "cbz",
"series": "Free Comic Book Day - Avengers Hulk",
"year": "2021",
},
# CT assumes the volume is also the issue number if it can't find an issue number
"Avengers By Brian Michael Bendis volume 03 (2013).cbz": {
"ext": "cbz",
"issue": "03",
"series": "Avengers By Brian Michael Bendis",
"volume": "03",
"year": "2013",
},
# CT catches the year
"Marvel Previews #002 (January 2022).cbr": {
"ext": "cbr",
"issue": "002",
"series": "Marvel Previews",
"publisher": "Marvel",
"month": "01",
"year": "2022",
},
"Test Numeric Year #2 2001-02-24.cbz": {
"ext": "cbz",
"issue": "2",
"series": "Test Numeric Year",
"year": "2001",
"month": "02",
"day": "24",
},
"Test Month First Date 02-24-2001.cbz": {
"ext": "cbz",
"series": "Test Month First Date",
"year": "2001",
"month": "02",
"day": "24",
},
# CT notices that this is a full date; CT doesn't actually return the month or day, though, it just removes them.
"X-Men, 2021-08-04 (#02).cbz": {
"ext": "cbz",
"issue": "02",
"series": "X-Men",
"year": "2021",
"month": "08",
"day": "04",
},
# 4-digit issue number
# Should this be an issue number if it looks like a year? DONE.
"action comics 1024.cbz": {
"ext": "cbz",
"issue": "1024",
"series": "action comics",
},
# This is a contrived test case. I've never seen this; I just wanted to handle it with my parser.
"Cory Doctorow's Futuristic Tales of the Here and Now #0.0.1 (2007).cbz": {
"ext": "cbz",
"issue": "0.0.1",
"series": "Cory Doctorow's Futuristic Tales of the Here and Now",
"year": "2007",
},
# CT treats ':' the same as '-', but here the ':' is attached to 'Now', which CT sees as a title separation.
"Cory Doctorow's Futuristic Tales of the Here and Now: Anda's Game #001 (2007).cbz": {
"ext": "cbz",
"issue": "001",
"series": "Cory Doctorow's Futuristic Tales of the Here and Now",
"title": "Anda's Game",
"year": "2007",
},
# If a title ends in a year, it's not an issue (and it becomes the year if no other year is present).
"Blade Runner Free Comic Book Day 2021 (2021).cbr": {
"ext": "cbr",
"series": "Blade Runner Free Comic Book Day 2021",
"year": "2021",
},
# If a year occurs after another year and there is no volume, treat them as volume / year.
"Super Strange Yarns (1957) #92 (1969).cbz": {
"ext": "cbz",
"issue": "92",
"series": "Super Strange Yarns",
"volume": "1957",
"year": "1969",
},
# CT checks for the '(of 06)' following the '03' and marks the '03' as the volume.
"Elephantmen 2259 #008 - Simple Truth 03 (of 06) (2021).cbr": {
"ext": "cbr",
"issue": "008",
"series": "Elephantmen 2259",
"title": "Simple Truth",
"volume": "03",
"year": "2021",
"volume_count": "06",
},
# CT treats 'Book' like 'v' but also adds it as the title (matches ComicVine for this particular series).
"Bloodshot Book 03 (2020).cbr": {
"ext": "cbr",
"issue": "03",
"series": "Bloodshot",
"title": "Book 03",
"volume": "03",
"year": "2020",
},
# c2c, aka "cover to cover", is fairly common; CT moves it to scan_info/remainder.
"Marvel Two In One V1 #090 c2c.cbr": {
"ext": "cbr",
"issue": "090",
"series": "Marvel Two In One",
"publisher": "Marvel",
"volume": "1",
"scan_info": "c2c",
},
# CT treats '[]' as equivalent to '()', catches DC as a publisher and 'Sep-Oct 1951' as dates, and removes them. CT doesn't catch 'digital', though, so that could be better, but I blame whoever made this atrocious filename.
"Wonder Woman #49 DC Sep-Oct 1951 digital [downsized, lightened, 4 missing story pages restored] (Shadowcat-Empire).cbz": {
"ext": "cbz",
"issue": "49",
"series": "Wonder Woman",
"publisher": "DC",
"year": "1951",
"month": "09",
"remainders": (
"digital (downsized, lightened, 4 missing story pages "
"restored) (Shadowcat-Empire)",
),
},
"Captain Science #001 (1950) The Beginning - nothing.cbz": {
"ext": "cbz",
"issue": "001",
"title": "The Beginning - nothing",
"series": "Captain Science",
"year": "1950",
},
"Captain Science #001-cix-cbi.cbr": {
"ext": "cbr",
"issue": "001",
"series": "Captain Science",
"title": "cix-cbi",
},
"Long Series Name v1 (2000) #001 "
"Title (TPB) (Releaser).cbz": TEST_COMIC_FIELDS_VOL,
"Long Series Name 001 (2000) (TPB-Releaser) Title.cbz": {
"series": "Long Series Name",
"issue": "001",
"year": "2000",
"original_format": "TPB",
"scan_info": "Releaser",
"remainders": ("Title",),
"ext": "cbz",
},
"Long Series Name Vol 1 "
"(2000) (TPB) (Releaser & Releaser-Releaser) Title.cbr": {
"series": "Long Series Name",
"volume": "1",
"issue": "1",
"remainders": ("Title",),
"original_format": "TPB",
"year": "2000",
"scan_info": "Releaser & Releaser-Releaser",
"ext": "cbr",
},
}
)
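The 0.2.0 block above encodes the new dash philosophy: a free-standing ' - ' stays inside the series name instead of splitting off a title. A minimal sketch of what these fixtures assert, assuming the package is installed and that the verbose flag of ComicFilenameParser (seen in the updated tests below) defaults to quiet:

from comicfn2dict import ComicFilenameParser

# Dashes remain part of the series under the 0.2.0 philosophy.
md = ComicFilenameParser("Bardude - The Last Thing I Remember.cbz").parse()
assert md["series"] == "Bardude - The Last Thing I Remember"
assert md["ext"] == "cbz"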
PARSE_FNS = MappingProxyType(FNS)
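Wrapping the fixture dicts in MappingProxyType hands the test modules a read-only view, so no parametrized test can mutate shared state. A tiny illustration with a hypothetical fixture, not taken from the file:

from types import MappingProxyType

fixtures = {"a.cbz": {"ext": "cbz"}}
frozen = MappingProxyType(fixtures)
assert frozen["a.cbz"]["ext"] == "cbz"  # reads work as usual
# frozen["b.cbz"] = {}  # would raise TypeError: the proxy is read-only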
SERIALIZE_FNS = MappingProxyType(
{
"Long Series Name #001 (2000) Title (TPB) (Releaser).cbz": TEST_COMIC_FIELDS,
"Long Series Name v1 #001 "
"(2000) Title (TPB) (Releaser & Releaser-Releaser).cbr": TEST_COMIC_VOL_ONLY,
"Series Name (2000-12-31).cbz": {
"series": "Series Name",
"year": "2000",
"month": "12",
"day": "31",
"ext": "cbz",
},
"Series Name (2000-12).cbz": {
"series": "Series Name",
"year": "2000",
"month": "12",
"ext": "cbz",
},
"Series Name (Dec-31).cbz": {
"series": "Series Name",
"month": "12",
"day": "31",
"ext": "cbz",
},
}
)

View File

@ -1,22 +1,18 @@
"""Tests for filename parsing."""
from pprint import pprint
from types import MappingProxyType
import pytest
from deepdiff.diff import DeepDiff
from comicfn2dict import comicfn2dict
from tests.comic_filenames import FNS
ALL_FIELDS = frozenset({"series", "volume", "issue", "issue_count", "year", "ext"})
FIELD_SCHEMA = MappingProxyType({key: None for key in ALL_FIELDS})
from comicfn2dict import ComicFilenameParser
from tests.comic_filenames import PARSE_FNS
@pytest.mark.parametrize("item", FNS.items())
@pytest.mark.parametrize("item", PARSE_FNS.items())
def test_parse_filename(item):
"""Test filename parsing."""
fn, defined_fields = item
md = comicfn2dict(fn)
md = ComicFilenameParser(fn, verbose=1).parse()
diff = DeepDiff(defined_fields, md, ignore_order=True)
print(fn)
pprint(defined_fields)
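The rewritten test leans on deepdiff to report any mismatch between expected and parsed metadata. A small standalone sketch of that comparison, with hypothetical sample dicts:

from deepdiff.diff import DeepDiff

expected = {"series": "Night of 1000 Wolves", "issue": "001", "ext": "cbz"}
parsed = {"series": "Night of 1000 Wolves", "issue": "001", "ext": "cbz"}
# DeepDiff returns an empty result when the mappings match, so the test
# can simply assert that the diff is falsy.
assert not DeepDiff(expected, parsed, ignore_order=True)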

View File

@ -0,0 +1,13 @@
"""Tests for filename parsing."""
import pytest
from comicfn2dict import ComicFilenameSerializer
from tests.comic_filenames import SERIALIZE_FNS
@pytest.mark.parametrize("item", SERIALIZE_FNS.items())
def test_serialize_dict(item):
"""Test metadata serialization."""
test_fn, md = item
fn = ComicFilenameSerializer(md).serialize()
assert test_fn == fn
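Taken together with the parser fixtures, these cases imply a round trip: parsing a well-formed filename and serializing the result should reproduce it. A sketch under that assumption, using the shared 'Long Series Name' fixture:

from comicfn2dict import ComicFilenameParser, ComicFilenameSerializer

FN = "Long Series Name #001 (2000) Title (TPB) (Releaser).cbz"
md = ComicFilenameParser(FN).parse()
# Both PARSE_FNS and SERIALIZE_FNS map this name to TEST_COMIC_FIELDS,
# so the serialized result should match the original filename.
assert ComicFilenameSerializer(md).serialize() == FN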