Enable stricter mypy configuration

Timmy Welch 2023-11-23 15:58:00 -08:00
parent 9aad872ae6
commit 305eb1dec5
8 changed files with 77 additions and 37 deletions

View File

@@ -26,7 +26,7 @@ path = f"dist/{app_name}"
zip_file = pathlib.Path(f"dist/{final_name}.zip")
def addToZip(zf, path, zippath):
def addToZip(zf: zipfile.ZipFile, path: str, zippath: str) -> None:
if os.path.isfile(path):
zf.write(path, zippath)
elif os.path.isdir(path):
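
The hunk above shows only the newly annotated signature and the single-file branch of addToZip. A minimal standalone sketch of the same pattern, assuming a plain os.listdir recursion for the directory branch (that body and the dist/example paths are illustrative, not the repository's code):

from __future__ import annotations

import os
import zipfile


def add_to_zip(zf: zipfile.ZipFile, path: str, zippath: str) -> None:
    # Mirrors the annotated addToZip signature above; the directory branch is assumed.
    if os.path.isfile(path):
        zf.write(path, zippath)
    elif os.path.isdir(path):
        for name in sorted(os.listdir(path)):
            add_to_zip(zf, os.path.join(path, name), os.path.join(zippath, name))


if __name__ == "__main__":
    with zipfile.ZipFile("dist/example.zip", "w", zipfile.ZIP_DEFLATED) as zf:
        add_to_zip(zf, "dist/example", "example")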

View File

@@ -6,7 +6,7 @@ import calendar
import os
import unicodedata
from enum import Enum, auto
from typing import Any, Callable
from typing import Any, Callable, Protocol
class ItemType(Enum):
@@ -87,11 +87,16 @@ class Item:
return f"{self.val}: index: {self.pos}: {self.typ}"
class LexerFunc(Protocol):
def __call__(self, __origin: Lexer) -> LexerFunc | None:
...
class Lexer:
def __init__(self, string: str, allow_issue_start_with_letter: bool = False) -> None:
self.input: str = string # The string being scanned
# The next lexing function to enter
self.state: Callable[[Lexer], Callable | None] | None = None # type: ignore[type-arg]
self.state: LexerFunc | None = None
self.pos: int = -1 # Current position in the input
self.start: int = 0 # Start position of this item
self.lastPos: int = 0 # Position of most recent item returned by nextItem
@@ -171,20 +176,22 @@ class Lexer:
# errorf returns an error token and terminates the scan by passing back
# None as the next state, which terminates self.nextItem.
def errorf(lex: Lexer, message: str) -> Callable[[Lexer], Callable | None] | None: # type: ignore[type-arg]
def errorf(lex: Lexer, message: str) -> Any:
lex.items.append(Item(ItemType.Error, lex.start, message))
return None
# Scans the elements inside action delimiters.
def lex_filename(lex: Lexer) -> Callable[[Lexer], Callable | None] | None: # type: ignore[type-arg]
def lex_filename(lex: Lexer) -> LexerFunc | None:
r = lex.get()
if r == eof:
if lex.paren_depth != 0:
return errorf(lex, "unclosed left paren")
errorf(lex, "unclosed left paren")
return None
if lex.brace_depth != 0:
return errorf(lex, "unclosed left paren")
errorf(lex, "unclosed left paren")
return None
lex.emit(ItemType.EOF)
return None
elif is_space(r):
@@ -230,7 +237,8 @@ def lex_filename(lex: Lexer) -> Callable[[Lexer], Callable | None] | None: # ty
lex.emit(ItemType.RightParen)
lex.paren_depth -= 1
if lex.paren_depth < 0:
return errorf(lex, "unexpected right paren " + r)
errorf(lex, "unexpected right paren " + r)
return None
elif r == "{":
lex.emit(ItemType.LeftBrace)
@@ -239,7 +247,8 @@ def lex_filename(lex: Lexer) -> Callable[[Lexer], Callable | None] | None: # ty
lex.emit(ItemType.RightBrace)
lex.brace_depth -= 1
if lex.brace_depth < 0:
return errorf(lex, "unexpected right brace " + r)
errorf(lex, "unexpected right brace " + r)
return None
elif r == "[":
lex.emit(ItemType.LeftSBrace)
@@ -248,19 +257,21 @@ def lex_filename(lex: Lexer) -> Callable[[Lexer], Callable | None] | None: # ty
lex.emit(ItemType.RightSBrace)
lex.sbrace_depth -= 1
if lex.sbrace_depth < 0:
return errorf(lex, "unexpected right brace " + r)
errorf(lex, "unexpected right brace " + r)
return None
elif is_symbol(r):
if unicodedata.category(r) == "Sc":
return lex_currency
lex.accept_run(is_symbol)
lex.emit(ItemType.Symbol)
else:
return errorf(lex, "unrecognized character in action: " + repr(r))
errorf(lex, "unrecognized character in action: " + repr(r))
return None
return lex_filename
def lex_currency(lex: Lexer) -> Callable:
def lex_currency(lex: Lexer) -> LexerFunc:
orig = lex.pos
lex.accept_run(is_space)
if lex.peek().isnumeric():
@@ -272,7 +283,7 @@ def lex_currency(lex: Lexer) -> Callable:
return lex_filename
def lex_operator(lex: Lexer) -> Callable: # type: ignore[type-arg]
def lex_operator(lex: Lexer) -> LexerFunc:
lex.accept_run("-|:;")
lex.emit(ItemType.Operator)
return lex_filename
@@ -280,7 +291,7 @@ def lex_operator(lex: Lexer) -> Callable: # type: ignore[type-arg]
# lex_space scans a run of space characters.
# One space has already been seen.
def lex_space(lex: Lexer) -> Callable: # type: ignore[type-arg]
def lex_space(lex: Lexer) -> LexerFunc:
lex.accept_run(is_space)
lex.emit(ItemType.Space)
@@ -288,7 +299,7 @@ def lex_space(lex: Lexer) -> Callable: # type: ignore[type-arg]
# lex_text scans an alphanumeric run.
def lex_text(lex: Lexer) -> Callable: # type: ignore[type-arg]
def lex_text(lex: Lexer) -> LexerFunc:
while True:
r = lex.get()
if is_alpha_numeric(r):
@@ -327,7 +338,7 @@ def cal(value: str) -> set[Any]:
return set(month_abbr + month_name + day_abbr + day_name)
def lex_number(lex: Lexer) -> Callable[[Lexer], Callable | None] | None: # type: ignore[type-arg]
def lex_number(lex: Lexer) -> LexerFunc | None:
if not lex.scan_number():
return errorf(lex, "bad number syntax: " + lex.input[lex.start : lex.pos])
# Complex number logic removed. Messes with math operations without space
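
LexerFunc is the recursive state-function type used throughout this file: every lex_* function takes the Lexer and returns the next state, or None to stop, so a driving loop can keep replacing the current state with its return value until it gets None. Under disallow_any_generics the old bare Callable annotations are rejected, and a Protocol can name itself in its own signature, which a plain Callable alias cannot do without ignore comments. A self-contained sketch of that pattern (the State, Machine, start, and stop names are illustrative, not from the repository):

from __future__ import annotations

from typing import Protocol


class State(Protocol):
    # A state function receives the machine and returns the next state, or None to stop.
    def __call__(self, m: Machine) -> State | None: ...


class Machine:
    def __init__(self) -> None:
        self.state: State | None = start
        self.count = 0

    def run(self) -> None:
        # Keep calling the current state until a state returns None.
        while self.state is not None:
            self.state = self.state(self)


def start(m: Machine) -> State | None:
    m.count += 1
    return stop if m.count >= 3 else start


def stop(m: Machine) -> State | None:
    return None


if __name__ == "__main__":
    machine = Machine()
    machine.run()
    print(machine.count)  # 3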

View File

@@ -27,7 +27,7 @@ import os
import re
from operator import itemgetter
from re import Match
from typing import Callable, TypedDict
from typing import Protocol, TypedDict
from urllib.parse import unquote
from text2digits import text2digits
@@ -343,6 +343,11 @@ protofolius_issue_number_scheme = {
}
class ParserFunc(Protocol):
def __call__(self, __origin: Parser) -> ParserFunc | None:
...
eof = filenamelexer.Item(filenamelexer.ItemType.EOF, -1, "")
@@ -360,7 +365,7 @@ class Parser:
remove_publisher: bool = False,
protofolius_issue_number_scheme: bool = False,
) -> None:
self.state: Callable[[Parser], Callable | None] | None = None # type: ignore[type-arg]
self.state: ParserFunc | None = None
self.pos = -1
self.firstItem = True
@@ -450,7 +455,7 @@ class Parser:
self.state = self.state(self)
def parse(p: Parser) -> Callable[[Parser], Callable | None] | None: # type: ignore[type-arg]
def parse(p: Parser) -> ParserFunc:
item: filenamelexer.Item = p.get()
# We're done, time to do final processing
if item.typ == filenamelexer.ItemType.EOF:
@@ -706,7 +711,7 @@ def parse(p: Parser) -> Callable[[Parser], Callable | None] | None: # type: ign
# TODO: What about more esoteric numbers???
def parse_issue_number(p: Parser) -> Callable[[Parser], Callable | None] | None: # type: ignore[type-arg]
def parse_issue_number(p: Parser) -> ParserFunc:
item = p.input[p.pos]
if p.filename_info["issue"]:
@@ -739,7 +744,7 @@ def parse_issue_number(p: Parser) -> Callable[[Parser], Callable | None] | None:
# i=None is a split in the series
def parse_series(p: Parser, i: filenamelexer.Item | None) -> Callable[[Parser], Callable | None] | None:
def parse_series(p: Parser, i: filenamelexer.Item | None) -> ParserFunc:
current = []
prev_space = False
@@ -1016,7 +1021,7 @@ def split_series(items: list[list[filenamelexer.Item]]) -> tuple[list[filenamele
return series, title
def parse_finish(p: Parser) -> Callable[[Parser], Callable | None] | None: # type: ignore[type-arg]
def parse_finish(p: Parser) -> None:
for part in p.series:
p.used_items.extend(part)
p.series_parts, p.title_parts = split_series(p.series)
@@ -1104,7 +1109,7 @@ def get_remainder(p: Parser) -> str:
return remainder.strip()
def parse_info_specifier(p: Parser) -> Callable[[Parser], Callable | None] | None: # type: ignore[type-arg]
def parse_info_specifier(p: Parser) -> ParserFunc:
item = p.input[p.pos]
index = p.pos
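
ParserFunc mirrors LexerFunc: each parse_* function takes the Parser and returns the next parser state, and parse_finish returns None so the loop at self.state = self.state(self) stops. The double-underscore __origin parameter in both protocols is the pre-PEP 570 way to mark a parameter as positional-only, which lets the concrete state functions keep their own parameter names (p, lex) and still satisfy the protocol. A small sketch of that matching rule, assuming mypy's callback-protocol checking (Handler and shout are illustrative names):

from __future__ import annotations

from typing import Protocol


class Handler(Protocol):
    # __item is positional-only by convention, so implementations may rename it.
    def __call__(self, __item: str) -> str: ...


def shout(text: str) -> str:
    # Parameter is named `text`, not `__item`: allowed, because the protocol
    # parameter is positional-only.
    return text.upper()


def apply(handler: Handler, value: str) -> str:
    return handler(value)


if __name__ == "__main__":
    print(apply(shout, "ok"))  # OK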

View File

@@ -23,7 +23,7 @@ import unicodedata
from collections import defaultdict
from collections.abc import Iterable, Mapping
from shutil import which # noqa: F401
from typing import Any
from typing import Any, TypeVar
import comicapi.data
from comicapi import filenamelexer, filenameparser
@@ -39,7 +39,7 @@ except ImportError:
logger = logging.getLogger(__name__)
def _custom_key(tup):
def _custom_key(tup: Any) -> Any:
import natsort
lst = []
@@ -52,7 +52,10 @@ def _custom_key(tup):
return tuple(lst)
def os_sorted(lst: Iterable) -> Iterable:
T = TypeVar("T")
def os_sorted(lst: Iterable[T]) -> Iterable[T]:
import natsort
key = _custom_key
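
With disallow_any_generics, the old unparameterized Iterable annotations on os_sorted are implicit Iterable[Any] and get rejected; the TypeVar makes the element type flow from input to output, so sorting a list of Path objects still yields Path elements for the caller. A sketch of the same generic signature with a stand-in key (the casefold key below is illustrative, not the natsort-based _custom_key):

from __future__ import annotations

from collections.abc import Iterable
from typing import TypeVar

T = TypeVar("T")


def os_sorted(lst: Iterable[T]) -> Iterable[T]:
    # Element type T is preserved: list[str] in, Iterable[str] out.
    return sorted(lst, key=lambda x: str(x).casefold())


if __name__ == "__main__":
    names = ["B.cbz", "a.cbz", "C.cbz"]
    print(list(os_sorted(names)))  # ['a.cbz', 'B.cbz', 'C.cbz']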

View File

@@ -18,6 +18,7 @@ from __future__ import annotations
import io
import logging
import math
from collections.abc import Sequence
from functools import reduce
from statistics import median
from typing import TypeVar
@@ -98,8 +99,8 @@ class ImageHasher:
Implementation follows http://www.hackerfactor.com/blog/index.php?/archives/432-Looks-Like-It.html
"""
def generate_dct2(block, axis=0):
def dct1(block):
def generate_dct2(block: Sequence[Sequence[float]], axis: int = 0) -> list[list[float]]:
def dct1(block: Sequence[float]) -> list[float]:
"""Perform 1D Discrete Cosine Transform (DCT) on a given block."""
N = len(block)
dct_block = [0.0] * N
@@ -134,7 +135,7 @@ class ImageHasher:
return dct_block
def convert_image_to_ndarray(image):
def convert_image_to_ndarray(image: Image.Image) -> Sequence[Sequence[float]]:
width, height = image.size
pixels2 = []
@@ -173,12 +174,14 @@ class ImageHasher:
@staticmethod
def hamming_distance(h1: T, h2: T) -> int:
if isinstance(h1, int) or isinstance(h2, int):
if isinstance(h1, int):
n1 = h1
else:
n1 = int(h1, 16)
if isinstance(h2, int):
n2 = h2
else:
# convert hex strings to ints
n1 = int(h1, 16)
n2 = int(h2, 16)
# xor the two numbers
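
The hunk cuts off at the xor comment; the usual continuation of this pattern is to xor the two normalized integers and count the differing bits. A hedged standalone sketch of that computation (not the class method above, whose remaining lines are not shown):

from __future__ import annotations


def hamming_distance(h1: int | str, h2: int | str) -> int:
    # Accept either raw ints or hex strings, as the method above does.
    n1 = h1 if isinstance(h1, int) else int(h1, 16)
    n2 = h2 if isinstance(h2, int) else int(h2, 16)
    # Count the differing bits; bit_count() needs Python 3.10+,
    # bin(n1 ^ n2).count("1") works on older versions.
    return (n1 ^ n2).bit_count()


if __name__ == "__main__":
    print(hamming_distance(0b1011, "9"))  # 0b1011 ^ 0b1001 -> one differing bit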

View File

@@ -221,7 +221,7 @@ class ComicVineTalker(ComicTalker):
try:
test_url = urljoin(url, "issue/1/")
cv_response: CVResult = requests.get(
cv_response: CVResult = requests.get( # type: ignore[type-arg]
test_url,
headers={"user-agent": "comictagger/" + self.version},
params={
@@ -400,12 +400,12 @@ class ComicVineTalker(ComicTalker):
return formatted_filtered_issues_result
def _get_cv_content(self, url: str, params: dict[str, Any]) -> CVResult:
def _get_cv_content(self, url: str, params: dict[str, Any]) -> CVResult[T]:
"""
Get the content from the CV server.
"""
with self.limiter.ratelimit("cv", delay=True):
cv_response: CVResult = self._get_url_content(url, params)
cv_response: CVResult[T] = self._get_url_content(url, params)
if cv_response["status_code"] != 1:
logger.debug(
@@ -463,7 +463,7 @@ class ComicVineTalker(ComicTalker):
return formatted_results
def _format_series(self, record) -> ComicSeries:
def _format_series(self, record: CVSeries) -> ComicSeries:
# Flatten publisher to name only
if record.get("publisher") is None:
pub_name = ""
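
This hunk parameterizes CVResult by its payload type, but the TypedDict itself is defined elsewhere and not shown; only the status_code field is visible above. A guessed sketch of the generic-envelope shape that makes an annotation like CVResult[T] legal, with hypothetical field names (generic TypedDicts need Python 3.11+ or typing_extensions.TypedDict):

from __future__ import annotations

from typing import Generic, TypedDict, TypeVar

T = TypeVar("T")


class Envelope(TypedDict, Generic[T]):
    # Hypothetical envelope; only status_code appears in the diff above.
    status_code: int
    results: T


def unwrap(resp: Envelope[T]) -> T:
    if resp["status_code"] != 1:
        raise RuntimeError("API error")
    return resp["results"]


if __name__ == "__main__":
    ok: Envelope[list[str]] = {"status_code": 1, "results": ["Example #1"]}
    print(unwrap(ok))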

View File

@@ -292,3 +292,21 @@ per-file-ignores =
comictaggerlib/cli.py: T20
build-tools/generate_settngs.py: T20
tests/*: L
[mypy]
check_untyped_defs = true
disallow_any_generics = true
disallow_incomplete_defs = true
disallow_untyped_defs = true
warn_redundant_casts = true
warn_unused_ignores = true
[mypy-testing.*]
disallow_untyped_defs = false
disallow_incomplete_defs = false
check_untyped_defs = false
[mypy-tests.*]
disallow_untyped_defs = false
disallow_incomplete_defs = false
check_untyped_defs = false
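
The new [mypy] block turns the strictness knobs on for the main code, while the [mypy-testing.*] and [mypy-tests.*] overrides keep test code exempt. For reference, a small illustrative module showing what those flags reject and accept (the function names are made up):

from __future__ import annotations


def parse(name):  # rejected by disallow_untyped_defs: no annotations at all
    return name.split("-")


def count(items: list):  # rejected: bare `list` (disallow_any_generics), no return type (disallow_incomplete_defs)
    return len(items)


def parse_ok(name: str) -> list[str]:  # accepted: fully annotated
    return name.split("-")


def count_ok(items: list[str]) -> int:  # accepted: parameterized generic
    return len(items)

check_untyped_defs additionally makes mypy look inside bodies like parse() instead of skipping them, and warn_unused_ignores flags ignore comments that no longer suppress anything, which is consistent with the now-unneeded # type: ignore[type-arg] comments being dropped elsewhere in this commit.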

View File

@@ -248,5 +248,5 @@ class MockResponse:
self.result = result
self.content = content
def json(self) -> dict[str, list]:
def json(self) -> dict[str, list[Any]]:
return self.result