from comicapi import comicarchive
from blinker import Namespace
from io import BytesIO
from wand.image import Image
import os
import sys

from scripts import database

rpi_signals = Namespace()
comic_loaded = rpi_signals.signal("comic-loaded")
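
# Example (illustrative): a receiver elsewhere in the app (for example, the
# database layer) could subscribe to the batches that get_comics() emits on
# this signal. The handler below is a hypothetical sketch, not a receiver
# defined in this module:
#
#     @comic_loaded.connect
#     def on_comic_loaded(sender, meta, thumbnails):
#         for (path, md), pages in zip(meta, thumbnails):
#             ...  # persist the metadata and thumbnail blobs for each comic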

publishers_to_ignore = ["***REMOVED***"]

# Directories
RPI_COMICS_DIRECTORY = "/usb/storage/media/Comics/"
RPI_MOVIES_DIRECTORY = "/usb/storage/media/Videos/Movies/"
RPI_TV_SHOWS_DIRECTORY = "/usb/storage/media/Videos/TV/"
RPI_VIDEOS_DIRECTORY = "/usb/storage/media/Videos/Videos/"
RPI_GAMES_DIRECTORY = "/usb/storage/media/games/"
RPI_MUSIC_DIRECTORY = "/usb/storage/media/Music/"

MC_COMICS_DIRECTORY = "C:\\Users\\Matthew\\Documents\\Comics"

# Use the Raspberry Pi media path when it exists, otherwise fall back to the local Windows path.
COMICS_DIRECTORY = RPI_COMICS_DIRECTORY if os.path.exists(RPI_COMICS_DIRECTORY) else MC_COMICS_DIRECTORY

#############


def get_comics():
    """Walk COMICS_DIRECTORY for .cbr files that are not yet in the database and
    emit their metadata and thumbnails in batches on the comic_loaded signal.
    """
    total_comics = 0
    comics_in_db = 0
    comics_added = 0
    meta = []
    thumbnails = []
    i = 0
    for root, dirs, files in os.walk(COMICS_DIRECTORY):
        for f in files:
            if f.endswith(".cbr"):
                total_comics += 1
                path = os.path.join(root, f)
                if not database.comic_path_in_db(path):
                    try:
                        # Make sure the path is UTF-8 encodable; skip it if not.
                        path.encode("utf8")
                    except Exception as e:
                        print("encoding failed on:", path)
                        print(e)
                        continue
                    archive = open_comic(path)
                    md = archive.readCIX()
                    if md.publisher in publishers_to_ignore:
                        continue
                    print(path)
                    meta.append((path, md))
                    thumbnails.append(get_comic_thumbnails(archive))
                    comics_added += 1
                    comics_in_db += 1
                    i += 1
                    # Emit comics in batches of 20 so receivers can process them incrementally.
                    if i >= 20:
                        comic_loaded.send("anonymous", meta=meta.copy(), thumbnails=thumbnails.copy())
                        meta.clear()
                        thumbnails.clear()
                        i = 0
                else:
                    comics_in_db += 1
    print("total number of comics:", total_comics)
    print("comics in database:", comics_in_db)
    print("number of comics added:", comics_added)
    # Emit whatever remains in the final partial batch.
    comic_loaded.send("anonymous", meta=meta, thumbnails=thumbnails)


def get_comic_thumbnails(comic):
    """Build a thumbnail of every page in the comic; returns a list of
    (blob, MIME type) tuples.
    """
    thumbnails = []
    # Target thumbnail size: the smaller dimension is scaled to 256 pixels.
    new_height = 256
    new_width = 256
    for page in range(comic.getNumberOfPages()):
        image_bytes = BytesIO(comic.getPage(page))
        image = Image(file=image_bytes)
        orig_height = image.height
        orig_width = image.width
        # Scale so the smaller dimension becomes exactly 256 pixels while
        # keeping the original aspect ratio.
        if (orig_width / orig_height) * new_height <= new_width:
            height = int((orig_height / orig_width) * new_width)
            width = new_width
        else:
            width = int((orig_width / orig_height) * new_height)
            height = new_height
        image.thumbnail(width, height)
        thumbnails.append((image.make_blob(), "image/" + image.format))
    return thumbnails
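
# Worked example (illustrative) for the scaling branch in get_comic_thumbnails:
# a hypothetical 1920x2951 page gives (1920 / 2951) * 256 ~= 166.5 <= 256, so the
# first branch runs and yields width = 256, height = int((2951 / 1920) * 256) = 393;
# the smaller dimension lands at exactly 256 and the aspect ratio is preserved.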


def open_comic(path):
    """Open a comic archive, using the app icon as the fallback page image."""
    archive = comicarchive.ComicArchive(path, default_image_path="static/images/icon.png")
    return archive
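
# Example (illustrative): pulling the cover image out of an archive opened with
# open_comic(). The path below is a placeholder, not a real file:
#
#     archive = open_comic("/usb/storage/media/Comics/example.cbr")
#     page_count = archive.getNumberOfPages()
#     cover_bytes = archive.getPage(0)  # raw image bytes of the first page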


def bytestring_path(path):
    """Given a path, which is either bytes or str, return a bytes path
    (ensuring that we never deal with Unicode pathnames).
    """
    # Pass through bytestrings.
    if isinstance(path, bytes):
        return path

    # Try to encode with the default encoding, but fall back to UTF-8.
    try:
        return path.encode(_fsencoding())
    except (UnicodeError, LookupError):
        return path.encode('utf8')
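
# Example (illustrative), assuming a POSIX system whose filesystem encoding is
# UTF-8; the filename is a placeholder:
#
#     bytestring_path("/usb/storage/media/Comics/example.cbr")   # -> b"/usb/storage/media/Comics/example.cbr"
#     bytestring_path(b"/already/bytes")                         # bytes pass through unchanged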


def _fsencoding():
    """Get the system's filesystem encoding. On Windows, this is always
    UTF-8 (not MBCS).
    """
    encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
    if encoding == 'mbcs':
        # On Windows, a broken encoding known to Python as "MBCS" is
        # used for the filesystem. However, we only use the Unicode API
        # for Windows paths, so the encoding is actually immaterial and
        # we can avoid dealing with this nastiness. We arbitrarily
        # choose UTF-8.
        encoding = 'utf8'
    return encoding