2019-08-23 21:34:42 -07:00
|
|
|
from flask import current_app
|
2019-07-06 23:00:00 -07:00
|
|
|
from comicapi import comicarchive
|
|
|
|
from blinker import Namespace
|
2019-07-23 15:49:57 -07:00
|
|
|
|
2019-07-11 17:35:30 -07:00
|
|
|
from io import BytesIO
|
2019-07-15 22:51:10 -07:00
|
|
|
from wand.image import Image
|
2020-03-27 17:08:03 -07:00
|
|
|
import os, re
|
2019-08-23 21:34:42 -07:00
|
|
|
import inspect
|
2019-07-06 23:00:00 -07:00
|
|
|
|
|
|
|
from scripts import database
|
|
|
|
|
|
|
|
# Blinker signal namespace for the media loaders; consumers subscribe to
# these signals to persist scanned media into the database.
rpi_signals = Namespace()

# Emitted with meta=[(path, metadata)] and thumbnails=[...] batches.
comic_loaded = rpi_signals.signal("comic-loaded")
# Emitted with movies=[movie_tuple, ...] batches.
movie_loaded = rpi_signals.signal("movie-loaded")
# Emitted once per newly discovered show with tv_show=tuple.
tv_show_loaded = rpi_signals.signal("tv_show_loaded")
# Emitted per show with tv_episodes=[episode_tuple, ...].
tv_episodes_loaded = rpi_signals.signal("tv_episodes_loaded")

# Comics whose publisher appears here are skipped during scanning.
publishers_to_ignore = ["***REMOVED***"]

# Directories
# Media locations on the Raspberry Pi host.
RPI_COMICS_DIRECTORY = "/usb/storage/media/Comics/"
RPI_MOVIES_DIRECTORY = "/usb/storage/media/Videos/Movies/"
RPI_TV_SHOWS_DIRECTORY = "/usb/storage/media/Videos/TV/"
RPI_VIDEOS_DIRECTORY = "/usb/storage/media/Videos/Videos/"
RPI_GAMES_DIRECTORY = "/usb/storage/media/games/"
RPI_MUSIC_DIRECTORY = "/usb/storage/media/Music/"

# Fallback comics location on the Windows development machine.
MC_COMICS_DIRECTORY = "C:\\Users\\Matthew\\Documents\\Comics"

# Prefer the Pi directory when it exists; otherwise fall back to the dev box.
COMICS_DIRECTORY = RPI_COMICS_DIRECTORY if os.path.exists(RPI_COMICS_DIRECTORY) else MC_COMICS_DIRECTORY
MOVIES_DIRECTORY = RPI_MOVIES_DIRECTORY
TV_SHOWS_DIRECTORY = RPI_TV_SHOWS_DIRECTORY

#############
|
|
|
|
|
|
|
|
|
|
|
|
def get_comics():
    """Walk COMICS_DIRECTORY for .cbr archives and load new ones.

    For every .cbr whose path is not already in the database, read its
    ComicInfo (CIX) metadata and page thumbnails, then emit the
    ``comic_loaded`` signal in small batches so listeners can persist
    incrementally.  Progress totals go to the Flask app logger.
    """
    total_comics = 0   # every .cbr file seen during the walk
    comics_in_db = 0   # .cbr files skipped because their path is already stored
    comics_added = 0   # comics successfully parsed this run
    meta = []          # pending (path, metadata) batch
    thumbnails = []    # pending thumbnail batch, parallel to `meta`
    batch_count = 0    # entries accumulated since the last flush
    for root, dirs, files in os.walk(COMICS_DIRECTORY):
        for f in files:
            # Skip scratch/temp extraction folders.
            if "temp" in root:
                continue
            if f.endswith(".cbr"):
                total_comics += 1
                path = os.path.join(root, f)
                if not database.comic_path_in_db(path):
                    # Guard against paths that cannot be represented as UTF-8;
                    # downstream storage requires it.  (Was an unused
                    # `test_path = ...` binding with a broad except.)
                    try:
                        path.encode("utf8")
                    except UnicodeError:
                        current_app.logger.info("encoding failed on: %s", path)
                        continue
                    archive = open_comic(path)
                    md = archive.readCIX()
                    if md.publisher in publishers_to_ignore:
                        continue
                    current_app.logger.info(path)
                    try:
                        meta.append((path, md))
                        thumbnails.append(get_comic_thumbnails(archive))
                        comics_added += 1
                        batch_count += 1
                    except Exception as e:
                        # Thumbnail extraction is best-effort: log and move on.
                        current_app.logger.info("%s %s %s", inspect.stack()[0][3], type(e), e)
                        continue
                    # Flush in batches of 2 to keep memory bounded while
                    # thumbnails (large blobs) are in flight.
                    if batch_count >= 2:
                        comic_loaded.send("anonymous", meta=meta.copy(), thumbnails=thumbnails.copy())
                        meta.clear()
                        thumbnails.clear()
                        batch_count = 0
                else:
                    comics_in_db += 1
    current_app.logger.info("total number of comics: %s", total_comics)
    current_app.logger.info("comics in database: %s", comics_in_db)
    current_app.logger.info("number of comics added: %s", comics_added)
    # Flush any remainder smaller than a full batch.
    comic_loaded.send("anonymous", meta=meta, thumbnails=thumbnails)
|
|
|
|
|
|
|
|
|
2019-07-23 15:49:57 -07:00
|
|
|
def get_comic(path):
    """Load the single .cbr comic at ``path`` into the database.

    Mirrors :func:`get_comics` for one file: skip if already stored, skip
    ignored publishers, then emit ``comic_loaded`` with a one-element batch.
    Returns None in all cases; failures are logged, not raised.
    """
    meta = []
    thumbnails = []
    if path.endswith(".cbr"):
        if not database.comic_path_in_db(path):
            # Reject paths that cannot be represented as UTF-8 (see get_comics).
            try:
                path.encode("utf8")
            except UnicodeError:
                current_app.logger.info("encoding failed on: %s", path)
                return
            archive = open_comic(path)
            md = archive.readCIX()
            if md.publisher in publishers_to_ignore:
                return
            current_app.logger.info(path)
            meta.append((path, md))
            try:
                thumbnails.append(get_comic_thumbnails(archive))
            except Exception as e:
                # Thumbnail extraction failed: log and abort this comic.
                current_app.logger.info("%s %s %s", inspect.stack()[0][3], type(e), e)
                return
            comic_loaded.send("anonymous", meta=meta, thumbnails=thumbnails)
|
2019-07-23 15:49:57 -07:00
|
|
|
|
|
|
|
|
2019-07-11 17:35:30 -07:00
|
|
|
def get_comic_thumbnails(comic):
    """Return a list of (blob, mime_type) thumbnails, one per page of ``comic``.

    Each page is scaled to fit inside a 256x256 box while preserving its
    aspect ratio: the longer edge becomes 256 and the shorter edge shrinks
    proportionally.
    """
    thumbnails = []
    new_height = 256
    new_width = 256
    # (Removed unused local `size = "256x256"`.)
    for page in range(comic.getNumberOfPages()):
        image_bytes = BytesIO(comic.getPage(page))
        image = Image(file=image_bytes)
        orig_height = image.height
        orig_width = image.width
        if orig_height >= orig_width:
            # Portrait (or square): clamp height, scale width.
            width = int((orig_width / orig_height) * new_height)
            height = new_height
        else:
            # Landscape: clamp width, scale height.
            height = int((orig_height / orig_width) * new_width)
            width = new_width
        image.thumbnail(width, height)
        thumbnails.append((image.make_blob(), "image/" + image.format))
    return thumbnails
|
|
|
|
|
|
|
|
|
|
|
|
def open_comic(path):
    """Open the comic archive at ``path``.

    The bundled app icon serves as the fallback page image when a page
    cannot be read from the archive.
    """
    return comicarchive.ComicArchive(path, default_image_path="static/images/icon.png")
|
2019-07-06 23:00:00 -07:00
|
|
|
|
|
|
|
|
2019-07-23 15:49:57 -07:00
|
|
|
def get_movies():
    """Walk MOVIES_DIRECTORY for .mkv movies and load new ones.

    Filenames must look like ``Title (Year)[(extended)][ Director's Cut].mkv``.
    Each new file is resolved against IMDb then TMDb; successfully resolved
    movies are emitted on ``movie_loaded`` in batches of 20.  Per-file errors
    are logged and the scan continues.
    """
    current_app.logger.info("start load movies")
    # Compiled once instead of re-parsed per file.
    pattern = re.compile(
        r"(?P<title>.+) \((?P<year>\d+)\)(?P<extended>\(extended\))?"
        r"(?P<directors_cut> Director's Cut)?(?P<extension>\.mkv)"
    )
    movies = []        # pending batch of movie tuples
    total_movies = 0   # every .mkv seen
    movies_in_db = 0   # skipped: path already stored
    movies_added = 0   # resolved and queued this run
    for root, dirs, files in os.walk(MOVIES_DIRECTORY):
        for f in files:
            if f.endswith(".mkv"):
                total_movies += 1
                path = os.path.join(root, f)
                if not database.movie_path_in_db(path):
                    try:
                        match = pattern.match(f)
                        if not match:
                            current_app.logger.info("%s did not match regex.", f)
                            continue
                        current_app.logger.info("movie path: %s", path)
                        title = match.group("title")
                        current_app.logger.info("movie title: %s", title)
                        year = int(match.group("year"))
                        extended = False
                        directors_cut = False
                        # Strip the edition marker from the title before the
                        # IMDb lookup (no-op when the marker is absent from it).
                        if match.group("extended"):
                            extended = True
                            imdb_data = database.imdb_get_movie(title.replace(match.group("extended"), ""), year)
                        elif match.group("directors_cut"):
                            directors_cut = True
                            imdb_data = database.imdb_get_movie(title.replace(match.group("directors_cut"), ""), year)
                        else:
                            imdb_data = database.imdb_get_movie(title, year)
                        if not imdb_data:
                            current_app.logger.info("could not get imdb data for: %s %s", title, year)
                            continue
                        imdb_id = imdb_data["tconst"]
                        length = imdb_data["runtimeMinutes"]

                        tmdb_data = database.tmdb_get_movie_by_imdb_id(imdb_id)
                        if not tmdb_data:
                            current_app.logger.info("could not get tmdb data")
                            continue
                        tmdb_id = tmdb_data[0]
                        description = tmdb_data[1]
                        poster_path = tmdb_data[2]
                        backdrop_path = tmdb_data[3]
                        movies_added += 1

                        movies.append((path, imdb_id, tmdb_id, title, year, length, description,
                                       extended, directors_cut, poster_path, backdrop_path))
                        # Flush in batches of 20 to bound memory use.
                        if len(movies) >= 20:
                            movie_loaded.send("anonymous", movies=movies.copy())
                            movies.clear()
                    except Exception as e:
                        # Best-effort per file: log the failure and keep scanning.
                        current_app.logger.info("%s %s %s", inspect.stack()[0][3], type(e), e)
                else:
                    movies_in_db += 1
    # Flush any remainder smaller than a full batch.
    movie_loaded.send("anonymous", movies=movies)
    current_app.logger.info("finish load movies")
    current_app.logger.info("total movies: %s", total_movies)
    current_app.logger.info("movies in database: %s", movies_in_db)
    current_app.logger.info("movies added: %s", movies_added)
|
2019-07-30 15:19:03 -07:00
|
|
|
|
|
|
|
|
|
|
|
def get_tv_shows():
    """Scan TV_SHOWS_DIRECTORY for ``Title (Year)`` folders and load new shows.

    Each new show is resolved against IMDb then TMDb and emitted on the
    ``tv_show_loaded`` signal.  TMDb misses are also appended to an on-disk
    log for offline review.
    """
    # Compiled once instead of re-parsed per directory entry.
    dir_pattern = re.compile(r"(?P<title>.+) \((?P<year>\d+)\)")
    for entry in sorted(os.listdir(TV_SHOWS_DIRECTORY)):  # was `dir`, shadowing the builtin
        dir_match = dir_pattern.match(entry)
        if dir_match:
            path = TV_SHOWS_DIRECTORY + entry + "/"
            if not database.tv_show_path_in_db(path):
                series_name = dir_match.group("title")
                series_year = int(dir_match.group("year"))
                imdb_data = database.imdb_get_tv_show(series_name, series_year)
                if not imdb_data:
                    current_app.logger.info("could not get imdb data for:%s %s", series_name, series_year)
                    continue
                imdb_id = imdb_data["tconst"]
                tmdb_data = database.tmdb_get_tv_show_by_imdb_id(imdb_id)
                if not tmdb_data:
                    current_app.logger.info("could not get tmdb data for:%s %s", series_name, series_year)
                    # Append (not truncate) so earlier misses are preserved.
                    with open("/var/lib/rpiWebApp/log.txt", "a") as log_file:
                        log_file.write("could not get tmdb data for: " + imdb_id + " " + series_name + " " + str(series_year)+"\n")
                    continue
                tmdb_id = tmdb_data[0]
                description = tmdb_data[1]
                poster_path = tmdb_data[2]
                tv_show_data = (imdb_id, tmdb_id, series_name, series_year, description, poster_path, path)
                tv_show_loaded.send("anonymous", tv_show=tv_show_data)
    current_app.logger.info("finished load tv shows.")
|
2019-07-30 15:19:03 -07:00
|
|
|
|
|
|
|
|
|
|
|
def get_tv_episodes():
    """Scan every known show's folder for new ``SxxEyy - Title`` episodes.

    New episodes are resolved against IMDb then TMDb and emitted per show on
    the ``tv_episodes_loaded`` signal.  Any unexpected error aborts the scan
    and is logged via the Flask app logger.
    """
    try:
        # Compiled once instead of re-parsed per video file.
        video_pattern = re.compile(r"S(?P<season>\d+)E(?P<episode>\d+) - (?P<title>.+)(?P<extension>.mp4|.mkv)")
        rows = database.get_all_tv_shows()
        for tv_show in rows:
            episodes = []
            for video in sorted(os.listdir(tv_show.path)):
                video_match = video_pattern.match(video)
                if video_match:
                    path = tv_show.path + video
                    if not database.tv_episode_path_in_db(path):
                        season = int(video_match.group("season"))
                        episode = int(video_match.group("episode"))
                        episode_name = video_match.group("title")
                        episode_imdb_data = database.imdb_get_tv_episode(tv_show.imdb_id, season, episode)
                        if not episode_imdb_data:
                            # (Removed the duplicate stdout print next to this log call.)
                            current_app.logger.info("could not get imdb data for: %s %s %s %s",
                                                    tv_show.title, tv_show.year, season, episode)
                            continue
                        episode_imdb_id = episode_imdb_data["tconst"]
                        episode_tmdb_data = database.tmdb_get_tv_episode_by_imdb_id(episode_imdb_id)
                        if not episode_tmdb_data:
                            current_app.logger.info("could not get tmdb data for: %s %s %s %s",
                                                    tv_show.title, tv_show.year, season, episode)
                            # BUG FIX: mode was "w", truncating the log on every
                            # miss; append like get_tv_shows does.
                            with open("/var/lib/rpiWebApp/log.txt", "a") as log_file:
                                log_file.write("could not get tmdb data for: " + episode_imdb_id + " " + tv_show.title + " " + str(
                                    tv_show.year) + " " + str(season) + " " + str(episode) + "\n")
                            continue
                        episode_tmdb_id = episode_tmdb_data[0]
                        episode_description = episode_tmdb_data[1]
                        episode_still_path = episode_tmdb_data[2]
                        episodes.append((episode_imdb_id, tv_show.imdb_id, episode_tmdb_id, episode_name, season, episode,
                                         episode_description, episode_still_path, path))
            # One batch per show, even if empty, so listeners see every show.
            tv_episodes_loaded.send("anonymous", tv_episodes=episodes)
        current_app.logger.info("finished load tv episodes.")
    except Exception as e:
        current_app.logger.info("%s %s %s", inspect.stack()[0][3], type(e), e)
|
|
|
|
|