# rpiwebapp-public/scripts/func.py
# Media-library scan/loader helpers (comics, movies, TV, games).
from flask import current_app
from comicapi import comicarchive
from blinker import Namespace
from datetime import timedelta
from io import BytesIO
from wand.image import Image
import os, re
import inspect
import json
import enzyme
import requests
from scripts import database
# Blinker signal hub: the loader functions below emit these signals so the
# web app / database layer can persist media metadata as it is discovered.
rpi_signals = Namespace()
comic_loaded = rpi_signals.signal("comic-loaded")
movie_loaded = rpi_signals.signal("movie-loaded")
tv_show_loaded = rpi_signals.signal("tv_show_loaded")
tv_episodes_loaded = rpi_signals.signal("tv_episodes_loaded")
games_loaded = rpi_signals.signal("games_loaded")

# Comics whose ComicInfo publisher appears here are skipped by the loaders.
publishers_to_ignore = ["***REMOVED***"]
# Directories.
# Each *_DIRECTORY constant resolves to the Raspberry Pi mount when it exists
# on this host, otherwise to the development (WSL) mirror path.
RPI_COMICS_DIRECTORY = "/usb/storage/media/Comics/"
RPI_MOVIES_DIRECTORY = "/usb/storage/media/Videos/Movies/"
RPI_TV_SHOWS_DIRECTORY = "/usb/storage/media/Videos/TV/"
RPI_VIDEOS_DIRECTORY = "/usb/storage/media/Videos/Videos/"
RPI_GAMES_DIRECTORY = "/usb/storage/Games/"
RPI_MUSIC_DIRECTORY = "/usb/storage/media/Music/"
MC_COMICS_DIRECTORY = "/mnt/c/Users/Matthew/Documents/Comics/"
MC_MOVIES_DIRECTORY = "/mnt/c/Users/Matthew/Documents/Movies/"
MC_TV_SHOWS_DIRECTORY = "/mnt/c/Users/Matthew/Documents/TV/"
MC_GAMES_DIRECTORY = "/mnt/g/Humble Bundle/rpi/"
COMICS_DIRECTORY = RPI_COMICS_DIRECTORY if os.path.exists(RPI_COMICS_DIRECTORY) else MC_COMICS_DIRECTORY
MOVIES_DIRECTORY = RPI_MOVIES_DIRECTORY if os.path.exists(RPI_MOVIES_DIRECTORY) else MC_MOVIES_DIRECTORY
TV_SHOWS_DIRECTORY = RPI_TV_SHOWS_DIRECTORY if os.path.exists(RPI_TV_SHOWS_DIRECTORY) else MC_TV_SHOWS_DIRECTORY
GAMES_DIRECTORY = RPI_GAMES_DIRECTORY if os.path.exists(RPI_GAMES_DIRECTORY) else MC_GAMES_DIRECTORY
#############
def get_comics():
    """Scan COMICS_DIRECTORY for .cbr archives not yet in the database and
    emit their metadata and page thumbnails via the ``comic_loaded`` signal.

    Archives are flushed in batches of 2 (thumbnail blobs are large) with a
    final send for any remainder.  Files under a "temp" directory, paths that
    cannot be encoded as UTF-8, and ignored publishers are skipped.
    """
    total_comics = 0
    # NOTE(review): incremented for every .cbr encountered, not only those
    # already present in the DB — confirm the intended meaning of this count.
    comics_in_db = 0
    comics_added = 0

    meta = []
    thumbnails = []

    batched = 0
    for root, dirs, files in os.walk(COMICS_DIRECTORY):
        for f in files:
            # Skip anything inside a working/temp folder.
            if "temp" in root:
                continue
            if f.endswith(".cbr"):
                total_comics += 1
                path = os.path.join(root, f)
                if not database.comic_path_in_db(path):
                    try:
                        path.encode("utf8")
                    except UnicodeError:
                        current_app.logger.info("encoding failed on: " + path)
                        continue
                    archive = open_comic(path)
                    md = archive.readCIX()
                    if md.publisher in publishers_to_ignore:
                        continue
                    current_app.logger.info(path)
                    # Generate thumbnails BEFORE appending to either list so a
                    # failure cannot leave meta/thumbnails out of sync (the
                    # original appended meta first).
                    try:
                        thumbs = get_comic_thumbnails(archive)
                    except Exception as e:
                        current_app.logger.info(inspect.stack()[0][3] + " " + str(type(e)) + " " + str(e))
                        continue
                    meta.append((path, md))
                    thumbnails.append(thumbs)
                    comics_added += 1
                    batched += 1
                    if batched >= 2:
                        # Send copies so the receiver owns stable lists while
                        # we clear and reuse ours.
                        comic_loaded.send("anonymous", meta=meta.copy(), thumbnails=thumbnails.copy())
                        meta.clear()
                        thumbnails.clear()
                        batched = 0
                comics_in_db += 1
    current_app.logger.info("total number of comics: " + str(total_comics))
    current_app.logger.info("comics in database: " + str(comics_in_db))
    current_app.logger.info("number of comics added: " + str(comics_added))
    # Flush the final partial batch.
    comic_loaded.send("anonymous", meta=meta, thumbnails=thumbnails)
def get_comic(path):
    """Load a single .cbr archive and emit it via ``comic_loaded``.

    No-op for non-.cbr paths, paths already in the database, paths that
    cannot be encoded as UTF-8, and publishers on the ignore list.
    """
    if not path.endswith(".cbr"):
        return
    if database.comic_path_in_db(path):
        return
    try:
        path.encode("utf8")
    except UnicodeError:
        current_app.logger.info("encoding failed on: " + path)
        return
    archive = open_comic(path)
    md = archive.readCIX()
    if md.publisher in publishers_to_ignore:
        return
    current_app.logger.info(path)
    try:
        thumbnails = [get_comic_thumbnails(archive)]
    except Exception as e:
        current_app.logger.info(inspect.stack()[0][3] + " " + str(type(e)) + " " + str(e))
        return
    comic_loaded.send("anonymous", meta=[(path, md)], thumbnails=thumbnails)
def get_comic_thumbnails(comic):
    """Return a list of ``(blob, mime_type)`` thumbnails, one per page of
    *comic*, each scaled to fit inside a 256x256 box with aspect ratio
    preserved.
    """
    thumbnails = []
    bound = 256  # longest edge of each generated thumbnail
    for page in range(comic.getNumberOfPages()):
        image_bytes = BytesIO(comic.getPage(page))
        # ``with`` releases the wand image's native resources; the original
        # never closed them and leaked one image per page.
        with Image(file=image_bytes) as image:
            if image.height >= image.width:
                height = bound
                width = int((image.width / image.height) * bound)
            else:
                width = bound
                height = int((image.height / image.width) * bound)
            image.thumbnail(width, height)
            thumbnails.append((image.make_blob(), "image/" + image.format))
    return thumbnails
def open_comic(path):
    """Open *path* as a ComicArchive, using the app icon as the fallback page image."""
    return comicarchive.ComicArchive(path, default_image_path="static/images/icon.png")
def get_movies():
    """Scan MOVIES_DIRECTORY for "Title (Year).mkv" files not yet in the
    database, resolve IMDb/TMDb metadata, and emit rows in batches of 20 via
    the ``movie_loaded`` signal.
    """
    current_app.logger.info("start load movies")
    pattern = r"(?P<title>.+) \((?P<year>\d+)\)(?P<extended>\(extended\))?(?P<directors_cut> Director's Cut)?(?P<extension>\.mkv)"
    movies = []
    total_movies = 0
    movies_in_db = 0
    movies_added = 0
    for root, dirs, files in os.walk(MOVIES_DIRECTORY):
        for f in files:
            if f.endswith(".mkv"):
                total_movies += 1
                path = os.path.join(root, f)
                if not database.movie_path_in_db(path):
                    try:
                        match = re.match(pattern, f)
                        if not match:
                            current_app.logger.info(f + " did not match regex.")
                            continue
                        current_app.logger.info("movie path: " + path)
                        title = match.group("title")
                        current_app.logger.info("movie title: " + title)
                        year = int(match.group("year"))
                        extended = bool(match.group("extended"))
                        directors_cut = bool(match.group("directors_cut"))
                        # Strip the edition marker out of the title before the
                        # IMDb lookup.
                        if extended:
                            imdb_data = database.imdb_get_movie(title.replace(match.group("extended"), ""), year)
                        elif directors_cut:
                            imdb_data = database.imdb_get_movie(title.replace(match.group("directors_cut"), ""), year)
                        else:
                            imdb_data = database.imdb_get_movie(title, year)
                        if not imdb_data:
                            current_app.logger.info("could not get imdb data for: " + title + " " + str(year))
                            continue
                        imdb_id = imdb_data["tconst"]
                        length = imdb_data["runtimeMinutes"]
                        tmdb_data = database.tmdb_get_movie_by_imdb_id(imdb_id)
                        if not tmdb_data:
                            current_app.logger.info("could not get tmdb data")
                            continue
                        tmdb_id, description, poster_path, backdrop_path = tmdb_data[:4]
                        movies_added += 1
                        movies.append((path, imdb_id, tmdb_id, title, year, length, description,
                                       extended, directors_cut, poster_path, backdrop_path))
                        if len(movies) >= 20:
                            # Flush in batches so the DB layer can commit early.
                            movie_loaded.send("anonymous", movies=movies.copy())
                            movies.clear()
                    except Exception as e:
                        current_app.logger.info(inspect.stack()[0][3] + " " + str(type(e)) + " " + str(e))
                # NOTE(review): counts every .mkv seen, new or known — confirm intent.
                movies_in_db += 1
    # Flush the final partial batch.
    movie_loaded.send("anonymous", movies=movies)
    current_app.logger.info("finish load movies")
    current_app.logger.info("total movies: " + str(total_movies))
    current_app.logger.info("movies in database: " + str(movies_in_db))
    current_app.logger.info("movies added: " + str(movies_added))
def get_movie(path):
    """Load a single .mkv movie file, resolve IMDb/TMDb metadata, and emit it
    via ``movie_loaded``.  No-op if *path* is already in the database.
    """
    pattern = r"(?P<title>.+) \((?P<year>\d+)\)(?P<extended>\(extended\))?(?P<directors_cut> Director's Cut)?(?P<extension>\.mkv)"
    if database.movie_path_in_db(path):
        return
    try:
        # Match against the basename: the original matched the full path, so
        # the captured title leaked the directory prefix (get_movies matches
        # the bare filename).
        match = re.match(pattern, os.path.basename(path))
        if not match:
            current_app.logger.info(path + " did not match regex.")
            return
        current_app.logger.info("movie path: " + path)
        title = match.group("title")
        current_app.logger.info("movie title: " + title)
        year = int(match.group("year"))
        extended = bool(match.group("extended"))
        directors_cut = bool(match.group("directors_cut"))
        # Strip the edition marker out of the title before the IMDb lookup.
        if extended:
            imdb_data = database.imdb_get_movie(title.replace(match.group("extended"), ""), year)
        elif directors_cut:
            imdb_data = database.imdb_get_movie(title.replace(match.group("directors_cut"), ""), year)
        else:
            imdb_data = database.imdb_get_movie(title, year)
        if not imdb_data:
            current_app.logger.info("could not get imdb data for: " + title + " " + str(year))
            return
        imdb_id = imdb_data["tconst"]
        length = imdb_data["runtimeMinutes"]
        tmdb_data = database.tmdb_get_movie_by_imdb_id(imdb_id)
        if not tmdb_data:
            current_app.logger.info("could not get tmdb data")
            return
        tmdb_id, description, poster_path, backdrop_path = tmdb_data[:4]
        movies = [(path, imdb_id, tmdb_id, title, year, length, description, extended, directors_cut,
                   poster_path, backdrop_path)]
        movie_loaded.send("anonymous", movies=movies)
        current_app.logger.info("finish load movie")
    except Exception as e:
        current_app.logger.info(inspect.stack()[0][3] + " " + str(type(e)) + " " + str(e))
def get_tv_shows():
    """Scan TV_SHOWS_DIRECTORY for "Title (Year)" folders not yet in the
    database, resolve IMDb/TMDb metadata, and emit each via ``tv_show_loaded``.
    """
    dir_pattern = r"(?P<title>.+) \((?P<year>\d+)\)"
    # ``show_dir`` (was ``dir``): avoid shadowing the builtin.
    for show_dir in sorted(os.listdir(TV_SHOWS_DIRECTORY)):
        dir_match = re.match(dir_pattern, show_dir)
        if not dir_match:
            continue
        path = TV_SHOWS_DIRECTORY + show_dir
        if database.tv_show_path_in_db(path):
            continue
        # Optional per-show overrides for the IMDb lookup — presumably hints
        # like an explicit id; verify against database.imdb_get_tv_show.
        info = {}
        if os.path.exists(path + "/info.json"):
            with open(path + "/info.json") as f:
                info = json.load(f)
        series_name = dir_match.group("title")
        series_year = int(dir_match.group("year"))
        imdb_data = database.imdb_get_tv_show(series_name, series_year, info)
        if not imdb_data:
            current_app.logger.info("could not get imdb data for:" + series_name + " " + str(series_year))
            continue
        imdb_id = imdb_data["tconst"]
        tmdb_data = database.tmdb_get_tv_show_by_imdb_id(imdb_id)
        if not tmdb_data:
            current_app.logger.info("could not get tmdb data for:" + series_name + " " + str(series_year))
            # Persistent miss log so lookups can be fixed up manually later.
            with open("/var/lib/rpiWebApp/log.txt", "a") as f:
                f.write("could not get tmdb data for: " + imdb_id + " " + series_name + " " + str(series_year) + "\n")
            continue
        tmdb_id, description, poster_path = tmdb_data[:3]
        tv_show_data = (imdb_id, tmdb_id, series_name, series_year, description, poster_path, path)
        tv_show_loaded.send("anonymous", tv_show=tv_show_data)
    current_app.logger.info("finished load tv shows.")
def get_tv_episodes():
    """Scan each known TV show's folder for "SxxExx - Title" videos not yet in
    the database, resolve IMDb/TMDb metadata, and emit one batch per show via
    ``tv_episodes_loaded``.
    """
    try:
        video_pattern = r"S(?P<season>\d+)E(?P<episode>\d+) - (?P<title>.+)(?P<extension>.mp4|.mkv)"
        for tv_show in database.get_all_tv_shows():
            episodes = []
            for video in sorted(os.listdir(tv_show.path)):
                video_match = re.match(video_pattern, video)
                if not video_match:
                    continue
                path = os.path.join(tv_show.path, video)
                if database.tv_episode_path_in_db(path):
                    continue
                season = int(video_match.group("season"))
                episode = int(video_match.group("episode"))
                episode_name = video_match.group("title")
                episode_imdb_data = database.imdb_get_tv_episode(tv_show.imdb_id, season, episode)
                if not episode_imdb_data:
                    # (dropped the duplicate print() beside this logger call)
                    current_app.logger.info("could not get imdb data for: " + tv_show.title + " " + str(tv_show.year) + " " + str(season) + " " + str(episode))
                    continue
                episode_imdb_id = episode_imdb_data["tconst"]
                episode_tmdb_data = database.tmdb_get_tv_episode_by_imdb_id(episode_imdb_id)
                if not episode_tmdb_data:
                    current_app.logger.info("could not get tmdb data for: " + tv_show.title + " " + str(tv_show.year) + " " + str(season) + " " + str(episode))
                    # Append ("a") rather than truncate ("w"): the original
                    # wiped the miss log on every entry; get_tv_shows appends.
                    with open("/var/lib/rpiWebApp/log.txt", "a") as f:
                        f.write("could not get tmdb data for: " + episode_imdb_id + " " + tv_show.title + " " + str(
                            tv_show.year) + " " + str(season) + " " + str(episode) + "\n")
                    continue
                episode_tmdb_id = episode_tmdb_data[0]
                episode_description = episode_tmdb_data[1]
                episode_still_path = episode_tmdb_data[2]
                episodes.append((episode_imdb_id, tv_show.imdb_id, episode_tmdb_id, episode_name, season, episode,
                                 episode_description, episode_still_path, path))
            tv_episodes_loaded.send("anonymous", tv_episodes=episodes)
        current_app.logger.info("finished load tv episodes")
    except Exception as e:
        current_app.logger.info(inspect.stack()[0][3] + " " + str(type(e)) + " " + str(e))
def get_tv_episode(path):
    """Load a single "SxxExx - Title" episode file belonging to a known show,
    resolve IMDb/TMDb metadata, and emit it via ``tv_episodes_loaded``.
    """
    folder, name = os.path.split(path)
    video_pattern = r"S(?P<season>\d+)E(?P<episode>\d+) - (?P<title>.+)(?P<extension>.mp4|.mkv)"
    video_match = re.match(video_pattern, name)
    if not video_match:
        return
    for tv_show in database.get_all_tv_shows():
        if folder != tv_show.path:
            continue
        if database.tv_episode_path_in_db(path):
            continue
        season = int(video_match.group("season"))
        episode = int(video_match.group("episode"))
        episode_name = video_match.group("title")
        episode_imdb_data = database.imdb_get_tv_episode(tv_show.imdb_id, season, episode)
        if not episode_imdb_data:
            # (dropped the duplicate print() beside this logger call)
            current_app.logger.info(
                "could not get imdb data for: " + tv_show.title + " " + str(tv_show.year) + " " + str(
                    season) + " " + str(episode))
            return
        episode_imdb_id = episode_imdb_data["tconst"]
        episode_tmdb_data = database.tmdb_get_tv_episode_by_imdb_id(episode_imdb_id)
        if not episode_tmdb_data:
            current_app.logger.info(
                "could not get tmdb data for: " + tv_show.title + " " + str(tv_show.year) + " " + str(
                    season) + " " + str(episode))
            # Append ("a") rather than truncate ("w") so earlier misses survive.
            with open("/var/lib/rpiWebApp/log.txt", "a") as f:
                f.write("could not get tmdb data for: " + episode_imdb_id + " " + tv_show.title + " " + str(
                    tv_show.year) + " " + str(season) + " " + str(episode) + "\n")
            return
        episode_tmdb_id = episode_tmdb_data[0]
        episode_description = episode_tmdb_data[1]
        episode_still_path = episode_tmdb_data[2]
        episodes = [(episode_imdb_id, tv_show.imdb_id, episode_tmdb_id, episode_name, season, episode,
                     episode_description, episode_still_path, path)]
        tv_episodes_loaded.send("anonymous", tv_episodes=episodes)
        current_app.logger.info("finished load tv episode")
def get_chapters(path):
    """Read Matroska chapter markers from *path* and return a dict describing
    "intro", "credits", and "end-credit scene" time ranges in whole seconds.

    Returns {} if the file cannot be opened or parsed.
    """
    try:
        with open(path, 'rb') as f:
            mkv = enzyme.MKV(f)
    except Exception as e:
        current_app.logger.info(inspect.stack()[0][3] + " " + str(type(e)) + " " + str(e))
        # The original fell through here and raised NameError on ``mkv``.
        return {}
    mkv_info = {}
    for chapter in mkv.chapters:
        if chapter.string == "Intro":
            mkv_info["intro"] = {
                "start": chapter.start.seconds,
                # chapter.end appears to be nanoseconds (//1000 -> microseconds
                # for timedelta) — TODO confirm against enzyme's docs.
                "end": timedelta(microseconds=chapter.end // 1000).seconds
            }
        if chapter.string == "Credits":
            mkv_info["credits"] = {"start": chapter.start.seconds}
        if chapter.string == "end-credit scene":
            # There may be several end-credit scenes; collect them in a list.
            mkv_info.setdefault("end-credit scene", [])
            end_credit = {"start": chapter.start.seconds}
            if chapter.end:
                end_credit["end"] = timedelta(microseconds=chapter.end // 1000).seconds
            mkv_info["end-credit scene"].append(end_credit)
    return mkv_info
def get_tags(path):
    """Read Matroska tags from *path* and return a dict with "collection",
    "season", and/or "movie" metadata, keyed by tag target level.

    Returns {} if the file cannot be opened or parsed.
    """
    try:
        with open(path, 'rb') as f:
            mkv = enzyme.MKV(f)
    except Exception as e:
        current_app.logger.info(inspect.stack()[0][3] + " " + str(type(e)) + " " + str(e))
        # The original fell through here and raised NameError on ``mkv``.
        return {}
    mkv_info = {}
    for tag in mkv.tags:
        # Matroska TargetTypeValue: 70=collection, 60=season, 50=movie/episode.
        target = tag.targets.data[0].data
        if target == 70:
            mkv_info["collection"] = {}
            for simple in tag.simpletags:
                if simple.name == "TITLE":
                    mkv_info["collection"]["title"] = simple.string
                if simple.name == "TOTAL_PARTS":
                    mkv_info["collection"]["episodes"] = int(simple.string)
                if simple.name == "KEYWORDS":
                    mkv_info["collection"]["key_words"] = simple.string.split(",")
                if simple.name == "DATE_RELEASED":
                    mkv_info["collection"]["year"] = int(simple.string)
                if simple.name == "SUMMARY":
                    mkv_info["collection"]["summary"] = simple.string
        if target == 60:
            mkv_info["season"] = {}
            for simple in tag.simpletags:
                if simple.name == "TITLE":
                    mkv_info["season"]["title"] = simple.string
                if simple.name == "TOTAL_PARTS":
                    mkv_info["season"]["episodes"] = int(simple.string)
        if target == 50:
            mkv_info["movie"] = {}
            for simple in tag.simpletags:
                if simple.name == "TITLE":
                    mkv_info["movie"]["title"] = simple.string
                if simple.name == "DATE_RELEASED":
                    mkv_info["movie"]["year"] = int(simple.string)
                if simple.name == "PART_NUMBER":
                    mkv_info["movie"]["episode"] = int(simple.string)
                if simple.name == "KEYWORDS":
                    mkv_info["movie"]["key_words"] = simple.string.split(",")
                if simple.name == "SUMMARY":
                    mkv_info["movie"]["summary"] = simple.string
    return mkv_info
def get_games():
    """Scan GAMES_DIRECTORY for game folders (each containing an info.json)
    not yet in the database, fetch summary and cover art from IGDB, and emit
    rows in batches of 5 via ``games_loaded``.
    """
    games = []
    cover_url = "https://api-v3.igdb.com/covers"
    games_url = "https://api-v3.igdb.com/games"
    # SECURITY(review): hard-coded IGDB API key committed to source — move it
    # to configuration/environment and rotate the key.
    headers = {
        "accept": "application/json",
        "user-key": "641f7f0e3af5273dcc1105ce851ea804"
    }
    pending = 0
    for folder in sorted(os.listdir(GAMES_DIRECTORY), key=str.casefold):
        root = os.path.join(GAMES_DIRECTORY, folder)
        if not os.path.isdir(root):  # was isdir(os.path.join(root)) — redundant join
            continue
        with open(os.path.join(root, "info.json"), "r") as f:
            info = json.load(f)
        game_id = info["id"]
        if database.game_in_db(game_id):
            continue
        current_app.logger.info(f"start loading game: {info['name']}:{info['id']}")
        # IGDB v3 queries ride in the request body, even on GET.
        data = f"fields summary;limit 1;where id={game_id};"
        r = requests.get(games_url, headers=headers, data=data).json()[0]
        description = r.get("summary", "")
        data = f"fields image_id;limit 1;where game={game_id};"
        r = requests.get(cover_url, headers=headers, data=data).json()
        poster_path = None
        if r and "image_id" in r[0]:
            poster_path = "https://images.igdb.com/igdb/image/upload/t_cover_big/" + r[0]["image_id"] + ".jpg"
        # Platform flags default to None when absent from info.json.
        game = (info["name"], game_id, description, poster_path,
                info.get("windows"), info.get("mac"), info.get("linux"))
        games.append(game)
        pending += 1
        if pending >= 5:
            games_loaded.send("anonymous", games=games.copy())
            games.clear()
            pending = 0
    # Flush the final partial batch.
    games_loaded.send("anonymous", games=games)
    current_app.logger.info("finished loading games")