# rpiwebapp-public/test.py

import threading
import time
import os
import sqlite3
from comicapi import comicarchive
from comicapi.issuestring import IssueString
from urllib import parse
from io import BytesIO
from PIL import Image
import datetime
import pytz
from werkzeug.security import generate_password_hash
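# Point the unrar library at the UnRAR DLL so comicapi can open
# RAR-compressed (.cbr) archives; this is a Windows development path.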
os.environ["UNRAR_LIB_PATH"] = "C:\\Program Files (x86)\\UnrarDLL\\UnRAR.dll"
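# SQLite database file used by the app (path redacted in the public repository).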
DATABASE = "***REMOVED***"
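# Module-level connection shared by the helpers below; sqlite3.Row lets rows
# be accessed by column name.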
db = sqlite3.connect(DATABASE)
db.row_factory = sqlite3.Row
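# Imprints belonging to DC and Marvel; defined here but not referenced
# elsewhere in this script.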
DC_Comics = ["DC", "Young Animal", "WildStorm", "Earth-M", "Vertigo", "Sandman Universe", "DC Black Label",
             "Wonder Comics", "DC Ink", "DC Zoom", "Mad", "All Star", "Amalgam Comics", "DC Focus", "Elseworlds",
             "First Wave", "Helix", "Impact Comics", "Johnny DC", "Minx", "Paradox Press", "Piranha Press",
             "Tangent Comics", "WildStorm Productions", "America's Best Comics", "Cliffhanger", "CMX Manga",
             "Homage Comics", "WildStorm Signature", "Zuda Comics"]
Marvel = ["Aircel Comics", "Malibu Comics", "Atlas Comics", "Atlas", "CrossGen comics", "CrossGen",
          "Curtis Magazines", "Disney Books Group", "Disney Kingdoms", "Epic Comics", "Eternity Comics",
          "Humorama", "Icon Comics", "Infinite Comics", "Malibu Comics", "Marvel 2099", "Marvel Absurd",
          "Marvel Adventures", "Marvel Age", "Marvel Books", "Marvel Comics 2", "Marvel Comics", "Marvel Edge",
          "Marvel Frontier", "Marvel Illustrated", "Marvel Knights", "Marvel Magazine Group", "Marvel Mangaverse",
          "Marvel Monsters Group", "Marvel Music", "Marvel Next", "Marvel Noir", "Marvel Press", "Marvel UK",
          "Marvel Unlimited", "Max", "MAX", "MC2", "New Universe", "Paramount Comics", "Power Comics", "Razorline",
          "Star Comics", "Timely Comics", "Timely", "Tsunami", "Ultimate Comics", "Ultimate Marvel"]
def get_db():
    return db


def db_search_table_columns_by_query(query, table, columns, group="", order=""):
    # Run a separate LIKE search for each column and return the matching rows
    # keyed by column name. Spaces in the query become wildcards.
    results = {}
    final_query = "%" + query.replace(" ", "%") + "%"
    sqlite_base_statement = "SELECT * FROM " + table + " WHERE {condition} {group} {order}"
    if group:
        group = "GROUP BY " + group
    if order:
        order = "ORDER BY " + order
    for column in columns:
        # The search pattern is bound as a parameter to avoid SQL injection;
        # table, column, group and order names come from trusted callers.
        sqlite_statement = sqlite_base_statement.format(condition=column + " LIKE ?", group=group, order=order)
        results[column] = get_db().execute(sqlite_statement, (final_query,)).fetchall()
    # sqlite_condition = ""
    # for column in columns:
    #     sqlite_condition += column+" LIKE '"+final_query+"'"+(" OR " if column != columns[-1] else "")
    # sqlite_statement = "SELECT * FROM {table} WHERE {condition}".format(table=table, condition=sqlite_condition)
    # rows = get_db().execute(sqlite_statement).fetchall()
    return results


def db_search_comics(query):
    # Bucket the matching rows into publishers, series and individual comics.
    publishers = []
    series = []
    comics = []
    results = db_search_table_columns_by_query(query, "comics", ["publisher", "title", "series", "year"])
    series_results = db_search_table_columns_by_query(query, "comics", ["publisher", "title", "series", "year"],
                                                      group="series, seriesYear", order="issue")
    for row in results["publisher"]:
        if row["publisher"] not in publishers:
            publishers.append(row["publisher"])
    for row in series_results["series"]:
        entry = {"publisher": row["publisher"], "series": row["series"], "seriesYear": row["seriesYear"],
                 "id": row["id"]}
        if entry not in series:
            series.append(entry)
    for row in results["title"]:
        entry = {"publisher": row["publisher"], "series": row["series"], "seriesYear": row["seriesYear"],
                 "issue": row["issue"], "id": row["id"], "title": row["title"]}
        if entry not in comics:
            comics.append(entry)
    for row in results["year"]:
        entry = {"publisher": row["publisher"], "series": row["series"], "seriesYear": row["seriesYear"],
                 "issue": row["issue"], "id": row["id"], "title": row["title"]}
        if entry not in comics:
            comics.append(entry)
    return {"publishers": publishers, "series": series, "comics": comics}
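# Quick manual check: run a sample search and print each result bucket.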
results = db_search_comics("ar")
for key, value in results.items():
    print(key, value)