Load and save queue and history files

Switch to using additional filename and sub-directory fields
Allow Status in JSON decode/encode
Switch to using string for URL instead of url.URL
Use log instead of fmt for logging
Add basic status handlers for the queue and history
Add HTTP timeouts
Implement cookie handling
Ignore TempPath and FilePath when adding URLs, as they are absolute paths
Ignore Status when adding URLs unless it is Paused
When determining the filename use the path from the final redirect
Use the correct TempPath when downloading
Actually add requests to the queue before starting them
lordwelch 2020-12-13 01:05:17 -08:00
parent b1079eab13
commit bd0c3d2cc6
3 changed files with 310 additions and 181 deletions
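
For reference, here is a minimal client-side sketch of how a download could be queued through the /add endpoint introduced in this commit. The JSON field names follow the updated Request struct tags in the diff below, and ":8844" is the listen address main.go passes to d.Start; the URL, sub-directory, and priority value are purely illustrative, and Priority is assumed to marshal as a plain number.

    package main

    import (
        "bytes"
        "encoding/json"
        "log"
        "net/http"
    )

    // addRequest mirrors the client-settable JSON fields of the Request struct
    // from this commit (url, priority, filename, subdir). The concrete values
    // used below are made up for illustration.
    type addRequest struct {
        URL      string `json:"url"`
        Priority int    `json:"priority"`
        Filename string `json:"filename"`
        Subdir   string `json:"subdir"`
    }

    func main() {
        // The /add handler decodes a JSON array of requests from the body,
        // so even a single download is wrapped in a slice.
        body, err := json.Marshal([]addRequest{{
            URL:    "https://example.com/file.iso", // illustrative URL
            Subdir: "isos",                         // ends up under CompleteDir/isos
        }})
        if err != nil {
            log.Fatal(err)
        }

        resp, err := http.Post("http://localhost:8844/add", "application/json", bytes.NewReader(body))
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()
        log.Println("queue response:", resp.Status)
    }

The /queue and /history endpoints added in the same commit return the current download and history queues as JSON and can be polled with a plain GET.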


@@ -5,6 +5,7 @@ import (
     "errors"
     "fmt"
     "io/ioutil"
+    "log"
     "mime"
     "net"
     "net/http"
@@ -60,27 +61,31 @@ type Downloader struct {
     Jar                http.CookieJar
     MaxActiveDownloads int
     Server             *http.Server
-    downloads          RequestQueue
-    history            RequestQueue
+    Downloads          RequestQueue
+    History            RequestQueue
     NewRequest         chan Request
     requestDone        chan *Request
+    OnComplete         func(r Request)
+    OnAdd              func(r Request)
 }
 
 type Request struct {
-    URL           url.URL        `json:"url"`
+    URL           string         `json:"url"`
     Cookies       []http.Cookie  `json:"cookies"`
     ForceDownload bool           `json:"forceDownload"`
-    Status        Status         `json:"-"`
+    Status        Status         `json:"Status"`
     Priority      Priority       `json:"priority"`
-    Filepath      string         `json:"filepath"`
+    FilePath      string         `json:"filepath"`
+    Filename      string         `json:"filename"`
+    Subdir        string         `json:"subdir"`
     TempPath      string         `json:"tempPath"`
     Response      *grab.Response `json:"-"`
     Error         error          `json:"-"`
-    CompletedDate time.Time
+    CompletedDate time.Time      `json:"completedDate"`
 }
 
 type RequestQueue struct {
-    queue    []*Request
+    Queue    []*Request
     URLSort  bool
     DateSort bool
 }
@@ -88,25 +93,25 @@ type RequestQueue struct {
 func (rq RequestQueue) Less(i, j int) bool {
     ii := 0
     jj := 0
-    if rq.queue[i].ForceDownload {
+    if rq.Queue[i].ForceDownload {
         ii = 1
     }
-    if rq.queue[j].ForceDownload {
+    if rq.Queue[j].ForceDownload {
         jj = 1
     }
     if ii < jj {
         return true
     }
-    if rq.queue[i].Priority < rq.queue[j].Priority {
+    if rq.Queue[i].Priority < rq.Queue[j].Priority {
         return true
     }
-    if rq.DateSort && rq.queue[i].CompletedDate.Before(rq.queue[j].CompletedDate) {
+    if rq.DateSort && rq.Queue[i].CompletedDate.Before(rq.Queue[j].CompletedDate) {
         return true
     }
-    if rq.URLSort && rq.queue[i].URL.String() < rq.queue[j].URL.String() {
+    if rq.URLSort && rq.Queue[i].URL < rq.Queue[j].URL {
         return true
     }
@@ -114,27 +119,27 @@ func (rq RequestQueue) Less(i, j int) bool {
 }
 
 func (rq RequestQueue) Len() int {
-    return len(rq.queue)
+    return len(rq.Queue)
 }
 
 func (rq RequestQueue) Swap(i, j int) {
-    rq.queue[i], rq.queue[j] = rq.queue[j], rq.queue[i]
+    rq.Queue[i], rq.Queue[j] = rq.Queue[j], rq.Queue[i]
 }
 
 func (rq *RequestQueue) Pop(i int) *Request {
-    r := rq.queue[i]
-    copy(rq.queue[i:], rq.queue[i+1:])
-    rq.queue[len(rq.queue)-1] = nil
-    rq.queue = rq.queue[:len(rq.queue)-1]
+    r := rq.Queue[i]
+    copy(rq.Queue[i:], rq.Queue[i+1:])
+    rq.Queue[len(rq.Queue)-1] = nil
+    rq.Queue = rq.Queue[:len(rq.Queue)-1]
     return r
 }
 
 func (rq *RequestQueue) remove(r *Request) {
-    for i, req := range rq.queue {
+    for i, req := range rq.Queue {
         if req == r {
-            copy(rq.queue[i:], rq.queue[i+1:])
-            rq.queue[len(rq.queue)-1] = nil
-            rq.queue = rq.queue[:len(rq.queue)-1]
+            copy(rq.Queue[i:], rq.Queue[i+1:])
+            rq.Queue[len(rq.Queue)-1] = nil
+            rq.Queue = rq.Queue[:len(rq.Queue)-1]
             break
         }
     }
@@ -182,7 +187,7 @@ func (d *Downloader) Start(network, address string) {
     }
 
     if d.DataDir == "" {
-        d.DataDir = "/perm/downloader"
+        d.DataDir = "/perm/gloader"
     }
 
     if d.DownloadDir == "" {
@@ -193,26 +198,102 @@ func (d *Downloader) Start(network, address string) {
         d.CompleteDir = path.Join(d.DataDir, "Complete")
     }
-    fmt.Println(d.DataDir)
-    fmt.Println(d.DownloadDir)
-    fmt.Println(d.CompleteDir)
-    os.MkdirAll(d.DataDir, 0777)
-    os.MkdirAll(d.DownloadDir, 0777)
-    os.MkdirAll(d.CompleteDir, 0777)
+    log.Println(d.DataDir)
+    log.Println(d.DownloadDir)
+    log.Println(d.CompleteDir)
+    _ = os.MkdirAll(d.DataDir, 0777)
+    _ = os.MkdirAll(d.DownloadDir, 0777)
+    _ = os.MkdirAll(d.CompleteDir, 0777)
 
     listener, err = net.Listen(network, address)
     if err != nil {
         panic(err)
     }
 
-    fmt.Println("adding /add handler")
+    log.Println("adding /add handler")
     // mux.HandleFunc("/", d.UI)
     mux.HandleFunc("/add", d.restAddDownload)
+    mux.HandleFunc("/queue", d.restQueueStatus)
+    mux.HandleFunc("/history", d.restHistoryStatus)
+    mux.HandleFunc("/start", d.restStartDownload)
 
-    fmt.Println("starting main go routine")
+    log.Println("starting main go routine")
+    d.Grab.HTTPClient = &http.Client{
+        Jar: d.Jar,
+        Transport: &http.Transport{
+            Dial: (&net.Dialer{
+                Timeout:   10 * time.Second,
+                KeepAlive: 30 * time.Second,
+            }).Dial,
+            TLSHandshakeTimeout:   5 * time.Second,
+            ResponseHeaderTimeout: 5 * time.Second,
+            ExpectContinueTimeout: 1 * time.Second,
+        },
+    }
+
     go d.download()
-    fmt.Println("serving http server")
-    d.Server.Serve(listener)
+    log.Println("serving http server")
+    _ = d.Server.Serve(listener)
+}
+
+func (d *Downloader) restStartDownload(w http.ResponseWriter, r *http.Request) {
+    var (
+        err   error
+        index struct {
+            index int
+        }
+    )
+    if r.Method != http.MethodPost {
+        w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+        w.Header().Set("X-Content-Type-Options", "nosniff")
+        w.Header().Add("Allow", http.MethodPost)
+        w.WriteHeader(http.StatusMethodNotAllowed)
+        fmt.Fprintln(w, "HTTP Error 405 Method Not Allowed\nOnly POST method is allowed")
+        log.Println("HTTP Error 405 Method Not Allowed\nOnly POST method is allowed")
+        return
+    }
+    err = json.NewDecoder(r.Body).Decode(index)
+    if err != nil {
+        http.Error(w, err.Error(), http.StatusBadRequest)
+        return
+    }
+    if index.index >= d.Downloads.Len() || index.index < 0 {
+        http.Error(w, fmt.Sprintf("slice index out of bounds. index: %d length of slice: %d", index.index, d.Downloads.Len()), http.StatusBadRequest)
+        return
+    }
+    d.startDownload(index.index)
+}
+
+func (d *Downloader) restHistoryStatus(w http.ResponseWriter, r *http.Request) {
+    if r.Method != http.MethodGet {
+        w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+        w.Header().Set("X-Content-Type-Options", "nosniff")
+        w.Header().Add("Allow", http.MethodGet)
+        w.WriteHeader(http.StatusMethodNotAllowed)
+        fmt.Fprintln(w, "HTTP Error 405 Method Not Allowed\nOnly GET method is allowed")
+        log.Println("HTTP Error 405 Method Not Allowed\nOnly GET method is allowed")
+        return
+    }
+    j := json.NewEncoder(w)
+    w.Header().Add("Content-Type", "application/json; charset=utf-8")
+    w.WriteHeader(http.StatusOK)
+    j.Encode(d.History.Queue)
+}
+
+func (d *Downloader) restQueueStatus(w http.ResponseWriter, r *http.Request) {
+    if r.Method != http.MethodGet {
+        w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+        w.Header().Set("X-Content-Type-Options", "nosniff")
+        w.Header().Add("Allow", http.MethodGet)
+        w.WriteHeader(http.StatusMethodNotAllowed)
+        fmt.Fprintln(w, "HTTP Error 405 Method Not Allowed\nOnly GET method is allowed")
+        log.Println("HTTP Error 405 Method Not Allowed\nOnly GET method is allowed")
+        return
+    }
+    j := json.NewEncoder(w)
+    w.Header().Add("Content-Type", "application/json; charset=utf-8")
+    w.WriteHeader(http.StatusOK)
+    j.Encode(d.Downloads.Queue)
 }
 
 func (d *Downloader) restAddDownload(w http.ResponseWriter, r *http.Request) {
@@ -226,7 +307,7 @@ func (d *Downloader) restAddDownload(w http.ResponseWriter, r *http.Request) {
         w.Header().Add("Allow", http.MethodPost)
         w.WriteHeader(http.StatusMethodNotAllowed)
         fmt.Fprintln(w, "HTTP Error 405 Method Not Allowed\nOnly POST method is allowed")
-        fmt.Println("HTTP Error 405 Method Not Allowed\nOnly POST method is allowed")
+        log.Println("HTTP Error 405 Method Not Allowed\nOnly POST method is allowed")
         return
     }
     // TODO fail only on individual requests
@@ -236,14 +317,18 @@ func (d *Downloader) restAddDownload(w http.ResponseWriter, r *http.Request) {
         return
     }
     for _, req := range requests {
-        req.TempPath = ""
-        fmt.Println("adding request", req.URL.String())
+        req.TempPath = "" // not allowed via REST API
+        req.FilePath = "" // not allowed via REST API
+        if req.Status != Paused {
+            req.Status = Queued
+        }
+        log.Println("adding request", req.URL)
         d.NewRequest <- req
     }
     w.WriteHeader(http.StatusOK)
 }
 
-func (d Downloader) getContentDispsition(r Request) string {
+func (d Downloader) getNameFromHEAD(r Request) string {
     var (
         err error
         re  *http.Response
@@ -252,20 +337,31 @@ func (d Downloader) getContentDispsition(r Request) string {
     ht := &http.Client{
         Jar:     d.Jar,
         Timeout: 30 * time.Second,
+        Transport: &http.Transport{
+            Dial: (&net.Dialer{
+                Timeout:   5 * time.Second,
+                KeepAlive: 30 * time.Second,
+            }).Dial,
+            TLSHandshakeTimeout:   5 * time.Second,
+            ResponseHeaderTimeout: 5 * time.Second,
+            ExpectContinueTimeout: 1 * time.Second,
+        },
     }
-    re, err = ht.Head(r.URL.String())
+    re, err = ht.Head(r.URL)
     if err != nil {
         return ""
     }
+    if re.StatusCode < 200 || re.StatusCode > 299 {
+        return ""
+    }
     re.Body.Close()
     _, p, err = mime.ParseMediaType(re.Header.Get("Content-Disposition"))
-    if err != nil {
-        return ""
-    }
-    if f, ok := p["filename"]; ok {
-        return f
-    }
-    return ""
+    if err == nil {
+        if f, ok := p["filename"]; ok {
+            return f
+        }
+    }
+    return path.Base(re.Request.URL.Path)
 }
 
 // getFilename checks the provided filepath
@@ -273,28 +369,29 @@ func (d Downloader) getContentDispsition(r Request) string {
 // if not set uses the basename of the url
 // and sanitizes the filename using github.com/lordwelch/pathvalidate
 func (d *Downloader) getFilename(r *Request) {
-    fmt.Println("Determining filename")
-    r.Filepath = filepath.Clean(r.Filepath)
-    if r.Filepath == "." {
-        fmt.Println("filename is empty, testing head request")
-        r.Filepath = d.getContentDispsition(*r)
-        fmt.Println("path from head request:", r.Filepath)
-        if r.Filepath == "" {
-            r.Filepath, _ = url.PathUnescape(filepath.Base(r.URL.Path))
+    log.Println("Determining filename")
+    r.Filename = filepath.Clean(r.Filename)
+    if r.Filename == "." {
+        log.Println("filename is empty, testing head request")
+        r.Filename = d.getNameFromHEAD(*r)
+        log.Println("path from head request:", r.Filename)
+        if r.Filename == "" {
+            u, _ := url.Parse(r.URL)
+            r.Filename, _ = url.PathUnescape(filepath.Base(u.Path))
         }
     }
-    r.Filepath, _ = pathvalidate.SanitizeFilename(r.Filepath, '_')
-    r.Filepath = filepath.Join(d.DownloadDir, r.Filepath)
-    // if filepath.IsAbs(r.Filepath) { // should already exist
-    //     dir, file := filepath.Split(r.Filepath)
+    r.Filename, _ = pathvalidate.SanitizeFilename(r.Filename, '_')
+    // r.Filename = filepath.Join(d.CompleteDir, r.Filename)
+    // if filepath.IsAbs(r.Filename) { // should already exist
+    //     dir, file := filepath.Split(r.Filename)
     //     // someone is trying to be sneaky (or someone changed the CompleteDir), change path to the correct dir
     //     if dir != filepath.Clean(d.CompleteDir) {
-    //         r.Filepath = filepath.Join(d.CompleteDir, file)
+    //         r.Filename = filepath.Join(d.CompleteDir, file)
     //     }
     //     return
     // }
-    fmt.Println("result path:", r.Filepath)
+    log.Println("result path:", r.Filename)
 }
 
 func getNewFilename(dir, name string) string {
@@ -302,15 +399,16 @@ func getNewFilename(dir, name string) string {
         err   error
         index = 1
     )
-    fmt.Println("getfilename", dir, name)
+    log.Println("getfilename", dir, name)
     ext := filepath.Ext(name)
     base := strings.TrimSuffix(name, ext)
-    fmt.Println("stat", filepath.Join(dir, name))
+    log.Println("stat", filepath.Join(dir, name))
     _, err = os.Stat(filepath.Join(dir, name))
     for err == nil {
         name = strings.TrimRight(base+"."+strconv.Itoa(index)+ext, ".")
-        fmt.Println("stat", filepath.Join(dir, name))
+        log.Println("stat", filepath.Join(dir, name))
         _, err = os.Stat(filepath.Join(dir, name))
+        index++
     }
     if os.IsNotExist(err) {
         return filepath.Join(dir, name)
@@ -318,93 +416,111 @@
     panic(err) // other path error
 }
 
-func (d Downloader) getDownloadFilename(r *Request) {
+func (d Downloader) getTempFilename(r *Request) {
     if r.TempPath == "" {
-        f, err := ioutil.TempFile(d.DownloadDir, filepath.Base(r.Filepath))
+        f, err := ioutil.TempFile(d.DownloadDir, filepath.Base(r.Filename))
         if err != nil {
-            fmt.Printf("request for %v failed: %v", r.URL.String(), err)
+            log.Printf("request for %v failed: %v", r.URL, err)
         }
+        r.TempPath = f.Name()
         f.Close()
-        r.TempPath = filepath.Join(d.DownloadDir, f.Name())
     }
-    f, err := os.OpenFile(r.Filepath, os.O_CREATE|os.O_EXCL, 0666)
+    os.MkdirAll(filepath.Dir(r.FilePath), 0o777)
+    f, err := os.OpenFile(r.Filename, os.O_CREATE|os.O_EXCL, 0666)
     if err != nil {
         return
     }
     f.Close()
 }
 
-func (d Downloader) SearchDownloads(u url.URL) int {
-    for i, req := range d.downloads.queue {
-        if req.URL.String() == u.String() {
+func (d Downloader) SearchDownloads(u string) int {
+    for i, req := range d.Downloads.Queue {
+        if req.URL == u {
             return i
         }
     }
     return -1
 }
 
-func (d Downloader) SearchHistory(u url.URL) int {
-    for i, req := range d.history.queue {
-        if req.URL.String() == u.String() {
+func (d Downloader) SearchHistory(u string) int {
+    for i, req := range d.History.Queue {
+        if req.URL == u {
             return i
         }
     }
     return -1
 }
 
-func (d Downloader) FindRequest(u url.URL) *Request {
+func (d Downloader) FindRequest(u string) *Request {
     if i := d.SearchDownloads(u); i >= 0 {
-        return d.downloads.queue[i]
+        return d.Downloads.Queue[i]
     }
     if i := d.SearchHistory(u); i >= 0 {
-        return d.history.queue[i]
+        return d.History.Queue[i]
     }
     return nil
 }
 
 func (d *Downloader) addRequest(r *Request) {
-    fmt.Println("adding download for", r.URL.String())
+    log.Println("adding download for", r.URL)
     req := d.FindRequest(r.URL)
+    u, _ := url.Parse(r.URL)
+    for i, v := range r.Cookies {
+        d.Jar.SetCookies(&url.URL{
+            Scheme: u.Scheme,
+            Path:   v.Path,
+            Host:   v.Domain,
+        }, []*http.Cookie{&r.Cookies[i]})
+    }
     d.getFilename(r)
     if req != nil { // url alread added
-        fmt.Println("URL is already added", r.URL.String())
-        if fi, err := os.Stat(r.Filepath); filepath.Base(req.Filepath) == filepath.Base(r.Filepath) || (err == nil && fi.Name() == filepath.Base(r.Filepath) && fi.Size() != 0) { // filepath has been found, should this check for multiple downloads of the same url or let the download name increment automatically
-            fmt.Println("file already exists", r.Filepath)
-            //getNewFilename(d.CompleteDir, filepath.Base(r.Filepath))
-            d.validate(*r) // TODO, should also check to see if it seems like it is similar, (check first k to see if it is the same file?? leave option to user)
-            return
-        }
-    } else { // new request, download link
-        r.Filepath = getNewFilename(d.CompleteDir, filepath.Base(r.Filepath))
-        d.downloads.queue = append(d.downloads.queue, r)
-    }
-    if len(d.getRunningDownloads()) < d.MaxActiveDownloads {
-        d.startDownload(r)
-    }
-}
-
-func (d *Downloader) validate(r Request) {
-    //TODO
-}
-
-func (d *Downloader) startDownload(r *Request) {
-    fmt.Println("starting download for", r.URL.String())
-    d.getDownloadFilename(r)
-    req, err := grab.NewRequest(r.TempPath, r.URL.String())
-    if err != nil {
-        r.Status = Error
-        r.Error = err
-        return
-    }
+        log.Println("URL is already added", r.URL)
+        return
+        // if fi, err := os.Stat(r.Filepath); filepath.Base(req.Filepath) == filepath.Base(r.Filepath) || (err == nil && fi.Name() == filepath.Base(r.Filepath) && fi.Size() != 0) { // filepath has been found, should this check for multiple downloads of the same url or let the download name increment automatically
+        //     log.Println("file already exists", r.Filepath)
+        //     d.validate(*r) // TODO, should also check to see if it seems like it is similar, (check first k to see if it is the same file?? leave option to user)
+        //     return
+        // }
+    }
+    r.FilePath = getNewFilename(d.CompleteDir, filepath.Join(r.Subdir, r.Filename))
+    d.Downloads.Queue = append(d.Downloads.Queue, r)
+    if len(d.getRunningDownloads()) < d.MaxActiveDownloads {
+        d.startDownload(d.Downloads.Len() - 1)
+    }
+}
+
+// func (d *Downloader) validate(r Request) {
+//     //TODO
+// }
+
+func (d *Downloader) startDownload(i int) {
+    var (
+        r   *Request
+        req *grab.Request
+        err error
+    )
+    r = d.Downloads.Queue[i]
+    d.getTempFilename(r)
+    log.Println("starting download for", r.URL, "to", r.TempPath)
+    // d.Downloads.Queue = append(d.Downloads.Queue, r)
+    if r.Response == nil || r.Response.Err() != nil {
+        req, err = grab.NewRequest(r.TempPath, r.URL)
+        if err != nil {
+            r.Status = Error
+            r.Error = err
+            return
+        }
+    }
     r.Status = Downloading
     r.Response = d.Grab.Do(req)
     go func(r *Request) {
-        fmt.Println("wait for download")
-        fmt.Println(r.Response.IsComplete())
+        log.Println("wait for download")
+        log.Println(r.Response.IsComplete())
         r.Response.Wait()
-        fmt.Println("download completed for", r.URL)
+        log.Println("download completed for", r.URL)
         d.requestDone <- r
     }(r)
 }
@@ -413,7 +529,7 @@ func (d Downloader) getRunningDownloads() []*Request {
     var (
         running = make([]*Request, 0, d.MaxActiveDownloads)
     )
-    for _, req := range d.downloads.queue {
+    for _, req := range d.Downloads.Queue {
         if req.Status == Downloading && req.Response != nil {
             running = append(running, req)
         }
@@ -425,20 +541,20 @@ func (d *Downloader) syncDownloads() {
     if len(d.getRunningDownloads()) >= d.MaxActiveDownloads {
         return
     }
-    sort.Stable(d.downloads)
+    sort.Stable(d.Downloads)
 
     // Start new downloads
-    for _, req := range d.downloads.queue {
+    for i, req := range d.Downloads.Queue {
         if d.MaxActiveDownloads >= len(d.getRunningDownloads()) {
             if req.Status == Queued {
-                d.startDownload(req)
+                d.startDownload(i)
             }
         }
     }
 
     // Clean completed/canceled downloads
-    for i := 0; i < d.downloads.Len(); i++ {
-        if d.downloads.queue[i].Status == Complete || d.downloads.queue[i].Status == Canceled {
-            d.history.queue = append(d.history.queue, d.downloads.Pop(i))
+    for i := 0; i < d.Downloads.Len(); i++ {
+        if d.Downloads.Queue[i].Status == Complete || d.Downloads.Queue[i].Status == Canceled {
+            d.History.Queue = append(d.History.Queue, d.Downloads.Pop(i))
             i--
         }
     }
@@ -446,40 +562,40 @@ func (d *Downloader) syncDownloads() {
 func (d *Downloader) requestCompleted(r *Request) {
     if r.Response.Err() == nil {
-        fmt.Println("removing from downloads")
-        d.downloads.remove(r)
+        log.Println("removing from downloads")
+        d.Downloads.remove(r)
         r.Status = Complete
-        fmt.Println(r.TempPath, "!=", r.Filepath)
-        if r.TempPath != r.Filepath {
-            fmt.Println("renaming download to the completed dir")
-            os.Rename(r.TempPath, r.Filepath)
+        log.Println(r.TempPath, "!=", r.FilePath)
+        if r.TempPath != r.FilePath {
+            log.Println("renaming download to the completed dir")
+            os.Rename(r.TempPath, r.FilePath)
         }
-        d.history.queue = append(d.history.queue, r)
+        d.History.Queue = append(d.History.Queue, r)
     } else {
         r.Status = Error
         r.Error = r.Response.Err()
-        fmt.Println("fucking error:", r.Error)
+        log.Println("fucking error:", r.Error)
     }
 }
 
 func (d *Downloader) download() {
     for {
         select {
-        case TIME := <-time.After(10 * time.Second):
-            fmt.Println(TIME)
-            for _, req := range d.downloads.queue {
-                fmt.Println(req.URL)
-                fmt.Println(req.Status)
-                fmt.Println(req.Response.ETA())
-            }
+        case <-time.After(10 * time.Second):
             d.syncDownloads()
         case r := <-d.NewRequest:
             d.addRequest(&r)
+            if d.OnAdd != nil {
+                d.OnAdd(r)
+            }
         case r := <-d.requestDone:
-            fmt.Println("finishing request for", r.URL)
+            log.Println("finishing request for", r.URL)
             d.requestCompleted(r)
+            if d.OnComplete != nil {
+                d.OnComplete(*r)
+            }
         }
     }
 }

go.sum

@@ -1,6 +1,5 @@
 github.com/lordwelch/pathvalidate v0.0.0-20201012043703-54efa7ea1308 h1:CkcsZK6QYg59rc92eqU2h+FRjWltCIiplmEwIB05jfM=
 github.com/lordwelch/pathvalidate v0.0.0-20201012043703-54efa7ea1308/go.mod h1:4I4r5Y/LkH+34KACiudU+Q27ooz7xSDyVEuWAVKeJEQ=
-github.com/u-root/u-root v1.0.0 h1:3hJy0CG3mXIZtWRE+yrghG/3H0v8L1qEeZBlPr5nS9s=
 github.com/u-root/u-root v7.0.0+incompatible h1:u+KSS04pSxJGI5E7WE4Bs9+Zd75QjFv+REkjy/aoAc8=
 github.com/u-root/u-root v7.0.0+incompatible/go.mod h1:RYkpo8pTHrNjW08opNd/U6p/RJE7K0D8fXO0d47+3YY=
 golang.org/x/net v0.0.0-20201209123823-ac852fbbde11 h1:lwlPPsmjDKK0J6eG6xDWd5XPehI0R024zxjDnw3esPA=

main.go

@@ -1,6 +1,8 @@
 package main
 
 import (
+    "bufio"
+    "encoding/json"
     "errors"
     "flag"
     "fmt"
@@ -31,10 +33,72 @@ func main() {
         os.Exit(1)
     }
     d := newDownloader()
+    loadQueue(d)
+    save := func(r Request) {
+        var (
+            content []byte
+            err     error
+        )
+        content, err = json.Marshal(d.History.Queue)
+        if err != nil {
+            log.Println(err)
+            return
+        }
+        err = ioutil.WriteFile(filepath.Join(gloaderHome, "history.json"), content, 0o666)
+        if err != nil {
+            log.Println(err)
+            return
+        }
+        content, err = json.Marshal(d.Downloads.Queue)
+        if err != nil {
+            log.Println(err)
+            return
+        }
+        err = ioutil.WriteFile(filepath.Join(gloaderHome, "queue.json"), content, 0o666)
+        if err != nil {
+            log.Println(err)
+            return
+        }
+    }
+    d.OnAdd = save
+    d.OnComplete = save
     d.DataDir = filepath.Join(gloaderHome, "data")
     d.Start("tcp", ":8844")
 }
+
+func loadQueue(d *Downloader) {
+    var (
+        f       io.ReadCloser
+        err     error
+        decoder *json.Decoder
+    )
+    f, err = os.Open(filepath.Join(gloaderHome, "history.json"))
+    if err != nil {
+        log.Println(err)
+        return
+    }
+    decoder = json.NewDecoder(bufio.NewReader(f))
+    err = decoder.Decode(&d.History.Queue)
+    if err != nil {
+        log.Println(err)
+        return
+    }
+    f.Close()
+
+    f, err = os.Open(filepath.Join(gloaderHome, "queue.json"))
+    if err != nil {
+        log.Println(err)
+        return
+    }
+    decoder = json.NewDecoder(bufio.NewReader(f))
+    err = decoder.Decode(&d.Downloads.Queue)
+    if err != nil {
+        log.Println(err)
+        return
+    }
+    f.Close()
+}
 
 func mount() error {
     var (
         partUUIDb []byte
@@ -72,7 +136,7 @@ func mount() error {
     }
     _, err = folder.Readdir(1)
     if errors.Is(err, io.EOF) {
-        fmt.Printf("mount %s %s", partUUID, dataDir)
+        log.Printf("mount %s %s\n", partUUID, dataDir)
         dev = findPartUUID(partUUID)
         err = syscall.Mount(dev.Path, dataDir, "ext4", 0, "")
         if err != nil {
@@ -85,56 +149,6 @@ func mount() error {
     return fmt.Errorf("error mounting datadir: data dir %s is not a directory", dataDir)
 }
 
-// func findPartUUID(uuid string) (string, error) {
-//     var dev string
-//     err := filepath.Walk("/sys/block", func(path string, info os.FileInfo, err error) error {
-//         if err != nil {
-//             log.Printf("findPartUUID: %v", err)
-//             return nil
-//         }
-//         if info.Mode()&os.ModeSymlink == 0 {
-//             return nil
-//         }
-//         devname := "/dev/" + filepath.Base(path)
-//         f, err := os.Open(devname)
-//         if err != nil {
-//             log.Printf("findPartUUID: %v", err)
-//             return nil
-//         }
-//         defer f.Close()
-//         if _, err := f.Seek(440, io.SeekStart); err != nil {
-//             var se syscall.Errno
-//             if errors.As(err, &se) && se == syscall.EINVAL {
-//                 // Seek()ing empty loop devices results in EINVAL.
-//                 return nil
-//             }
-//             log.Printf("findPartUUID: %v(%T)", err, err.(*os.PathError).Err)
-//             return nil
-//         }
-//         var diskSig struct {
-//             ID      uint32
-//             Trailer uint16
-//         }
-//         if err := binary.Read(f, binary.LittleEndian, &diskSig); err != nil {
-//             log.Printf("findPartUUID: %v", err)
-//             return nil
-//         }
-//         if fmt.Sprintf("%08x", diskSig.ID) == uuid && diskSig.Trailer == 0 {
-//             dev = devname
-//             // TODO: abort early with sentinel error code
-//             return nil
-//         }
-//         return nil
-//     })
-//     if err != nil {
-//         return "", err
-//     }
-//     if dev == "" {
-//         return "", fmt.Errorf("PARTUUID=%s not found", uuid)
-//     }
-//     return dev, nil
-// }
-
 type part struct {
     UUID string
     Path string