Load and save queue and history files
Switch to using an additional filename and sub-directory field
Allow Status in JSON decode/encode
Switch to using a string for the URL instead of url.URL
Use log instead of fmt for logging
Add basic status handlers for the queue and history
Add HTTP timeouts
Implement cookie handling
Ignore TempPath and FilePath when adding URLs; they are absolute paths
Ignore Status when adding URLs unless it is Paused
When determining the filename, use the path from the final redirect
Use the correct TempPath when downloading
Actually add requests to the queue before starting them
parent b1079eab13
commit bd0c3d2cc6
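Usage sketch (not part of the commit): the snippet below shows how a client might exercise the new /add and /queue handlers, assuming the defaults set up in main.go (the server listening on :8844) and the JSON field names from the updated Request struct (url, filename, subdir). The URL and file names are made up, and the /add handler is assumed to accept a JSON array of requests, as suggested by the `for _, req := range requests` loop in restAddDownload.

package main

import (
    "bytes"
    "encoding/json"
    "log"
    "net/http"
)

// addRequest mirrors a subset of the Request JSON shape after this commit:
// the URL is a plain string, and filename/subdir pick the final location
// under the complete dir.
type addRequest struct {
    URL      string `json:"url"`
    Filename string `json:"filename,omitempty"`
    Subdir   string `json:"subdir,omitempty"`
}

func main() {
    // Enqueue one download via POST /add.
    body, err := json.Marshal([]addRequest{{
        URL:      "https://example.com/file.iso", // made-up URL
        Filename: "file.iso",
        Subdir:   "isos",
    }})
    if err != nil {
        log.Fatal(err)
    }
    resp, err := http.Post("http://localhost:8844/add", "application/json", bytes.NewReader(body))
    if err != nil {
        log.Fatal(err)
    }
    resp.Body.Close()

    // Read back the current queue via GET /queue.
    q, err := http.Get("http://localhost:8844/queue")
    if err != nil {
        log.Fatal(err)
    }
    defer q.Body.Close()
    var queue []map[string]interface{}
    if err := json.NewDecoder(q.Body).Decode(&queue); err != nil {
        log.Fatal(err)
    }
    log.Printf("%d request(s) in the queue", len(queue))
}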
374  downloader.go
@@ -5,6 +5,7 @@ import (
     "errors"
     "fmt"
     "io/ioutil"
+    "log"
     "mime"
     "net"
     "net/http"
@@ -60,27 +61,31 @@ type Downloader struct {
+    Jar http.CookieJar
     MaxActiveDownloads int
     Server *http.Server
-    downloads RequestQueue
-    history RequestQueue
+    Downloads RequestQueue
+    History RequestQueue
     NewRequest chan Request
     requestDone chan *Request
     OnComplete func(r Request)
     OnAdd func(r Request)
 }

 type Request struct {
-    URL url.URL `json:"url"`
+    URL string `json:"url"`
+    Cookies []http.Cookie `json:"cookies"`
     ForceDownload bool `json:"forceDownload"`
-    Status Status `json:"-"`
+    Status Status `json:"Status"`
     Priority Priority `json:"priority"`
-    Filepath string `json:"filepath"`
+    FilePath string `json:"filepath"`
+    Filename string `json:"filename"`
+    Subdir string `json:"subdir"`
     TempPath string `json:"tempPath"`
     Response *grab.Response `json:"-"`
     Error error `json:"-"`
-    CompletedDate time.Time
+    CompletedDate time.Time `json:"completedDate"`
 }

 type RequestQueue struct {
-    queue []*Request
+    Queue []*Request
     URLSort bool
     DateSort bool
 }
@@ -88,25 +93,25 @@ type RequestQueue struct {
 func (rq RequestQueue) Less(i, j int) bool {
     ii := 0
     jj := 0
-    if rq.queue[i].ForceDownload {
+    if rq.Queue[i].ForceDownload {
         ii = 1
     }
-    if rq.queue[j].ForceDownload {
+    if rq.Queue[j].ForceDownload {
         jj = 1
     }
     if ii < jj {
         return true
     }

-    if rq.queue[i].Priority < rq.queue[j].Priority {
+    if rq.Queue[i].Priority < rq.Queue[j].Priority {
         return true
     }

-    if rq.DateSort && rq.queue[i].CompletedDate.Before(rq.queue[j].CompletedDate) {
+    if rq.DateSort && rq.Queue[i].CompletedDate.Before(rq.Queue[j].CompletedDate) {
         return true
     }

-    if rq.URLSort && rq.queue[i].URL.String() < rq.queue[j].URL.String() {
+    if rq.URLSort && rq.Queue[i].URL < rq.Queue[j].URL {
         return true
     }

@@ -114,27 +119,27 @@ func (rq RequestQueue) Less(i, j int) bool {
 }

 func (rq RequestQueue) Len() int {
-    return len(rq.queue)
+    return len(rq.Queue)
 }

 func (rq RequestQueue) Swap(i, j int) {
-    rq.queue[i], rq.queue[j] = rq.queue[j], rq.queue[i]
+    rq.Queue[i], rq.Queue[j] = rq.Queue[j], rq.Queue[i]
 }

 func (rq *RequestQueue) Pop(i int) *Request {
-    r := rq.queue[i]
-    copy(rq.queue[i:], rq.queue[i+1:])
-    rq.queue[len(rq.queue)-1] = nil
-    rq.queue = rq.queue[:len(rq.queue)-1]
+    r := rq.Queue[i]
+    copy(rq.Queue[i:], rq.Queue[i+1:])
+    rq.Queue[len(rq.Queue)-1] = nil
+    rq.Queue = rq.Queue[:len(rq.Queue)-1]
     return r
 }

 func (rq *RequestQueue) remove(r *Request) {
-    for i, req := range rq.queue {
+    for i, req := range rq.Queue {
         if req == r {
-            copy(rq.queue[i:], rq.queue[i+1:])
-            rq.queue[len(rq.queue)-1] = nil
-            rq.queue = rq.queue[:len(rq.queue)-1]
+            copy(rq.Queue[i:], rq.Queue[i+1:])
+            rq.Queue[len(rq.Queue)-1] = nil
+            rq.Queue = rq.Queue[:len(rq.Queue)-1]
             break
         }
     }
@@ -182,7 +187,7 @@ func (d *Downloader) Start(network, address string) {
     }

     if d.DataDir == "" {
-        d.DataDir = "/perm/downloader"
+        d.DataDir = "/perm/gloader"
     }

     if d.DownloadDir == "" {
@@ -193,26 +198,102 @@ func (d *Downloader) Start(network, address string) {
         d.CompleteDir = path.Join(d.DataDir, "Complete")
     }

-    fmt.Println(d.DataDir)
-    fmt.Println(d.DownloadDir)
-    fmt.Println(d.CompleteDir)
-    os.MkdirAll(d.DataDir, 0777)
-    os.MkdirAll(d.DownloadDir, 0777)
-    os.MkdirAll(d.CompleteDir, 0777)
+    log.Println(d.DataDir)
+    log.Println(d.DownloadDir)
+    log.Println(d.CompleteDir)
+    _ = os.MkdirAll(d.DataDir, 0777)
+    _ = os.MkdirAll(d.DownloadDir, 0777)
+    _ = os.MkdirAll(d.CompleteDir, 0777)

     listener, err = net.Listen(network, address)
     if err != nil {
         panic(err)
     }
-    fmt.Println("adding /add handler")
+    log.Println("adding /add handler")
     // mux.HandleFunc("/", d.UI)
     mux.HandleFunc("/add", d.restAddDownload)
+    mux.HandleFunc("/queue", d.restQueueStatus)
+    mux.HandleFunc("/history", d.restHistoryStatus)
+    mux.HandleFunc("/start", d.restStartDownload)

+    log.Println("starting main go routine")
+    d.Grab.HTTPClient = &http.Client{
+        Jar: d.Jar,
+        Transport: &http.Transport{
+            Dial: (&net.Dialer{
+                Timeout:   10 * time.Second,
+                KeepAlive: 30 * time.Second,
+            }).Dial,
+            TLSHandshakeTimeout:   5 * time.Second,
+            ResponseHeaderTimeout: 5 * time.Second,
+            ExpectContinueTimeout: 1 * time.Second,
+        },
+    }

-    fmt.Println("starting main go routine")
     go d.download()

-    fmt.Println("serving http server")
-    d.Server.Serve(listener)
+    log.Println("serving http server")
+    _ = d.Server.Serve(listener)
 }

+func (d *Downloader) restStartDownload(w http.ResponseWriter, r *http.Request) {
+    var (
+        err   error
+        index struct {
+            index int
+        }
+    )
+    if r.Method != http.MethodPost {
+        w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+        w.Header().Set("X-Content-Type-Options", "nosniff")
+        w.Header().Add("Allow", http.MethodPost)
+        w.WriteHeader(http.StatusMethodNotAllowed)
+        fmt.Fprintln(w, "HTTP Error 405 – Method Not Allowed\nOnly POST method is allowed")
+        log.Println("HTTP Error 405 – Method Not Allowed\nOnly POST method is allowed")
+        return
+    }
+    err = json.NewDecoder(r.Body).Decode(index)
+    if err != nil {
+        http.Error(w, err.Error(), http.StatusBadRequest)
+        return
+    }
+    if index.index >= d.Downloads.Len() || index.index < 0 {
+        http.Error(w, fmt.Sprintf("slice index out of bounds. index: %d length of slice: %d", index.index, d.Downloads.Len()), http.StatusBadRequest)
+        return
+    }
+    d.startDownload(index.index)
+}

+func (d *Downloader) restHistoryStatus(w http.ResponseWriter, r *http.Request) {
+    if r.Method != http.MethodGet {
+        w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+        w.Header().Set("X-Content-Type-Options", "nosniff")
+        w.Header().Add("Allow", http.MethodGet)
+        w.WriteHeader(http.StatusMethodNotAllowed)
+        fmt.Fprintln(w, "HTTP Error 405 – Method Not Allowed\nOnly GET method is allowed")
+        log.Println("HTTP Error 405 – Method Not Allowed\nOnly GET method is allowed")
+        return
+    }
+    j := json.NewEncoder(w)
+    w.Header().Add("Content-Type", "application/json; charset=utf-8")
+    w.WriteHeader(http.StatusOK)
+    j.Encode(d.History.Queue)
+}

+func (d *Downloader) restQueueStatus(w http.ResponseWriter, r *http.Request) {
+    if r.Method != http.MethodGet {
+        w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+        w.Header().Set("X-Content-Type-Options", "nosniff")
+        w.Header().Add("Allow", http.MethodGet)
+        w.WriteHeader(http.StatusMethodNotAllowed)
+        fmt.Fprintln(w, "HTTP Error 405 – Method Not Allowed\nOnly GET method is allowed")
+        log.Println("HTTP Error 405 – Method Not Allowed\nOnly GET method is allowed")
+        return
+    }
+    j := json.NewEncoder(w)
+    w.Header().Add("Content-Type", "application/json; charset=utf-8")
+    w.WriteHeader(http.StatusOK)
+    j.Encode(d.Downloads.Queue)
+}

 func (d *Downloader) restAddDownload(w http.ResponseWriter, r *http.Request) {
@@ -226,7 +307,7 @@ func (d *Downloader) restAddDownload(w http.ResponseWriter, r *http.Request) {
         w.Header().Add("Allow", http.MethodPost)
         w.WriteHeader(http.StatusMethodNotAllowed)
         fmt.Fprintln(w, "HTTP Error 405 – Method Not Allowed\nOnly POST method is allowed")
-        fmt.Println("HTTP Error 405 – Method Not Allowed\nOnly POST method is allowed")
+        log.Println("HTTP Error 405 – Method Not Allowed\nOnly POST method is allowed")
         return
     }
     // TODO fail only on individual requests
@@ -236,14 +317,18 @@ func (d *Downloader) restAddDownload(w http.ResponseWriter, r *http.Request) {
         return
     }
     for _, req := range requests {
-        req.TempPath = ""
-        fmt.Println("adding request", req.URL.String())
+        req.TempPath = "" // not allowed via REST API
+        req.FilePath = "" // not allowed via REST API
+        if req.Status != Paused {
+            req.Status = Queued
+        }
+        log.Println("adding request", req.URL)
         d.NewRequest <- req
     }
     w.WriteHeader(http.StatusOK)
 }

-func (d Downloader) getContentDispsition(r Request) string {
+func (d Downloader) getNameFromHEAD(r Request) string {
     var (
         err error
         re  *http.Response
@@ -252,20 +337,31 @@ func (d Downloader) getContentDispsition(r Request) string {
     ht := &http.Client{
         Jar:     d.Jar,
         Timeout: 30 * time.Second,
+        Transport: &http.Transport{
+            Dial: (&net.Dialer{
+                Timeout:   5 * time.Second,
+                KeepAlive: 30 * time.Second,
+            }).Dial,
+            TLSHandshakeTimeout:   5 * time.Second,
+            ResponseHeaderTimeout: 5 * time.Second,
+            ExpectContinueTimeout: 1 * time.Second,
+        },
     }
-    re, err = ht.Head(r.URL.String())
+    re, err = ht.Head(r.URL)
     if err != nil {
         return ""
     }
+    if re.StatusCode < 200 || re.StatusCode > 299 {
+        return ""
+    }
     re.Body.Close()
     _, p, err = mime.ParseMediaType(re.Header.Get("Content-Disposition"))
-    if err != nil {
-        return ""
+    if err == nil {
+        if f, ok := p["filename"]; ok {
+            return f
+        }
     }
-    if f, ok := p["filename"]; ok {
-        return f
-    }
-    return ""
+    return path.Base(re.Request.URL.Path)
 }

 // getFilename checks the provided filepath
@@ -273,28 +369,29 @@ func (d Downloader) getContentDispsition(r Request) string {
 // if not set uses the basename of the url
 // and sanitizes the filename using github.com/lordwelch/pathvalidate
 func (d *Downloader) getFilename(r *Request) {
-    fmt.Println("Determining filename")
-    r.Filepath = filepath.Clean(r.Filepath)
-    if r.Filepath == "." {
-        fmt.Println("filename is empty, testing head request")
-        r.Filepath = d.getContentDispsition(*r)
-        fmt.Println("path from head request:", r.Filepath)
-        if r.Filepath == "" {
-            r.Filepath, _ = url.PathUnescape(filepath.Base(r.URL.Path))
+    log.Println("Determining filename")
+    r.Filename = filepath.Clean(r.Filename)
+    if r.Filename == "." {
+        log.Println("filename is empty, testing head request")
+        r.Filename = d.getNameFromHEAD(*r)
+        log.Println("path from head request:", r.Filename)
+        if r.Filename == "" {
+            u, _ := url.Parse(r.URL)
+            r.Filename, _ = url.PathUnescape(filepath.Base(u.Path))
         }
     }
-    r.Filepath, _ = pathvalidate.SanitizeFilename(r.Filepath, '_')
-    r.Filepath = filepath.Join(d.DownloadDir, r.Filepath)
+    r.Filename, _ = pathvalidate.SanitizeFilename(r.Filename, '_')
+    // r.Filename = filepath.Join(d.CompleteDir, r.Filename)

-    // if filepath.IsAbs(r.Filepath) { // should already exist
-    //     dir, file := filepath.Split(r.Filepath)
+    // if filepath.IsAbs(r.Filename) { // should already exist
+    //     dir, file := filepath.Split(r.Filename)
     //     // someone is trying to be sneaky (or someone changed the CompleteDir), change path to the correct dir
     //     if dir != filepath.Clean(d.CompleteDir) {
-    //         r.Filepath = filepath.Join(d.CompleteDir, file)
+    //         r.Filename = filepath.Join(d.CompleteDir, file)
     //     }
     //     return
     // }
-    fmt.Println("result path:", r.Filepath)
+    log.Println("result path:", r.Filename)
 }

 func getNewFilename(dir, name string) string {
@@ -302,15 +399,16 @@ func getNewFilename(dir, name string) string {
         err   error
         index = 1
     )
-    fmt.Println("getfilename", dir, name)
+    log.Println("getfilename", dir, name)
     ext := filepath.Ext(name)
     base := strings.TrimSuffix(name, ext)
-    fmt.Println("stat", filepath.Join(dir, name))
+    log.Println("stat", filepath.Join(dir, name))
     _, err = os.Stat(filepath.Join(dir, name))
     for err == nil {
         name = strings.TrimRight(base+"."+strconv.Itoa(index)+ext, ".")
-        fmt.Println("stat", filepath.Join(dir, name))
+        log.Println("stat", filepath.Join(dir, name))
         _, err = os.Stat(filepath.Join(dir, name))
         index++
     }
     if os.IsNotExist(err) {
         return filepath.Join(dir, name)
@@ -318,93 +416,111 @@ func getNewFilename(dir, name string) string {
     panic(err) // other path error
 }

-func (d Downloader) getDownloadFilename(r *Request) {
+func (d Downloader) getTempFilename(r *Request) {
     if r.TempPath == "" {
-        f, err := ioutil.TempFile(d.DownloadDir, filepath.Base(r.Filepath))
+        f, err := ioutil.TempFile(d.DownloadDir, filepath.Base(r.Filename))
         if err != nil {
-            fmt.Printf("request for %v failed: %v", r.URL.String(), err)
+            log.Printf("request for %v failed: %v", r.URL, err)
         }
-        r.TempPath = f.Name()
         f.Close()
+        r.TempPath = filepath.Join(d.DownloadDir, f.Name())
     }
-    f, err := os.OpenFile(r.Filepath, os.O_CREATE|os.O_EXCL, 0666)
+    os.MkdirAll(filepath.Dir(r.FilePath), 0o777)
+    f, err := os.OpenFile(r.Filename, os.O_CREATE|os.O_EXCL, 0666)
     if err != nil {
         return
     }
     f.Close()
 }

-func (d Downloader) SearchDownloads(u url.URL) int {
-    for i, req := range d.downloads.queue {
-        if req.URL.String() == u.String() {
+func (d Downloader) SearchDownloads(u string) int {
+    for i, req := range d.Downloads.Queue {
+        if req.URL == u {
             return i
         }
     }
     return -1
 }

-func (d Downloader) SearchHistory(u url.URL) int {
-    for i, req := range d.history.queue {
-        if req.URL.String() == u.String() {
+func (d Downloader) SearchHistory(u string) int {
+    for i, req := range d.History.Queue {
+        if req.URL == u {
             return i
         }
     }
     return -1

 }

-func (d Downloader) FindRequest(u url.URL) *Request {
+func (d Downloader) FindRequest(u string) *Request {
     if i := d.SearchDownloads(u); i >= 0 {
-        return d.downloads.queue[i]
+        return d.Downloads.Queue[i]
     }
     if i := d.SearchHistory(u); i >= 0 {
-        return d.history.queue[i]
+        return d.History.Queue[i]
     }
     return nil
 }

 func (d *Downloader) addRequest(r *Request) {
-    fmt.Println("adding download for", r.URL.String())
+    log.Println("adding download for", r.URL)
     req := d.FindRequest(r.URL)
+    u, _ := url.Parse(r.URL)
+    for i, v := range r.Cookies {
+        d.Jar.SetCookies(&url.URL{
+            Scheme: u.Scheme,
+            Path:   v.Path,
+            Host:   v.Domain,
+        }, []*http.Cookie{&r.Cookies[i]})
+    }
     d.getFilename(r)

     if req != nil { // url alread added
-        fmt.Println("URL is already added", r.URL.String())
-        if fi, err := os.Stat(r.Filepath); filepath.Base(req.Filepath) == filepath.Base(r.Filepath) || (err == nil && fi.Name() == filepath.Base(r.Filepath) && fi.Size() != 0) { // filepath has been found, should this check for multiple downloads of the same url or let the download name increment automatically
-            fmt.Println("file already exists", r.Filepath)
-            //getNewFilename(d.CompleteDir, filepath.Base(r.Filepath))
-            d.validate(*r) // TODO, should also check to see if it seems like it is similar, (check first k to see if it is the same file?? leave option to user)
+        log.Println("URL is already added", r.URL)
+        return
+        // if fi, err := os.Stat(r.Filepath); filepath.Base(req.Filepath) == filepath.Base(r.Filepath) || (err == nil && fi.Name() == filepath.Base(r.Filepath) && fi.Size() != 0) { // filepath has been found, should this check for multiple downloads of the same url or let the download name increment automatically
        //     log.Println("file already exists", r.Filepath)
        //     d.validate(*r) // TODO, should also check to see if it seems like it is similar, (check first k to see if it is the same file?? leave option to user)
        //     return
        // }
     }
+    r.FilePath = getNewFilename(d.CompleteDir, filepath.Join(r.Subdir, r.Filename))
+    d.Downloads.Queue = append(d.Downloads.Queue, r)

+    if len(d.getRunningDownloads()) < d.MaxActiveDownloads {
+        d.startDownload(d.Downloads.Len() - 1)
+    }
+}

+// func (d *Downloader) validate(r Request) {
+//     //TODO
+// }

+func (d *Downloader) startDownload(i int) {
+    var (
+        r   *Request
+        req *grab.Request
+        err error
+    )
+    r = d.Downloads.Queue[i]
+    d.getTempFilename(r)
+    log.Println("starting download for", r.URL, "to", r.TempPath)
+    // d.Downloads.Queue = append(d.Downloads.Queue, r)
+    if r.Response == nil || r.Response.Err() != nil {
+        req, err = grab.NewRequest(r.TempPath, r.URL)
+        if err != nil {
+            r.Status = Error
+            r.Error = err
+            return
+        }
+    }
-    } else { // new request, download link
-        r.Filepath = getNewFilename(d.CompleteDir, filepath.Base(r.Filepath))
-        d.downloads.queue = append(d.downloads.queue, r)
-    }
-    if len(d.getRunningDownloads()) < d.MaxActiveDownloads {
-        d.startDownload(r)
-    }
-}

-func (d *Downloader) validate(r Request) {
-    //TODO
-}

-func (d *Downloader) startDownload(r *Request) {
-    fmt.Println("starting download for", r.URL.String())
-    d.getDownloadFilename(r)
-    req, err := grab.NewRequest(r.TempPath, r.URL.String())
-    if err != nil {
-        r.Status = Error
-        r.Error = err
-        return
-    }
     r.Status = Downloading

     r.Response = d.Grab.Do(req)
     go func(r *Request) {
-        fmt.Println("wait for download")
-        fmt.Println(r.Response.IsComplete())
+        log.Println("wait for download")
+        log.Println(r.Response.IsComplete())
         r.Response.Wait()
-        fmt.Println("download completed for", r.URL)
+        log.Println("download completed for", r.URL)
         d.requestDone <- r
     }(r)
 }
@@ -413,7 +529,7 @@ func (d Downloader) getRunningDownloads() []*Request {
     var (
         running = make([]*Request, 0, d.MaxActiveDownloads)
     )
-    for _, req := range d.downloads.queue {
+    for _, req := range d.Downloads.Queue {
         if req.Status == Downloading && req.Response != nil {
             running = append(running, req)
         }
@@ -425,20 +541,20 @@ func (d *Downloader) syncDownloads() {
     if len(d.getRunningDownloads()) >= d.MaxActiveDownloads {
         return
     }
-    sort.Stable(d.downloads)
+    sort.Stable(d.Downloads)
     // Start new downloads
-    for _, req := range d.downloads.queue {
+    for i, req := range d.Downloads.Queue {
         if d.MaxActiveDownloads >= len(d.getRunningDownloads()) {
             if req.Status == Queued {
-                d.startDownload(req)
+                d.startDownload(i)
             }
         }
     }

     // Clean completed/canceled downloads
-    for i := 0; i < d.downloads.Len(); i++ {
-        if d.downloads.queue[i].Status == Complete || d.downloads.queue[i].Status == Canceled {
-            d.history.queue = append(d.history.queue, d.downloads.Pop(i))
+    for i := 0; i < d.Downloads.Len(); i++ {
+        if d.Downloads.Queue[i].Status == Complete || d.Downloads.Queue[i].Status == Canceled {
+            d.History.Queue = append(d.History.Queue, d.Downloads.Pop(i))
             i--
         }
     }
@@ -446,40 +562,40 @@ func (d *Downloader) syncDownloads() {

 func (d *Downloader) requestCompleted(r *Request) {
     if r.Response.Err() == nil {
-        fmt.Println("removing from downloads")
-        d.downloads.remove(r)
+        log.Println("removing from downloads")
+        d.Downloads.remove(r)
         r.Status = Complete
-        fmt.Println(r.TempPath, "!=", r.Filepath)
-        if r.TempPath != r.Filepath {
-            fmt.Println("renaming download to the completed dir")
-            os.Rename(r.TempPath, r.Filepath)
+        log.Println(r.TempPath, "!=", r.FilePath)
+        if r.TempPath != r.FilePath {
+            log.Println("renaming download to the completed dir")
+            os.Rename(r.TempPath, r.FilePath)
         }
-        d.history.queue = append(d.history.queue, r)
+        d.History.Queue = append(d.History.Queue, r)
     } else {
         r.Status = Error
         r.Error = r.Response.Err()
-        fmt.Println("fucking error:", r.Error)
+        log.Println("fucking error:", r.Error)
     }
 }

 func (d *Downloader) download() {
     for {
         select {
-        case TIME := <-time.After(10 * time.Second):
-            fmt.Println(TIME)
-            for _, req := range d.downloads.queue {
-                fmt.Println(req.URL)
-                fmt.Println(req.Status)
-                fmt.Println(req.Response.ETA())
-            }
+        case <-time.After(10 * time.Second):
             d.syncDownloads()

         case r := <-d.NewRequest:
             d.addRequest(&r)
             if d.OnAdd != nil {
                 d.OnAdd(r)
             }

         case r := <-d.requestDone:
-            fmt.Println("finishing request for", r.URL)
+            log.Println("finishing request for", r.URL)
             d.requestCompleted(r)
             if d.OnComplete != nil {
                 d.OnComplete(*r)
             }
         }
     }
 }
1  go.sum
@@ -1,6 +1,5 @@
 github.com/lordwelch/pathvalidate v0.0.0-20201012043703-54efa7ea1308 h1:CkcsZK6QYg59rc92eqU2h+FRjWltCIiplmEwIB05jfM=
 github.com/lordwelch/pathvalidate v0.0.0-20201012043703-54efa7ea1308/go.mod h1:4I4r5Y/LkH+34KACiudU+Q27ooz7xSDyVEuWAVKeJEQ=
-github.com/u-root/u-root v1.0.0 h1:3hJy0CG3mXIZtWRE+yrghG/3H0v8L1qEeZBlPr5nS9s=
 github.com/u-root/u-root v7.0.0+incompatible h1:u+KSS04pSxJGI5E7WE4Bs9+Zd75QjFv+REkjy/aoAc8=
 github.com/u-root/u-root v7.0.0+incompatible/go.mod h1:RYkpo8pTHrNjW08opNd/U6p/RJE7K0D8fXO0d47+3YY=
 golang.org/x/net v0.0.0-20201209123823-ac852fbbde11 h1:lwlPPsmjDKK0J6eG6xDWd5XPehI0R024zxjDnw3esPA=
116  main.go
@@ -1,6 +1,8 @@
 package main

 import (
+    "bufio"
+    "encoding/json"
     "errors"
     "flag"
     "fmt"
@@ -31,10 +33,72 @@ func main() {
         os.Exit(1)
     }
     d := newDownloader()
+    loadQueue(d)
+    save := func(r Request) {
+        var (
+            content []byte
+            err     error
+        )
+        content, err = json.Marshal(d.History.Queue)
+        if err != nil {
+            log.Println(err)
+            return
+        }
+        err = ioutil.WriteFile(filepath.Join(gloaderHome, "history.json"), content, 0o666)
+        if err != nil {
+            log.Println(err)
+            return
+        }
+        content, err = json.Marshal(d.Downloads.Queue)
+        if err != nil {
+            log.Println(err)
+            return
+        }
+        err = ioutil.WriteFile(filepath.Join(gloaderHome, "queue.json"), content, 0o666)
+        if err != nil {
+            log.Println(err)
+            return
+        }
+    }
+    d.OnAdd = save
+    d.OnComplete = save
+    d.DataDir = filepath.Join(gloaderHome, "data")
     d.Start("tcp", ":8844")
 }

+func loadQueue(d *Downloader) {
+    var (
+        f       io.ReadCloser
+        err     error
+        decoder *json.Decoder
+    )
+    f, err = os.Open(filepath.Join(gloaderHome, "history.json"))
+    if err != nil {
+        log.Println(err)
+        return
+    }
+    decoder = json.NewDecoder(bufio.NewReader(f))
+    err = decoder.Decode(&d.History.Queue)
+    if err != nil {
+        log.Println(err)
+        return
+    }
+    f.Close()
+
+    f, err = os.Open(filepath.Join(gloaderHome, "queue.json"))
+    if err != nil {
+        log.Println(err)
+        return
+    }
+    decoder = json.NewDecoder(bufio.NewReader(f))
+    err = decoder.Decode(&d.Downloads.Queue)
+    if err != nil {
+        log.Println(err)
+        return
+    }
+    f.Close()
+}

 func mount() error {
     var (
         partUUIDb []byte
@@ -72,7 +136,7 @@ func mount() error {
     }
     _, err = folder.Readdir(1)
     if errors.Is(err, io.EOF) {
-        fmt.Printf("mount %s %s", partUUID, dataDir)
+        log.Printf("mount %s %s\n", partUUID, dataDir)
         dev = findPartUUID(partUUID)
         err = syscall.Mount(dev.Path, dataDir, "ext4", 0, "")
         if err != nil {
@@ -85,56 +149,6 @@ func mount() error {
     return fmt.Errorf("error mounting datadir: data dir %s is not a directory", dataDir)
 }

-// func findPartUUID(uuid string) (string, error) {
-//     var dev string
-//     err := filepath.Walk("/sys/block", func(path string, info os.FileInfo, err error) error {
-//         if err != nil {
-//             log.Printf("findPartUUID: %v", err)
-//             return nil
-//         }
-//         if info.Mode()&os.ModeSymlink == 0 {
-//             return nil
-//         }
-//         devname := "/dev/" + filepath.Base(path)
-//         f, err := os.Open(devname)
-//         if err != nil {
-//             log.Printf("findPartUUID: %v", err)
-//             return nil
-//         }
-//         defer f.Close()
-//         if _, err := f.Seek(440, io.SeekStart); err != nil {
-//             var se syscall.Errno
-//             if errors.As(err, &se) && se == syscall.EINVAL {
-//                 // Seek()ing empty loop devices results in EINVAL.
-//                 return nil
-//             }
-//             log.Printf("findPartUUID: %v(%T)", err, err.(*os.PathError).Err)
-//             return nil
-//         }
-//         var diskSig struct {
-//             ID      uint32
-//             Trailer uint16
-//         }
-//         if err := binary.Read(f, binary.LittleEndian, &diskSig); err != nil {
-//             log.Printf("findPartUUID: %v", err)
-//             return nil
-//         }
-//         if fmt.Sprintf("%08x", diskSig.ID) == uuid && diskSig.Trailer == 0 {
-//             dev = devname
-//             // TODO: abort early with sentinel error code
-//             return nil
-//         }
-//         return nil
-//     })
-//     if err != nil {
-//         return "", err
-//     }
-//     if dev == "" {
-//         return "", fmt.Errorf("PARTUUID=%s not found", uuid)
-//     }
-//     return dev, nil
-// }

 type part struct {
     UUID string
     Path string