From 76e8117b0c42b7b9650bcf13d63c6c390b7a9b2c Mon Sep 17 00:00:00 2001
From: Jack Darlington
Date: Mon, 6 Mar 2017 20:58:15 +0000
Subject: [PATCH 1/3] initial version

---
 cps/web.py | 37 ++++++++++++++++++++++++++++++++++---
 1 file changed, 34 insertions(+), 3 deletions(-)

diff --git a/cps/web.py b/cps/web.py
index 434c3c0e..178e0010 100755
--- a/cps/web.py
+++ b/cps/web.py
@@ -753,6 +753,32 @@ def feed_series(id):
     return response
 
+def partial(total_byte_len, part_size_limit):
+    s = []
+    for p in range(0, total_byte_len, part_size_limit):
+        last = min(total_byte_len - 1, p + part_size_limit - 1)
+        s.append([p, last])
+    return s
+
+def do_gdrive_download(df, headers):
+    total_size = int(df.metadata.get('fileSize'))
+    print total_size
+    download_url = df.metadata.get('downloadUrl')
+    s = partial(total_size, 1024 * 100) # I'm downloading BIG files, so 100M chunk size is fine for me
+    def stream():
+        for bytes in s:
+            headers = {"Range" : 'bytes=%s-%s' % (bytes[0], bytes[1])}
+            resp, content = df.auth.Get_Http_Object().request(download_url, headers=headers)
+            if resp.status == 206 :
+                yield content
+            else:
+                print 'An error occurred: %s' % resp
+                return
+            print str(bytes[1])+"..."
+    return Response(stream(), headers=headers)
+
+
 @app.route("/opds/download/<book_id>/<format>/")
 @requires_basic_auth_if_no_ano
 @download_required
@@ -760,17 +786,22 @@ def get_opds_download_link(book_id, format):
     format = format.split(".")[0]
     book = db.session.query(db.Books).filter(db.Books.id == book_id).first()
     data = db.session.query(db.Data).filter(db.Data.book == book.id).filter(db.Data.format == format.upper()).first()
+    print (data.name)
     if current_user.is_authenticated:
         helper.update_download(book_id, int(current_user.id))
     file_name = book.title
     if len(book.authors) > 0:
         file_name = book.authors[0].name + '-' + file_name
     file_name = helper.get_valid_filename(file_name)
+    headers={}
+    headers["Content-Disposition"] = "attachment; filename*=UTF-8''%s.%s" % (urllib.quote(file_name.encode('utf8')), format)
     if config.config_use_google_drive:
         df=gdriveutils.getFileFromEbooksFolder(Gdrive.Instance().drive, book.path, data.name + "." + format)
-        download_url = df.metadata.get('downloadUrl')
-        resp, content = df.auth.Get_Http_Object().request(download_url)
-        response=send_file(io.BytesIO(content))
+        #download_url = df.metadata.get('downloadUrl')
+        #resp, content = df.auth.Get_Http_Object().request(download_url)
+        #io.BytesIO(content)
+        #response=send_file(io.BytesIO(content))
+        return do_gdrive_download(df, headers)
     else:
         # file_name = helper.get_valid_filename(file_name)
         response = make_response(send_from_directory(os.path.join(config.config_calibre_dir, book.path), data.name + "." + format))
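The core of this first patch is the partial()/do_gdrive_download() pair: instead of buffering the whole Drive file in memory via send_file(io.BytesIO(content)), it splits the file into byte ranges and issues one HTTP Range request per chunk, yielding each 206 Partial Content body from a generator. Note the chunk size here is 1024 * 100 = 100 KiB per request, not the 100M the inline comment claims. A minimal standalone sketch of the same pattern, using the requests library and a placeholder URL in place of the PyDrive file's authorized httplib2 object that the patch uses:

    import requests

    def byte_ranges(total_byte_len, part_size_limit):
        # Split [0, total_byte_len) into inclusive (first, last) byte ranges.
        for first in range(0, total_byte_len, part_size_limit):
            yield first, min(total_byte_len - 1, first + part_size_limit - 1)

    def stream_ranges(url, total_size, chunk_size=1024 * 1024):
        # Yield the file chunk by chunk; each 206 response body is one chunk.
        for first, last in byte_ranges(total_size, chunk_size):
            resp = requests.get(url, headers={"Range": "bytes=%d-%d" % (first, last)})
            if resp.status_code != 206:  # server ignored or rejected the range
                raise IOError("expected 206 Partial Content, got %d" % resp.status_code)
            yield resp.content

Because the ranges are inclusive and clamped to total_byte_len - 1, chunks never overlap and the final chunk stops exactly at the end of the file.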
From 10b129cb078e9e1c3e4d778a09b3cc89ce04d8ec Mon Sep 17 00:00:00 2001
From: Jack Darlington
Date: Mon, 6 Mar 2017 21:05:21 +0000
Subject: [PATCH 2/3] add timestamps

---
 cps/web.py | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/cps/web.py b/cps/web.py
index 178e0010..064a23cd 100755
--- a/cps/web.py
+++ b/cps/web.py
@@ -761,9 +761,12 @@ def partial(total_byte_len, part_size_limit):
     return s
 
 def do_gdrive_download(df, headers):
+    startTime=time.time()
     total_size = int(df.metadata.get('fileSize'))
-    print total_size
+    app.logger.info (time.time()-startTime)
+    app.logger.info(total_size)
     download_url = df.metadata.get('downloadUrl')
+    app.logger.info (time.time()-startTime)
     s = partial(total_size, 1024 * 100) # I'm downloading BIG files, so 100M chunk size is fine for me
     def stream():
         for bytes in s:
@@ -772,9 +775,10 @@ def do_gdrive_download(df, headers):
             if resp.status == 206 :
                 yield content
             else:
-                print 'An error occurred: %s' % resp
+                app.logger.info('An error occurred: %s' % resp)
                 return
-            print str(bytes[1])+"..."
+            app.logger.info(str(bytes[1])+"...")
+    app.logger.info (time.time()-startTime)
     return Response(stream(), headers=headers)
 
 
@@ -783,10 +787,11 @@ def do_gdrive_download(df, headers):
 @requires_basic_auth_if_no_ano
 @download_required
 def get_opds_download_link(book_id, format):
+    startTime=time.time()
     format = format.split(".")[0]
     book = db.session.query(db.Books).filter(db.Books.id == book_id).first()
     data = db.session.query(db.Data).filter(db.Data.book == book.id).filter(db.Data.format == format.upper()).first()
-    print (data.name)
+    app.logger.info (data.name)
     if current_user.is_authenticated:
         helper.update_download(book_id, int(current_user.id))
     file_name = book.title
@@ -795,8 +800,11 @@ def get_opds_download_link(book_id, format):
         file_name = book.authors[0].name + '-' + file_name
     file_name = helper.get_valid_filename(file_name)
     headers={}
     headers["Content-Disposition"] = "attachment; filename*=UTF-8''%s.%s" % (urllib.quote(file_name.encode('utf8')), format)
+    app.logger.info (time.time()-startTime)
+    startTime=time.time()
     if config.config_use_google_drive:
         df=gdriveutils.getFileFromEbooksFolder(Gdrive.Instance().drive, book.path, data.name + "." + format)
+        app.logger.info (time.time()-startTime)
         #download_url = df.metadata.get('downloadUrl')
         #resp, content = df.auth.Get_Http_Object().request(download_url)
         #io.BytesIO(content)
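This second patch swaps the print statements for app.logger.info() and brackets each expensive step (metadata fetch, Drive file lookup) with time.time() deltas to find where the latency goes. The same instrumentation can be expressed once as a context manager; this helper is hypothetical, not part of the patch, which inlines the deltas instead:

    import logging
    import time
    from contextlib import contextmanager

    logger = logging.getLogger(__name__)

    @contextmanager
    def timed(label):
        # Log wall-clock time for the wrapped block, even if it raises.
        start = time.time()
        try:
            yield
        finally:
            logger.info("%s took %.3fs", label, time.time() - start)

    # Usage, mirroring the spots instrumented in the patch:
    # with timed("gdrive file lookup"):
    #     df = gdriveutils.getFileFromEbooksFolder(...)

Note that the logging inside stream() only fires as the generator is consumed, not when do_gdrive_download() returns, which is exactly the context problem the next patch addresses.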
From e3e26d418df47dc75689486b050591ff59464f01 Mon Sep 17 00:00:00 2001
From: Jack Darlington
Date: Mon, 6 Mar 2017 22:50:24 +0000
Subject: [PATCH 3/3] Finished code to chunk successfully.

---
 cps/web.py | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/cps/web.py b/cps/web.py
index 064a23cd..cc7164b7 100755
--- a/cps/web.py
+++ b/cps/web.py
@@ -7,7 +7,8 @@ import logging
 from logging.handlers import RotatingFileHandler
 import textwrap
 from flask import Flask, render_template, session, request, Response, redirect, url_for, send_from_directory, \
-    make_response, g, flash, abort, send_file, Markup
+    make_response, g, flash, abort, send_file, Markup, \
+    stream_with_context
 from flask import __version__ as flaskVersion
 import ub
 from ub import config
@@ -767,7 +768,7 @@ def do_gdrive_download(df, headers):
     app.logger.info(total_size)
     download_url = df.metadata.get('downloadUrl')
     app.logger.info (time.time()-startTime)
-    s = partial(total_size, 1024 * 100) # I'm downloading BIG files, so 100M chunk size is fine for me
+    s = partial(total_size, 1024 * 1024) # I'm downloading BIG files, so 100M chunk size is fine for me
     def stream():
         for bytes in s:
             headers = {"Range" : 'bytes=%s-%s' % (bytes[0], bytes[1])}
@@ -777,11 +778,7 @@ def do_gdrive_download(df, headers):
             if resp.status == 206 :
                 yield content
             else:
                 app.logger.info('An error occurred: %s' % resp)
                 return
-            app.logger.info(str(bytes[1])+"...")
-    app.logger.info (time.time()-startTime)
-    return Response(stream(), headers=headers)
-
-
+    return Response(stream_with_context(stream()), headers=headers)
 
 @app.route("/opds/download/<book_id>/<format>/")
 @requires_basic_auth_if_no_ano
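The change that makes the chunking work end to end is wrapping the generator in stream_with_context: Flask pops the request context as soon as the view returns, but a streamed response body is consumed afterwards, so an unwrapped generator runs without the request it was created under. The chunk size also grows to 1024 * 1024 = 1 MiB per Range request, though the inherited "100M" comment is left as-is. A minimal, self-contained illustration of the wrapper; the route and payload here are invented for the demo:

    from flask import Flask, Response, request, stream_with_context

    app = Flask(__name__)

    @app.route("/stream")
    def stream_demo():
        def generate():
            # Without stream_with_context, touching `request` here raises
            # RuntimeError: the context is gone by the time Flask iterates us.
            yield "streaming for %s\n" % request.remote_addr
            for i in range(3):
                yield "chunk %d\n" % i
        return Response(stream_with_context(generate()), mimetype="text/plain")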