Allow user to download ZIP from Django view - python-3.x

My main task is to have the user press a Download button and download file "A.zip" from the query directory.
The reason I have an elif request.POST..... is that I have another condition checking whether the "Execute" button was pressed; that button runs a script. Both POST actions work, and dir_file is C:\Data\Folder.
I followed and read many tutorials and responses as to how to download a file from Django, and I cannot figure out why my simple code does not download a file.
What am I missing? The code does not return any errors. Does anybody have any documentation that can explain what I am doing wrong?
I am expecting an automatic download of the file, but it does not occur.
elif request.POST['action'] == 'Download':
    query = request.POST['q']
    dir_file = query + "A.zip"
    zip_file = open(dir_file, 'rb')
    response = HttpResponse(zip_file, content_type='application/zip')
    response['Content-Disposition'] = 'attachment; filename=%s' % 'foo_zip'
    zip_file.close()

I found my answer.
After reading through a lot of documentation on this, I realized I had left out the most important part of this feature: the URL.
Basically, the POST redirects to the download_zip view, which runs the code that serves the zip for download.
Here is what I ended up doing:
elif request.POST['action'] == 'Download':
    return HttpResponseRedirect('/App/download')
Created a view:
def download_zip(request):
    zip_path = root + "A.zip"
    zip_file = open(zip_path, 'rb')
    response = HttpResponse(zip_file, content_type='application/zip')
    response['Content-Disposition'] = 'attachment; filename=%s' % 'A.zip'
    response['Content-Length'] = os.path.getsize(zip_path)
    zip_file.close()
    return response
Finally in urls.py:
url(r'^download/$', views.download_zip, name='download_zip'),
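Two side notes, offered as a sketch rather than the accepted fix: the first snippet built a response but never returned it from the view, so nothing ever reached the browser; and Django's FileResponse can handle the attachment headers and file streaming for you. A minimal version of the download view using FileResponse (root is assumed to be defined elsewhere, as in the code above):

import os
from django.http import FileResponse

def download_zip(request):
    zip_path = os.path.join(root, "A.zip")  # root is assumed to be defined elsewhere
    # as_attachment=True sets the Content-Disposition header; FileResponse closes the file itself
    return FileResponse(open(zip_path, 'rb'), as_attachment=True, filename="A.zip")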

Related

Downloading files by crawling sub-URLs in python

I am trying to download documents (mainly in pdf) from a large number of web links like the following:
https://projects.worldbank.org/en/projects-operations/document-detail/P167897?type=projects
https://projects.worldbank.org/en/projects-operations/document-detail/P173997?type=projects
https://projects.worldbank.org/en/projects-operations/document-detail/P166309?type=projects
However, the pdf files are not directly accessible from these links. One needs to click on sub-URLs to access the pdfs. Is there any way to crawl the sub-URLs and download all the related files from them? I tried the following code but have had no success so far, specifically for the URLs listed here.
Please let me know if you need any further clarifications. I would be happy to do so. Thank you.
from simplified_scrapy import Spider, SimplifiedDoc, SimplifiedMain, utils

class MySpider(Spider):
    name = 'download_pdf'
    allowed_domains = ["www.worldbank.org"]
    start_urls = [
        "https://projects.worldbank.org/en/projects-operations/document-detail/P167897?type=projects",
        "https://projects.worldbank.org/en/projects-operations/document-detail/P173997?type=projects",
        "https://projects.worldbank.org/en/projects-operations/document-detail/P166309?type=projects"
    ]  # Entry page

    def afterResponse(self, response, url, error=None, extra=None):
        if not extra:
            print("The version of library simplified_scrapy is too old, please update.")
            SimplifiedMain.setRunFlag(False)
            return
        try:
            path = './pdfs'
            # create folder start
            srcUrl = extra.get('srcUrl')
            if srcUrl:
                index = srcUrl.find('year/')
                year = ''
                if index > 0:
                    year = srcUrl[index + 5:]
                    index = year.find('?')
                    if index > 0:
                        path = path + year[:index]
                        utils.createDir(path)
            # create folder end
            path = path + url[url.rindex('/'):]
            index = path.find('?')
            if index > 0:
                path = path[:index]
            flag = utils.saveResponseAsFile(response, path, fileType="pdf")
            if flag:
                return None
            else:  # If it's not a pdf, leave it to the frame
                return Spider.afterResponse(self, response, url, error, extra)
        except Exception as err:
            print(err)

    def extract(self, url, html, models, modelNames):
        doc = SimplifiedDoc(html)
        lst = doc.selects('div.list >a').contains("documents/", attr="href")
        if not lst:
            lst = doc.selects('div.hidden-md hidden-lg >a')
        urls = []
        for a in lst:
            a["url"] = utils.absoluteUrl(url.url, a["href"])
            # Set root url start
            a["srcUrl"] = url.get('srcUrl')
            if not a['srcUrl']:
                a["srcUrl"] = url.url
            # Set root url end
            urls.append(a)
        return {"Urls": urls}

    # Download again by resetting the URL. Called when you want to download again.
    def resetUrl(self):
        Spider.clearUrl(self)
        Spider.resetUrlsTest(self)

SimplifiedMain.startThread(MySpider())  # Start download
There's an API endpoint that contains the entire response you see on the website, along with... the URL to the document PDF. :D
So, you can query the API, get the URLs, and finally fetch the documents.
Here's how:
import requests

pids = ["P167897", "P173997", "P166309"]

for pid in pids:
    end_point = f"https://search.worldbank.org/api/v2/wds?" \
                f"format=json&includepublicdocs=1&" \
                f"fl=docna,lang,docty,repnb,docdt,doc_authr,available_in&" \
                f"os=0&rows=20&proid={pid}&apilang=en"
    documents = requests.get(end_point).json()["documents"]
    for document_data in documents.values():
        try:
            pdf_url = document_data["pdfurl"]
            print(f"Fetching: {pdf_url}")
            with open(pdf_url.rsplit("/")[-1], "wb") as pdf:
                pdf.write(requests.get(pdf_url).content)
        except KeyError:
            continue
Output: (fully downloaded .pdf files)
Fetching: http://documents.worldbank.org/curated/en/106981614570591392/pdf/Official-Documents-Grant-Agreement-for-Additional-Financing-Grant-TF0B4694.pdf
Fetching: http://documents.worldbank.org/curated/en/331341614570579132/pdf/Official-Documents-First-Restatement-to-the-Disbursement-Letter-for-Grant-D6810-SL-and-for-Additional-Financing-Grant-TF0B4694.pdf
Fetching: http://documents.worldbank.org/curated/en/387211614570564353/pdf/Official-Documents-Amendment-to-the-Financing-Agreement-for-Grant-D6810-SL.pdf
Fetching: http://documents.worldbank.org/curated/en/799541612993594209/pdf/Sierra-Leone-AFRICA-WEST-P167897-Sierra-Leone-Free-Education-Project-Procurement-Plan.pdf
Fetching: http://documents.worldbank.org/curated/en/310641612199201329/pdf/Disclosable-Version-of-the-ISR-Sierra-Leone-Free-Education-Project-P167897-Sequence-No-02.pdf
and more ...

Why are the changes I am making within my code not writing to the new file I have created?

I am currently working with several XML files that require the text of the mods:namePart element to be changed. I have created a script that should loop through all the XML files in a particular directory and make the intended changes. However, when I run the script the changes are not reflected in the new files. It executes as expected, and I even get the "namepart changed" output in my console, but the text I want to replace remains the same. PLEASE HELP!! I am extremely new to coding, so any tips/comments are welcome. Here is the code I'm using:
list_of_files = glob.glob('/Users/#####/Documents/test_xml_files/*.xml')
for file in list_of_files:
    xmlObject = ET.parse(file)
    root = xmlObject.getroot()
    namespaces = {'mods': 'http://www.loc.gov/mods/v3'}
    for namePart in root.iterfind('mods:name/mods:namePart', namespaces):
        if namePart.text == 'Tsukioka, Kōgyo, 1869-1927':
            new_namePart = namePart.text.replace('Tsukioka, Kōgyo, 1869-1927', 'Tsukioka Kōgyo, 1869-1927', 1)
            namePart.text == new_namePart
            print('namepart changed')
        else:
            continue
    nf = open(os.path.join('/Users/####/Documents/updated_test_directory', os.path.basename(file)), 'wb')
    xmlString = ET.tostring(root, encoding="utf-8", method="xml", xml_declaration=None)
    nf.write(xmlString)
    nf.close()
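One thing that stands out in the snippet above (a guess, not a confirmed fix): namePart.text == new_namePart is a comparison, not an assignment, so the element text is never actually replaced even though the print statement fires. A minimal sketch of the loop using a single =, with the mods namespace registered so the output keeps its prefix (the paths are the question's placeholders):

import glob
import os
import xml.etree.ElementTree as ET

# Keep the mods: prefix in the output instead of ElementTree's default ns0:
ET.register_namespace('mods', 'http://www.loc.gov/mods/v3')
namespaces = {'mods': 'http://www.loc.gov/mods/v3'}

for file in glob.glob('/Users/#####/Documents/test_xml_files/*.xml'):
    tree = ET.parse(file)
    root = tree.getroot()
    for namePart in root.iterfind('mods:name/mods:namePart', namespaces):
        if namePart.text == 'Tsukioka, Kōgyo, 1869-1927':
            namePart.text = 'Tsukioka Kōgyo, 1869-1927'  # assignment, not comparison
    tree.write(os.path.join('/Users/####/Documents/updated_test_directory', os.path.basename(file)),
               encoding='utf-8', xml_declaration=True)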

RETR downloading zip file from FTP not writing

I am trying to download a huge zip file (~9 GB zipped and ~130 GB unzipped) from an FTP server with Python using the ftplib library, but unfortunately when using the retrbinary method it does create the file in my local directory, yet nothing is written into it. After the code runs for a while, I get a timeout error. It used to work fine before, but when I tried to go deeper into the use of sockets with this code it stopped working. Since the files I am trying to download are huge, I want more control over the connection to prevent timeout errors while downloading. I am not very familiar with sockets, so I may have misused them. I have been searching online but did not find any problems like this. (I tried with smaller files too for testing, but I still have the same issues.)
Here are the functions I tried; both have problems (method 1 does not write to the file, method 2 downloads the file but I can't unzip it):
import time
import socket
import ftplib
import threading
from zipfile import ZipFile

# To complete
filename = ''
local_folder = ''
ftp_folder = ''
host = ''
user = ''
mp = ''

# timeout error in method 1
def downloadFile_method_1(filename, local_folder, ftp_folder, host, user, mp):
    try:
        ftp = ftplib.FTP(host, user, mp, timeout=1600)
        ftp.set_debuglevel(2)
    except ftplib.error_perm as error:
        print(error)
    with open(local_folder + '/' + filename, "wb") as f:
        ftp.retrbinary("RETR" + ftp_folder + '/' + filename, f.write)

# method 2 works to download the zip file, but header error when unzipping it
def downloadFile_method_2(filename, local_folder, ftp_folder, host, user, mp):
    try:
        ftp = ftplib.FTP(host, user, mp, timeout=1600)
        ftp.set_debuglevel(2)
        sock = ftp.transfercmd('RETR ' + ftp_folder + '/' + filename)
    except ftplib.error_perm as error:
        print(error)

    def background():
        f = open(local_folder + '/' + filename, 'wb')
        while True:
            block = sock.recv(1024 * 1024)
            if not block:
                break
            f.write(block)
        sock.close()

    t = threading.Thread(target=background)
    t.start()
    while t.is_alive():
        t.join(60)
        ftp.voidcmd('NOOP')

def unzip_file(filename, local_folder):
    local_filename = local_folder + '/' + filename
    with ZipFile(local_filename, 'r') as zipObj:
        zipObj.extractall(local_folder)
And the error I get for method 1:
ftplib.error_temp: 421 Timeout - try typing a little faster next time
And the error I get when I try to unzip after using method 2:
zipfile.BadZipFile: Bad magic number for file header
Also, regarding this code, if anyone could explain what the following setsockopt calls do, that would be helpful:
ftp.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
ftp.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 75)
ftp.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 60)
Thanks for your help.
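One thing worth noting about method 1 (a guess, not a confirmed diagnosis): the command string is built as "RETR" + ftp_folder + ..., which produces something like RETR/folder/file.zip with no space after the verb, so the server may never start the transfer and eventually drops the control connection with the 421 timeout. A minimal sketch of that function with the space restored and a larger block size (names reused from the question):

import ftplib

def download_file(filename, local_folder, ftp_folder, host, user, mp):
    ftp = ftplib.FTP(host, user, mp, timeout=1600)
    local_path = local_folder + '/' + filename
    with open(local_path, 'wb') as f:
        # note the space after RETR so the server sees "RETR /folder/file.zip"
        ftp.retrbinary('RETR ' + ftp_folder + '/' + filename, f.write, blocksize=1024 * 1024)
    ftp.quit()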

python download file into memory and handle broken links

I'm using the following code to download a file into memory:
if 'login_before_download' in obj.keys():
    with requests.Session() as session:
        session.post(obj['login_before_download'], verify=False)
        request = session.get(obj['download_link'], allow_redirects=True)
else:
    request = requests.get(obj['download_link'], allow_redirects=True)

print("downloaded {} into memory".format(obj[download_link_key]))
file_content = request.content
obj is a dict that contains the download_link and another key that indicates whether I need to log in to the page to create a cookie.
The problem with my code is that if the URL is broken and there isn't any file to download, I still get the HTML content of the page instead of identifying that the download failed.
Is there any way to identify that the file wasn't downloaded?
I found the following solution at this URL:
import requests

def is_downloadable(url):
    """
    Does the url contain a downloadable resource
    """
    h = requests.head(url, allow_redirects=True)
    header = h.headers
    content_type = header.get('content-type')
    if 'text' in content_type.lower():
        return False
    if 'html' in content_type.lower():
        return False
    return True

print(is_downloadable('https://www.youtube.com/watch?v=9bZkp7q19f0'))
# >> False
print(is_downloadable('http://google.com/favicon.ico'))
# >> True
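In addition to the Content-Type check, a status-code check catches broken links that come back as error pages; a small sketch combining both, reusing the obj keys from the question:

import requests

def fetch_file(obj):
    with requests.Session() as session:
        # log in first only when required, as in the original snippet
        if 'login_before_download' in obj:
            session.post(obj['login_before_download'], verify=False)
        response = session.get(obj['download_link'], allow_redirects=True)

    # a broken link usually shows up as a 4xx/5xx status...
    response.raise_for_status()
    # ...or as an HTML error page served with a 200
    if 'html' in response.headers.get('content-type', '').lower():
        raise ValueError("Expected a file but received an HTML page")
    return response.content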

Selenium (Python) - waiting for a download process to complete using Chrome web driver

I'm using Selenium and Python via chromewebdriver (Windows) in order to automate the task of downloading a large number of files from different pages.
My code works, but the solution is far from ideal: the function below clicks a website button that initiates a JavaScript function, which generates a PDF file and then downloads it.
I had to use a static wait in order to wait for the download to complete (ugly). I cannot check the file system to verify when the download is complete, since I'm using multithreading (downloading lots of files from different pages at once) and the file names are generated dynamically by the website itself.
My code:
def file_download(num, drivervar):
    Counter += 1
    try:
        drivervar.get(url[num])
        download_button = WebDriverWait(drivervar, 20).until(
            EC.element_to_be_clickable((By.ID, 'download button ID')))
        download_button.click()
        time.sleep(10)
    except TimeoutException:  # Retry once
        print('Timeout in thread number: ' + str(num) + ', retrying...')
        .....
Is it possible to determine download completion in webdriver? I want to avoid using time.sleep(x).
Thanks a lot.
You can get the status of each download by visiting chrome://downloads/ with the driver.
To wait for all the downloads to finish and to list all the paths:
def every_downloads_chrome(driver):
    if not driver.current_url.startswith("chrome://downloads"):
        driver.get("chrome://downloads/")
    return driver.execute_script("""
        var items = document.querySelector('downloads-manager')
            .shadowRoot.getElementById('downloadsList').items;
        if (items.every(e => e.state === "COMPLETE"))
            return items.map(e => e.fileUrl || e.file_url);
        """)
# waits for all the files to be completed and returns the paths
paths = WebDriverWait(driver, 120, 1).until(every_downloads_chrome)
print(paths)
Updated to support changes up to Chrome version 81.
I have had the same problem and found a solution. You can check whether a .crdownload file is in your download folder. If there are no files with a .crdownload extension in the download folder, then all your downloads are completed. This only works for Chrome and Chromium, I think.
def downloads_done():
    while True:
        for filename in os.listdir("/downloads"):
            if ".crdownload" in filename:
                time.sleep(0.5)
                downloads_done()
        break  # no partial downloads left on this pass
Whenever you call downloads_done(), it will loop until all downloads are completed. If you are downloading massive files, like 80 gigabytes, then I don't recommend this, because the function can reach the maximum recursion depth.
2020 edit:
def wait_for_downloads():
    print("Waiting for downloads", end="")
    while any([filename.endswith(".crdownload") for filename in os.listdir("/downloads")]):
        time.sleep(2)
        print(".", end="")
    print("done!")
The "end" keyword argument in print() usually holds a newline but we replace it.
While there are no filenames in the /downloads folder that end with .crdownload
sleep for 2 seconds and print one dot without newline to console
I don't really recommend using Selenium anymore after finding out about requests, but if it's a very heavily guarded site with Cloudflare, captchas, etc., then you might have to resort to Selenium.
With Chrome 80, I had to change the answer from @florent-b to the code below:
def every_downloads_chrome(driver):
    if not driver.current_url.startswith("chrome://downloads"):
        driver.get("chrome://downloads/")
    return driver.execute_script("""
        return document.querySelector('downloads-manager')
            .shadowRoot.querySelector('#downloadsList')
            .items.filter(e => e.state === 'COMPLETE')
            .map(e => e.filePath || e.file_path || e.fileUrl || e.file_url);
        """)
I believe this is backwards-compatible; that is, it should still work with older versions of Chrome.
There are issues with opening chrome://downloads/ when running Chrome in headless mode.
The following function uses a composite approach that works whether the mode is headless or not, choosing the better approach available in each mode.
It assumes that the caller clears all files downloaded at file_download_path after each call to this function.
import os
import time
import logging
from selenium.webdriver.support.ui import WebDriverWait

def wait_for_downloads(driver, file_download_path, headless=False, num_files=1):
    max_delay = 60
    interval_delay = 0.5
    if headless:
        total_delay = 0
        done = False
        while not done and total_delay < max_delay:
            files = os.listdir(file_download_path)
            # Remove system files if present: Mac adds the .DS_Store file
            if '.DS_Store' in files:
                files.remove('.DS_Store')
            if len(files) == num_files and not [f for f in files if f.endswith('.crdownload')]:
                done = True
            else:
                total_delay += interval_delay
                time.sleep(interval_delay)
        if not done:
            logging.error("File(s) couldn't be downloaded")
    else:
        def all_downloads_completed(driver, num_files):
            return driver.execute_script("""
                var items = document.querySelector('downloads-manager').shadowRoot.querySelector('#downloadsList').items;
                var i;
                var done = false;
                var count = 0;
                for (i = 0; i < items.length; i++) {
                    if (items[i].state === 'COMPLETE') {count++;}
                }
                if (count === %d) {done = true;}
                return done;
                """ % (num_files))

        driver.execute_script("window.open();")
        driver.switch_to_window(driver.window_handles[1])
        driver.get('chrome://downloads/')
        # Wait for downloads to complete
        WebDriverWait(driver, max_delay, interval_delay).until(lambda d: all_downloads_completed(d, num_files))
        # Clear all downloads from chrome://downloads/
        driver.execute_script("""
            document.querySelector('downloads-manager').shadowRoot
                .querySelector('#toolbar').shadowRoot
                .querySelector('#moreActionsMenu')
                .querySelector('button.clear-all').click()
            """)
        driver.close()
        driver.switch_to_window(driver.window_handles[0])
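A hedged usage sketch (the directory and file count are assumptions; the path must match the download.default_directory Chrome was started with):

# e.g. wait for two files to land in the configured download directory
wait_for_downloads(driver, "/tmp/downloads", headless=True, num_files=2)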
import os
import time
import unittest
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait

class MySeleniumTests(unittest.TestCase):
    selenium = None

    @classmethod
    def setUpClass(cls):
        cls.selenium = webdriver.Firefox(...)
        ...

    def test_download(self):
        os.chdir(self.download_path)  # default download directory
        # click the button
        self.selenium.get(...)
        self.selenium.find_element_by_xpath(...).click()

        # waiting for the server to finish its inner task
        def download_begin(driver):
            if len(os.listdir()) == 0:
                time.sleep(0.5)
                return False
            else:
                return True
        WebDriverWait(self.selenium, 120).until(download_begin)  # the max waiting time is 120s

        # waiting for the server to finish sending.
        # if the size of the directory is changing, wait
        def download_complete(driver):
            sum_before = -1
            sum_after = sum([os.stat(file).st_size for file in os.listdir()])
            while sum_before != sum_after:
                time.sleep(0.2)
                sum_before = sum_after
                sum_after = sum([os.stat(file).st_size for file in os.listdir()])
            return True
        WebDriverWait(self.selenium, 120).until(download_complete)  # the max waiting time is 120s
You must do these things:
Wait for the server to finish its inner work (for example, a database query).
Wait for the server to finish sending the files.
(My English is not very good.)
To obtain the return of more than one item, I had to change the answer of @thdox to the code below:
def every_downloads_chrome(driver):
    if not driver.current_url.startswith("chrome://downloads"):
        driver.get("chrome://downloads/")
    return driver.execute_script("""
        var elements = document.querySelector('downloads-manager')
            .shadowRoot.querySelector('#downloadsList')
            .items
        if (elements.every(e => e.state === 'COMPLETE'))
            return elements.map(e => e.filePath || e.file_path || e.fileUrl || e.file_url);
        """)
This may not work for all use cases, but for my simple need to wait for one PDF to download it works great. Based on Walter's comment above.
def get_non_temp_len(download_dir):
    non_temp_files = [i for i in os.listdir(download_dir) if not (i.endswith('.tmp') or i.endswith('.crdownload'))]
    return len(non_temp_files)

download_dir = 'your/download/dir'
original_count = get_non_temp_len(download_dir)  # get the file count at the start

# do your selenium stuff

while original_count == get_non_temp_len(download_dir):
    time.sleep(.5)  # wait for file count to change
driver.quit()
I had the same problem and this method worked for me.
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import ElementClickInterceptedException
from threading import Thread
import os
import datetime

def checkFilePresence(downloadPath, numberOfFilesInitially, artistName, songTitle):
    timeNow = datetime.datetime.now()
    found = False
    while not found:
        numberOfFilesNow = len(os.listdir(downloadPath))
        if numberOfFilesNow > numberOfFilesInitially:
            for folders, subfolders, files in os.walk(downloadPath):
                for file in files:
                    modificationTime = datetime.datetime.fromtimestamp(
                        os.path.getctime(os.path.join(folders, file)))
                    if modificationTime > timeNow:
                        if file.endswith('.mp3'):
                            return
This code works in headless mode and returns the downloaded file name (based on @protonum's code):
def wait_for_downloads(download_path):
    max_delay = 30
    interval_delay = 0.5
    total_delay = 0
    file = ''
    done = False
    while not done and total_delay < max_delay:
        files = [f for f in os.listdir(download_path) if f.endswith('.crdownload')]
        if not files and len(file) > 1:
            done = True
        if files:
            file = files[0]
        time.sleep(interval_delay)
        total_delay += interval_delay
    if not done:
        logging.error("File(s) couldn't be downloaded")
    return download_path + '/' + file.replace(".crdownload", "")
def wait_for_download_to_be_done(self, path_to_folder, file_name):
    max_time = 60
    time_counter = 0
    while not os.path.exists(path_to_folder + file_name) and time_counter < max_time:
        sleep(0.5)
        time_counter += 0.5
    if time_counter == max_time:
        assert os.path.exists(path_to_folder + file_name), "The file wasn't downloaded"
When using test automation, it's crucial that developers make the software testable. It is your job to test the software, and testability is part of that, meaning you should ask for a spinner or a simple HTML tag that indicates when the download has finished successfully.
In a case like yours, where you cannot check it in the UI and you cannot check the file system, this is the best way to solve it.
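For instance, if the developers expose such an indicator, the wait becomes an ordinary explicit wait. A minimal sketch, assuming a hypothetical #download-complete element that the site would show once the file is ready:

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# "#download-complete" is a hypothetical element; the real site would need to provide one
WebDriverWait(driver, 60).until(
    EC.visibility_of_element_located((By.CSS_SELECTOR, "#download-complete"))
)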
