Downloading picture files from a particular website with threads and a queue - python-3.x

I'm trying to create a simple program in Python 3 that uses threads and a queue to download images from URL links concurrently, using 4 or more threads to download 4 images at a time and save them to the Downloads folder on the PC, while avoiding duplicates by sharing information between the threads.
I suppose I could use something like URL1 = "Link1"?
Here are some examples of links.
“https://unab-dw2018.s3.amazonaws.com/ldp2019/1.jpeg”
“https://unab-dw2018.s3.amazonaws.com/ldp2019/2.jpeg”
But I don't understand how to use threads with a queue, and I'm lost on how to do this.
I have tried searching for a page that explains how to use threads with a queue for concurrent downloads, but I have only found material on threads alone.
Here is some code that works partially.
What I need is for the program to ask how many threads you want and then download images until it reaches image 20, but with the current code, if I input 5, it will only download 5 images, and so on. The idea is that if I enter 5, it should download 5 images first, then the next 5, and so on until 20; with 4 threads it should go 4, 4, 4, 4, 4; with 6 it should go 6, 6, 6 and then download the remaining 2.
Somehow I must implement a queue in the code, but I only learned threads a few days ago and I'm lost on how to mix threads and queues together.
import threading
import urllib.request
import queue  # I need to use this somehow

def worker(cont):
    print("The worker is ON", cont)
    image_download = "URL" + str(cont) + ".jpeg"
    download = urllib.request.urlopen(image_download)
    file_save = open("Image " + str(cont) + ".jpeg", "wb")
    file_save.write(download.read())
    file_save.close()
    return cont + 1

threads = []
q_threads = int(input("Choose input amount of threads between 4 and 20"))
for i in range(0, q_threads):
    h = threading.Thread(target=worker, args=(i + 1,))
    threads.append(h)
for i in range(0, q_threads):
    threads[i].start()

I adapted the following from some code I used to perform multi-threaded PSO.
import threading
import queue

class picture_getter(threading.Thread):
    def __init__(self, url, picture_queue):
        self.url = url
        self.picture_queue = picture_queue
        super(picture_getter, self).__init__()

    def run(self):
        print("Starting download on " + str(self.url))
        self._get_picture()

    def _get_picture(self):
        # --- get your picture --- #
        self.picture_queue.put(picture)

if __name__ == "__main__":
    picture_queue = queue.Queue(maxsize=0)
    picture_threads = []
    picture_urls = ["string.com", "string2.com"]

    # create and start the threads
    for url in picture_urls:
        picture_threads.append(picture_getter(url, picture_queue))
        picture_threads[-1].start()

    # wait for threads to finish
    for picture_thread in picture_threads:
        picture_thread.join()

    # get the results
    picture_list = []
    while not picture_queue.empty():
        picture_list.append(picture_queue.get())
Just so you know, people on Stack Overflow like to see what you have tried before providing a solution. However, I have this code lying around anyway. Welcome aboard, fellow newbie!
One thing I will add is that this does not avoid duplication by sharing information between threads. It avoids duplication because each thread is told what to download. If your filenames are numbered, as they appear to be in your question, this shouldn't be a problem, since you can easily build a list of them.
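For example, since the links in the question are simply numbered, a list of the URLs could be built like this (using the prefix from the example links above):

url_prefix = "https://unab-dw2018.s3.amazonaws.com/ldp2019/"
picture_urls = [url_prefix + "{}.jpeg".format(i + 1) for i in range(20)]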
Updated code to address the edits to Treyon's original post:
import threading
import urllib.request
import queue
import time

class picture_getter(threading.Thread):
    def __init__(self, url, file_name, picture_queue):
        self.url = url
        self.file_name = file_name
        self.picture_queue = picture_queue
        super(picture_getter, self).__init__()

    def run(self):
        print("Starting download on " + str(self.url))
        self._get_picture()

    def _get_picture(self):
        print("{}: Simulating delay".format(self.file_name))
        time.sleep(1)
        # download and save image
        download = urllib.request.urlopen(self.url)
        file_save = open("Image " + self.file_name, "wb")
        file_save.write(download.read())
        file_save.close()
        self.picture_queue.put("Image " + self.file_name)

def remainder_or_max_threads(num_pictures, num_threads, iterations):
    # remaining pictures
    remainder = num_pictures - (num_threads * iterations)
    # if there are equal or more pictures remaining than max threads
    # return max threads, otherwise the remaining number of pictures
    if remainder >= num_threads:
        return num_threads
    else:
        return remainder

if __name__ == "__main__":
    # store the response from the threads
    picture_queue = queue.Queue(maxsize=0)
    picture_threads = []
    num_pictures = 20
    url_prefix = "https://unab-dw2018.s3.amazonaws.com/ldp2019/"
    picture_names = ["{}.jpeg".format(i + 1) for i in range(num_pictures)]
    max_threads = int(input("Choose input amount of threads between 4 and 20: "))

    iterations = 0
    # during the majority of runtime, iterations * max_threads is
    # the number of pictures that have been downloaded;
    # when it reaches num_pictures, all pictures have been downloaded
    while iterations * max_threads < num_pictures:
        # this returns max_threads if there are max_threads or more pictures left to download
        # else it will return the number of remaining pictures
        threads = remainder_or_max_threads(num_pictures, max_threads, iterations)

        # loop through the next section of pictures, create and start their threads
        for name, i in zip(picture_names[iterations * max_threads:], range(threads)):
            picture_threads.append(picture_getter(url_prefix + name, name, picture_queue))
            picture_threads[i + iterations * max_threads].start()

        # wait for threads to finish
        for picture_thread in picture_threads:
            picture_thread.join()

        # increment the iterations
        iterations += 1

    # get the results
    picture_list = []
    while not picture_queue.empty():
        picture_list.append(picture_queue.get())

    print("Successfully downloaded")
    print(picture_list)
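For completeness, a queue can also be used the other way around, as the source of work: put every URL on a queue.Queue and let a fixed number of worker threads pull from it until it is empty. Because Queue.get() hands each item to exactly one thread, duplicates are avoided without any batching arithmetic. This is only a minimal sketch of that pattern, assuming the same numbered filenames as above:

import threading
import urllib.request
import queue

def worker(url_queue):
    # keep pulling URLs until the queue is empty, then exit
    while True:
        try:
            name, url = url_queue.get_nowait()
        except queue.Empty:
            return
        data = urllib.request.urlopen(url).read()
        with open("Image " + name, "wb") as f:
            f.write(data)
        url_queue.task_done()

if __name__ == "__main__":
    url_prefix = "https://unab-dw2018.s3.amazonaws.com/ldp2019/"
    url_queue = queue.Queue()
    for i in range(20):
        name = "{}.jpeg".format(i + 1)
        url_queue.put((name, url_prefix + name))

    num_threads = int(input("Choose input amount of threads between 4 and 20: "))
    threads = [threading.Thread(target=worker, args=(url_queue,)) for _ in range(num_threads)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()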

Related

How do I process several lists at once?

I have a big list of numbers. I want to split that big list of numbers into x number of lists and process them in parallel.
Here's the code that I have so far:
from multiprocessing import Pool
import numpy

def processNumList(numList):
    for num in numList:
        outputList.append(num ** 2)

numThreads = 5
bigNumList = list(range(50))
splitNumLists = numpy.array_split(bigNumList, numThreads)
outputList = []

for numList in splitNumLists:
    processNumList(numList)

print(outputList)
The above code does the following:
Splits a big list of numbers into the specified number of smaller lists
Passes each of those lists to the processNumList function
Prints the result list afterwards
Everything there works as expected, but it only processes one list at a time. I want every list to be processed simultaneously.
What is the proper code to do that? I experimented with pool but could never seem to get it working.
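For reference, since the question mentions experimenting with Pool, a minimal sketch of that approach might look like the following. It assumes the worker function returns its results instead of appending to a shared list, because separate processes do not share memory:

from multiprocessing import Pool
import numpy

def process_num_list(num_list):
    # return the results rather than mutating a shared list
    return [num ** 2 for num in num_list]

if __name__ == "__main__":
    num_procs = 5
    big_num_list = list(range(50))
    split_num_lists = numpy.array_split(big_num_list, num_procs)

    with Pool(num_procs) as pool:
        # map returns one result list per chunk; flatten them afterwards
        results = pool.map(process_num_list, split_num_lists)
    output_list = [n for chunk in results for n in chunk]
    print(output_list)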
You could try something like this:
import threading

class MyClass(threading.Thread):
    def __init__(self, arg, arg2):
        # init stuff: store the work for this thread on the instance
        super().__init__()
        self.arg = arg
        self.arg2 = arg2

    def run(self):
        # your logic to process the list goes here, using self.arg / self.arg2
        pass

# split the list as you already did, then start one thread per sublist
for _ in range(numThreads):
    MyClass(arg, arg2).start()
Here's the code I ended up using.
I used threading.Thread() to process the lists asynchronously and then called thread.join() to ensure that all of the threads were finished before moving on.
I added time.sleep for demonstration purposes (to simulate a lengthy task), but obviously you wouldn't want to use that in production code.
import numpy
import threading
import time

def process_num_list(numList):
    for num in numList:
        output_list.append(num ** 2)
        time.sleep(1)

num_threads = 5
big_num_list = list(range(30))
split_num_lists = numpy.array_split(big_num_list, num_threads)
output_list = []
threads = []

for num_list in split_num_lists:
    thread = threading.Thread(target=process_num_list, args=[num_list])
    threads.append(thread)
    thread.start()

for thread in threads:
    thread.join()

print(output_list)
As a bonus, here's a working example of five Selenium windows:
from selenium import webdriver
import numpy
import threading
import time

def scrapeSites(siteList):
    print("Preparing to scrape " + str(len(siteList)) + " sites")
    driver = webdriver.Chrome(executable_path=r"..\chromedriver.exe")
    driver.set_window_size(700, 400)
    for site in siteList:
        print("\nNow scraping " + site)
        driver.get(site)
        pageTitles.append(driver.title)
    driver.quit()

numThreads = 5
fullWebsiteList = ["https://en.wikipedia.org/wiki/Special:Random"] * 30
splitWebsiteLists = numpy.array_split(fullWebsiteList, numThreads)
pageTitles = []
threads = []

for websiteList in splitWebsiteLists:
    thread = threading.Thread(target=scrapeSites, args=[websiteList])
    threads.append(thread)
    thread.start()

for thread in threads:
    thread.join()

print(pageTitles)

Multi-core parallel computing over a for loop in python-3.x

I have a simple for loop which prints the numbers from 1 to 9999 with a 5 second sleep in between. The code is as below:
import time

def run():
    length = 10000
    for i in range(1, length):
        print(i)
        time.sleep(5)

run()
I want to apply multiprocessing to run the for loop concurrently on multiple cores. So I amended the code above to use 5 cores:
import multiprocessing as mp
import time

def run():
    length = 10000
    for i in range(1, length):
        print(i)
        time.sleep(5)

if __name__ == '__main__':
    p = mp.Pool(5)
    p.map(run())
    p.close()
There is no issue running the job, but it seems like it is not running in parallel on 5 cores. How can I get the code to work as expected?
First, you are running the same 1..9999 loop 5 times, and second, you are executing the run function instead of passing it to the .map() method.
You must prepare your queue before passing it to the Pool instance so that all 5 workers process the same queue:
import multiprocessing as mp
import time

def run(i):
    print(i)
    time.sleep(5)

if __name__ == '__main__':
    length = 10000
    queue = range(1, length)
    p = mp.Pool(5)
    p.map(run, queue)
    p.close()
Note that it will process the numbers out of order as explained in the documentation. For example, worker #1 will process 1..500, worker #2 will process 501..1000 etc:
This method chops the iterable into a number of chunks which it submits to the process pool as separate tasks. The (approximate) size of these chunks can be specified by setting chunksize to a positive integer.
If you want to process the numbers more similarly to the single threaded version, set chunksize to 1:
p.map(run, queue, 1)

Processing huge CSV file using Python and multithreading

I have a function that yields lines from a huge CSV file lazily:
def get_next_line():
    with open(sample_csv, 'r') as f:
        for line in f:
            yield line

def do_long_operation(row):
    print('Do some operation that takes a long time')
I need to use threads so that I can call do_long_operation on each record I get from the function above.
Most places on the Internet have examples like this, and I am not very sure if I am on the right path.
import threading

thread_list = []
for i in range(8):
    t = threading.Thread(target=do_long_operation, args=(get_next_row from get_next_line))
    thread_list.append(t)

for thread in thread_list:
    thread.start()

for thread in thread_list:
    thread.join()
My questions are:
How do I start only a finite number of threads, say 8?
How do I make sure that each of the threads will get a row from get_next_line?
You could use a thread pool from multiprocessing and map your tasks to a pool of workers:
from multiprocessing.pool import ThreadPool as Pool
# from multiprocessing import Pool
from random import randint
from time import sleep

def process_line(l):
    print(l, "started")
    sleep(randint(0, 3))
    print(l, "done")

def get_next_line():
    with open("sample.csv", 'r') as f:
        for line in f:
            yield line

f = get_next_line()
t = Pool(processes=8)

for i in f:
    t.map(process_line, (i,))

t.close()
t.join()
This will create eight workers and submit your lines to them, one by one. As soon as a process is "free", it will be allocated a new task.
There is a commented-out import statement, too. If you swap the ThreadPool import for Pool from multiprocessing, you will get subprocesses instead of threads, which may be more efficient in your case.
Using a Pool/ThreadPool from multiprocessing to map tasks to a pool of workers and a Queue to control how many tasks are held in memory (so we don't read too far ahead into the huge CSV file if worker processes are slow):
from multiprocessing.pool import ThreadPool as Pool
# from multiprocessing import Pool
from random import randint
import time, os
from multiprocessing import Queue

def process_line(l):
    print("{} started".format(l))
    time.sleep(randint(0, 3))
    print("{} done".format(l))

def get_next_line():
    with open(sample_csv, 'r') as f:
        for line in f:
            yield line

# use for testing
# def get_next_line():
#     for i in range(100):
#         print('yielding {}'.format(i))
#         yield i

def worker_main(queue):
    print("{} working".format(os.getpid()))
    while True:
        # Get item from queue, block until one is available
        item = queue.get(True)
        if item is None:
            # Shutdown this worker and requeue the item so other workers can shutdown as well
            queue.put(None)
            break
        else:
            # Process item
            process_line(item)
    print("{} done working".format(os.getpid()))

f = get_next_line()

# Use a multiprocessing queue with maxsize
q = Queue(maxsize=5)

# Start workers to process queue items
t = Pool(processes=8, initializer=worker_main, initargs=(q,))

# Enqueue items. This blocks if the queue is full.
for l in f:
    q.put(l)

# Enqueue the shutdown message (i.e. None)
q.put(None)

# We need to first close the pool before joining
t.close()
t.join()
Hannu's answer is not the best method.
I ran the code on a 100M-row CSV file, and it took forever to perform the operation.
However, prior to reading his answer, I had written the following code:
import csv
from multiprocessing import Pool
import time
import datetime

def call_processing_rows_pickably(row):
    process_row(row)

def process_row(row):
    row_to_be_printed = str(row) + str("hola!")
    print(row_to_be_printed)

class process_csv():
    def __init__(self, file_name):
        self.file_name = file_name

    def get_row_count(self):
        with open(self.file_name) as f:
            for i, l in enumerate(f):
                pass
        self.row_count = i

    def select_chunk_size(self):
        if(self.row_count > 10000000):
            self.chunk_size = 100000
            return
        if(self.row_count > 5000000):
            self.chunk_size = 50000
            return
        self.chunk_size = 10000
        return

    def process_rows(self):
        list_de_rows = []
        count = 0
        with open(self.file_name, 'r') as file:  # text mode, as csv.reader in Python 3 requires
            reader = csv.reader(file)
            for row in reader:
                count += 1
                print(count)
                list_de_rows.append(row)
                if(len(list_de_rows) == self.chunk_size):
                    p.map(call_processing_rows_pickably, list_de_rows)
                    del list_de_rows[:]
        # process any leftover rows that did not fill a whole chunk
        if list_de_rows:
            p.map(call_processing_rows_pickably, list_de_rows)

    def start_process(self):
        self.get_row_count()
        self.select_chunk_size()
        self.process_rows()

initial = datetime.datetime.now()
p = Pool(4)
ob = process_csv("100M_primes.csv")
ob.start_process()
final = datetime.datetime.now()
print(final - initial)
This took 22 minutes. Obviously, I need to make more improvements. For example, the Fred library in R takes at most 10 minutes to do this task.
The difference is that I am creating chunks of 100k rows first and then passing each chunk to a function which is mapped by a multiprocessing Pool (here, 4 workers).
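A more general sketch of that chunking pattern, reading the CSV lazily and handing each chunk to the pool (the chunk size and the per-row work below are placeholders, not the original workload):

import csv
from itertools import islice
from multiprocessing import Pool

def process_row(row):
    # placeholder for the real per-row work
    return str(row) + "hola!"

def chunks(reader, size):
    # yield lists of up to `size` rows at a time
    while True:
        chunk = list(islice(reader, size))
        if not chunk:
            return
        yield chunk

if __name__ == "__main__":
    with Pool(4) as pool, open("100M_primes.csv", "r", newline="") as f:
        reader = csv.reader(f)
        for chunk in chunks(reader, 100000):
            pool.map(process_row, chunk)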

Threads not picking up more work from Queue

I'm pretty much brand new to Python and I have been working on a script that parses the CSV files in any given directory. After I implemented a queue and threads, I've been stuck on this issue of the threads not picking up new work even though there are still items in the queue. For example, if I specify the max number of threads as 3 and there are 6 items in the queue, the threads pick up 3 files, process them, then hang indefinitely. I may just be conceptually misunderstanding the multithreading process.
ETA:
Some of the code has been removed for security reasons.
q = Queue.Queue()
threads = []

for file in os.listdir(os.chdir(arguments.path)):
    if (file.endswith('.csv')):
        q.put(file)

for i in range(max_threads):
    worker = threading.Thread(target=process, name='worker-{}'.format(thread_count))
    worker.setDaemon(True)
    worker.start()
    threads.append(worker)
    thread_count += 1

q.join()

def process():
    with open(q.get()) as csvfile:
        #do stuff
        q.task_done()
You forgot to loop over the Queue in your threads: each worker calls process once, handles a single file, and then exits, so only max_threads items ever get processed...
def process():
    while True:  # <---------------- keep getting stuff from the queue
        with open(q.get()) as csvfile:
            #do stuff
            q.task_done()
That said, you may be re-inventing the wheel; try using a thread pool:
from concurrent.futures import ThreadPoolExecutor

l = []  # a list should do it ...
for file in os.listdir(arguments.path):
    if (file.endswith('.csv')):
        l.append(file)

def process(file):
    return "this is the file i got %s" % file

with ThreadPoolExecutor(max_workers=4) as e:
    results = list(e.map(process, l))
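If you need to handle each result as soon as its file finishes, rather than all at once at the end, submit/as_completed is a small variation on the same executor (a sketch reusing the process function and list l from above):

from concurrent.futures import ThreadPoolExecutor, as_completed

with ThreadPoolExecutor(max_workers=4) as e:
    futures = [e.submit(process, f) for f in l]
    for future in as_completed(futures):
        # results arrive in completion order, not submission order
        print(future.result())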

python 3 multithreading output to CSV file (blank)

I am new to Python; I got this multithreading approach working from a tutorial I ran across.
I'm unsure whether it is good practice or not.
What I want to achieve:
pings the list of hostnames and returns up or down
writes the results to a CSV file
What this file currently does:
pings the list of hostnames and returns up or down
the CSV file it creates is empty and doesn't appear to have any results written to it
I have done some testing and found that for the pings, multithreading is approximately 16 times faster than serial code for me.
I am doing massive amounts of pings, approximately 9000, and want them returned as soon as possible.
Can you please let me know where I have gone wrong with the CSV part?
import threading
from queue import Queue
import time
import subprocess as sp
import csv

# lock to serialize console output
lock = threading.Lock()

def do_work(item):
    #time.sleep(1) # pretend to do some lengthy work.
    # Make sure the whole print completes or threads can mix up output in one line.
    status, result = sp.getstatusoutput("ping -n 3 " + str(item))
    if status == 0:
        result = 'Up'
    else:
        result = 'Down'
    with lock:
        output.writerow({'hostname': item, 'status': result})
        array.append({'hostname': item, 'status': result})
        print(threading.current_thread().name, item, result)

# The worker thread pulls an item from the queue and processes it
def worker():
    while True:
        item = q.get()
        do_work(item)
        q.task_done()

# Create the queue and thread pool.
q = Queue()
for i in range(100):
    t = threading.Thread(target=worker)
    t.daemon = True  # thread dies when main thread (only non-daemon thread) exits.
    t.start()

array = []

# stuff work items on the queue (in this case, just a number).
start = time.perf_counter()
headers = ['status', 'hostname']
output = csv.DictWriter(open('host-export.csv', 'w'), delimiter=',', lineterminator='\n', fieldnames=headers)
output.writeheader()

txt = open("hosts.txt", 'r', encoding="utf8")
for line in txt:
    q.put(line, array)

q.join()  # block until all tasks are done

# "Work" took .1 seconds per task.
# 20 tasks serially would be 2 seconds.
# With 4 threads should be about .5 seconds (contrived because non-CPU intensive "work")
print(array)
print('time:', time.perf_counter() - start)
I also tried bulk writing for the CSV, thinking that maybe I just can't access the csv object from inside the function, but as shown below that didn't work either.
headers = ['status','hostname']
output = csv.DictWriter(open('host-export.csv','w'), delimiter=',', lineterminator='\n', fieldnames=headers)
output.writeheader()
output.writerows(array)
I figured out what I had done wrong: I didn't close the file, so nothing was flushed to it.
Here is the code I am using now to write my CSV file.
fieldnames = ['ip', 'dns', 'pings']  # headings
test_file = open('test2-p.csv', 'w', newline='')  # open file
csvwriter = csv.DictWriter(test_file, delimiter=',', fieldnames=fieldnames)  # set csv writing settings
csvwriter.writeheader()  # write csv headings
for row in rows:  # write to csv file
    csvwriter.writerow(row)
test_file.close()
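An alternative sketch of the same fix using a with block, which closes (and flushes) the file automatically even if an exception occurs; rows and the fieldnames are assumed to be the same as above:

import csv

fieldnames = ['ip', 'dns', 'pings']
with open('test2-p.csv', 'w', newline='') as test_file:
    csvwriter = csv.DictWriter(test_file, delimiter=',', fieldnames=fieldnames)
    csvwriter.writeheader()
    csvwriter.writerows(rows)
# the file is closed here, so all rows are flushed to disk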
