Jython threading with thread -> queue -> thread - multithreading

I'm running Jython 2.5.3 on Ubuntu 12.04 with the OpenJDK 64-bit 1.7.0_55 JVM.
I'm trying to create a simple threaded application to optimize data processing and loading. I have populator threads that read records from a database and mangle them a bit before putting them onto a queue. The queue is read by consumer threads that store the data in a different database. Here is the outline of my code:
import sys
import time
import threading
import Queue

class PopulatorThread(threading.Thread):
    def __init__(self, mod, mods, queue):
        super(PopulatorThread, self).__init__()
        self.mod = mod
        self.mods = mods
        self.queue = queue

    def run(self):
        # Create db connection
        # ...
        try:
            # Select one segment of records using 'id % mods = mod'
            # Process these records & slap them onto the queue
            # ...
        except:
            con.rollback()
            raise
        finally:
            print "Made it to 'finally' in populator %d" % self.mod
            con.close()

class ConsumerThread(threading.Thread):
    def __init__(self, mod, queue):
        super(ConsumerThread, self).__init__()
        self.mod = mod
        self.queue = queue

    def run(self):
        # Create db connection
        # ...
        try:
            while True:
                item = self.queue.get()
                if not item: break
                # Put records from the queue into
                # a different database
                # ...
                self.queue.task_done()
        except:
            con.rollback()
            raise
        finally:
            print "Made it to 'finally' in consumer %d" % self.mod
            con.close()

def main(argv):
    tread1Count = 3
    tread2Count = 4
    # This is the notefactsselector data queue
    nfsQueue = Queue.Queue()
    # Start consumer/writer threads
    j = 0
    treads2 = []
    while j < tread2Count:
        treads2.append(ConsumerThread(j, nfsQueue))
        treads2[-1].start()
        j += 1
    # Start reader/populator threads
    i = 0
    treads1 = []
    while i < tread1Count:
        treads1.append(PopulatorThread(i, tread1Count, nfsQueue))
        treads1[-1].start()
        i += 1
    # Wait for reader/populator threads
    print "Waiting to join %d populator threads" % len(treads1)
    i = 0
    for tread in treads1:
        print "Waiting to join a populator thread %d" % i
        tread.join()
        i += 1
    # Add one sentinel value to queue for each write thread
    print "Adding sentinel values to end of queue"
    for tread in treads2:
        nfsQueue.put(None)
    # Wait for consumer/writer threads
    print "Waiting to join consumer/writer threads"
    for tread in treads2:
        print "Waiting on a consumer/writer"
        tread.join()
    # Wait for Queue
    print "Waiting to join queue with %d items" % nfsQueue.qsize()
    nfsQueue.join()
    print "Queue has been joined"

if __name__ == '__main__':
    main(sys.argv)
I have simplified the database implementation somewhat to save space.
When I run the code, the populator and consumer threads seem to
reach the end, since I get the "Made it to finally in ..." messages.
I get the "Waiting to join n populator threads" message, and eventually the
"Waiting to join a populator thread n" messages.
I get the "Waiting to join consumer/writer threads" message as well as each of the "Waiting on a consumer/writer" messages I expect.
I get the "Waiting to join queue with 0 items" message I expect, but not the "Queue has been joined" message; apparently the program is blocking while waiting for the queue, and it never terminates.
I suspect I have my thread initializations or thread joinings in the wrong order somehow, but I have little experience with concurrent programming, so my intuitions about how to do things aren't well developed. I find plenty of Python/Jython examples of queues populated by while loops and read by threads, but none so far about queues populated by one set of threads and read by a different set.
The populator and consumer threads appear to finish; the program seems to block at the final nfsQueue.join(), waiting for the queue to report that all of its items have been completed.
Thanks to any who have suggestions and lessons for me!

Are you calling task_done() on every item you take off the queue, including the None sentinel values? If the queue isn't told explicitly that each get() has been matched by a task_done(), it will never return from join(). In the code above, the consumer breaks out of its loop as soon as it sees a sentinel, so those sentinel items are never marked done.
PS: You don't see "Waiting to join a populator thread %d" because you forgot the print in front of it :)
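For reference, here is a minimal, self-contained sketch (mine, not the asker's program) of a consumer loop that also calls task_done() for the sentinel, which is what lets queue.join() return; it assumes the same Python 2 / Jython Queue module used above:
import threading
import Queue

def consumer(q):
    while True:
        item = q.get()
        if item is None:
            q.task_done()   # the sentinel counts toward the unfinished-task count too
            break
        # ... write the record to the target database here ...
        q.task_done()       # mark the real item as done

q = Queue.Queue()
workers = [threading.Thread(target=consumer, args=(q,)) for _ in range(4)]
for w in workers:
    w.start()
for i in range(10):
    q.put(i)
for _ in workers:
    q.put(None)             # one sentinel per consumer
for w in workers:
    w.join()
q.join()                    # returns, because every put() has a matching task_done()
print "Queue has been joined"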

Related

Why cannot join the processes after the work is done?

I have a Queue, a Manager and a Lock inside a class. The class has a run function that starts 6 Processes, and they all wait for an exit_flag to be True in order to end their jobs. However, I cannot end the jobs, because when I call the join method on each job, it blocks. The code looks as follows:
from multiprocessing import Process, Lock, Queue, Manager
from time import sleep

class MyClass():
    def __init__(self):
        self.q = Queue(maxsize=50)
        self.lock = Lock()
        manager = Manager()
        self.manager = manager.dict()

    def fill_queue(self, idx):
        while not self.exit():
            # do something
            result, result_type = self.perform_extraction()
            if result_type not in self.manager:
                self.manager[result_type] = []
            while self.q.full() and not self.exit():
                sleep(10)
            if self.exit():
                print('Exit filler')
                break
            self.lock.acquire()
            self.q.put((result, result_type))
            self.lock.release()
        else:
            print(f'queue filler {idx} ended')

    def empty_queue(self, idx):
        while not self.exit():
            if self.q.empty():
                continue
            self.lock.acquire()
            result, result_type = self.q.get()
            self.lock.release()
            result, id = self.perform_test(result)
            if result >= 0 and result not in self.manager[result_type]:
                self.manager[id] += [(result, result_type)]
                self.insert_to_database(result, result_type)  # inserts the value into a sqlite3 database
        else:
            print(f'worker {idx} ended')

    def run(self, n_workers):
        jobs = []
        for _ in range(2):
            p = Process(target=self.fill_queue, args=(_,))
            jobs.append(p)
        for _ in range(n_workers):
            p = Process(target=self.empty_queue, args=(_,))
            jobs.append(p)
        for job in jobs:
            job.start()
        for idx, job in enumerate(jobs):
            print(f'joining job {idx}')
            job.join()
            if not job.is_alive():
                print(f'closing job {idx}')
                job.close()
            else:
                print(f'job {idx} still alive')

if __name__ == '__main__':
    mc = MyClass()
    mc.run(n_workers=4)
    print('RUN ENDED!')
print('RUN ENDED!')
The manager is used to communicate between processes; when the criteria are met and I have X amount of elements in the manager, the self.exit() function returns True.
When I run this code, it gets stuck after printing joining job 0 and stays there forever, and I don't know why. If I add a timeout and set it to job.join(5) (5 is arbitrary, no real reason), it prints:
joining job 0
job 0 still alive
joining job 1
job 1 still alive
joining job 2
closing job 2
joining job 3
closing job 3
joining job 4
closing job 4
joining job 5
closing job 5
RUN ENDED!
And the code does not finish. I also tried calling job.terminate() if the job is still alive, and this threw an error telling me that some leaked folders could not be found. Does this mean that I have some zombie processes?
Why is this happening? What am I doing wrong?
EDIT: Added some logic on the interaction with the manager. I'm using the manager to add a couple of types of results and append all results of the same type to a list, so the dict structure is something like {result_type: [result_values]}. The reason for using the manager is to avoid storing duplicate results and to check when the algorithm meets the exit criteria.
Now, the exit criteria function looks like this:
def exit(self):
    for v in self.manager.values():
        if len(v) < 10:
            return False
    return True
So, all processes end when I have 10 items in each list of each type. There are 3 possible result types, so once each type has been added as a manager key it only needs to be filled, and when all 3 have at least 10 values (there could be more), all the jobs should end.
EDIT 2: Added some printing information to fill_queue and empty_queue functions. What is printed is:
worker 0 ended
worker 1 ended
queue filler 0 ended
worker 2 ended
queue filler 1 ended
joining job 0 --> *
worker 3 ended
so this usually (like always) prints before all workers print their "ended" statement, but it never joins the first job. The code is actually stuck on the very first call to the join method in the for idx, job in enumerate(jobs): loop.
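For reference, the multiprocessing programming guidelines describe a pitfall that produces exactly this symptom: a process that has put items on a Queue will not terminate, and therefore cannot be joined, until all of its buffered items have been flushed and consumed. A minimal sketch of that caveat (separate from the class above; the names are illustrative):
from multiprocessing import Process, Queue

def filler(q):
    # Enough data to overflow the underlying pipe buffer on most platforms.
    for _ in range(100):
        q.put('x' * 10000)
    print('filler done')   # this prints, but the process cannot exit yet

if __name__ == '__main__':
    q = Queue()
    p = Process(target=filler, args=(q,))
    p.start()
    p.join()               # hangs: nobody has drained the queue
    # Draining q with q.get() before joining (or only joining once the
    # consumers have emptied the queue) avoids the hang.
If the workers here stop via self.exit() while items are still sitting in self.q, the filler processes could be stuck in exactly this state, which would explain why joining job 0 never returns.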

How to stop child threads if keyboard exception occurs in python?

I'm facing a problem with threads: I have a function which creates 10 threads to do a task. If a keyboard interrupt occurs, those created threads keep executing, and I would like to stop them and revert the changes.
The following code snippet is my sample approach:
def store_to_db(self, keys_size, master_key, action_flag, key_status):
    threads = []
    for iteration in range(10):
        t = threading.Thread(target=self.store_worker, args=())
        t.start()
        threads.append(t)
    for t in threads:
        t.join()

def store_worker(self):
    print "DOING"
The idea to make this work is:
you need a "thread pool" whose threads keep checking whether their do_run attribute is falsy.
you need a "sentinel thread" outside that pool which checks the status of the threads in the pool and adjusts their do_run attribute on demand.
Example code:
import threading
import random
import time
import msvcrt as ms

def main_logic():
    # take 10 worker threads
    threads = []
    for i in range(10):
        t = threading.Thread(target=lengthy_process_with_brake, args=(i,))
        # start and append
        t.start()
        threads.append(t)
    # start the thread which allows you to stop all threads defined above
    s = threading.Thread(target=sentinel, args=(threads,))
    s.start()
    # join worker threads
    for t in threads:
        t.join()

def sentinel(threads):
    # this one runs as long as the threads in "threads" are running, watching for a key press
    while True:
        # threads that are still running
        running = [x for x in threads if x.isAlive()]
        # if a key is pressed
        if ms.kbhit():
            # tell threads to stop
            for t in running:
                t.do_run = False
        # if all threads stopped, exit the loop
        if not running:
            break
        # you don't want a high cpu load for nothing
        time.sleep(0.05)

def lengthy_process_with_brake(worker_id):
    # grab current thread
    t = threading.currentThread()
    # start msg
    print(f"{worker_id} STARTED")
    # exit condition
    zzz = random.random() * 20
    stop_time = time.time() + zzz
    # imagine an iteration here like "for item in items:"
    while time.time() < stop_time:
        # the brake
        if not getattr(t, "do_run", True):
            print(f"{worker_id} IS ESCAPING")
            return
        # the task
        time.sleep(0.03)
    # exit msg
    print(f"{worker_id} DONE")

main_logic()
This solution does not 'kill' the threads, it just tells them to stop iterating (or whatever it is they do).
EDIT:
I just noticed that "Keyboard exception" was in the title and not "any key". KeyboardInterrupt handling is a bit different; here is a good solution for that. The point is almost the same: you tell the thread to return when a condition is met.
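For the KeyboardInterrupt case, a minimal sketch (mine, not the linked answer) of the same do_run idea: catch the exception in the main thread and flip the flag on every worker.
import threading
import time

def worker(idx):
    t = threading.currentThread()
    while getattr(t, "do_run", True):
        time.sleep(0.1)           # the actual task would go here
    print(f"{idx} IS ESCAPING")

# workers run until told to stop; press Ctrl-C to stop them
threads = [threading.Thread(target=worker, args=(i,)) for i in range(10)]
for t in threads:
    t.start()
try:
    # keep the main thread alive so it receives the KeyboardInterrupt
    while any(t.is_alive() for t in threads):
        time.sleep(0.2)
except KeyboardInterrupt:
    for t in threads:
        t.do_run = False          # ask every worker to return
    for t in threads:
        t.join()
    print("all workers stopped")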

Why is this queue.join call blocking indefinitely?

I'm playing about with a personal project in Python 3.6 and I've run into the following issue, which results in my_queue.join() blocking indefinitely. Note this isn't my actual code but a minimal example demonstrating the issue.
import threading
import queue

def foo(stop_event, my_queue):
    while not stop_event.is_set():
        try:
            item = my_queue.get(timeout=0.1)
            print(item)  # Actual logic goes here
        except queue.Empty:
            pass
    print('DONE')

stop_event = threading.Event()
my_queue = queue.Queue()
thread = threading.Thread(target=foo, args=(stop_event, my_queue))
thread.start()

my_queue.put(1)
my_queue.put(2)
my_queue.put(3)
print('ALL PUT')
my_queue.join()
print('ALL PROCESSED')
stop_event.set()
print('ALL COMPLETE')
I get the following output (it's actually been consistent, but I understand that the output order may differ due to threading):
ALL PUT
1
2
3
No matter how long I wait I never see ALL PROCESSED output to the console, so why is my_queue.join() blocking indefinitely when all the items have been processed?
From the docs:
The count of unfinished tasks goes up whenever an item is added to the queue. The count goes down whenever a consumer thread calls task_done() to indicate that the item was retrieved and all work on it is complete. When the count of unfinished tasks drops to zero, join() unblocks.
You're never calling q.task_done() inside your foo function. Your foo function should look something like the worker example from the docs:
def worker():
    while True:
        item = q.get()
        if item is None:
            break
        do_work(item)
        q.task_done()
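Applied to the question's code, a minimal sketch of foo (using the asker's names) that calls task_done() once per successful get():
import queue

def foo(stop_event, my_queue):
    while not stop_event.is_set():
        try:
            item = my_queue.get(timeout=0.1)
            print(item)              # actual logic goes here
            my_queue.task_done()     # lets my_queue.join() unblock
        except queue.Empty:
            pass
    print('DONE')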

Python asyncio queue doesn't finish

I have one producer and 3 consumers. Each consumer waits to acquire a global lock before it can proceed. The program runs but doesn't finish or come out of the while loop. Could you tell me where it is going wrong?
import asyncio
import random

async def produce(queue, n):
    for x in range(1, n + 1):
        # produce an item
        print('producing {}/{}'.format(x, n))
        # simulate i/o operation using sleep
        await asyncio.sleep(random.random())
        item = str(x)
        # put the item in the queue
        await queue.put(item)
    # indicate the producer is done
    await queue.put(None)

async def consume(queue, lock):
    while True:
        # wait for an item from the producer
        item = await queue.get()
        if item is None:
            # the producer emits None to indicate that it is done
            break
        async with lock:
            # process the item
            print('consuming item {}...'.format(item))
            # simulate i/o operation using sleep
            await asyncio.sleep(0.3)

loop = asyncio.get_event_loop()
lock = asyncio.Lock()
queue = asyncio.Queue(loop=loop)
producer_coro = produce(queue, 10)
consumers = []
for _ in range(3):
    consumers.append(consume(queue, lock))
all_coroutines = []
all_coroutines.append(producer_coro)
all_coroutines.extend(consumers)
loop.run_until_complete(asyncio.wait(all_coroutines))
loop.close()
The problem is in the consumer:
if item is None:
    # the producer emits None to indicate that it is done
    break
The None sentinel is only picked up by a single consumer, and the rest are left waiting for the next value to arrive through the queue. A simple fix is to return the sentinel value back to the queue:
if item is None:
    # The producer emits None to indicate that it is done.
    # Propagate it to other consumers and quit.
    await queue.put(None)
    break
Alternatively, produce could enqueue as many None sentinels as there are consumers - but that would require the producer to know how many consumers there are, which is not always desirable.
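A short sketch of that alternative, assuming the producer is simply told how many consumers there are:
import asyncio
import random

async def produce(queue, n, n_consumers):
    for x in range(1, n + 1):
        await asyncio.sleep(random.random())
        await queue.put(str(x))
    # one sentinel per consumer, so every consumer sees exactly one None
    for _ in range(n_consumers):
        await queue.put(None)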
Adding to the answer #user4815162342 provided, try:
if item is None and queue.qsize() == 0:
    await queue.put(None)
    break
I had an issue where the consumer also had to queue.put() to the same queue to rerun the function, and it hung at the end without both conditions.

Python Fire dynamic urls using multithreading

I'm new to Python threading, and I've gone through multiple posts, but I really did not understand how to use it. However, I tried to complete my task, and I want to check if I'm doing it with the right approach.
The task is:
Read a big CSV containing around 20K records, fetch the id from each record, and fire an HTTP API call for each record of the CSV.
import csv
import time
import threading
import requests

t1 = time.time()
file_data_obj = csv.DictReader(open(file_path, 'rU'))
threads = []
for record in file_data_obj:
    apiurl = "https://www.api-server.com?id=" + record.get("acc_id", "")
    thread = threading.Thread(target=requests.get, args=(apiurl,))
    thread.start()
    threads.append(thread)
t2 = time.time()
for thread in threads:
    thread.join()
print("Total time required to process a file - {} Secs".format(t2 - t1))
As there are 20K records, would it start 20K threads, or will the OS/Python handle it? If yes, can we restrict it?
How can I collect the responses returned by requests.get?
Would t2 - t1 really give me the time required to process the whole file?
As there are 20K records, would it start 20K threads? OR OS/Python will handle it? If yes, can we restrict it?
Yes - it will start a thread for each iteration. The maximum number of threads depends on your OS.
How can I grab the response returned by requests.get?
If you want to use the threading module only, you'll have to make use of a Queue. Threads return None by design, hence you'll have to implement a line of communication between the thread and your main loop yourself.
from queue import Queue
from threading import Thread
import requests

# A queue that the threads put their responses on
q = Queue()

def return_get(q, apiurl):
    q.put(requests.get(apiurl))

threads = []
for record in file_data_obj:
    apiurl = "https://www.api-server.com?id=" + record.get("acc_id", "")
    t = Thread(target=return_get, args=(q, apiurl))
    t.start()
    threads.append(t)

for thread in threads:
    thread.join()

while not q.empty():
    r = q.get()  # Fetches the first item on the queue
    print(r.text)
An alternative is to use a worker pool.
from concurrent.futures import ThreadPoolExecutor
import requests

threads = []
pool = ThreadPoolExecutor(10)

# Submit work to the pool
for record in file_data_obj:
    apiurl = "https://www.api-server.com?id=" + record.get("acc_id", "")
    t = pool.submit(requests.get, apiurl)
    threads.append(t)

for t in threads:
    print(t.result())
You can use ThreadPoolExecutor
Retrieve a single page and report the URL and contents:
import concurrent.futures
import urllib.request

def load_url(url, timeout):
    with urllib.request.urlopen(url, timeout=timeout) as conn:
        return conn.read()
Create a pool executor with N workers:
with concurrent.futures.ThreadPoolExecutor(max_workers=N_workers) as executor:
    # Start the load operations and mark each future with its URL
    future_to_url = {executor.submit(load_url, url, 60): url for url in URLS}
    for future in concurrent.futures.as_completed(future_to_url):
        url = future_to_url[future]
        try:
            data = future.result()
        except Exception as exc:
            print('%r generated an exception: %s' % (url, exc))
        else:
            print('%r page is %d bytes' % (url, len(data)))
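Tying this back to the question, here is a hedged sketch (the endpoint and field names are taken from the question, the rest is illustrative) that bounds the number of threads with max_workers and collects every response from requests.get:
import concurrent.futures
import csv
import requests

def call_api(acc_id):
    # endpoint taken from the question; adjust to the real API
    return requests.get("https://www.api-server.com", params={"id": acc_id})

with open(file_path, newline='') as f:   # file_path as in the question
    records = list(csv.DictReader(f))

with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
    futures = [executor.submit(call_api, r.get("acc_id", "")) for r in records]
    for future in concurrent.futures.as_completed(futures):
        response = future.result()
        print(response.status_code, response.text[:80])
A pool of 10 workers keeps at most 10 requests in flight at a time instead of starting 20K threads at once.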
