Python multiprocessing update size of iterable - python-3.x

I have the following:
from multiprocessing import Pool

def process_elements(index_of_data_inputs):
    <process>
    if <condition>:
        # i would like to change the size of data_inputs

if __name__ == '__main__':
    pool = Pool()  # Create a multiprocessing Pool
    pool.map(process_elements, range(0, len(data_inputs)))  # process data_inputs iterable with pool
How can I change the size of data_inputs, and with it the number of times process_elements is called?
The work I would like to parallelize is:
i = 0
while i < len(elements):
    new_elems = process_some_elements(x, y)
    if len(new_elems) > 0:
        elements = elements + new_elems
    i += 1

Consider a simple example of communication between processes with the multiprocessing module in Python:
import multiprocessing
import queue
import random

def process_elements(num, comq):
    val = random.random()
    if val > 0.5:
        comq.put(1)
    return num, int(1000 * val)

if __name__ == '__main__':
    # initial data
    numbers = list(range(10))
    # data structure for communication between multiple processes
    m = multiprocessing.Manager()
    q = m.Queue()
    with multiprocessing.Pool(processes=4) as pool:
        # get answer for original data
        ans = pool.starmap(process_elements, [(num, q) for num in numbers])
        print(numbers)
        print(ans)
        # create additional data based on the answer for initial data
        new_numbers = numbers[-1:]
        try:
            while True:
                new_numbers.append(new_numbers[-1] + q.get_nowait())
        except queue.Empty:
            pass
        # get answer for additional data
        new_ans = pool.starmap(process_elements, [(num, q) for num in new_numbers[1:]])
        print(new_numbers)
        print(new_ans)
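If the goal is the original pattern, where the work list keeps growing until no worker discovers new elements, the same idea can be wrapped in a loop around pool.map. A minimal sketch; process_element here is a hypothetical stand-in for the asker's process_some_elements, since that function isn't shown:
import multiprocessing
import random

def process_element(elem):
    # Stand-in for the real work: occasionally discover one new element
    if random.random() > 0.7:
        return [elem + 1000]
    return []

if __name__ == '__main__':
    elements = list(range(10))
    pending = elements[:]  # elements not yet processed
    with multiprocessing.Pool(processes=4) as pool:
        while pending:
            # Process the current batch in parallel and gather any new elements
            results = pool.map(process_element, pending)
            pending = [e for new in results for e in new]
            elements.extend(pending)
    print('processed', len(elements), 'elements in total')
Each round maps only over the not-yet-processed elements, so process_element is called exactly once per element, however many are discovered along the way.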

Related

Why is serial code faster than concurrent.futures in this case?

I am using the following code to process some pictures for my ML project and I would like to parallelize it.
import multiprocessing as mp
import concurrent.futures

def track_ids(seq):
    '''The func is so big I can not put it here'''
    ood = {}
    for i in seq:
        # I load around 500 images and process them
        ood[i] = some Value
    return ood

seqs = []
for seq in range(1, 10):  # len(seqs)+1):
    seq = txt + str(seq)
    seqs.append(seq)
    # serial call of the function
    track_ids(seq)

# parallel call of the function
with concurrent.futures.ProcessPoolExecutor(max_workers=mp.cpu_count()) as ex:
    ood_id = ex.map(track_ids, seqs)
If I run the code serially it takes 3.0 minutes, but in parallel with concurrent.futures it takes 3.5 minutes. Can someone please explain why that is, and suggest a way to solve the problem?
By the way, I have 12 cores.
Thanks
Here's a brief example of how one might go about profiling multiprocessing code vs serial execution:
import multiprocessing as mp
from cProfile import Profile
from pstats import Stats
import concurrent.futures

def track_ids(seq):
    '''The func is so big I can not put it here'''
    ood = {}
    for i in seq:
        # I load around 500 images and process them
        ood[i] = some Value
    return ood

def profile_seq():
    p = Profile()  # one and only profiler instance
    p.enable()
    seqs = []
    for seq in range(1, 10):  # len(seqs)+1):
        seq = txt + str(seq)
        seqs.append(seq)
        # serial call of the function
        track_ids(seq)
    p.disable()
    return Stats(p), seqs

def track_ids_pr(seq):
    p = Profile()  # profile the child tasks
    p.enable()
    retval = track_ids(seq)
    p.disable()
    return (Stats(p, stream="dummy"), retval)

def profile_parallel():
    p = Profile()  # profile stuff in the main process
    p.enable()
    with concurrent.futures.ProcessPoolExecutor(max_workers=mp.cpu_count()) as ex:
        retvals = ex.map(track_ids_pr, seqs)
    p.disable()
    s = Stats(p)
    out = []
    for ret in retvals:
        s.add(ret[0])
        out.append(ret[1])
    return s, out

if __name__ == "__main__":
    stat, retval = profile_parallel()
    stat.print_stats()
EDIT: Unfortunately I found out that pstats.Stats objects cannot be sent over a multiprocessing.Queue as-is, because they are not picklable (and pickling is needed for the operation of concurrent.futures). A Stats object normally stores a reference to a file for the purpose of writing statistics to that file, and if none is given, it grabs a reference to sys.stdout by default. We don't actually need that reference until we want to print out the statistics, so we can give it a temporary placeholder value to prevent the pickle error, then restore an appropriate value later. The following example should be copy-paste-able and run just fine, unlike the pseudocode-ish example above.
from multiprocessing import Queue, Process
from cProfile import Profile
from pstats import Stats
import sys

def isprime(x):
    for d in range(2, int(x**.5) + 1):  # include the square root itself
        if x % d == 0:
            return False
    return True

def foo(retq):
    p = Profile()
    p.enable()
    primes = []
    max_n = 2**20
    for n in range(3, max_n):
        if isprime(n):
            primes.append(n)
    p.disable()
    retq.put(Stats(p, stream="dummy"))  # Dirty hack: set `stream` to something picklable, then override later

if __name__ == "__main__":
    q = Queue()
    p1 = Process(target=foo, args=(q,))
    p1.start()
    p2 = Process(target=foo, args=(q,))
    p2.start()
    s1 = q.get()
    s1.stream = sys.stdout  # restore the original file
    s2 = q.get()
    # s2.stream can stay as-is: when one `Stats` object is added to another, the `stream` gets thrown away anyway.
    s1.add(s2)  # add up the stats from both child processes
    s1.print_stats()  # s1.stream gets used here, but not before. Provide a file instead of sys.stdout to write there.
    p1.join()
    p2.join()

Producer Consumer message sharing not working in multiprocessing

I am trying to run a scenario with a producer that captures frames from a webcam and puts them in a queue,
and a consumer that reads images from the input queue, does some processing, and puts the output image in an outgoing queue.
The issue is that the consumer's read from the queue is not blocking, though ideally it should be. Also, when it reads a value from the queue, the size is always a constant 128, which is wrong: I am sure the size of the image I am putting in the queue is far greater.
from __future__ import print_function
import multiprocessing
import time
import logging
import sys
import cv2

class Consumer(multiprocessing.Process):
    def __init__(self, incoming_q, outgoing_q):
        multiprocessing.Process.__init__(self)
        self.outgoing_q = outgoing_q
        self.incoming_q = incoming_q

    def run(self):
        proc_name = self.name
        print(f"{proc_name} - inside process_feed..starting")
        while True:
            #print(f"size of incoming_q=>{self.incoming_q.qsize()}")
            try:
                #print(f"{proc_name} - size of B incoming_q=>{self.incoming_q.qsize()}")
                image_np = self.incoming_q.get(True)
                size_of_img = sys.getsizeof(image_np)
                #print(f"{proc_name} - size of A incoming_q=>{self.incoming_q.qsize()}")
                if size_of_img > 128:
                    print(f"{proc_name} - size image=>{size_of_img}")
                    time.sleep(1)
                    self.outgoing_q.put_nowait(image_np)
            except:
                pass
        print("inside process_feed..ending")

class Producer(multiprocessing.Process):
    def __init__(self, incoming_q, outgoing_q):
        multiprocessing.Process.__init__(self)
        self.incoming_q = incoming_q
        self.outgoing_q = outgoing_q

    def run(self):
        proc_name = self.name
        print("inside capture_feed")
        stream = cv2.VideoCapture(0)
        try:
            counter = 0
            while True:
                counter += 1
                if counter == 1:
                    if not self.incoming_q.full():
                        (grabbed, image_np) = stream.read()
                        size_of_img = sys.getsizeof(image_np)
                        print(f"{proc_name}........B.......=>{self.incoming_q.qsize()}")
                        print(f"{proc_name} - size image=>{size_of_img}")
                        self.incoming_q.put(image_np)
                        print(f"{proc_name}........A.......=>{self.incoming_q.qsize()}")
                    counter = 0
                try:
                    image_np = self.outgoing_q.get_nowait()
                    logging.info("reading value for o/p")
                    cv2.imshow('object detection', image_np)
                except:
                    pass
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    break
        finally:
            stream.release()
            cv2.destroyAllWindows()
        print("inside capture_feed..ending")

if __name__ == '__main__':
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    stream = cv2.VideoCapture(0)
    incoming_q = multiprocessing.Queue(maxsize=100)
    outgoing_q = multiprocessing.Queue(maxsize=100)
    logging.info("before start of thread")
    max_process = 1
    processes = []
    processes.append(Producer(incoming_q, outgoing_q))
    for i in range(max_process):
        p = Consumer(incoming_q, outgoing_q)
        p.daemon = True
        processes.append(p)
    logging.info("inside main thread..middle")
    for p in processes:
        p.start()
    logging.info("inside main thread..ending")
    logging.info("waiting in main thread too....")
    logging.info("waiting in main thread finished....")
    for p in processes:
        p.join()
    logging.info("inside main thread..ended")
I was able to figure out the issue with my approach: I had missed the whole concept of pickling (serialization).
I changed my code to serialize the numpy array before writing it to the queue and deserialize it after reading. The code started working as expected.
Also, printing 128 as the sizeof of the np array is fine; I was misinterpreting that number.
import pickle
import numpy as np

def serialize_ndarray(arr: np.ndarray):
    serialized = pickle.dumps(arr)
    return serialized

def deserialize_ndarray(string):
    data = pickle.loads(string)
    return data
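For context, a minimal sketch exercising the helpers above, with a dummy frame standing in for a webcam capture:
import multiprocessing
import numpy as np

if __name__ == '__main__':
    q = multiprocessing.Queue()
    frame = np.zeros((480, 640, 3), dtype=np.uint8)  # dummy "image"
    q.put(serialize_ndarray(frame))               # producer side: serialize before put
    restored = deserialize_ndarray(q.get(True))   # consumer side: blocking get, then deserialize
    print(restored.shape, restored.dtype)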

Using multiprocessing inside a function

I want to take the working code from below and put it into a function.
import multiprocessing as mp

def parameters(x, n):
    for i in x:
        yield (i, n)

def power(a):
    x, n = a
    return x**n

if __name__ == '__main__':
    p = [i for i in range(1000)]
    p = parameters(p, 2)
    agents = 4
    chunk = 10
    with mp.Pool(processes=agents) as pool:
        o = pool.map(power, p, chunksize=chunk)
    print(o)
So that I can call it whenever I want. I tried doing something like this:
import multiprocessing as mp

def parameters(x, n):
    for i in x:
        yield (i, n)

def power(a):
    x, n = a
    return x**n

def calculate(s, n):
    p = [i for i in range(s)]
    p = parameters(p, n)
    agents = 4
    chunk = 10
    with mp.Pool(processes=agents) as pool:
        o = pool.map(power, p, chunksize=chunk)
    return o

print(calculate(1000, 2))
However, this does not work at all: it tells me that another process has started before one has ended. But the code above did work. Is there a way to properly put this code into a function? If not with this setup, then with what setup?
Make sure to protect code that should only run in the main process with if __name__ == '__main__':. When child processes are started by spawning rather than forking (the default on Windows), each child re-imports the main module, so an unguarded top-level call to calculate would try to create new processes recursively. This code works:
import multiprocessing as mp

def parameters(x, n):
    for i in x:
        yield (i, n)

def power(a):
    x, n = a
    return x**n

def calculate(s, n):
    p = [i for i in range(s)]
    p = parameters(p, n)
    agents = 4
    chunk = 10
    with mp.Pool(processes=agents) as pool:
        o = pool.map(power, p, chunksize=chunk)
    return o

if __name__ == '__main__':
    print(calculate(1000, 2))
Without the if, the following error is raised:
RuntimeError:
An attempt has been made to start a new process before the
current process has finished its bootstrapping phase.
This probably means that you are not using fork to start your
child processes and you have forgotten to use the proper idiom
in the main module:
if __name__ == '__main__':
freeze_support()
...
The "freeze_support()" line can be omitted if the program
is not going to be frozen to produce an executable.
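The guard belongs in whichever module is run as the entry point; calculate itself stays importable from anywhere. A minimal sketch of a separate driver script, assuming the module above is saved as calc.py (the file name is hypothetical):
# main.py - hypothetical driver; assumes the guarded module above is saved as calc.py
from calc import calculate

if __name__ == '__main__':
    # Children spawned by the pool re-import calc.py safely, because
    # the pool is only created inside the calculate() function.
    print(calculate(1000, 2))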

How can I speed up this loop using multiprocessing or multithreading?

I am afraid that I'm not doing the multithreading thing the right way, so I came here in search of wisdom. I have two arrays of addresses, and I have to check whether each address in the first array exists in the second array; in case it doesn't, I look for the most similar address in array 2.
The array that has the "official" addresses is called directory, and the array that I need to validate is called look_address.
The code goes as follows:
import pandas as pd
import numpy as np
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
from datetime import datetime, timedelta
import threading
import queue

class myThread(threading.Thread):
    def __init__(self, threadID, name, q):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.q = q

    def run(self):
        print(f"starting {self.name}")
        process_data(self.name, self.q)
        print(f"ending {self.name}")

locs = []
ratios = {}

def process_data(threadName, q):
    while not exitFlag:
        queueLock.acquire()
        if not workQueue.empty():
            d = q.get()
            queueLock.release()
            d = d.strip()
            if directory.isin([d]).any():
                locs.append(d)
            else:
                pos = process.extract(d, directory.values, scorer=fuzz.ratio, limit=50)
                ratios[d] = pos
        else:
            queueLock.release()

threadlist = ["T-1","T-2","T-3","T-4","T-5","T-6","T-7","T-8","T-9","T-10"]
nameList = look_address
queueLock = threading.Lock()
workQueue = queue.Queue(len(nameList)+1)
threads = []
threadID = 1
exitFlag = 0
for name in threadlist:
    thread = myThread(threadID, name, workQueue)
    thread.start()
    threads.append(thread)
    threadID += 1
queueLock.acquire()
for addr in nameList:
    workQueue.put(addr)
queueLock.release()
total_steps = len(workQueue.queue)
tot_sec = 0
t0 = datetime.now()
while not workQueue.empty():
    total_seconds = (datetime.now()-t0).total_seconds()
    if total_seconds == 0:
        total_seconds = 1e-8
    progress = 1 - len(workQueue.queue)/total_steps
    tot_sec += total_seconds
    print("\rProgreso: {pr:.2f}% || Buenas/Errores: {gb}/{bd}".format(
        pr=progress*100,
        its=1/total_seconds,
        elap=timedelta(seconds=np.round(tot_sec)),
        gb=len(locs),
        bd=len(errors),
        eta=timedelta(seconds=np.round(total_seconds*(total_steps-len(workQueue.queue))))), end="", flush=True)
exitFlag = 1
for t in threads:
    t.join()
print("\nExiting Main Thread")
Each request to process.extract takes around 25 s (I did a %timeit). Now, the script above doesn't seem to speed up the data processing: it has been running for about 2 hours and has progressed by around 4.29%.
My two questions are:
Is the implementation of multithreading correct?
How can I speed up the data processing? Maybe run this on a VPS on Amazon or Google?
I want to understand why this is so slow and how I can speed things up.
EDIT: Changed from:
if not workQueue.empty():
    d = q.get()
    d = d.strip()
    if directory.isin([d]).any():
        locs.append(d)
    else:
        pos = process.extract(d, directory.values, scorer=fuzz.ratio, limit=50)
        ratios[d] = pos
    queueLock.release()
to:
if not workQueue.empty():
    d = q.get()
    queueLock.release()
    d = d.strip()
    if directory.isin([d]).any():
        locs.append(d)
    else:
        pos = process.extract(d, directory.values, scorer=fuzz.ratio, limit=50)
        ratios[d] = pos
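No answer is recorded here, but one point worth noting: process.extract is CPU-bound pure-Python work, so threads gain nothing under the GIL, while a process pool can actually use all 12 cores. A minimal sketch under the assumption that directory is a pandas Series of official addresses and look_address is the list to validate (both names are from the question):
import multiprocessing as mp
from fuzzywuzzy import fuzz, process

def match_address(d):
    """Check one address; return (address, None) on an exact hit,
    otherwise (address, list of fuzzy candidates)."""
    d = d.strip()
    if directory.isin([d]).any():
        return d, None
    # The CPU-heavy fuzzy search now runs in a worker process, outside the GIL
    return d, process.extract(d, directory.values, scorer=fuzz.ratio, limit=50)

if __name__ == '__main__':
    # Workers inherit `directory` when forking; on Windows (spawn) it must be
    # created at module level so each child rebuilds it on import.
    with mp.Pool() as pool:  # defaults to one worker per core
        results = pool.map(match_address, look_address)
    locs = [d for d, pos in results if pos is None]
    ratios = {d: pos for d, pos in results if pos is not None}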

cx_freeze creates multiple instances of program

I'm trying to compile some Python 3.3 code using cx_freeze and, after compiling, the resulting test.exe file will create an indefinite number of instances of the program, causing my Windows 7 system to become unstable. It works just as intended when run in Python, but once compiled it causes issues. Here are my imports in my main script:
import sys
from multiprocessing import Pool, Queue
from threading import Thread
from time import sleep, time
from inspect import getmembers
from PyQt5 import QtWidgets, QtCore, QtGui
from main_ui import Ui_MainWindow # Generated UI from pyuic, imports
# QtWidgets, QtCore, and QtGui
from devices import Device1, Device2 # Both are serial.Serial objects
The setup.py script:
import sys
from cx_Freeze import setup, Executable

product_name = 'Product Name'
path_platforms = (r"C:\Python33\Lib\site-packages\PyQt5\plugins\platforms\qwindows.dll",
                  r"platforms\qwindows.dll")
includes = ['PyQt5.QtWidgets', 'PyQt5.QtCore', 'PyQt5.QtGui']
include_files = [path_platforms]
excludes = ['_gtkagg', '_tkagg', 'bsddb', 'curses', 'email', 'pywin.debugger',
            'pywin.debugger.dbgcon', 'pywin.dialogs', 'tcl',
            'Tkconstants', 'Tkinter']
packages = ['os']
path = []
bdist_msi_options = {'add_to_path': False}
build_exe_options = {'includes': includes,
                     'include_files': include_files,
                     'excludes': excludes,
                     'packages': packages,
                     'path': path,
                     'silent': True}
base = None
if sys.platform == 'win32':
    base = 'Win32GUI'
exe = Executable(script='main.pyw',
                 base=base,
                 targetName='test.exe')
setup(name=product_name,
      version='1.0',
      description='The Test Program',
      executables=[exe],
      options={'bdist_msi': bdist_msi_options, 'build_exe': build_exe_options})
And when I run python setup.py build, the following error occurs:
Missing modules:
? System imported from serial.serialcli
? TERMIOS imported from serial.serialposix
? __main__ imported from bdb
? _gestalt imported from platform
? _posixsubprocess imported from subprocess
? clr imported from serial.serialcli
Despite these errors, it still generates a test.exe file. When I execute it, it generates a seemingly infinite number of windows and the only way to stop it is to hard reset the computer. Again, the main script works just fine running under Python, but fails once compiled. Any help would be greatly appreciated!
EDIT: As requested, here is my main script:
import sys
from multiprocessing import Pool, Queue, freeze_support
from threading import Thread
from time import sleep, time
from inspect import getmembers
from PyQt5 import QtWidgets, QtCore, QtGui
from main_ui import Ui_MainWindow  # Generated by pyuic
import parts  # Imports time.sleep, datetime.datetime, and threading.Thread
from devices import GwPowerSupply, DataQ  # Imports time.sleep and serial.Serial
# GwPowerSupply is a serial.Serial object to handle communications with a GwInstek PSP-603
# DataQ is also a serial.Serial object to handle communications with a DataQ-155

def file_logger(message):
    logging = True
    if logging:
        with open('log.txt', 'a') as f:
            f.write('{}: {}\n'.format(time(), message))

def compute():
    """
    A function, designed as an independent process, to gather data from the DataQ and Power Supply
    input queues, convert to human values, and output as a single queue
    """
    compute.running = True
    compute.paused = False
    # The initial dict to pass on to the queue
    data_dict = {'upstream': 0, 'downstream': 0, 'high_flow': 0, 'low_flow': 0, 'voltage': 0, 'current': 0, 'offset': 0}
    while compute.running:
        if compute.paused or compute.input_queue.empty():
            continue
        # Get the raw voltage data and convert to pressure/flow
        analog_input = compute.input_queue.get()
        file_logger('Compute received {}'.format(analog_input))
        if analog_input is None:
            continue
        # Four items come from the DataQ for pressures and flow
        if len(analog_input) == 4:
            # Pressure transducers are both 1-11V, 0-500 PSI
            if isinstance(analog_input[0], (float, int)):
                data_dict['upstream'] = (analog_input[0]-1) * 50
            if isinstance(analog_input[1], (float, int)):
                data_dict['downstream'] = (analog_input[1]-1) * 50
            # High Flow is 0-5V, 0-1000 Liters/min
            if isinstance(analog_input[2], (float, int)):
                data_dict['high_flow'] = (analog_input[2]*200) * .035147  # Convert SLM to SCFM
            # Low Flow is 0-5V, 0-5 Liters/min
            if isinstance(analog_input[3], (float, int)):
                data_dict['low_flow'] = analog_input[3] * 1000  # Convert SLM to SCCM
        # Two items are from the power supply for voltage and current
        elif len(analog_input) == 2:
            if isinstance(analog_input[0], (float, int)):
                data_dict['voltage'] = analog_input[0]
            if isinstance(analog_input[1], (float, int)):
                data_dict['current'] = analog_input[1]
        # A single item is the offset from the Valve program
        elif len(analog_input) == 1:
            data_dict['offset'] = analog_input[0]
        else:
            return
        compute.output_queue.put(data_dict)
        file_logger('Compute put out {}'.format(data_dict))

def data_q_producer():
    """
    A function, designed as an independent process, to gather data from the DataQ and feed it
    to the computing process
    """
    # Initialize COM port
    data_q = DataQ('COM4')
    data_q.start()
    # Continuously gather data
    while True:
        if not data_q.paused and not data_q.stopped:
            # Gather data and put to queue, either for response or normal
            file_logger('Getting Data from DataQ')
            if data_q.response:
                data = data_q.get_response_data()
                data_q_producer.response_queue.put(data)
            else:
                data = data_q.get_data()
                data_q_producer.queue.put(data)
            file_logger('Got {} from DataQ'.format(data))
        # If a command is received, such as to energize a relay, handle it
        if not data_q_producer.output.empty():
            output = data_q_producer.output.get()
            file_logger('Sending {} to DataQ'.format(output))
            # Strings are to stop, run response, etc.
            if isinstance(output, str):
                if output == 'stop':
                    data_q.set_output(0, 0, 0, 0)
                    data_q.stop()
                    data_q.close()
                    data_q_producer.queue.put([])
                    return
                elif output == 'start resp':
                    data_q.response = True
                    data_q.pause()
                    data_q.start_resp()
                    data_q.start()
                elif output == 'stop resp':
                    print('Stopping Response Test')
                    data_q.pause()
                    data_q.setup()
                    data_q.start()
                    data_q.response = False
            # If a single float is received, it is the new leakage offset.
            elif isinstance(output, float):
                data_q_producer.queue.put([output, ])
            # A list is to set the digital outputs
            elif isinstance(output, list):
                data_q.set_output(output[0], output[1], output[2], output[3])

def pps_producer():
    """
    A function, designed as an independent process, to gather data from the Power Supply and feed it
    to the computing process
    """
    # Initialize COM port
    pps = GwPowerSupply('COM1')
    pps.set_relay(True)
    # Continuously gather voltage and current readings
    while True:
        file_logger('Getting Data from Power Supply')
        voltage = pps.get_value('V')
        current = pps.get_value('A')
        file_logger('Got {}V, {}A from power supply'.format(voltage, current))
        pps_producer.queue.put([voltage, current])
        # If a command is received to change voltage, current, etc., handle it
        if not pps_producer.output.empty():
            output = pps_producer.output.get()
            file_logger('Got {} for Power Supply'.format(output))
            # Bool is to set the relay on or off
            if isinstance(output, bool):
                pps.set_relay(output)
            # String is primarily to stop the power supply (set the relay to Off)
            elif isinstance(output, str) and output == 'stop':
                pps.set_relay(False)
                pps.close()
                pps_producer.queue.put([])
                return
            # A tuple is changing a power supply output setting
            else:
                pps.set_value(output[0], output[1])

def pool_init(input_queue, output_queue, data_q_out, pps_out, response_queue):
    """
    Initializes the above functions with external queue variables.
    See http://stackoverflow.com/a/3843313/852994 for more details
    """
    compute.output_queue = output_queue
    compute.input_queue = input_queue
    data_q_producer.queue = input_queue
    data_q_producer.output = data_q_out
    data_q_producer.response_queue = response_queue
    pps_producer.queue = input_queue
    pps_producer.output = pps_out

class MainGui(QtWidgets.QMainWindow):
    """
    The main interface builder for the program
    """
    def __init__(self):
        # Initialize MainGui and create the window
        super(MainGui, self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # The current valve part being tested
        self.valve = None
        # Disables the 'Energize' button when running ATP
        self.auto_mode = False
        # The current measured leakage offset based on the current run's test
        self.measured_offset = 0
        # The leakage offset table based on initial testing
        # #TODO: retest offsets and go to 450 PSI
        self.offset_table = ((-50, 30), (0, 31), (50, 44), (100, 37), (150, 41), (200, 44),
                             (250, 49), (300, 54), (350, 63), (400, 72), (450, 81))
        # A table of calculated leakage offsets to give single-incremental points based on the
        # above tested values
        self.calculated_offsets = []
        for i in range(len(self.offset_table)-1):
            for x in range(self.offset_table[i][0], self.offset_table[i-1][0]):
                x1 = self.offset_table[i][0]
                x2 = self.offset_table[i+1][0]
                y1 = self.offset_table[i][1]
                y2 = self.offset_table[i+1][1]
                y = ((x-x1) * (y2-y1)) / (x2-x1) + y1
                self.calculated_offsets.append(y)
        # Connect UI clicks and presses to commands
        self.ui.btn_all.clicked.connect(lambda: self.select_all_tests(True))
        self.ui.btn_none.clicked.connect(lambda: self.select_all_tests(False))
        self.ui.comboBox.currentTextChanged.connect(self.select_part)
        self.ui.btn_energize.clicked.connect(self.energize)
        self.ui.btn_start.clicked.connect(self.start_tests)
        self.ui.btn_skip.clicked.connect(self.skip_press)
        # Select the initial part
        self.select_part()
        # Initialize queues
        self.input_queue = Queue(10)
        self.output_queue = Queue(10)
        self.data_q_out = Queue(10)
        self.pps_out = Queue(10)
        self.response_queue = Queue(400)
        self.test_queue = Queue(5)
        self.log_queue = Queue(10)
        # Initialize timer to update on-screen values
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.update_data)
        self.timer.start(25)
        # Initialize process pool
        self.pool = Pool(processes=4, initializer=pool_init,
                         initargs=(self.input_queue, self.output_queue, self.data_q_out,
                                   self.pps_out, self.response_queue))
        # Place the data producing functions into the process pool
        self.pool.apply_async(func=data_q_producer)
        self.pool.apply_async(func=compute)
        self.pool.apply_async(func=pps_producer)

    def closeEvent(self, *args, **kwargs):
        # Verify COM ports are closed properly before exiting
        file_logger('Attempting Exit')
        self.timer.stop()
        self.test_queue.put('ESC')
        self.data_q_out.put('stop')
        self.pps_out.put('stop')
        sleep(.5)
        file_logger('Exited')

    def keyPressEvent(self, event):
        file_logger('Keypress Event: {}'.format(event.key()))
        # Capture different key presses for different functions
        if event.key() == QtCore.Qt.Key_Return:
            self.test_queue.put(float(self.ui.lineEdit.text()))
        elif event.key() == QtCore.Qt.Key_Backspace:
            self.test_queue.put('ESC')
        elif event.key() == QtCore.Qt.Key_S:
            self.test_queue.put('SKIP')

    def skip_press(self):
        file_logger('Skip press Event')
        self.test_queue.put('SKIP')

    def print_to_log(self, text):
        # Enter a line into the log with auto-scrolling
        self.ui.log_output.append(text)
        cursor = self.ui.log_output.textCursor()
        QtGui.QTextCursor.movePosition(cursor, QtGui.QTextCursor.End)
        self.ui.log_output.setTextCursor(cursor)

    def update_data(self):
        # Update status boxes
        if not self.output_queue.empty():
            file_logger('Update Interface Event')
            data_dict = self.output_queue.get()
            # Before calculating corrected leakage, get the offset
            self.measured_offset = data_dict['offset']
            # Modify low flow with offset
            data_dict['low_flow'] -= self.measured_offset - self.calculated_offsets[int(data_dict['upstream'])]
            # Update the status on the UI
            self.ui.upstream_pressure.setText('{:.1f}'.format(data_dict['upstream']))
            self.ui.downstream_pressure.setText('{:.1f}'.format(data_dict['downstream']))
            self.ui.flow_sensor.setText('{:.2f}'.format(data_dict['high_flow']))
            self.ui.leakage_sensor.setText('{:.0f}'.format(data_dict['low_flow']))
            self.ui.voltage.setText('{:.2f}'.format(data_dict['voltage']))
            self.ui.current.setText('{:.3f}'.format(data_dict['current']))
            # Pass the values on to the test queue so the ATP process can use them
            self.test_queue.put(data_dict)
            if self.test_queue.full():
                self.test_queue.get()
            file_logger('Updated Interface')
        # Update log
        if not self.log_queue.empty():
            text = self.log_queue.get()
            file_logger('Printing to log: {}'.format(text))
            # For the countdown timer, delete the previous line, but not the first count!
            if isinstance(text, int) and text != 1:
                cursor = self.ui.log_output.textCursor()
                QtGui.QTextCursor.movePosition(cursor, QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor)
                QtGui.QTextCursor.movePosition(cursor, QtGui.QTextCursor.StartOfLine, QtGui.QTextCursor.KeepAnchor)
                QtGui.QTextCursor.removeSelectedText(cursor)
                # Delete last newline character so the number doesn't print on the next line
                QtGui.QTextCursor.deletePreviousChar(cursor)
            self.print_to_log(str(text))
            file_logger('Printed to log: {}'.format(text))

    def select_all_tests(self, state=True):
        # Select (or deselect if state is False) all tests
        for i in range(len(self.ui.listWidget)):
            self.ui.listWidget.item(i).setSelected(state)

    def select_part(self):
        # Update test list with a new part every time the combo box is changed
        part_name = self.ui.comboBox.currentText()
        for name, obj in getmembers(parts):
            # Get the objects only labeled as 'Part'
            if 'Part' in name:
                # Get the object with a part name that corresponds to the selected part
                if part_name in obj().part_name:
                    self.valve = obj()
                    # Clear out the current contents of the test list
                    self.select_all_tests(False)
                    self.ui.listWidget.clear()
                    # Update test list with new tests
                    for test in self.valve.procedure:
                        self.ui.listWidget.addItem(test[0])
                    # Pre-select all tests
                    self.select_all_tests()
                    # Set coils up properly; if there is only one coil in the unit, disable the second coil
                    self.ui.coil_1.setChecked(True)
                    if self.valve.coils < 2:
                        self.ui.coil_2.setChecked(False)
                        self.ui.coil_2.setEnabled(False)
                    else:
                        self.ui.coil_2.setEnabled(True)
                        self.ui.coil_2.setChecked(True)
                    return

    def energize(self):
        # Energize function for the energize button, but only if not running any test!
        if self.auto_mode:
            pass
        else:
            if self.ui.btn_energize.isChecked():
                coil1 = int(self.ui.coil_1.checkState() / 2)
                coil2 = int(self.ui.coil_2.checkState() / 2)
                self.data_q_out.put([coil1, coil2, 2, 2])
            else:
                self.data_q_out.put([0, 0, 2, 2])

    def start_tests(self):
        file_logger('Starting Tests')
        # Start the testing thread
        self.ui.log_output.setHtml('')
        t = Thread(target=self.run_tests)
        t.daemon = True
        t.start()

    def run_tests(self):
        # Don't let the user try to start while running nor change the part number mid-test!
        self.ui.btn_start.setEnabled(False)
        self.ui.comboBox.setEnabled(False)
        line = '-----------------------------------------------'
        for test in self.valve.procedure:
            # Verify the test is selected to run by iterating through all the test items in
            # the test list and, if matching the current test name, verify the checked state
            for i in range(len(self.ui.listWidget)):
                if test[0] == self.ui.listWidget.item(i).text() and self.ui.listWidget.item(i).isSelected():
                    file_logger('Testing {}'.format(test[0]))
                    self.log_queue.put('<b>{1}\r\nRunning {0}\r\n{1}</b> '.format(test[0], line))
                    test[1](self.log_queue, self.test_queue, self.pps_out, self.data_q_out, self.response_queue)
                    # Tell the user of an escape or a skip
                    if self.valve.escape:
                        file_logger('Escaped'.format(test[0]))
                        self.log_queue.put('<b><font color="blue">Escaped</b></font> ')
                        self.ui.btn_start.setEnabled(True)
                        self.ui.comboBox.setEnabled(True)
                        self.valve.escape = False
                        # If escaping, break out of all loops
                        return
                    elif self.valve.skip:
                        file_logger('Skipped'.format(test[0]))
                        self.log_queue.put('<b><font color="orange">Skipped</b></font> ')
                        self.valve.skip = False
                    else:
                        file_logger('Test Successful')
                    # Once the test is found, break out of the test name matching loop
                    break
                # If the test is not selected, notify user by displaying 'Skipping'
                elif test[0] == self.ui.listWidget.item(i).text():
                    self.log_queue.put('<b>{1}</b>\r\nSkipping {0}'.format(test[0], line))
                    break
        # Re-enable starting tests and selecting part numbers
        self.ui.btn_start.setEnabled(True)
        self.ui.comboBox.setEnabled(True)

if __name__ == '__main__':
    freeze_support()
    #input_queue = Queue(10)
    #output_queue = Queue(10)
    #data_q_out = Queue(10)
    #pps_out = Queue(10)
    #response_queue = Queue(400)
    ## Initialize process pool
    #pool = Pool(processes=4, initializer=pool_init,
    #            initargs=(input_queue, output_queue, data_q_out, pps_out, response_queue))
    #
    ## Place the data producing functions into the process pool
    #pool.apply_async(func=data_q_producer)
    #pool.apply_async(func=compute)
    #pool.apply_async(func=pps_producer)
    file_logger('####### NEW RUN #######\n')
    app = QtWidgets.QApplication(sys.argv)
    window = MainGui()
    window.show()
    file_logger('####### END RUN #######\n')
    sys.exit(app.exec_())
You need to add the following code to your main, before anything else:
from multiprocessing import freeze_support
freeze_support()
On Windows, multiprocessing launches each child process by re-running the executable; in a frozen exe, without freeze_support() every child re-executes the entire program (opening another window and another pool), which is what produces the endless instances. See this stackoverflow post.
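For illustration, a minimal self-contained sketch of the idiom (the work function is hypothetical):
from multiprocessing import Pool, freeze_support

def work(x):
    return x * x

if __name__ == '__main__':
    # In a frozen exe each spawned child re-runs this entry point;
    # freeze_support() detects that case and runs only the child's task
    # instead of launching the whole program again.
    freeze_support()
    with Pool(processes=2) as pool:
        print(pool.map(work, range(5)))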
