I'm very new to PyQt and threading (only about a year). I apologize for the long post (most of the questions are at the end). I could use some assistance with why my worker thread is losing output when performing the work. This is PyQt5 and Python 3.5.
#Creates your own signal class to output stdout.
class EmittingStream(QtCore.QObject):
    """File-like QObject that forwards anything written to it as a Qt signal.

    Installed over sys.stdout/sys.stderr so that print() output can be
    routed into a widget via the textWritten signal.
    """

    textWritten = QtCore.pyqtSignal(str)

    def write(self, text):
        # Relay the written text to whichever slot is connected.
        self.textWritten.emit(str(text))

    def flush(self):
        # Nothing is buffered locally, so flushing is a no-op.
        pass
class MainWindow(QtWidgets.QMainWindow, Ui_mainWindow):
    """Main application window.

    Wires the buttons to their handlers, redirects the interpreter's
    stdout/stderr into outputTextBox via EmittingStream, and owns the two
    worker threads that do the actual API work off the GUI thread.
    """

    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent=parent)
        self.setupUi(self)
        self.closeButton.clicked.connect(self.close)
        self.clearButton.clicked.connect(self.clear_all)
        self.submitButton.clicked.connect(self.submit_ips)
        self.addButton.clicked.connect(self.add_ip_on_button_press)
        # Keep Save button inactive until an IP(s) have been submitted.
        self.saveButton.clicked.connect(self.write_mem)
        self.saveButton.setEnabled(False)
        # Redirect the interpreter streams into the GUI.  EmittingStream
        # emits a signal per write; cross-thread signal delivery is queued
        # into the GUI thread, so worker threads may print safely.
        sys.stdout = EmittingStream(textWritten=self.normalOutputWritten)
        sys.stderr = EmittingStream(textWritten=self.errorOutputWritten)
        # Worker threads are created once and started on demand.
        self.workerThread = WorkerThread()
        self.workerThreadSave = WorkerThreadSave()

    def __del__(self):
        # Restore BOTH interpreter streams.  The original restored only
        # sys.stdout, leaving sys.stderr aimed at a destroyed widget.
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__

    def _append_output(self, text):
        """Append *text* at the end of outputTextBox and keep it visible."""
        cursor = self.outputTextBox.textCursor()
        cursor.movePosition(QtGui.QTextCursor.End)
        cursor.insertText(text)
        self.outputTextBox.ensureCursorVisible()

    def normalOutputWritten(self, text):
        """Slot for redirected stdout text."""
        self._append_output(text)

    def errorOutputWritten(self, text):
        """Slot for redirected stderr text."""
        self._append_output(text)

    def submit_ips(self):
        """Validate that IPs were entered, then start the worker thread."""
        msgBox = QtWidgets.QMessageBox()
        msgBox.setIcon(QtWidgets.QMessageBox.Warning)
        msgBox.setWindowTitle("Error")
        if not hostiplist and not cidriplist:
            msgBox.setText("Ooops, Please enter an acceptible host or network IP Address.")
            msgBox.exec()
        else:
            print("*************** Process Started ***************\n")
            # Lock the input controls while work is in flight; only the
            # Save button becomes usable.
            self.submitButton.setEnabled(False)
            self.addButton.setEnabled(False)
            self.clearButton.setEnabled(False)
            self.saveButton.setEnabled(True)
            self.workerThread.start()

    def write_mem(self):
        """Perform the save operation ("write mem") through the API."""
        print("*************** Saving Device(s) Started ***************\n")
        self.saveButton.setEnabled(False)
        self.addButton.setEnabled(True)
        self.clearButton.setEnabled(True)
        self.submitButton.setEnabled(True)
        # Empty the pending IP lists in place (module-level globals shared
        # with the worker threads).
        del hostiplist[:]
        del cidriplist[:]
        self.workerThreadSave.start()
The above code is obviously my main UI. When clicking the submit button it starts a separate thread to perform the processing and work (and also to not freeze the UI).
Unfortunately, I end up losing some of the print statements (I'm assuming my signals and slots aren't performing or the thread stops before the output is printed).
class WorkerThread(QThread):
    """Background thread that pushes network-object definitions to the REST API."""

    def __init__(self, parent=None):
        super(WorkerThread, self).__init__(parent)

    def run(self):
        # Helper for adding host IP objects via POST.
        def post_object(f):
            """POST self.post_data to the network-objects endpoint.

            Returns the (closed) response object, or the value passed in as
            *f* if the request never completed.
            """
            self.api_path = "/api/objects/networkobjects"
            self.req = urllib.request.Request(
                str(server) + self.api_path,
                json.dumps(self.post_data).encode('utf-8'),
                headers)
            # base64.encodestring() was deprecated and removed in Python 3.9;
            # b64encode never inserts newlines, so no .replace() is needed.
            self.base64string = base64.b64encode(
                ('%s:%s' % (username, password)).encode()).decode()
            self.req.add_header("Authorization", "Basic %s" % self.base64string)
            try:
                f = urllib.request.urlopen(self.req)
                self.status_code = f.getcode()
                if self.status_code == 201:
                    print("The following object was created successfully: {}".format(self.post_data["name"]))
            except urllib.error.HTTPError as err:
                print("Error received from server: {}. HTTP Status code: {}".format(err.reason, err.code))
                try:
                    # The original passed the *bound method* (err.read, no
                    # parentheses) to json.loads, which raised TypeError and
                    # silently cut off the remaining prints.  Also, the error
                    # body can only be read once, so capture it first.
                    body = err.read()
                    json_error = json.loads(body)
                    if json_error:
                        print(json.dumps(json_error, sort_keys=True, indent=4, separators=(',', ': ')))
                except ValueError:
                    # Body was not JSON; the status line above already printed.
                    pass
            finally:
                if f:
                    f.close()
            return f
I run the post_object function within a for loop below:
# NOTE(review): this fragment is the remainder of WorkerThread.run(); it
# relies on module-level globals (serverList, cidriplist, hostiplist,
# jsonoutputlist, f) and on the nested helpers post_object() /
# patch_object_group() defined above -- confirm against the full file.
try:
#Starts for loop REST API adds.
for s in serverList:
# 'server' is read by post_object()/patch_object_group() to build the URL.
server = s
# Operation for CIDR IPs
for c in cidriplist:
self.network = c
# PATCH body: add the network to the object-group.
self.patch_data_group = {
"members.add": [
{
"kind": "IPv4Network",
"value": str(self.network)
}]
}
patch_object_group(f)
jsonoutputlist.append(self.patch_data_group)
#Operation for single host IPs
for h in hostiplist:
self.host = h
# POST body: create the host network object itself.
self.post_data = {
"host": {
"kind": "IPv4Address",
"value": str(self.host)},
"kind": "object#NetworkObj",
"name": str(self.host),
"objectId": str(self.host)
}
# PATCH body: add the freshly created object to the object-group.
self.patch_data_group = {
"members.add": [
{
"kind": "objectRef#NetworkObj",
"objectId": str(self.host)
}]
}
jsonoutputlist.append(self.post_data)
post_object(f)
patch_object_group(f)
# print ("\n JSON Data format for change control: \n{}".format(self.patch_data_group))
print("\n*************** IPs Add Complete ***************\n")
except:
# NOTE(review): bare except re-routed through excepthook; 'type' and
# 'traceback' shadow the builtin and the stdlib module here.
(type, value, traceback) = sys.exc_info()
sys.excepthook(type, value, traceback)
This is where the main issues arise. During the operation of the for loops, the print statements and return API call statements are dropped or cut off.
For example,
# (Excerpt quoted from post_object() above: the success-path print.)
if self.status_code == 201:
print ("The following object was created successfully: {}".format(self.post_data["name"]))
The above statement only prints one line below the if statement...it won't allow me to do more than one line...and the one line print statement is usually cut off. As well as the json.dumps statements won't print at all...
Any help would be greatly appreciated.
Thanks,
Related
I have this code that is supposed to log to a memory variable (the_message_lines):
import logging
from thompcoutils import log_utils
# Module-level accumulator shared by every MemoryHandler in this module.
the_message_lines = []


class MemoryHandler(logging.StreamHandler):
    """
    Handler that keeps all log messages in memory until the beginning of
    the day or the size exceeds a value.
    """

    def emit(self, record: logging.LogRecord):
        """Format *record* and append the resulting line to the_message_lines."""
        global the_message_lines
        try:
            msg = self.format(record)
            the_message_lines.append(msg)
            self.flush()
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            # Delegate to logging's standard error handling rather than a
            # bare except that would also swallow GeneratorExit etc.
            self.handleError(record)

    # The posted code had '#staticmethod' comments -- Markdown mangled the
    # '@staticmethod' decorators.  Restored here; calls through the class
    # (MemoryHandler.get_lines()) keep working exactly as before.
    @staticmethod
    def get_lines():
        """Return the list of accumulated, formatted log lines."""
        return the_message_lines

    @staticmethod
    def reset_lines():
        """Clear the accumulated lines in place."""
        global the_message_lines
        the_message_lines.clear()
if __name__ == '__main__':
# Load handler/formatter wiring from the INI file.  NOTE(review): if
# logging.ini instantiates MemoryHandler via a *different* module path than
# the one this script runs as, Python imports a second copy of this module,
# each with its own the_message_lines -- that would explain why the list
# looks empty here while emit() sees it filling up.  Confirm the class
# reference used in logging.ini.
log_utils.load_log_config('logging.ini')
logger = log_utils.get_logger()
logger.warning('beginning')
for i in range(3):
# 'lines' is re-fetched each iteration but never used afterwards.
lines = MemoryHandler.get_lines()
logger.warning('remaining %d seconds', i, extra={'same_line':True})
logger.warning('end')
for line in MemoryHandler.get_lines():
print(line)
It almost behaves as it should, but the contents of the_message_lines seem to come and go.
If I put a break point in the emit method and observe the_message_lines, it behaves as it should, accumulating log messages every time.
If I put a break point in the loop that is logging, the_message_lines is empty every time!
So, between log requests, the_message_lines appears to delete itself, but in the emit, it is fine.
At the end of the main() method, get_lines() returns an empty array.
What am I missing?
I am working on the following small tkinter application.
The URL & token is for a public development demo which can be found here if interested - https://snipe-it.readme.io/reference#api-overview
My current issue is displaying logging messages to the current tkinter window.
Currently it just prints to the console. I've tried a few different methods but I'm not very good with tkinter at the moment.
I have tried solutions like so - How do I create a popup window in tkinter? - but this just duplicates the main window each time.
Any help or pointers would be appreciated.
The csv file looks like so:
asset_tag,
1382613192,
1169063140,
785100692,
4565667567345457574535,
import csv
import logging
from datetime import datetime
from multiprocessing import Process, Queue
from tkinter import *
from tkinter import filedialog, ttk
from tkinter.messagebox import showinfo
import requests
#todo set initialdir as current user
# Configure root logging once at import time; INFO and above go to stderr.
logging.basicConfig(level=logging.INFO)
# Shared request headers; they are used by several functions below.
# NOTE(review): the bearer token is for the public Snipe-IT demo instance.
headers = {
"Accept": "application/json",
"Authorization": "Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImp0aSI6ImVmMGVhY2Y4MjAyYzgwZWI2M2JkNmIwZDc0OGYwY2FkYzU2Y2ZlMzgyNzY4ODY0N2EwNmU4ZTBlNmYwZDgwODNjZmMyMzI2YWYyYTZlMTFkIn0.eyJhdWQiOiIxIiwianRpIjoiZWYwZWFjZjgyMDJjODBlYjYzYmQ2YjBkNzQ4ZjBjYWRjNTZjZmUzODI3Njg4NjQ3YTA2ZThlMGU2ZjBkODA4M2NmYzIzMjZhZjJhNmUxMWQiLCJpYXQiOjE0OTMzMzI2MjgsIm5iZiI6MTQ5MzMzMjYyOCwiZXhwIjoxODA4ODY1NDI4LCJzdWIiOiIyIiwic2NvcGVzIjpbXX0.NU7ZRIt-d4b0o8uv9ipo1vSWcg1svbmPp47kHErafm9iuK4FjygKd2_4Hp73HKAmjiYcEn3r39pwNh2t9BMFnTXv0KeDGC8zfZ9z7OJN_a59LPoarWBFzCsYETyAm-CeeFnfdj9Cr0ZeGOnnaPuWPYxicwKFeqJI4Hn8nCymcamDGE0u4WOO95ihGOAen4_fqpj-kkBDsvsGhB-cQxeuKdlbvO1yOsKmgQv-kQuxiFMn1zgU7P02mC6XXrbw6jTm7JOaBSbvqSwNtsrSKZkim1jxLsQ4dm36lFmeMkU6hZvNSUnxg8JwbmoxQ_3tZlG3IJh3Sc9ZUi-AEAQ4bbGzi_xNS9fenIdzLDaSiv_esYyNOYXqOuSBk8Yr-720N9OcVjGLnPrV3RtmPisV1aLFgKWLImtlyQgUq3d5LA3QXz8Q_8isvO9Am1u8ri2plbHGJLJ6GRW_mYcBEYMwUozaeXTUe_FUSSO8gpGtO9Hpa5SbERY272_tojyVXpYPaPdUYYmS9CP332jBNESPT8wGwpOM-iddeVo_n82w3dHmDEdp1Brbs3_vKk0AcgvDLsAbd4dZZO-UqddVx6SDb3HLw1Pmw1wGGYHA6w8wWQAiS9kg2xMcz5i75HOULaN3miqYvcPCvHpI2CBfuvdplI8QNm_XzFPmoQRu_5kR8knzla4",
"Content-Type": "application/json"
}
# functions
def check_in(id, asset_tag, headers):
    """POST a check-in for the asset with the given ID.

    Parameters:
        id: asset ID as a string (NOTE(review): shadows the builtin 'id';
            kept to preserve the existing signature).
        asset_tag: the human-facing tag, used only for log messages.
        headers: shared auth/content headers for the Snipe-IT API.
    """
    url = "https://develop.snipeitapp.com/api/v1/hardware/" + id + "/checkin"
    # modify if you would like a note with the checkin
    payload = {"note": "checked in by ME"}
    try:
        response = requests.request("POST", url, json=payload, headers=headers)
        checkin_response = response.json()
        # Snipe-IT returns HTTP 200 even when the asset is already checked
        # in, so success/failure must be read from the JSON 'status' field.
        if "error" in checkin_response['status']:
            # Lazy %-style args: the message is only formatted if emitted.
            logging.error("Unable to checkin asset %s - %s",
                          asset_tag, checkin_response['messages'])
        else:
            logging.info("Successfully checked in asset %s", asset_tag)
    # Catch any http errors and continue with the remaining assets.
    except requests.exceptions.RequestException as e:
        logging.error(e)
def get_asset_id(asset_tag, output_q, headers):
    """Resolve *asset_tag* to its asset ID, then check the asset in.

    Runs in a worker process; its only inter-process output is a dict put
    on *output_q*.
    """
    url = "https://develop.snipeitapp.com/api/v1/hardware/bytag/" + asset_tag
    output_dict = {}
    try:
        # SECURITY NOTE(review): verify=False disables TLS certificate
        # validation -- acceptable against the public demo, not in production.
        response = requests.request("GET", url, headers=headers, verify=False)
        response.raise_for_status()
        json_response = response.json()
        # A successful lookup has no 'status' key; its presence signals an
        # error even though the HTTP status is 200 (tag may not exist).
        if "status" in json_response:
            logging.error("Unable to retrieve asset ID for asset tag %s - %s",
                          asset_tag, json_response['messages'])
        else:
            logging.info("Successfully retrieved asset ID from %s", asset_tag)
            asset_id = str(json_response['id'])
            check_in(asset_id, asset_tag, headers)
    except requests.exceptions.RequestException as e:
        logging.error(e)
    # NOTE(review): output_dict is never populated, so consumers of
    # output_q only ever receive empty dicts -- confirm intended results.
    output_q.put(output_dict)
# class for tkinter configuration
class App:
# Two-button UI: pick a CSV of asset tags, then check them all in.
def __init__(self, master):
self.label = ttk.Label(master, text="Bulk Checkin")
self.label.grid(row=0, column=0, columnspan=2)
ttk.Button(master, text="Select File",
command=self.select_file).grid(row=2, column=0)
ttk.Button(master, text="Checkin Assets",
command=self.checkin_assets).grid(row=2, column=1)
def select_file(self):
# NOTE(review): the handle is opened only to remember its name; it is
# re-opened in checkin_assets and this one is never closed.
filename = filedialog.askopenfilename(initialdir="C:/Users/fraser/Desktop/", filetypes=(("CSV Files","*.csv"),))
self.infile = open(filename, "r")
print(self.infile.name)
def checkin_assets(self):
# function for Checkin Assets button click
# start a timer so we can see how long it takes
start_time = datetime.now()
output_q = Queue(maxsize=20)
procs = []
with open(self.infile.name, 'r') as myfile:
csv_reader = csv.DictReader(myfile)
line_count = 0
for row in csv_reader:
# NOTE(review): DictReader has already consumed the header row, so
# this branch joins a *data* row to no effect and double-increments
# line_count; the counter is written but never read afterwards.
if line_count == 0:
", ".join(row)
line_count += 1
line_count += 1
asset_tag = row["asset_tag"]
# One worker process per CSV row; results (currently empty dicts)
# are funneled through output_q.
my_proc = Process(target=get_asset_id, args=(
asset_tag, output_q, headers))
my_proc.start()
procs.append(my_proc)
# Make sure all processes have finished
for a_proc in procs:
a_proc.join()
# log how long this took
logging.info("\nElapsed time: " + str(datetime.now() - start_time))
def main():
    """Create the Tk root window, attach the App, and enter the event loop."""
    root = Tk()
    app = App(root)
    root.mainloop()


if __name__ == "__main__":
    main()
Here you go, this is assuming you want the messages as a popup. If not, create your own messaging window, or add a label or something you can update to the main one. I used the two messaging classes from the referenced post, but you can call them from anywhere and pass a message you want displayed.
import csv
import logging
from datetime import datetime
from multiprocessing import Process, Queue
from tkinter import *
from tkinter import filedialog, ttk
from tkinter.messagebox import showinfo
class App(ttk.Frame):
# Same two-button UI as the question, now a ttk.Frame subclass with two
# message helpers (popup_bonus / popup_showinfo) callable from anywhere.
def __init__(self, master):
ttk.Frame.__init__(self, master)
self.label = ttk.Label(master, text="Bulk Checkin")
self.label.grid(row=0, column=0, columnspan=2)
ttk.Button(master, text="Select File",
command=self.select_file).grid(row=2, column=0)
ttk.Button(master, text="Checkin Assets",
command=self.checkin_assets).grid(row=2, column=1)
def select_file(self):
# Remember the chosen CSV via an (unclosed) handle's .name attribute.
filename = filedialog.askopenfilename(initialdir="C:/Users/fraser/Desktop/", filetypes=(("CSV Files","*.csv"),))
self.infile = open(filename, "r")
print(self.infile.name)
def checkin_assets(self):
# function for Checkin Assets button click
# start a timer so we can see how long it takes
start_time = datetime.now()
output_q = Queue(maxsize=20)
procs = []
with open(self.infile.name, 'r') as myfile:
csv_reader = csv.DictReader(myfile)
line_count = 0
for row in csv_reader:
# NOTE(review): header handling is a no-op (DictReader already
# consumed it) and line_count is never read -- kept verbatim.
if line_count == 0:
", ".join(row)
line_count += 1
line_count += 1
asset_tag = row["asset_tag"]
# NOTE(review): relies on get_asset_id and headers from the
# question's module; not defined in this snippet.
my_proc = Process(target=get_asset_id, args=(
asset_tag, output_q, headers))
my_proc.start()
procs.append(my_proc)
# Make sure all processes have finished
for a_proc in procs:
a_proc.join()
# log how long this took
logging.info("\nElapsed time: " + str(datetime.now() - start_time))
def popup_bonus(self, message):
# Modal-style Toplevel with the message and an Ok button.
win = Toplevel()
win.wm_title("Message")
l = Label(win, text=message)
l.grid(row=0, column=0)
b = ttk.Button(win, text="Ok", command=win.destroy)
b.grid(row=1, column=0)
def popup_showinfo(self, message):
# Stock tkinter info dialog; blocks until dismissed.
showinfo("Message", message)
def main():
    """Entry point: build the root window, mount App, run the event loop."""
    root = Tk()
    app = App(root)
    root.mainloop()


if __name__ == "__main__":
    main()
I use easygui because it is the simplest and one-liner. It can work alongside tkinter so there is no compatibility issues. For example, if you want a messagebox/alert, just do this:
import easygui
# One-line modal message box; blocks until the user clicks OK.
easygui.msgbox('in cmd, type: "shutdown -a" within 20 seconds', title="Chance to abort shutdown")
I'm experiencing an issue while trying to use pymqi to send messages to IBM MQ.
A project i'm working on
Reads from DB2
Prepares data for WorkerProcess
WorkerProcess does some decision based on that data and sens a message to MQ
When the WorkerProcess tries to commit the message for MQ, I get an error:
Stacktrace:
worker_process.py:
self.mq.commit()
mq_service.py:
self.qmgr.commit()
pymqi/__init__.py line 1689, in commit
raise MQMIError (rv[0], rv[1])
pymqi.MQMIError: MQI Error. Comp: 2, Reason 2012: FAILED: MQRC_ENVIRONMENT_ERROR
Code and stack trace has been typed by hand and can contain typos.
Below is pseudo-code of what I'm doing.
Any help and/or advice is greatly appreciated.
main.py:
# Spawn the workers one by one, starting each as soon as it is created,
# then block until every child has exited.
c_processors = []
for _ in range(num_of_proccessors):
    worker = WorkerProcess()
    worker.start()
    c_processors.append(worker)
for worker in c_processors:
    worker.join()
worker_process.py
class WorkerProcess(Process):
    """Worker that sends one test message to MQ and commits it.

    The MQ connection is now established inside run(), i.e. in the *child*
    process.  The original created MQService() in __init__, which executes
    in the parent before fork/spawn; an MQ connection handle does not
    survive into the child, which is consistent with the
    MQRC_ENVIRONMENT_ERROR (reason 2012) raised at commit time -- and with
    the pre-refactor version (which connected inside the child) working.
    """

    def __init__(self):
        Process.__init__(self)
        # Created lazily in run(), inside the child process.
        self.mq = None

    def run(self):
        self.mq = MQService()
        self.mq.send_message('test')
        self.mq.commit()
        self.mq.close_connection()
mq_service.py
class MQService():
    """Thin pymqi wrapper: connect, put under syncpoint, commit/backout."""

    def __init__(self):
        self.connect()
        self.pmd = pymqi.MD()
        self.pmd.Format = pymqi.CMQC.MQFMT_STRING
        self.pmo = pymqi.PMO(Options=pymqi.CMQC.MQPMO_FAIL_IF_QUIESCING)
        # Use the *put*-message syncpoint constant; the original or'ed in
        # MQGMO_SYNCPOINT, which is a get-message option (same numeric
        # value, but the wrong constant family for a PMO).
        self.pmo.Options |= pymqi.CMQC.MQPMO_SYNCPOINT

    def connect(self):
        """Connect to the queue manager and open the target queue."""
        # these arguments are retrieved from config
        self._connect_to_qmgr(manager, channel, host, port, user, password)
        self._q = pymqi.Queue(self.qmgr, queue_name)

    def _connect_to_qmgr(self, manager, channel, host, port, user, password):
        """Build a client-connection CD and connect self.qmgr with it."""
        self.qmgr = pymqi.QueueManager(None)
        _cd = pymqi.CD()
        # Fixed: the parameter was spelled 'chanel' while the body read
        # 'channel', which raised NameError at connect time.
        _cd.ChannelName = channel.encode('utf-8')
        _cd.ConnectionName = f"{host} ({port})".encode('utf-8')
        _cd.ChannelType = pymqi.CMQC.MQCHT_CLNTCONN
        _cd.TransportType = pymqi.CMQC.MQXPT_TCP
        _connect_options = pymqi.CMQC.MQCNO_HANDLE_SHARE_BLOCK
        # Fixed: the original called connect_with_options on an undefined
        # local '_qmgr'; the queue manager created above is self.qmgr.
        self.qmgr.connect_with_options(manager, cd=_cd, opts=_connect_options,
                                       user=user, password=password)

    def send_message(self, message):
        """Put *message* on the open queue under the configured syncpoint."""
        self._q.put(message, self.pmd, self.pmo)

    def commit(self):
        """Commit the current unit of work."""
        self.qmgr.commit()

    def rollback(self):
        """Back out the current unit of work."""
        self.qmgr.backout()

    def close_connection(self):
        """Disconnect from the queue manager."""
        self.qmgr.disconnect()
EDIT:
Additional information:
I'm running IBM MQ client version 9.1.0.1.
There are no errors in AMQERR0*.LOG files.
LD_LIBRARY_PATH is set
This error showed while refactoring the code.
Below is the code that is working (before refactoring):
Some arguments in function signature are replaced with (args) for the sake of brevity and readability*
main.py:
# NOTE(review): 'args*' below is the author's pseudo-code shorthand for the
# real argument lists, hand-typed for the question -- not valid Python.
def connect_to_mq():
return MQService(args*) # these arguments are read from Config file
def process_chunk(args*):
# Each worker connects, sends one message, commits and disconnects.
# The connection is created *inside* the child process, which is why
# this pre-refactor version works where the refactored one fails.
_mq = connect_to_mq()
_mq.send_message('test')
_mq.commit()
_mq.close_connection()
c_processors = []
for i in range(num_of_proccessors):
p = Process(target=process_chunk, args=(args*))
p.start()
c_processors.append(p)
for p in c_processors:
p.join()
mq_service.py
class MQService():
# Pre-refactor (reported working) variant: the child process owns its
# connection.  'args*' is the author's pseudo-code shorthand.
def __init__(self, args*):
self.pmd = pymqi.MD()
self.pmd.Format = pymqi.CMQC.MQFMT_STRING
self.pmo = pymqi.PMO(Options=pymqi.CMQC.MQPMO_FAIL_IF_QUIESCING)
# NOTE(review): MQGMO_SYNCPOINT is a get-message constant; the put
# equivalent is MQPMO_SYNCPOINT (same numeric value in CMQC).
self.pmo.Options |= pymqi.CMQC.MQGMO_SYNCPOINT
# NOTE(review): calls connect_to_qmgr, but the method below is named
# _connect_to_qmgr (leading underscore) -- likely a hand-typing slip.
self.connect_to_qmgr(args*)
self.connect_to_queue(args*)
def _connect_to_qmgr(self,manager,chanel,host,port,user,password):
self.qmgr = pymqi.connect(manager,
chanel,
"%s (%s)" % (host, port),
user=user,
password=password)
# NOTE(review): missing 'self' parameter -- another transcription slip.
def connect_to_queue(q_name):
self._q = pymqi.Queue(self.qmgr, q_name)
def send_message(self, message):
self._q.put(message, self.pmd, self.pmo)
def commit(self):
self.qmgr.commit()
def rollback(self):
self.qmgr.backout()
def close_connection(self):
self.qmgr.disconnect()
I am creating a python script that will identify changes to a log file and print some data from the new logs.
I use watchdog to create an event handler and everything seems to work fine except from that, I get duplicate events every time I modify the file. I checked creation and delete, they both work as expected and trigger one time.
I have read the similar question which explains having a created and a modified event when I save a file but this is not my case. I just get two modification events.
Here is my code:
import os, sys, time
import subprocess
import threading
import win32print
from tkinter import filedialog
from tkinter import *
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class Handler(FileSystemEventHandler):
    """Watchdog event handler that reacts to changes of one specific file."""

    def __init__(self, observer, filename, dirname):
        self.observer = observer
        self.filename = filename
        self.dirname = dirname
        print("Handler filename = ", self.filename)
        print("Handler dirname = ", self.dirname)

    def on_modified(self, event):
        """Report modifications, but only for the watched file itself."""
        if self.filename == event.src_path:
            print("The file was modified")
            print(event.src_path)
            # go get the last line and print the data

    def on_created(self, event):
        """Report any file created in the watched directory."""
        print("A file was created (", event.src_path, ")")

    def on_deleted(self, event):
        """Report any file deleted from the watched directory."""
        print("A file was deleted (", event.src_path, ")")
if __name__ == "__main__":
    # --- printer selection -------------------------------------------------
    Flags = 2          # PRINTER_ENUM_LOCAL
    Name = None
    Level = 1
    printers = win32print.EnumPrinters(Flags, Name, Level)
    print("\nChoose a printer to use:")
    # enumerate() replaces the manual i=1 / i=i+1 counter.
    for i, p in enumerate(printers, start=1):
        print(i, ')', p[2])
    if sys.version_info >= (3,):
        raw_data = bytes("This is a test", "utf-8")
    else:
        raw_data = "This is a test"
    printer = int(input())
    printer_name = printers[printer - 1][2]  # win32print.GetDefaultPrinter ()
    print("You chose ", printer_name, "\nI will now print from the specified file with this printer")
    hPrinter = win32print.OpenPrinter(printer_name)
    # root = Tk()
    # root.filename = filedialog.askopenfilename(initialdir = "/Desktop",title = "Select file",filetypes = (("log files","*.log"),("all files","*.*")))
    file_path = "some_file_path"  # root.filename
    file_directory = os.path.dirname(file_path)
    print(file_directory)
    # --- file watching ------------------------------------------------------
    observer = Observer()
    event_handler = Handler(observer, file_path, file_directory)
    observer.schedule(event_handler, path=file_directory, recursive=False)
    observer.start()
    try:
        # A bare observer.join() blocks forever with no way to stop the
        # observer; poll instead so Ctrl+C can shut down cleanly.
        while observer.is_alive():
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
any ideas would be appreciated
EDIT:
After some debugging I found out that Windows10 is changing the file modification time twice every time I save it.
The proof of concept code is this:
# Proof-of-concept: poll the file's mtime to detect modifications.
# NOTE(review): this busy-waits at 100% CPU; a short time.sleep() inside the
# loop would make it practical.  On Windows 10 the mtime was observed to
# change twice per save, matching the duplicate watchdog events.
prev_modification_time = os.path.getmtime(file_path)
while True:
current_mod_time = os.path.getmtime(file_path)
if prev_modification_time != current_mod_time :
print ("the file was modified, last modification time is: ", current_mod_time)
prev_modification_time = current_mod_time
pass
Final edit:
After testing my code on linux (Debian Stretch to be exact) it worked like a charm. So this combined with the previous edit probably shows that watchdog works fine and it is windows10 that has some issue. Should I post it on a different question or here?
I'm currently getting this error. I'm confused because from what I can tell Generator Exit just gets called whenever a generator finishes, but I have a ton of other Generators inheriting this class that do not call this error. Am I setting the Generator up properly? or is there some implicit code I'm not taking into account that is calling close()?
"error": "Traceback (most recent call last):\n File \"/stashboard/source/stashboard/checkers.py\", line 29, in run\n yield self.check()\nGeneratorExit\n",
the code where this yield statement is called:
class Checker():
    """Base class: run self.check() every <frequency> seconds as a Tornado coroutine."""

    def __init__(self, event, frequency, params):
        self.event = event
        self.frequency = frequency
        self.params = params

    # Markdown collapsed the original '@gen.coroutine' decorators into
    # '#gen.coroutine' comments; restored here.  Without the decorator these
    # methods are plain generators and are never actually scheduled.
    @gen.coroutine
    def run(self):
        """ Run check method every <frequency> seconds
        """
        while True:
            try:
                yield self.check()
            except GeneratorExit:
                # The GC closes abandoned coroutines (Python 3.4+); log and
                # re-raise so the generator machinery can finish closing.
                logging.info("EXCEPTION")
                raise
            except Exception:
                # Any check failure is recorded as an error status; a bare
                # except here would also have swallowed SystemExit.
                data = {
                    'status': events.STATUS_ERROR,
                    'error': traceback.format_exc()
                }
                yield self.save(data)
            yield gen.sleep(self.frequency)

    @gen.coroutine
    def check(self):
        # Subclasses override with the actual health check.
        pass

    @gen.coroutine
    def save(self, data):
        """Persist *data* for this checker's event."""
        yield events.save(self.event, data)
and this is the code that is inheriting from it:
class PostgreChecker(Checker):
    """Checker that validates a connection to each configured PostgreSQL server."""

    formatter = 'stashboard.formatters.PostgreFormatter'

    def __init__(self, event, frequency, params):
        super().__init__(event, frequency, params)
        self.clients = []
        for DB in configuration["postgre"]:
            # setup and create connections to PG servers.
            postgreUri = queries.uri(DB["host"], DB["port"], DB["dbName"],
                                     DB["userName"], DB["password"])
            # creates actual link to DB
            client = queries.TornadoSession(postgreUri)
            # remember which host this client talks to, for status reports
            client.host = DB["host"]
            self.clients.append(client)

    # Restore the '@gen.coroutine' decorator that Markdown turned into a
    # '#gen.coroutine' comment.
    @gen.coroutine
    def check(self):
        for client in self.clients:
            try:
                yield client.validate()
                # save() is itself a coroutine: yield it, otherwise the
                # returned Future is discarded and the status never persists.
                yield self.save({'host': client.host,
                                 'status': events.STATUS_OK})
            except (ConnectionError, AutoReconnect, ConnectionFailure):
                yield self.save({'host': client.host,
                                 'status': events.STATUS_FAIL})
Tornado never calls close() on your generators, but the garbage collector does (starting in Python 3.4 I think). How is checker.run() called? Use IOLoop.spawn_callback() for fire-and-forget coroutines; this will keep a reference to them and allow them to keep running indefinitely.
the specific issue here was that my db cursors were not automatically re-connecting. I was using the queries library, but switched over to momoko and the issue is gone