psycopg2 - how to detect broken but not closed connection?

I'm using psycopg2 ThreadedConnectionPool.
When putting an already closed connection back in the pool using psycopg2's putconn, it doesn't get added to the pool, ensuring that I won't be getting a closed connection next time I call getconn.
Is there a case in which the connection is broken (as in not usable anymore) but not closed?
If so, how can I detect it?
For example:
from psycopg2.pool import ThreadedConnectionPool

DSN = "postgresql://User:password@localhost/db"
pool = ThreadedConnectionPool(8, 10, DSN)

def execute(query):
    close = False
    conn = pool.getconn()
    try:
        cur = conn.cursor()
        cur.execute(query)
        conn.commit()
    except ExceptionThatIndicatesTheConnectionIsBrokenButNotClosed:
        close = True
    except Exception:
        conn.rollback()
    pool.putconn(conn, close=close)
ExceptionThatIndicatesTheConnectionIsBrokenButNotClosed - are there any specific exceptions/pgcodes/pgerrors that indicate just that?
Thanks :)
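For reference, one common detection approach (a sketch of my own, not confirmed by this thread): psycopg2 raises OperationalError when the server connection is lost mid-operation, while InterfaceError signals a connection object that is already closed, so treating OperationalError as "broken, return for closure" is a reasonable heuristic.

import psycopg2

# Minimal sketch (an assumption, not from this thread): flag the connection
# for closure when an OperationalError suggests the server link has broken.
def execute(query):
    close = False
    conn = pool.getconn()
    try:
        cur = conn.cursor()
        cur.execute(query)
        conn.commit()
    except psycopg2.OperationalError:
        # Raised e.g. on "server closed the connection unexpectedly";
        # conn.closed may still read 0 until psycopg2 notices the break.
        close = True
    except Exception:
        conn.rollback()
    finally:
        pool.putconn(conn, close=close)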

Related

relation does not exist in postgreSQL but already exist

I've read a lot of articles about my problem, but none of them solved it. You can see my code here:
import os
import psycopg2

DATABASE_URL = os.environ.get('url_of_my_db')
con = None
try:
    con = psycopg2.connect(DATABASE_URL)
    cur = con.cursor()
    print('PostgreSQL database version:')
    #cur.execute('SELECT version()')
    #cur.execute('SELECT * FROM qwerty')
    #cur.execute(sql.SQL('SELECT * FROM {}').format(sql.Identifier('qwerty')))
    #cur.execute(sql.SQL("INSERT INTO {} (chat_id, username, created_on) VALUES (8985972942, vovakirdan, 2022-01-05)").format(sql.Identifier('users')))
    cur.execute("""INSERT INTO users (chat_id, username, created_on)
                   VALUES (3131,
                           vovakirdan,
                           2022-01-05)""")
    # display the PostgreSQL database server version
    db_version = cur.fetchone()
    print(db_version)
    # close the communication with the HerokuPostgres
    cur.close()
except Exception as error:
    print('Cause: {}'.format(error))
finally:
    # close the communication with the database server by calling close()
    if con is not None:
        con.close()
        print('Database connection closed.')
In my DB, the table named "users" (created without quotes) exists, but I still get this error:
error
...relation "users" does not exist
All the commented-out statements fail with the same error, except for SELECT version(), which works perfectly and proves that the connection is fine.
The problem was that PostgreSQL wanted me to use SELECT column FROM schema.table instead of SELECT column FROM table. That's all. Thanks everyone!
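For illustration, a minimal sketch of the fix described above (my_schema stands in for whatever schema the table actually lives in):

# 'my_schema' is a hypothetical schema name; qualify the table explicitly...
cur.execute('SELECT * FROM my_schema.users')
# ...or point the session's search_path at that schema once:
cur.execute('SET search_path TO my_schema')
cur.execute('SELECT * FROM users')  # now resolves against my_schema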

Python3 pika channel.basic_consume() causing MySQL too many connections

I have been using pika to connect to RabbitMQ and consume messages. Once I start the script on the Ubuntu prod environment it works as expected, but it keeps opening MySQL connections and never closes them, eventually ending up with "Too many connections" on the MySQL server.
I would appreciate any recommendation on the code below, as I cannot work out what is going wrong. Thanking you in advance.
The flow is the following:
Starting pika on Python 3
Subscribing to a channel and waiting for messages
In the callback I do various validations and save or update data inside MySQL
The symptom is shown in the htop screenshot at the end of the question: new MySQL connections keep appearing at the top of the list and are never closed.
Pika version = 0.13.0
For MySQL I use pymysql.
Pika script:
def main():
    credentials = pika.PlainCredentials(tunnel['queue']['name'], tunnel['queue']['password'])
    while True:
        try:
            cp = pika.ConnectionParameters(
                host=tunnel['queue']['host'],
                port=tunnel['queue']['port'],
                credentials=credentials,
                ssl=tunnel['queue']['ssl'],
                heartbeat=600,
                blocked_connection_timeout=300
            )
            connection = pika.BlockingConnection(cp)
            channel = connection.channel()

            def callback(ch, method, properties, body):
                if 'messageType' in properties.headers:
                    message_type = properties.headers['messageType']
                    if events.get(message_type):
                        result = Descriptors._reflection.ParseMessage(events[message_type]['decode'], body)
                        if result:
                            result = protobuf_to_dict(result)
                            model.write_response(external_response=result, message_type=message_type)
                    else:
                        app_log.warning('Message type not in allowed list = ' + str(message_type))
                        app_log.warning('continue listening...')

            channel.basic_consume(callback, queue=tunnel['queue']['name'], no_ack=True)
            try:
                channel.start_consuming()
            except KeyboardInterrupt:
                channel.stop_consuming()
                connection.close()
                break
        except pika.connection.exceptions.ConnectionClosed as e:
            app_log.error('ConnectionClosed :: %s' % str(e))
            continue
        except pika.connection.exceptions.AMQPChannelError as e:
            app_log.error('AMQPChannelError :: %s' % str(e))
            continue
        except Exception as e:
            app_log.error('Connection was closed, retrying... %s' % str(e))
            continue

if __name__ == '__main__':
    main()
Inside the script I have a model that does inserts or updates in the database; code below:
def write_response(self, external_response, message_type):
    table_name = events[message_type]['table_name']
    original_response = external_response[events[message_type]['response']]
    if isinstance(original_response, list):
        external_response = []
        for o in original_response:
            record = self.map_keys(o, message_type, events[message_type].get('values_fix', {}))
            external_response.append(self.validate_fields(record))
    else:
        external_response = self.map_keys(original_response, message_type, events[message_type].get('values_fix', {}))
        external_response = self.validate_fields(external_response)
    if not self.mysql.open:
        self.mysql.ping(reconnect=True)
    with self.mysql.cursor() as cursor:
        if isinstance(original_response, list):
            for e in external_response:
                id_name = events[message_type]['id_name']
                filters = {id_name: e[id_name]}
                self.event(
                    cursor=cursor,
                    table_name=table_name,
                    filters=filters,
                    external_response=e,
                    message_type=message_type,
                    event_id=e[id_name],
                    original_response=e  # not required here
                )
        else:
            id_name = events[message_type]['id_name']
            filters = {id_name: external_response[id_name]}
            self.event(
                cursor=cursor,
                table_name=table_name,
                filters=filters,
                external_response=external_response,
                message_type=message_type,
                event_id=external_response[id_name],
                original_response=original_response
            )
        cursor.close()
    self.mysql.close()
    return
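As an aside (my suggestion, not the fix the author eventually found): if connections were genuinely leaking, opening one pymysql connection per message and closing it in a finally block guarantees release even when the handler raises. handle_record below is a hypothetical stand-in for the real insert/update logic.

import pymysql

def write_response_safe(external_response, message_type):
    # Hedged sketch: one connection per call, always released.
    conn = pymysql.connect(host='localhost', user='app', password='secret', database='app')
    try:
        with conn.cursor() as cursor:
            handle_record(cursor, external_response, message_type)  # hypothetical helper
        conn.commit()
    finally:
        conn.close()  # runs on success and on error alike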
On Ubuntu I use systemd to run the script and restart it in case something goes wrong; below is the systemd unit file:
[Unit]
Description=Pika Script
Requires=stunnel4.service
Requires=mysql.service
Requires=mongod.service
[Service]
User=user
Group=group
WorkingDirectory=/home/pika_script
ExecStart=/home/user/venv/bin/python pika_script.py
Restart=always
[Install]
WantedBy=multi-user.target
Screenshot from Ubuntu htop showing how MySQL connections keep being added to the list and are never closed.
Error
tornado_mysql.err.OperationalError: (1040, 'Too many connections')
I have found the issue; posting it in case it helps somebody else.
The problem was that mysqld went into an infinite loop trying to create indexes for a specific database: it never succeeded and kept trying again and again.
The solution was to remove that database and recreate it. The mysqld process went back to normal and the infinite indexing loop disappeared as well.
I would say increasing connections may solve your problem temporarily.
1st, find out why the application is not closing the connection after completing its task.
2nd, check for any slow queries/calls on the DB and fix them if present.
3rd, assuming there are no slow queries/calls on the DB and the application closes its connection/thread immediately after completing the task, consider playing with "wait_timeout" on the MySQL side.
According to this answer, if you have MySQL 5.7 or 5.8:
It is worth knowing that if you run out of usable disc space on your server partition or drive, this will also cause MySQL to return this error. If you're sure it's not the actual number of users connected, then the next step is to check that you have free space on your MySQL server drive/partition.
From the same thread: you can inspect and increase the number of MySQL connections.
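For reference, a minimal sketch of that inspection from Python using pymysql (the credentials are placeholders; raising the limit needs an administrative account and only papers over a leak):

import pymysql

conn = pymysql.connect(host='localhost', user='root', password='secret')
with conn.cursor() as cur:
    cur.execute("SHOW VARIABLES LIKE 'max_connections'")
    print(cur.fetchone())  # e.g. ('max_connections', '151')
    cur.execute("SHOW STATUS WHERE variable_name = 'Threads_connected'")
    print(cur.fetchone())  # how many clients are connected right now
    cur.execute("SET GLOBAL max_connections = 500")  # effective until restart
conn.close()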

Is there any way to put timeout in pandas read_sql function?

I connect to a DB2 server through an ODBC connection in my Python code. The DB2 server gets rebooted for maintenance, or disconnects me while running specific server-side tasks; this happens once or twice a day. If at that moment my code has started executing the pandas read_sql function to fetch the result of a query, it goes into an infinite wait, even when the server is back up after, say, 1 hour.
I want to put a timeout on the execution of read_sql, and whenever that timeout occurs I want to refresh the connection to the DB2 server so that a fresh connection is made before continuing the query.
I have tried making a while loop and picking chunks of data from DB2 instead of pulling the whole result at once, but the problem remains: if DB2 disconnects while pulling a chunk, the Python code still goes into an infinite wait.
chunk_size = 1000
offset = 0
while True:
    sql = "SELECT * FROM table_name limit %d offset %d" % (chunk_size, offset)
    df = pd.read_sql(sql, conn)
    df.index += (offset + 1)
    offset += chunk_size
    sys.stdout.write('.')
    sys.stdout.flush()
    if df.shape[0] < chunk_size:
        break
I need read_sql to throw some exception or return a value if the SQL execution takes more than 3 minutes. If that happens, I need the connection to DB2 to be refreshed.
You could use the package func-timeout. You can install it via pip as below:
pip install func-timeout
So, for example, if you have a function doit('arg1', 'arg2') that you want to limit to running for 5 seconds, with func_timeout you can call it like this:
from func_timeout import func_timeout, FunctionTimedOut

try:
    doitReturnValue = func_timeout(5, doit, args=('arg1', 'arg2'))
except FunctionTimedOut:
    print("doit('arg1', 'arg2') could not complete within 5 seconds, hence terminated.\n")
except Exception as e:
    # Handle any exceptions that doit might raise here
    pass
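Applied to the question's 3-minute requirement, a minimal sketch (my own adaptation; reconnect_db2 is a hypothetical helper that rebuilds the ODBC connection):

from func_timeout import func_timeout, FunctionTimedOut
import pandas as pd

while True:
    try:
        # Abort read_sql with FunctionTimedOut if it runs longer than 180 seconds.
        df = func_timeout(180, pd.read_sql, args=(sql, conn))
        break
    except FunctionTimedOut:
        conn = reconnect_db2()  # hypothetical helper: re-open the ODBC connection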

Mysql.connector Python, no result when connection is already used

I have a simple piece of code which sends a message over HTTP to another webapp and checks that the message was correctly inserted in the database (two times).
So it is not this code that inserts into the database (that is done in the other app).
SELECT_TABLE1_BY_ID_AND_DATE = "SELECT * FROM table1 WHERE table1.id = %s AND timedata = FROM_UNIXTIME(%s)"
SELECT_TABLE2_BY_ID_AND_DATE = "SELECT * FROM table2 WHERE table2.id = %s AND timedata = FROM_UNIXTIME(%s)"
try:
    conn = mysql.connector.connect(user=db['user'], password=db['password'], host=db['host'], port=db['port'], database="TEST", raise_on_warnings=True)
    cursor = conn.cursor()
    self.send1Message(msg1)  # Send to HTTP Webapp
    cursor.execute(SELECT_TABLE1_BY_ID_AND_DATE, (idD, timing))
    print(cursor.fetchall())  # 1
    self.send2Message(msg2)  # Send to HTTP Webapp
    cursor.execute(SELECT_TABLE2_BY_ID_AND_DATE, (idD, timing))
    print(cursor.fetchall())  # 2
except mysql.connector.Error as err:
    print("Something went wrong: {}".format(err))
If I use the same SQL connection between the two sends, only the first fetchall returns data (#1 prints data and #2 prints an empty list).
I also tried closing the connection after #1 and opening another connection; then it works for both (#1 prints data and #2 too).
(I should point out that my queries are correct and that the data is inserted into the database on time by the webapp.)
Is this normal behaviour for a connection?
Thanks a lot!
Try 2 connections...
conn1 = mysql.connector.connect(user=db['user'], password=db['password'], host=db['host'], port=db['port'], database="TEST", raise_on_warnings=True)
conn2 = mysql.connector.connect(user=db['user'], password=db['password'], host=db['host'], port=db['port'], database="TEST", raise_on_warnings=True)
cursor1 = conn1.cursor()
self.send1Message(msg1)  # Send to HTTP Webapp
cursor1.execute(SELECT_TABLE1_BY_ID_AND_DATE, (idD, timing))
print(cursor1.fetchall())  # 1
self.send2Message(msg2)  # Send to HTTP Webapp
cursor2 = conn2.cursor()
cursor2.execute(SELECT_TABLE2_BY_ID_AND_DATE, (idD, timing))
print(cursor2.fetchall())  # 2
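A likely explanation, for what it's worth (my addition, not stated in the thread): with autocommit off, the first SELECT opens a transaction, and under InnoDB's default REPEATABLE READ isolation every later SELECT in that transaction reads the same snapshot, so rows the other app inserts afterwards stay invisible. Ending the transaction between the two queries should make a single connection work:

# Sketch of that alternative: commit (or rollback) between the two SELECTs to
# discard the old read snapshot instead of opening a second connection.
cursor.execute(SELECT_TABLE1_BY_ID_AND_DATE, (idD, timing))
print(cursor.fetchall())  # 1
conn.commit()             # ends the snapshot started by the first SELECT
self.send2Message(msg2)   # Send to HTTP Webapp
cursor.execute(SELECT_TABLE2_BY_ID_AND_DATE, (idD, timing))
print(cursor.fetchall())  # 2 - now sees the newly inserted rows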

How to make connection in python to connect as400 and call any as400 programs with parameter

Does anyone know how to make a connection in Python to an AS/400 iSeries system and call AS/400 programs with parameters?
For example, how would one create a library on the AS/400 through Python? I want to call "CRTLIB LIB(TEST)" from a Python script.
I am able to connect to the DB2 database through the pyodbc package.
Here is my code to connect to the DB2 database:
import pyodbc

connection = pyodbc.connect(
    driver='{iSeries Access ODBC Driver}',
    system='ip/hostname',
    uid='username',
    pwd='password')
c1 = connection.cursor()
c1.execute('select * from libname.filename')
for row in c1:
    print(row)
If your IBM i is set up to allow it, you can call the QCMDEXC stored procedure using CALL in your SQL. For example,
c1.execute("call qcmdexc('crtlib lib(test)')")
The QCMDEXC stored procedure lives in QSYS2 (the actual program object is QSYS2/QCMDEXC1) and does much the same as the familiar program of the same name that lives in QSYS, but the stored procedure is specifically meant to be called via SQL.
Of course, for this example to work, your connection profile has to have the proper authority to create libraries.
It's also possible that your IBM i isn't set up to allow this. I don't know exactly what goes into enabling this functionality, but where I work, we have one partition where the example shown above completes normally, and another partition where I get this instead:
pyodbc.Error: ('HY000', '[HY000] [IBM][System i Access ODBC Driver][DB2 for i5/OS]SQL0901 - SQL system error. (-901) (SQLExecDirectW)')
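To cover the "with parameter" part of the question (my own extension of the answer above; MYLIB/MYPGM and the parameter value are hypothetical): QCMDEXC executes any CL command string, so a program call with parameters goes through CL's CALL command, with the inner single quotes doubled inside the SQL literal.

# Hedged sketch: CL commands, including a parameterized program call, via the
# QSYS2.QCMDEXC stored procedure. MYLIB/MYPGM and 'HELLO' are hypothetical.
c1 = connection.cursor()
c1.execute("call qsys2.qcmdexc('CRTLIB LIB(TEST)')")
c1.execute("call qsys2.qcmdexc('CALL PGM(MYLIB/MYPGM) PARM(''HELLO'')')")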
This gist shows how to connect to an AS/400 via pyodbc:
https://gist.github.com/BietteMaxime/6cfd5b2dc2624c094575
A few notes: in this example, SYSTEM is the DSN you've set up for the AS/400 in the with pyodbc.connect statement. You could also switch this to SERVER and PORT with these modifications:
import pyodbc

class CommitMode:
    NONE = 0  # Commit immediate (*NONE) --> QSQCLIPKGN
    CS = 1    # Read committed (*CS) --> QSQCLIPKGS
    CHG = 2   # Read uncommitted (*CHG) --> QSQCLIPKGC
    ALL = 3   # Repeatable read (*ALL) --> QSQCLIPKGA
    RR = 4    # Serializable (*RR) --> QSQCLIPKGL

class ConnectionType:
    READ_WRITE = 0  # Read/Write (all SQL statements allowed)
    READ_CALL = 1   # Read/Call (SELECT and CALL statements allowed)
    READ_ONLY = 2   # Read-only (SELECT statements only)

def connstr(server, port, commit_mode=None, connection_type=None):
    _connstr = 'DRIVER=iSeries Access ODBC Driver;SERVER={server};PORT={port};SIGNON=4;CCSID=1208;TRANSLATE=1;'.format(
        server=server,
        port=port,
    )
    if commit_mode is not None:
        _connstr = _connstr + 'CommitMode=' + str(commit_mode) + ';'
    if connection_type is not None:
        _connstr = _connstr + 'ConnectionType=' + str(connection_type) + ';'
    return _connstr

def main():
    with pyodbc.connect(connstr('myas400.server.com', '8471', CommitMode.CHG, ConnectionType.READ_ONLY)) as db:
        cursor = db.cursor()
        cursor.execute(
            """
            SELECT * FROM IASP.LIB.FILE
            """
        )
        for row in cursor:
            print(' '.join(map(str, row)))

if __name__ == '__main__':
    main()
I cleaned up some PEP-8 as well. Good luck!
