Python Script Creates Directories In /tmp/, Taking Up System Space - linux

I am running a script that acts as a server: it allows two clients to connect, and when one specific client sends a message, the server modifies it and sends it on to the other client.
This appears to work, as the receiving client acknowledges that the input was received and is valid. This is a script that I intend to run continuously.
However, a big issue is that my /tmp/ directory is filling up with directories named _M... (the ellipsis representing a random string), each containing Python modules (such as cryptography, which, as far as I'm aware, I'm not using) and timezone information (quite literally every timezone that Python supports). They seem to be created very frequently, but I can't identify what exactly in the process is doing this.
I have created a working cleanup bash script that removes files older than 5 minutes from the directory every 5 minutes. However, I cannot guarantee that when I duplicate this process for other devices, the directories will have the same name format. Rather than create a unique bash script for each process, I'd rather be able to clean up the directories from within the Python script, or better still, prevent them from being created at all.
The problem is that I'm not certain how to accomplish this, and I don't see anything on SO about what is creating these directories or how to delete them.
The following is my script:
import time, socket, os, sys, re, select

IP = '192.168.109.8'
PORT = [3000, 3001]
PID = str(os.getpid())
PIDFILE = "/path/to/pidfile.pid"
client_counter = 0
sockets_list = []

def runCheck():
    # Only allow one instance of the script: refuse to start if a PID file already exists.
    if os.path.isfile(PIDFILE):
        return False
    else:
        with open(PIDFILE, 'w') as pidfile:
            pidfile.write(PID)
        return True

def openSockets():
    # One listening socket per port in PORT.
    for i in PORT:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind((IP, i))
        s.listen(1)
        sockets_list.append(s)

def receiveMessage(client_socket):
    try:
        message = client_socket.recv(2048).decode('utf-8')
        if not message:
            return False
        message = str(message)
        return message
    except Exception:
        return False

def fixString(local_string):
    # processes
    return local_string

def main():
    try:
        openSockets()
        clients = {}
        print(f'Listening for connections on {IP}:{PORT[0]} and {PORT[1]}...')
        client_count = 0
        while True:
            read_sockets, _, exception_sockets = select.select(sockets_list, [], sockets_list)
            for notified_socket in read_sockets:
                if notified_socket == sockets_list[0] or notified_socket == sockets_list[1]:
                    client_socket, client_address = sockets_list[client_count].accept()
                    client_count = (client_count + 1) % 2
                    sockets_list.append(client_socket)
                    clients[client_socket] = client_socket
                    print('Accepted new connection from: {}:{}'.format(*client_address))
                else:
                    message = receiveMessage(notified_socket)
                    if message is False:
                        continue
                    message = fixString(message)
                    for client_socket in clients:
                        if client_socket != notified_socket:
                            if message != "N/A":
                                client_socket.send(bytes(message, "utf-8"))
            for notified_socket in exception_sockets:
                sockets_list.remove(notified_socket)
                del clients[notified_socket]
            time.sleep(1)
    except socket.timeout:
        for i in sockets_list:
            i.close()
        os.remove(PIDFILE)
        sys.exit()
    except Exception as e:
        for i in sockets_list:
            i.close()
        err_details = 'Error in line {}: {}: {}'.format(sys.exc_info()[-1].tb_lineno, type(e).__name__, e)
        os.remove(PIDFILE)
        print("Exception: {}".format(err_details))
        sys.exit()

if __name__ == "__main__":
    if runCheck():
        main()
    else:
        pass
How might I set it up so that the Python script deletes the directories it creates in /tmp/, or, better, doesn't create them in the first place? Any help would be greatly appreciated.

As it turns out, it was PyInstaller that was generating these directories. The documentation states that a one-file executable unpacks itself into a temporary _MEIxxxxxx directory at startup and is supposed to delete it again on exit, but for some reason it didn't.
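If stale _MEIxxxxxx directories are still left behind (for example when the process is killed before it can clean up after itself), one option is to sweep them from inside the script itself instead of keeping a separate bash job per device. Below is a minimal sketch, assuming the default /tmp location and a five-minute age threshold; sys._MEIPASS is only set when running from a PyInstaller bundle, and the running bundle's own directory is skipped:

import glob, os, shutil, sys, time

def clean_stale_mei(tmp_dir="/tmp", max_age_seconds=300):
    # Extraction directory of the bundle currently running, if any.
    own_dir = getattr(sys, "_MEIPASS", None)
    for path in glob.glob(os.path.join(tmp_dir, "_MEI*")):
        if own_dir and os.path.abspath(path) == os.path.abspath(own_dir):
            continue  # never delete the directory this process is running from
        try:
            if time.time() - os.path.getmtime(path) > max_age_seconds:
                shutil.rmtree(path, ignore_errors=True)
        except OSError:
            pass  # another process may have removed it in the meantime

PyInstaller also offers a --runtime-tmpdir option to relocate the extraction directory, which can make such a sweep easier to scope per device.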

Related

Python3 pika channel.basic_consume() causing MySQL too many connections

I have been using pika to connect to RabbitMQ and consume messages. Once I start the script in the Ubuntu production environment it works as expected, but it keeps opening MySQL connections and never closes them, eventually ending in "Too many connections" on the MySQL server.
I would appreciate any recommendations on the code below, as I cannot understand what is going wrong. Thank you in advance.
The flow is the following:
Start pika on Python 3
Subscribe to a channel and wait for messages
In the callback, do various validations and save or update data in MySQL
The symptom is shown in the Ubuntu htop screenshot referenced at the end of the question: new MySQL connections keep being added at the top of the list.
Pika version = 0.13.0
For MySQL I use pymysql.
Pika script:
def main():
    credentials = pika.PlainCredentials(tunnel['queue']['name'], tunnel['queue']['password'])
    while True:
        try:
            cp = pika.ConnectionParameters(
                host=tunnel['queue']['host'],
                port=tunnel['queue']['port'],
                credentials=credentials,
                ssl=tunnel['queue']['ssl'],
                heartbeat=600,
                blocked_connection_timeout=300
            )
            connection = pika.BlockingConnection(cp)
            channel = connection.channel()

            def callback(ch, method, properties, body):
                if 'messageType' in properties.headers:
                    message_type = properties.headers['messageType']
                    if events.get(message_type):
                        result = Descriptors._reflection.ParseMessage(events[message_type]['decode'], body)
                        if result:
                            result = protobuf_to_dict(result)
                            model.write_response(external_response=result, message_type=message_type)
                    else:
                        app_log.warning('Message type not in allowed list = ' + str(message_type))
                        app_log.warning('continue listening...')

            channel.basic_consume(callback, queue=tunnel['queue']['name'], no_ack=True)
            try:
                channel.start_consuming()
            except KeyboardInterrupt:
                channel.stop_consuming()
                connection.close()
                break
        except pika.connection.exceptions.ConnectionClosed as e:
            app_log.error('ConnectionClosed :: %s' % str(e))
            continue
        except pika.connection.exceptions.AMQPChannelError as e:
            app_log.error('AMQPChannelError :: %s' % str(e))
            continue
        except Exception as e:
            app_log.error('Connection was closed, retrying... %s' % str(e))
            continue

if __name__ == '__main__':
    main()
Inside the script I have a model that does the inserts or updates in the database; code below:
def write_response(self, external_response, message_type):
    table_name = events[message_type]['table_name']
    original_response = external_response[events[message_type]['response']]
    if isinstance(original_response, list):
        external_response = []
        for o in original_response:
            record = self.map_keys(o, message_type, events[message_type].get('values_fix', {}))
            external_response.append(self.validate_fields(record))
    else:
        external_response = self.map_keys(original_response, message_type, events[message_type].get('values_fix', {}))
        external_response = self.validate_fields(external_response)
    if not self.mysql.open:
        self.mysql.ping(reconnect=True)
    with self.mysql.cursor() as cursor:
        if isinstance(original_response, list):
            for e in external_response:
                id_name = events[message_type]['id_name']
                filters = {id_name: e[id_name]}
                self.event(
                    cursor=cursor,
                    table_name=table_name,
                    filters=filters,
                    external_response=e,
                    message_type=message_type,
                    event_id=e[id_name],
                    original_response=e  # not required here
                )
        else:
            id_name = events[message_type]['id_name']
            filters = {id_name: external_response[id_name]}
            self.event(
                cursor=cursor,
                table_name=table_name,
                filters=filters,
                external_response=external_response,
                message_type=message_type,
                event_id=external_response[id_name],
                original_response=original_response
            )
        cursor.close()
    self.mysql.close()
    return
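One thing worth noting about write_response() above: self.mysql.close() is only reached when nothing raises, so an exception inside the insert/update logic leaves the connection open. A minimal sketch of one way to guarantee the close, purely illustrative, wrapping the same logic in try/finally:

def write_response(self, external_response, message_type):
    # Reconnect if needed, exactly as above.
    if not self.mysql.open:
        self.mysql.ping(reconnect=True)
    try:
        with self.mysql.cursor() as cursor:
            # ... the same insert/update logic as in the method above ...
            pass
    finally:
        self.mysql.close()  # runs on success and on error alike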
On Ubuntu I use systemd to run the script and restart it in case something goes wrong; below is the systemd unit file:
[Unit]
Description=Pika Script
Requires=stunnel4.service
Requires=mysql.service
Requires=mongod.service
[Service]
User=user
Group=group
WorkingDirectory=/home/pika_script
ExecStart=/home/user/venv/bin/python pika_script.py
Restart=always
[Install]
WantedBy=multi-user.target
(Image from Ubuntu htop showing how MySQL connections keep being added to the list and are never closed.)
Error
tornado_mysql.err.OperationalError: (1040, 'Too many connections')
I have found the issue; posting it in case it helps somebody else.
The problem was that mysqld had gone into an infinite loop trying to build indexes for a specific database; it never succeeded, so it kept trying again and again. Once I found which database it was,
the solution was to drop and recreate that database, after which the mysqld process went back to normal and the infinite indexing loop disappeared as well.
I would say increasing the connection limit may solve your problem temporarily.
1st, find out why the application is not closing the connection after completing its task.
2nd, look for any slow queries/calls on the DB and fix them if there are any.
3rd, assuming there are no slow queries/calls on the DB and the application really does close the connection/thread immediately after completing the task, consider playing with "wait_timeout" on the MySQL side.
According to this answer, if you have MySQL 5.7 or 5.8:
It is worth knowing that if you run out of usable disc space on your
server partition or drive, that this will also cause MySQL to return
this error. If you're sure it's not the actual number of users
connected then the next step is to check that you have free space on
your MySQL server drive/partition.
From the same thread: you can inspect and increase the number of allowed MySQL connections.
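For completeness, here is a minimal sketch of inspecting the limit and the current usage with pymysql (the connection parameters are placeholders):

import pymysql

conn = pymysql.connect(host="localhost", user="root", password="secret")
try:
    with conn.cursor() as cursor:
        cursor.execute("SHOW VARIABLES LIKE 'max_connections'")
        print(cursor.fetchone())   # e.g. ('max_connections', '151')
        cursor.execute("SHOW STATUS LIKE 'Threads_connected'")
        print(cursor.fetchone())   # how many clients are connected right now
        # To raise the limit until the next server restart (needs the SUPER privilege):
        # cursor.execute("SET GLOBAL max_connections = 500")
finally:
    conn.close()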

Python Can't decode byte : Invalid start byte

So I'm building this socket application, and it works just fine on my computer. But when I start the server socket on another laptop, it just crashes with an invalid start byte error.
How do I encode things properly so that the program works on all laptops?
This is the error I get on the other laptops; on my laptop it works fine.
I have tried to change the encoding, but I'm just not quite sure where I have to change it.
import socket
import pickle

# Validate() and self.computerinfomation are defined elsewhere in the project.

class Listener:
    def __init__(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server_address = (socket.gethostbyname(socket.gethostname()), 10000)
        self.sock.bind(self.server_address)
        print(f"LISTENER : {str(self.server_address[0])} port {str(self.server_address[1])}")

    def listen(self):
        self.sock.listen(1)
        while True:
            print("Connection Open")
            print(" Waiting for connections")
            self.connection, self.client_address = self.sock.accept()
            try:
                print(f"Connection from {str(self.client_address)}")
                while True:
                    data = self.connection.recv(1024)
                    if data:
                        message = str(data)
                        if not "print" in message.lower():  # This just checks if the client wants to print system information from the server
                            Validate(message)  # this checks for a command the server has to do
                        else:
                            self.connection.sendall(pickle.dumps(self.computerinfomation))
                    else:
                        self.listen()
            except Exception as e:
                print(e)
I want it to work on other laptops as well, and I just can't see why it won't.
Furas came up with the solution.
I changed the
message = str(data)
to
message = str(data, encoding="utf-8")
I did the same on the client side
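For reference, the same fix can be written with bytes.decode, optionally with an error handler so a single undecodable byte doesn't crash the listener (the error-handling policy here is an assumption, not part of the original fix):

data = self.connection.recv(1024)
message = data.decode("utf-8", errors="replace")  # bad bytes become U+FFFD instead of raising UnicodeDecodeError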
Not going to lie, I just changed the encoding to utf-16.
Example:
df = pd.read_csv("C:/folders path/untitled.csv", encoding="utf-16")

Simultaneous input and output for network based messaging program

In Python, I am creating a message system where a client and server can send messages back and forth simultaneously. Here is my code for the client:
import threading
import socket

# Global variables
host = input("Server: ")
port = 9000
buff = 1024

# Create socket instance
s = socket.socket()

# Connect to server
s.connect( (host, port) )
print("Connected to server\n")

class Recieve(threading.Thread):
    def run(self):
        while True:  # Recieve loop
            r_msg = s.recv(buff).decode()
            print("\nServer: " + r_msg)

recieve_thread = Recieve()
recieve_thread.start()

while True:  # Send loop
    s_msg = input("Send message: ")
    if s_msg.lower() == 'q':  # Quit option
        break
    s.send( s_msg.encode() )

s.close()
I have a thread in the background to check for server messages and a looping input to send messages to the server. The problem arises when the server sends a message and the user's in-progress input is bounced up to make room for the server's message. I want the input to stay pinned to the bottom of the shell window, while the output is printed from the 2nd line up, leaving the first line alone. I have been told that you can use curses or Queues to do this, but I am not sure which one would be best in my situation, nor how to implement these modules in my project.
Any help would be appreciated. Thank you.
I want it so that the input stays pinned to the bottom of the shell
window, while the output is printed from the 2nd line up, leaving the
first line alone. I have been told that you can use curses
Here's a supplemented version of your client code using curses.
import threading
import socket

# Global variables
host = input("Server: ")
port = 9000
buff = 1024

# Create socket instance
s = socket.socket()

# Connect to server
s.connect( (host, port) )
print("Connected to server\n")

import sys
write = sys.stdout.buffer.raw.write

from curses import *
setupterm()
lines = tigetnum('lines')
change_scroll_region = tigetstr('csr')
cursor_up = tigetstr('cuu1')
restore_cursor = tigetstr('rc')
save_cursor = tigetstr('sc')

def pin(input_lines):  # protect input_lines at the bottom from scrolling
    write(save_cursor + \
          tparm(change_scroll_region, 0, lines-1-input_lines) + \
          restore_cursor)

pin(1)

class Recieve(threading.Thread):
    def run(self):
        while True:  # Recieve loop
            r_msg = s.recv(buff).decode()
            write(save_cursor+cursor_up)
            print("\nServer: " + r_msg)
            write(restore_cursor)

recieve_thread = Recieve()
recieve_thread.daemon = True
recieve_thread.start()

while True:  # Send loop
    s_msg = input("Send message: ")
    if s_msg.lower() == 'q':  # Quit option
        break
    s.send( s_msg.encode() )

pin(0)
s.close()
It changes the scrolling region to leave out the screen's bottom line, enters the scrolling region temporarily to output the server messages, and changes it back at the end.

Python 3.4 - How to 'run' another Python script continuously, how to pass HTTP GET / POST to a socket

This question is two-fold.
1. I need to run code for a socket server that's all defined and created in another .py file. Clicking Run in PyCharm works just fine, but if you exec() the file it just runs the bottom part of the code.
There are a few answers here but they are conflicting and for Python 2.
From what I can gather there are three ways:
- execfile(), which I think is Python 2 only.
- os.system() (but I've seen it said that handing this off to the OS isn't the right approach for this)
- subprocess.Popen (unsure how to use this either)
I need this to run in the background; it is used to create threads for sockets for the recv portion of the overall program and to listen on those ports so I can send commands to a router.
This is the complete code in question:
import sys
import socket
import threading
import time

QUIT = False

class ClientThread(threading.Thread):  # Class that implements the client threads in this server
    def __init__(self, client_sock):  # Initialize the object, save the socket that this thread will use.
        threading.Thread.__init__(self)
        self.client = client_sock

    def run(self):  # Thread's main loop. Once this function returns, the thread is finished and dies.
        global QUIT  # Need to declare QUIT as global, since the method can change it
        done = False
        cmd = self.readline()  # Read data from the socket and process it
        while not done:
            if 'quit' == cmd:
                self.writeline('Ok, bye. Server shut down')
                QUIT = True
                done = True
            elif 'bye' == cmd:
                self.writeline('Ok, bye. Thread closed')
                done = True
            else:
                self.writeline(self.name)
            cmd = self.readline()
        self.client.close()  # Make sure socket is closed when we're done with it
        return

    def readline(self):  # Helper function, reads up to 1024 chars from the socket, and returns them as a string
        result = self.client.recv(1024)
        if result is not None:  # All letters in lower case and without any end-of-line markers
            result = result.strip().lower().decode('ascii')
        return result

    def writeline(self, text):  # Helper func, writes the given string to the socket with an end-of-line marker at the end
        self.client.send(text.strip().encode("ascii") + b'\n')

class Server:  # Server class. Opens up a socket and listens for incoming connections.
    def __init__(self):  # Every time a new connection arrives, a new thread object is created and
        self.sock = None  # the processing of the connection is deferred to it
        self.thread_list = []

    def run(self):  # Server main loop: Creates the server (incoming) socket, listens > creates thread to handle it
        all_good = False
        try_count = 0  # Attempt to open the socket
        while not all_good:
            if 3 < try_count:  # Tried more than 3 times without success, maybe the port is in use by another program
                sys.exit(1)
            try:
                self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # Create the socket
                port = 80
                self.sock.bind(('127.0.0.1', port))  # Bind to the interface and port we want to listen on
                self.sock.listen(5)
                all_good = True
                break
            except socket.error:
                print('Socket connection error... Waiting 10 seconds to retry.')
                del self.sock
                time.sleep(10)
                try_count += 1

        print('Server is listening for incoming connections.')
        print('Try to connect through the command line with:')
        print('telnet localhost 80')
        print('and then type whatever you want.')
        print()
        print("typing 'bye' finishes the thread. but not the server",)
        print("eg. you can quit telnet, run it again and get a different ",)
        print("thread name")
        print("typing 'quit' finishes the server")

        try:
            while not QUIT:
                try:
                    self.sock.settimeout(0.500)
                    client = self.sock.accept()[0]
                except socket.timeout:
                    time.sleep(1)
                    if QUIT:
                        print('Received quit command. Shutting down...')
                        break
                    continue
                new_thread = ClientThread(client)
                print('Incoming Connection. Started thread ',)
                print(new_thread.getName())
                self.thread_list.append(new_thread)
                new_thread.start()
                for thread in self.thread_list:
                    if not thread.isAlive():
                        self.thread_list.remove(thread)
                        thread.join()
        except KeyboardInterrupt:
            print('Ctrl+C pressed... Shutting Down')
        except Exception as err:
            print('Exception caught: %s\nClosing...' % err)
        for thread in self.thread_list:
            thread.join(1.0)
        self.sock.close()

if "__main__" == __name__:
    server = Server()
    server.run()
    print('Terminated')
Notes:
This is created in Python 3.4
I use Pycharm as my IDE.
One part of a whole.
2. So I'm creating a lightning detection system and this is how I expect it to be done:
- Listen to the port on the router forever
The above is done, but the issue with this is described in question 1.
- Pull numbers from a text file for sending text message
Completed this also.
- Send http get / post to port on the router
The issue with this is that I'm unsure how the router will act if I send this in binary form; I suspect it won't matter, since the input commands for sending over GSM are specific. Some clarification may be needed at some point.
- Receive the reply from the router and handle exceptions
- Listen for relay trip for alarm on severe or close strike warning.
- If tripped, send messages to phones in storage from text file
This would be the http get / post that's sent.
- Wait for reply from router to indicate messages have been sent, exception handle if it's not the case
- Go back to start
There are a few issues I'd like some background knowledge on, which is proving hard to find via Google and in the answers here on Stack.
How do I grab the received data from the router from another process running in another file? I guess I could write it into a text file and read that data back, but I'd rather not.
How to multi-process and which method to use.
How to send an HTTP GET / POST to the socket on the router; the request needed according to the router manual is as follows: e.g. "http://192.168.1.1/cgi-bin/sms_send?number=0037061212345&text=test"
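For what it's worth, a GET in the format quoted from the manual can be sent with the standard library alone; a minimal sketch using the example URL above (the number and text are the manual's placeholders, and the timeout is an assumption):

from urllib.parse import urlencode
from urllib.request import urlopen

params = urlencode({"number": "0037061212345", "text": "test"})
url = "http://192.168.1.1/cgi-bin/sms_send?" + params
with urlopen(url, timeout=10) as response:
    # The router's reply body (e.g. an OK/ERROR string), to be checked by the caller.
    print(response.read().decode("utf-8", errors="replace"))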
Notes: Using Sockets, threading, sys and time on Python 3.4/Pycharm IDE.
Lightning detector used is LD-250 with RLO Relay attached.
RUT500 Teltonica router used.
Any direction/comments, errors spotted, anything I'm drastically missing would be greatly appreciated! Thank you very much in advance :D constructive criticism is greatly encouraged!
Okay, so for the first part, none of the options suggested in the OP were my answer. Running the script as-is from os.system() or exec() without declaring a new socket object just ran it from __name__, which essentially just printed out "Terminated". Getting around this was simple: as everything was already put into classes, all I had to do was create a new thread. This is how it was done:
import Socketthread2
new_thread = Socketthread2.Server() # Effectively declaring a new server class object.
new_thread.run()
This allowed the script to run from the beginning by initialising the code in Socketthread2 from the start, which also pulls in the ClientThread class, so that was run too. Running this at the start of the parent program allowed it to run in the background, and the parent then continued with its own code while the rest of the script stayed active.
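Note that calling new_thread.run() directly executes Server.run() in the calling thread, so it blocks until the server loop exits. If the parent program needs to keep doing other work at the same time, one option (a sketch, assuming the module and class names above) is to put the server loop on a daemon thread:

import threading
import Socketthread2

server = Socketthread2.Server()
# daemon=True so the background server doesn't keep the parent process alive on exit
server_thread = threading.Thread(target=server.run, daemon=True)
server_thread.start()

# ... the parent program carries on here while the server accepts connections ...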

PyQt threaded ftp: Cannot queue arguments of type 'QUrlInfo'

I need to download all the files in an FTP directory. I don't know what files are in the dir at the time my program starts, so I want the program to list the contents of the dir and then download each of the files it finds.
I've made a little demo script that downloads a file from FTP and updates a progress bar while doing so. The downloading and progress-bar updating work fine; however, I'm now trying to do the next step, which is to list the contents of some dir and download the files, and that part isn't working.
At the moment, I'm just trying to do a list on any directory & print the results to the command line.
When I try to do a listInfo.connect, I get an error message:
QObject::connect: Cannot queue arguments of type 'QUrlInfo'
(Make sure 'QUrlInfo' is registered using qRegisterMetaType().)
... as I understand it, qRegisterMetaType is not something that can be done in PyQt and is also a sign of a fundamental problem, and herein lies my problem. I can do a commandFinished.connect and dataTransferProgress.connect without issue, but listInfo.connect doesn't seem to work (as I would expect it to).
Any ideas how to correct this?
Here's some example code (pardon the length). I would like to be able to print the listed files/urls from the function "lister". Ultimately, I'd like to then have that function formulate new urls & pass them back to connectAndDownload to download each of the files (of course, this will require modifications to connectAndDownload, but we're not there yet).
#!/usr/bin/env python
from PyQt4 import QtCore, QtGui, QtNetwork

class FtpWorker(QtCore.QThread):
    dataTransferProgress = QtCore.pyqtSignal(int, int)

    def __init__(self, url, parent=None):
        super(FtpWorker, self).__init__(parent)
        self.ftp = None
        self.outFile = None
        self.get_index = -1
        self.url = url

    def run(self):
        self.connectAndDownload()
        self.exec_()

    def ftpCommandFinished(self, command_index, error):
        print "-----commandfinished-----", command_index
        if self.ftp.currentCommand == QtNetwork.QFtp.ConnectToHost:
            if error:
                QtGui.QMessageBox.information(self, "FTP",
                    "Unable to connect to the FTP server at %s. Please "
                    "check that the host name is correct.")
                return
        if self.ftp.currentCommand == QtNetwork.QFtp.Get or command_index == self.get_index:
            if error:
                print "closing outfile prematurely"
                self.outFile.close()
                self.outFile.remove()
            else:
                print "closed outfile normally"
                self.outFile.close()
            self.outFile = None

    def ftpDataTransferProgress(self, a, b):
        self.dataTransferProgress.emit(a, b)

    def lister(self, url_info):
        print url_info.name()

    def connectAndDownload(self):
        if self.ftp:
            self.ftp.abort()
            self.ftp.deleteLater()
            self.ftp = None
            return

        self.ftp = QtNetwork.QFtp()
        self.ftp.commandFinished.connect(self.ftpCommandFinished)
        self.ftp.listInfo.connect(self.lister)
        self.ftp.dataTransferProgress.connect(self.ftpDataTransferProgress)

        url = QtCore.QUrl(self.url)
        print "connect", self.ftp.connectToHost(url.host(), url.port(21))
        print "login", self.ftp.login(url.userName(), url.password())
        print "Connecting to FTP server %s..." % str(url.host())

        import os
        fileName = os.path.basename(self.url)
        if QtCore.QFile.exists(fileName):
            print "removing '%s'" % fileName
            os.unlink(fileName)

        self.outFile = QtCore.QFile(fileName)
        if not self.outFile.open(QtCore.QIODevice.WriteOnly):
            QtGui.QMessageBox.information(self, "FTP",
                "Unable to save the file %s: %s." % (fileName, self.outFile.errorString()))
            self.outFile = None
            return

        tmp = self.ftp.list()
        print "starting list", tmp
        print "ftp.get(%s,%s)" % (str(url.path()), self.outFile)
        self.get_index = self.ftp.get(url.path(), self.outFile)

class AddProgresWin(QtGui.QWidget):
    def __init__(self, parent=None):
        super(AddProgresWin, self).__init__(parent)

        self.thread = FtpWorker(url="ftp://ftp.qt.nokia.com/developerguides/qteffects/screenshot.png")
        self.thread.dataTransferProgress.connect(self.updateDataTransferProgress)

        self.nameLabel = QtGui.QLabel("0.0%")
        self.nameLine = QtGui.QLineEdit()
        self.progressbar = QtGui.QProgressBar()

        mainLayout = QtGui.QGridLayout()
        mainLayout.addWidget(self.progressbar, 0, 0)
        mainLayout.addWidget(self.nameLabel, 0, 1)
        self.setLayout(mainLayout)
        self.setWindowTitle("Processing")

        self.thread.start()

    def updateDataTransferProgress(self, readBytes, totalBytes):
        self.progressbar.setMaximum(totalBytes)
        self.progressbar.setValue(readBytes)
        perct = "%2.1f%%" % (float(readBytes) / float(totalBytes) * 100.0)
        self.nameLabel.setText(perct)

if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)  # note: sys.argv, not sys.path
    pbarwin = AddProgresWin()
    pbarwin.show()
    sys.exit(app.exec_())
It appears that this is a Qt bug. From Phil Thompson: "It's arguably a Qt bug - it should call qRegisterMetaType() itself for any types used in signal arguments."
It was also brought to my attention that for this purpose, there's no need to thread, as QFtp is asynchronous & comes with its own signals. I've reimplemented the ftp.list() (and associated signal handling) in the main thread & all is well.
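A minimal sketch of that main-thread approach, stripped down to a console script (host, path and slot names are illustrative; QFtp only exists in Qt 4 / PyQt4):

from PyQt4 import QtCore, QtNetwork

def on_list_info(url_info):
    print("entry: %s" % url_info.name())  # fires once per directory entry

def on_done(error):
    app.quit()  # stop the event loop once the queued commands finish

app = QtCore.QCoreApplication([])
ftp = QtNetwork.QFtp()
ftp.listInfo.connect(on_list_info)
ftp.done.connect(on_done)
ftp.connectToHost("ftp.qt.nokia.com", 21)
ftp.login()
ftp.list("/developerguides/qteffects")
app.exec_()

Because QFtp queues its commands and emits listInfo from the event loop, no extra thread (and hence no queued QUrlInfo argument) is needed.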
