hourly action resetter with threading - multithreading

this is some code I have written based off basic knowledge of threading I acquired. What I am trying to do is limit the actions hourly so it doesn't spam.
import datetime
import time
from threading import Thread
the_Minute = str(datetime.datetime.now().time())[3:5]
action_Limit = 0
def updater():
global the_Minute
while True:
the_Minute=str(datetime.datetime.now().time())[3:5]
#updates the minute value
def resetter():
global action_Limit
global the_Minute
while True:
if the_Minute=='00':
action_Limit=0
time.sleep(30)
def performer():
global action_Limit
global the_Minute
while the_Minute!='00' and action_Limit<100:
#perform actions here
action_Limit+=1
print(action_Limit)
time.sleep(1)
updater_Thread = Thread(target=updater)
resetter_Thread = Thread(target=resetter)
performer_Thread = Thread(target=performer)
updater_Thread.start
performer_Thread.start
resetter_Thread.start
When I run this, nothing happens, but I also don't receive any errors. Could anyone tell me how I could make it work, or point me to some resources to help? Thank you for your time.

Related

queue.get(block=True) while start_background_task (flask-socketio) is running and doing queue.put()

I have an issue related to queue using a background task that never ends (continuously run to grab real-time data.
What I want to achieve:
Starting server via flask-socketio (eventlet),
monkey_patch(),
Using start_background_task, run a function from another file that grabs data in real time,
While this background task is running (indefinitely), storing incoming data in a queue via queue.put(),
Always while this task is running, from the main program watching for new data in the queue and processing them, meaning here socketio.emit().
What works: my program works well if, in the background task file, the while loop ends (while count < 100: for instance). In this case, I can access the queue from the main file and emit data.
What doesn't work: if this while loop is now while True:, the program blocks somewhere, I can't access the queue from the main program as it seems that it waits until the background task returns or stops.
So I guess I'm missing something here... so if you guys can help me with that, or give me some clues, that would be awesome.
Here some relevant parts of the code:
main.py
from threading import Thread
from threading import Lock
from queue import Queue
from get_raw_program import get_raw_data
from flask import Flask, send_from_directory, Response, jsonify, request, abort
from flask_socketio import SocketIO
import eventlet
eventlet.patcher.monkey_patch(select=True, socket=True)
app = Flask(__name__, static_folder=static_folder, static_url_path='')
app.config['SECRET_KEY'] = 'secret_key'
socketio = SocketIO(app, binary=True, async_mode="eventlet", logger=True, engineio_logger=True)
thread = None
thread_lock = Lock()
data_queue = Queue()
[...]
#socketio.on('WebSocket_On')
def grab_raw_data(test):
global thread
with thread_lock:
if thread is None:
socketio.emit('raw_data', {'msg': 'Thread is None:'})
socketio.emit('raw_data', {'msg': 'Starting Thread... '})
thread = socketio.start_background_task(target=get_raw_data(data_queue, test['mode']))
while True:
if not data_queue.empty():
data = data_queue.get(block=True, timeout=0.05)
socketio.emit('raw_data', {'msg': data})
socketio.sleep(0.0001)
get_raw_program.py (which works, can access queue from main.py)
def get_raw_data(data_queue, test):
count = 0
while count < 100:
data.put(b'\xe5\xce\x04\x00\xfe\xd2\x04\x00')
time.sleep(0.001)
count += 1
get_raw_program.py (which DOESN'T work, can't access queue from main.py)
def get_raw_data(data_queue, test):
count = 0
while True:
data.put(b'\xe5\xce\x04\x00\xfe\xd2\x04\x00')
time.sleep(0.001)
count += 1
I tried with regular Thread instead of start_background_task, and it works well. Thanks again for your help, greatly appreciated :-)

How to terminate thread by given name

I founded a nice package that is called kthread which simplify the threading in python. I have also found out that it is possible to name give a thread by its name etc:
import sys
import time
import kthread
def func():
try:
while True:
time.sleep(0.2)
finally:
sys.stdout.write("Greetings from Vice City!\n")
sys.stdout.flush()
t = kthread.KThread(target=func, name="KillableThread1")
t.start()
t.is_alive()
t.terminate()
t.is_alive()
Instead of doing t.terminate() I wonder if there is a possibility to remove a thread by given name?
The following piece of code might help.
thread_dict = {}
t1 = kthread.KThread(target=func, name="KillableThread1")
t2 = kthread.KThread(target=func, name="KillableThread2")
thread_dict[t1.name] = t1
thread_dict[t2.name] = t2
After that when you want to kill a thread then do the following:
thread_dict["KillableThread1"].terminate()

Launching parallel tasks: Subprocess output triggers function asynchronously

The example I will describe here is purely conceptual so I'm not interested in solving this actual problem.
What I need to accomplish is to be able to asynchronously run a function based on a continuous output of a subprocess command, in this case, the windows ping yahoo.com -t command and based on the time value from the replies I want to trigger the startme function. Now inside this function, there will be some more processing done, including some database and/or network-related calls so basically I/O processing.
My best bet would be that I should use Threading but for some reason, I can't get this to work as intended. Here is what I have tried so far:
First of all I tried the old way of using Threads like this:
import subprocess
import re
import asyncio
import time
import threading
def startme(mytime: int):
print(f"Mytime {mytime} was started!")
time.sleep(mytime) ## including more long operation functions here such as database calls and even some time.sleep() - if possible
print(f"Mytime {mytime} finished!")
myproc = subprocess.Popen(['ping', 'yahoo.com', '-t'], shell=True, stdout=subprocess.PIPE)
def main():
while True:
output = myproc.stdout.readline()
if myproc.poll() is not None:
break
myoutput = output.strip().decode(encoding="UTF-8")
print(myoutput)
mytime = re.findall("(?<=time\=)(.*)(?=ms\s)", myoutput)
try:
mytime = int(mytime[0])
if mytime < 197:
# startme(int(mytime[0]))
p1 = threading.Thread(target=startme(mytime), daemon=True)
# p1 = threading.Thread(target=startme(mytime)) # tried with and without the daemon
p1.start()
# p1.join()
except:
pass
main()
But right after startme() fire for the first time, the pings stop showing and they are waiting for the startme.time.sleep() to finish.
I did manage to get this working using the concurrent.futures's ThreadPoolExecutor but when tried to replace the time.sleep() with the actual database query I found out that my startme() function will never complete so no Mytime xxx finished! message is ever shown nor any database entry is being made.
import sqlite3
import subprocess
import re
import asyncio
import time
# import threading
# import multiprocessing
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import ProcessPoolExecutor
conn = sqlite3.connect('test.db')
c = conn.cursor()
c.execute(
'''CREATE TABLE IF NOT EXISTS mytable (id INTEGER PRIMARY KEY, u1, u2, u3, u4)''')
def startme(mytime: int):
print(f"Mytime {mytime} was started!")
# time.sleep(mytime) ## including more long operation functions here such as database calls and even some time.sleep() - if possible
c.execute("INSERT INTO mytable VALUES (null, ?, ?, ?, ?)",(1,2,3,mytime))
conn.commit()
print(f"Mytime {mytime} finished!")
myproc = subprocess.Popen(['ping', 'yahoo.com', '-t'], shell=True, stdout=subprocess.PIPE)
def main():
while True:
output = myproc.stdout.readline()
myoutput = output.strip().decode(encoding="UTF-8")
print(myoutput)
mytime = re.findall("(?<=time\=)(.*)(?=ms\s)", myoutput)
try:
mytime = int(mytime[0])
if mytime < 197:
print(f"The time {mytime} is low enought to call startme()" )
executor = ThreadPoolExecutor()
# executor = ProcessPoolExecutor() # I did tried using process even if it's not a CPU-related issue
executor.submit(startme, mytime)
except:
pass
main()
I did try using asyncio but I soon realized this is not the case but I'm wondering if I should try aiosqlite
I also thought about using asyncio.create_subprocess_shell and run both as parallel subprocesses but can't think of a way to wait for a certain string from the ping command that would trigger the second script.
Please note that I don't really need a return from the startme() function and the ping command example is conceptually derived from the mitmproxy's mitmdump output command.
The first code wasn't working as I did a stupid mistake when creating the thread so p1 = threading.Thread(target=startme(mytime)) does not take the function with its arguments but separately like this p1 = threading.Thread(target=startme, args=(mytime,))
The reason why I could not get the SQL insert statement to work in my second code was this error:
SQLite objects created in a thread can only be used in that same thread. The object was created in thread id 10688 and this is thread id 17964
that I didn't saw until I wrapped my SQL statement into a try/except and captured the error. So I needed to make the SQL database connection inside my startme() function
The other asyncio stuff was just nonsense and cannot be applied to the current issue here.

Multithreading in Python within a for loop

Let's say I have a program in Python which looks like this:
import time
def send_message_realtime(s):
print("Real Time: ", s)
def send_message_delay(s):
time.sleep(5)
print("Delayed Message ", s)
for i in range(10):
send_message_realtime(str(i))
time.sleep(1)
send_message_delay(str(i))
What I am trying to do here is some sort of multithreading, so that the contents of my main for loop continues to execute without having to wait for the delay caused by time.sleep(5) in the delayed function.
Ideally, the piece of code that I am working upon looks something like below. I get a message from some API endpoint which I want to send to a particular telegram channel in real-time(paid subscribers), but I also want to send it to another channel by delaying it exactly 10 minutes or 600 seconds since they're free members. The problem I am facing is, I want to keep sending the message in real-time to my paid subscribers and kind of create a new thread/process for the delayed message which runs independently of the main while loop.
def send_message_realtime(my_realtime_message):
telegram.send(my_realtime_message)
def send_message_delayed(my_realtime_message):
time.sleep(600)
telegram.send(my_realtime_message)
while True:
my_realtime_message = api.get()
send_message_realtime(my_realtime_message)
send_message_delayed(my_realtime_message)
I think something like ThreadPoolExecutor does what you are look for:
import time
from concurrent.futures.thread import ThreadPoolExecutor
def send_message_realtime(s):
print("Real Time: ", s)
def send_message_delay(s):
time.sleep(5)
print("Delayed Message ", s)
def work_to_do(i):
send_message_realtime(str(i))
time.sleep(1)
send_message_delay(str(i))
with ThreadPoolExecutor(max_workers=4) as executor:
for i in range(10):
executor.submit(work_to_do, i)
The max_workers would be the number of parallel messages that could have at a given moment in time.
Instead of a multithread solution, you can also use a multiprocessing solution, for instance
from multiprocessing import Pool
...
with Pool(4) as p:
print(p.map(work_to_do, range(10)))

my python3 threading prog didnt work with fifo-queue

That's my code:
import threading
import queue
qq=queue.Queue(10)
def x11grab(n):
print('haha')
while True:
a='abcd'+str(n)
n+=1
qq.put(a)
print('put queue:',a)
def rtpsend():
while True:
s=qq.get()
head=s[:4]
body=s[4:]
print('head',head)
print('body',body)
t1=threading.Thread(target=x11grab,args=(1,))
t2=threading.Thread(target=rtpsend)
t1.start
t2.start
I wanna x11grab() function put string 'abcd1','abcd2'... into queue,and rtpsend() function get the string from queue, and display it. It's a demo, but it didn't work. I think your advice could be helpful.:-)
You never start your threads! change
t1.start
t2.start
to
t1.start()
t2.start()

Resources