Why does my daemon process terminate? - Linux

I use python-daemon for a home project, but I don't understand why the daemon terminates after about a day of running.
This is my code:
import requests
import time
import subprocess
import os
import lockfile
import daemon
import logging
import signal
import sys

class MyDaemon:
    def run(self):
        while True:
            try:
                self.check_updates()
                time.sleep(5)
            except ConnectionError as e:
                self.log.error('Connection Error')
                time.sleep(120)
            except (OSError, IOError) as e:
                self.log.error('I/O Error')
                time.sleep(120)
            except Exception as e:
                self.log.error('Exception: {}'.format(e.__class__))
                time.sleep(120)

if __name__ == "__main__":
    bot = MyDaemon()
    context = daemon.DaemonContext(
        working_directory=bot.path,
        umask=0o002,
        pidfile=lockfile.FileLock('/tmp/mydaemon.pid'),
    )
    # exit()
    if context.pidfile.is_locked():
        exit('daemon is running now!')
    # context.signal_map = {signal.SIGHUP: 'terminate',
    #                       signal.SIGUSR1: bot.reload_bot(),
    #                       signal.SIGTERM: bot.exit_bot(),
    #                       }
    context.files_preserve = [bot.fh.stream]  # keep the log file handle open across daemonization
    with context:
        bot.run()
This is a private Telegram bot. check_updates(self) sends a request and processes the server's response; it works fine when run normally in a shell (tmux on the server). In run() I catch all exceptions (at least I think so :) ), but this doesn't work as a daemon. Where am I wrong?
Thanks!
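One way to narrow this down is to log the full traceback rather than only the exception class, so the log records whatever actually escapes the loop. A minimal debugging sketch of run() with that change, assuming self.log is a standard logging.Logger:

def run(self):
    while True:
        try:
            self.check_updates()
            time.sleep(5)
        except Exception:
            # exc_info=True writes the full traceback to the log,
            # which usually reveals why a daemon dies after hours of uptime
            self.log.error('Unhandled exception in run()', exc_info=True)
            time.sleep(120)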

Related

How do I call an async function in a new process using multiprocessing? [duplicate]

This question already has answers here: FastAPI runs api-calls in serial instead of parallel fashion (2 answers). Closed 11 months ago.
I created a server that waits for a webhook signal. When a signal arrives, it creates a new process to run the loop() function, and inside loop() I want to call printmessage() asynchronously, so the loop moves on to the next line without waiting for printmessage() to finish. Instead I get the errors below. How do I resolve this?
# main.py
import time
from fastapi import Request, FastAPI
import multiprocessing as mp
import uvicorn
import asyncio

async def printmessage(fruit):
    print(fruit)
    time.sleep(5)

async def loop(fruit):
    while True:
        task = asyncio.create_task(printmessage(fruit))
        time.sleep(1)

fruit = "apple"

if __name__ == '__main__':
    print("PROGRAM LAUNCH...")
    print("WEBHOOK RECEIVE READY...")

app = FastAPI()

@app.post("/webhook")
async def webhook(request: Request):
    print("WEBHOOK RECEIVED")
    p = mp.Process(target=loop, args=[fruit])
    p.start()
    print('done')
    return 'WEBHOOK RECEIVED'
The intended output is apple printed every second.
ERRORS:
RuntimeWarning: coroutine 'loop' was never awaited
self._target(*self._args, **self._kwargs)
RuntimeWarning: Enable tracemalloc to get the object allocation traceback
I tried the following way to avoid the errors but there is no output at all:
# main.py
import time
from fastapi import Request, FastAPI
import multiprocessing as mp
import uvicorn
import asyncio

async def printmessage(fruit):
    print(fruit)
    time.sleep(5)

async def loop(fruit):
    while True:
        task = asyncio.create_task(printmessage(fruit))
        time.sleep(1)

def preloop(fruit):
    asyncio.run(loop(fruit))

fruit = "apple"

if __name__ == '__main__':
    print("PROGRAM LAUNCH...")
    print("WEBHOOK RECEIVE READY...")

app = FastAPI()

@app.post("/webhook")
async def webhook(request: Request):
    print("WEBHOOK RECEIVED")
    p = mp.Process(target=preloop, args=[fruit])
    p.start()
    print('done')
    return 'WEBHOOK RECEIVED'
Here is how you can call an async function in a new process using multiprocessing.
In this code, each request to /webhook creates a new process, which prints apple every 5 seconds.
from __future__ import annotations

import asyncio
from multiprocessing import Process

from fastapi import FastAPI

app = FastAPI()
process_pool: list[Process] = []

async def print_message(fruit):
    print(fruit)

async def loop(fruit):
    while True:
        await print_message(fruit)
        await asyncio.sleep(5)

def run_loop(fruit):
    # Process targets must be plain callables; asyncio.run() gives the
    # child process its own event loop to drive the coroutine.
    asyncio.run(loop(fruit))

@app.get("/webhook")
async def webhook():
    print("WEBHOOK RECEIVED")
    fruit = "apple"
    process = Process(target=run_loop, args=(fruit,))
    process_pool.append(process)
    process.start()
    print('done')
    return 'WEBHOOK RECEIVED'

@app.on_event("shutdown")
async def shutdown_event():
    for process in process_pool:
        process.kill()
    for process in process_pool:
        while process.is_alive():
            continue
        process.close()

if __name__ == '__main__':
    print("PROGRAM LAUNCH...")
    print("WEBHOOK RECEIVE READY...")
    # typically started with e.g. `uvicorn main:app` (filename assumed)

How to run Flask SocketIO in a thread?

I'm trying to run Flask SocketIO in a thread using Python 3 and I cannot get it to work.
My code never reaches my while loop.
How can I run it in a thread?
import threading
from flask import Flask, render_template, request, redirect, url_for
from flask_socketio import SocketIO

app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'

# turn the flask app into a socketio app
socketio = SocketIO(app)

@app.route("/")
def index():
    return render_template('index.html', **templateData)

if __name__ == "__main__":
    threading.Thread(target=socketio.run(app), kwargs=dict(debug=False, use_reloader=False, host='0.0.0.0', port=5000)).start()
    sleep(2)
    while True:
        try:
            Print("Hello I'm in a while loop")
        except KeyboardInterrupt:
            sys.exit()
You should pass socketio.run as the target and app as an argument:
threading.Thread(
    target=socketio.run,
    args=(app,),
    kwargs=dict(debug=False, use_reloader=False, host='0.0.0.0', port=5000)
).start()
It also looks like you forgot to import sleep from time:
from time import sleep
And please note that templateData is not defined in your code.
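Putting those fixes together, a minimal sketch (templateData is just a placeholder dict here, and an index.html template is assumed to exist under ./templates):

import threading
from time import sleep

from flask import Flask, render_template
from flask_socketio import SocketIO

app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)

templateData = {}  # placeholder context for the template

@app.route("/")
def index():
    return render_template('index.html', **templateData)

if __name__ == "__main__":
    # recent Flask-SocketIO releases may additionally require
    # allow_unsafe_werkzeug=True when using the Werkzeug dev server like this
    threading.Thread(
        target=socketio.run,
        args=(app,),
        kwargs=dict(debug=False, use_reloader=False, host='0.0.0.0', port=5000),
        daemon=True,
    ).start()
    sleep(2)
    while True:
        try:
            print("Hello, I'm in a while loop")
            sleep(1)
        except KeyboardInterrupt:
            break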

Can't stop aiohttp websocket server

I can't cancel my aiohttp websocket server from within the application. I want to stop the server and shut down when I receive a "cancel" string from the client. I do receive it, and my coroutine (websocket_handler) finishes, but three coroutines from the aiohttp library keep running.
Of course, I could invoke asyncio.get_event_loop().stop() at the end of my coroutine, but is there a graceful way to stop the aiohttp server?
As you can see from my code, I have tried Application().on_shutdown.append(), but it didn't help.
What is the right way?
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import asyncio
import signal
import weakref

import aiohttp.web
from aiohttp import ClientConnectionError, WSCloseCode

# This restores the default Ctrl+C signal handler, which just kills the process
# https://stackoverflow.com/questions/27480967/why-does-the-asyncios-event-loop-suppress-the-keyboardinterrupt-on-windows
signal.signal(signal.SIGINT, signal.SIG_DFL)

HOST = os.getenv('HOST', 'localhost')
PORT = int(os.getenv('PORT', 8881))

async def testhandle(request):
    # Coroutine handling the HTTP request at "http://127.0.0.1:8881/test"
    print("server: into testhandle()")
    return aiohttp.web.Response(text='Test handle')

async def websocket_handler(request):
    # Coroutine handling the websocket request at "http://127.0.0.1:8881"
    print('Websocket connection starting')
    ws = aiohttp.web.WebSocketResponse()
    await ws.prepare(request)
    request.app['websockets'].add(ws)
    print('Websocket connection ready')
    try:
        async for msg in ws:
            if msg.type == aiohttp.WSMsgType.TEXT:
                if msg.data == 'close':
                    print(msg.data)
                    break
                else:
                    print(msg.data)
                    await ws.send_str("You said: {}".format(msg.data))
            elif msg.type == aiohttp.WSMsgType.ERROR:
                print('ws connection closed with exception %s' %
                      ws.exception())
    except (asyncio.CancelledError, ClientConnectionError):
        pass  # We end up here when the client has dropped the connection.
              # Resources could be released here in the future.
    finally:
        print('Websocket connection closed')
        request.app['websockets'].discard(ws)
    # pending = asyncio.Task.all_tasks()
    # asyncio.get_event_loop().stop()
    return ws

async def on_shutdown(app):
    for ws in set(app['websockets']):
        await ws.close(code=WSCloseCode.GOING_AWAY, message='Server shutdown')

def main():
    loop = asyncio.get_event_loop()
    app = aiohttp.web.Application()
    app['websockets'] = weakref.WeakSet()
    app.on_shutdown.append(on_shutdown)
    app.add_routes([aiohttp.web.get('/', websocket_handler)])  # , aiohttp.web.get('/test', testhandle)
    try:
        aiohttp.web.run_app(app, host=HOST, port=PORT, handle_signals=True)
        print("after run_app")
    except Exception as exc:
        print("in exception")
    finally:
        loop.close()

if __name__ == '__main__':
    main()
I believe the correct answer is simply:
raise aiohttp.web.GracefulExit()
After this exception is caught, run_app() calls all the handlers appended to the on_shutdown and on_cleanup signals and exits.
One can see in the source code that aiohttp.web.run_app() waits for two exceptions: GracefulExit and KeyboardInterrupt. While the latter is rather uninteresting, following the trail of GracefulExit leads to this place in web_runner.py, which registers SIGINT and SIGTERM signal handlers bound to a function that raises GracefulExit().
Indeed, I also managed to gracefully shut it down by raising signal.SIGINT or signal.SIGTERM from within the process itself, e.g.
import signal
signal.raise_signal(signal.SIGINT)
This was tested to work on Fedora Linux 34, Python 3.9.7, aiohttp 3.7.4.
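Applied to the handler from the question, a hedged sketch of raising GracefulExit directly when the client sends "cancel" (the rest of the app setup stays as in the question):

from aiohttp import web, WSMsgType

async def websocket_handler(request):
    ws = web.WebSocketResponse()
    await ws.prepare(request)
    request.app['websockets'].add(ws)
    try:
        async for msg in ws:
            if msg.type == WSMsgType.TEXT and msg.data == 'cancel':
                # GracefulExit is caught by web.run_app(), which then runs the
                # on_shutdown and on_cleanup handlers before exiting
                raise web.GracefulExit()
    finally:
        request.app['websockets'].discard(ws)
    return ws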
https://docs.aiohttp.org/en/v3.0.1/web_reference.html#aiohttp.web.Application.shutdown
app.shutdown()
app.cleanup()
After shutdown() you should also call cleanup().
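If you manage the server yourself instead of using web.run_app(), a minimal sketch of where these calls fit (host and port are placeholders); with an AppRunner, runner.cleanup() ends up invoking app.shutdown() and then app.cleanup() for you:

import asyncio
import aiohttp.web

async def on_shutdown(app):
    print("on_shutdown: close websockets here")

async def on_cleanup(app):
    print("on_cleanup: release resources here")

async def main():
    app = aiohttp.web.Application()
    app.on_shutdown.append(on_shutdown)
    app.on_cleanup.append(on_cleanup)

    runner = aiohttp.web.AppRunner(app)
    await runner.setup()
    site = aiohttp.web.TCPSite(runner, 'localhost', 8881)
    await site.start()

    await asyncio.sleep(60)   # serve for a while, then shut down

    await runner.cleanup()    # stops the site, then fires on_shutdown and on_cleanup

asyncio.run(main())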

How do I receive new responses in the Websocket?

I have a client module:
#!/usr/bin/env python
# WS client example
import asyncio
import websockets

async def hello():
    async with websockets.connect(
            'ws://A.B.C.D:8765') as websocket:
        name = input("What's your name? ")
        await websocket.send(name)
        greeting = await websocket.recv()
        print(greeting)

asyncio.get_event_loop().run_until_complete(hello())
and the server module:
#!/usr/bin/env python
from __future__ import print_function
import asyncio
import datetime
import random
import websockets
import ast
from collections import defaultdict
import csv
from itertools import chain
import json
import os
import operator
import sys
import pymongo
from pymongo import MongoClient

try:
    client = MongoClient('localhost', 27017)
    db = client["Bubble"]
except Exception as e:
    print(e)

start_match = datetime.datetime.strptime(
    "2018-07-01 18:00:00", '%Y-%m-%d %H:%M:%S')
collection = "CRODEN_R16"

async def hello(websocket, path):
    entity_name = await websocket.recv()
    print(entity_name)
    while True:
        file = open("set_start_match.txt", "r")
        for line in file:
            start_today = datetime.datetime.strptime(
                line.split('.')[0], '%Y-%m-%d %H:%M:%S')
            print(start_today)
        now = datetime.datetime.utcnow()
        diff = now - start_today
        request_match = start_match + diff
        print(diff)
        for post in db[collection].find():
            if "emotion" not in post.keys():
                print("Ignored")
                continue
            if post["timeStamp"] > request_match:
                if post["entity_name"] == entity_name:
                    print("Satisfied")
                    currDict = {}
                    currDict["entity"] = post["entity_name"]
                    currDict["emotion"] = max(
                        post["emotion"].items(), key=operator.itemgetter(1))[0]
                    currDict["profile_image"] = post["userProfile"]
                    currDict["tweet"] = post["tweet"]
                    currDict_json = json.dumps(currDict, default=str)
                    print(currDict["tweet"])
                    await websocket.send(currDict_json)
                    await asyncio.sleep(1)
                    del currDict

try:
    start_server = websockets.serve(hello, '0.0.0.0', 8765)
    print("Start entity server")
    asyncio.get_event_loop().run_until_complete(start_server)
    asyncio.get_event_loop().run_forever()
except Exception as e:
    print(e)
Now, the issue is that I want to send the name as input only once and then receive output continuously.
When I wrote this in the client:
while True:
    greeting = await websocket.recv()
    print(greeting)
the same response is returned again and again. Even on the server side, where I print the documents rendered from the db, the same doc is printed.
I am completely clueless as to what the issue is.
Note: I have tried running the run-once client module, and there I was getting perfect results; it was just that I had to give the same input again and again. I want it to be automated.
To get data continuously, someone has to send data continuously.
If someone sends data continuously, then someone else has to receive data continuously.
So both sides need a loop.
Client - it sends numbers continuously in a loop:
#!/usr/bin/env python
import asyncio
import websockets
import time

async def hello():
    async with websockets.connect(
            'ws://localhost:8769') as websocket:
        name = input("What's your name? ")
        await websocket.send(name)
        i = 0
        while True:
            print('send:', i)
            await websocket.send(str(i))
            time.sleep(2)   # note: asyncio.sleep() would be the non-blocking choice inside a coroutine
            i += 1

try:
    asyncio.get_event_loop().run_until_complete(hello())
except KeyboardInterrupt:
    print('KeyboardInterrupt')
Server - it receives numbers continuously in a loop:
import asyncio
import websockets

async def hello(websocket, path):
    entity_name = await websocket.recv()
    print('name:', entity_name)
    while True:
        data = await websocket.recv()
        print('recv:', data)

try:
    print("Start entity server")
    start_server = websockets.serve(hello, '0.0.0.0', 8769)
    asyncio.get_event_loop().run_until_complete(start_server)
    asyncio.get_event_loop().run_forever()
except KeyboardInterrupt:  # the keyboard interrupt needs its own except clause
    print("KeyboardInterrupt")
    start_server.ws_server.close()  # solution for [Errno 98] Address already in use
except Exception as ex:
    print(ex)
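For the flow the question actually describes (name sent once, then the server pushes continuously), the same two loops can be flipped. A sketch using the same (websocket, path) handler signature and event-loop calls as above (newer websockets releases drop the path argument, so adjust for your version):

Client - sends the name once, then only receives:

#!/usr/bin/env python
import asyncio
import websockets

async def hello():
    async with websockets.connect('ws://localhost:8769') as websocket:
        await websocket.send(input("What's your name? "))  # sent once
        while True:
            print('recv:', await websocket.recv())          # then only receive

asyncio.get_event_loop().run_until_complete(hello())

Server - pushes a new message every second:

import asyncio
import websockets

async def hello(websocket, path):
    name = await websocket.recv()
    i = 0
    while True:
        await websocket.send('{}: {}'.format(name, i))
        await asyncio.sleep(1)   # non-blocking pause between pushes
        i += 1

start_server = websockets.serve(hello, '0.0.0.0', 8769)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()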

How to use epoll on Tornado

I am trying to make epoll work on Tornado:
import tornado.ioloop
import tornado.web
from tornado.platform.epoll import EPollIOLoop
from tornado import web, gen

class MainHandler(tornado.web.RequestHandler):
    @web.asynchronous
    @gen.engine
    def get(self):
        self.write("Hello, world")

application = tornado.web.Application([
    (r"/", MainHandler),
])

if __name__ == "__main__":
    application.listen(8888)
    EPollIOLoop().start()
But when I start the program and visit localhost:8888/, it doesn't return anything.
Is it that my system doesn't meet the requirements? My Linux version is Ubuntu 12.04.1 LTS.
Just use tornado.ioloop.IOLoop.instance(); it chooses the best IOLoop implementation for your platform.
if __name__ == "__main__":
    application.listen(8888)
    ioloop = tornado.ioloop.IOLoop.instance()
    print ioloop  # prints <tornado.platform.epoll.EPollIOLoop object at ..>
    ioloop.start()
You should call self.finish() if you use the asynchronous decorator:
If this decorator is given, the response is not finished when the
method returns. It is up to the request handler to call self.finish()
to finish the HTTP request. Without this decorator, the request is
automatically finished when the get() or post() method returns.
class MainHandler(tornado.web.RequestHandler):
    @web.asynchronous
    @gen.engine
    def get(self):
        self.write("Hello, world")
        self.finish()
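Putting the two answers together, a minimal sketch (Tornado 3.x-era API to match the question; web.asynchronous was removed in later Tornado releases):

import tornado.ioloop
import tornado.web
from tornado import web, gen

class MainHandler(tornado.web.RequestHandler):
    @web.asynchronous
    @gen.engine
    def get(self):
        self.write("Hello, world")
        self.finish()   # required with @web.asynchronous

application = tornado.web.Application([
    (r"/", MainHandler),
])

if __name__ == "__main__":
    application.listen(8888)
    ioloop = tornado.ioloop.IOLoop.instance()  # picks EPollIOLoop on Linux
    ioloop.start()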
