Rpyc Python: Rpyc service thread start call is blocking - multithreading

The server starts fine I can update values, but the hello world string is not printed. The program is stuck at t.start(), how can I make it non-blocking.
import rpyc
class rpyc_service(rpyc.Service):
def on_connect(self):
self.exposed_var = 0
def on_disconnect(self):
pass
def exposed_update_var(self, val):
self.exposed_var = val
if __name__ == '__main__':
from rpyc.utils.server import ThreadedServer
t = ThreadedServer(rpyc_service, port = 18861)
t.start()
# The hello world section is never printed
print "Hello world"

Related

How to safely terminate a thread in Python

I have below code, where I am using OpenCV to start webcam video. Along with that I also have a thread running that pings www.google.com to check network connectivity.
import time
import cv2
import os
from threading import Thread
stopThread = False
def CheckNetwork():
global stopThread
while True:
time.sleep(60)
host = "www.google.com"
response = os.system("ping " + host)
if response == 0:
print("Internet host reachable")
else:
print("Internet host not reachable")
if stopThread:
break
def main():
global stopThread
Thread(target=CheckNetwork).start()
cam = cv2.VideoCapture(0)
while True:
ret_val, img = cam.read()
cv2.imshow('Camera', img)
key = cv2.waitKey(1)
if key == ord('q'):
stopThread = True
break
cv2.destroyAllWindows()
main()
This code is running fine. If I have to close the application by pressing q, OpenCV window closes but application keeps running for 60sec because of the thread and only after 60sec whole application terminates safely.
I wanted to know if this is a good way to close the threads. Is there any better way available which can immediately terminate threads in Python?
There's no native way of stopping a thread in Python. Instead of using a stop flag, you can also use ctypes that calls the Python API to raise an exception in the thread.
import ctypes
# Other imports...
class ThreadWithException(threading.Thread):
def __init__(self, name):
threading.Thread.__init__(self)
self.name = name
def run(self):
# code here...
def get_id(self):
# returns id of the respective thread
if hasattr(self, '_thread_id'):
return self._thread_id
for id, thread in threading._active.items():
if thread is self:
return id
def raise_exception(self):
thread_id = self.get_id()
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id,
ctypes.py_object(SystemExit))
if res > 1:
ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, 0)
print('Exception raise failure')

Shutdown during recv on python socket

During the execution of this code, it blocks on the join
I have a TCP server running on ("127.0.0.1", 1777) for the test
I tried using directly the socket with recv, but the result is the same
Any idea, why the shutdown on READ doesn't interrupt the read ?
import socket
from threading import Thread
from time import sleep
class Parser(Thread):
rbufsize = 4096
wbufsize = 4096
encoding="utf-8"
new_line = "\n"
def __init__(self):
super().__init__()
self._socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
self._wfile = None
self._rfile = None
def run(self):
self._socket.connect(("127.0.0.1", 1777))
self._rfile = self._socket.makefile('rb', self.rbufsize, encoding=self.encoding, newline=self.new_line)
self._wfile = self._socket.makefile('wb', self.wbufsize, encoding=self.encoding, newline=self.new_line)
while True:
data = self._rfile.readline()
if not data:
break
self._handle_data(data)
self._cleanup()
def _cleanup(self):
"""
Fermeture
"""
if not self._wfile.closed:
try:
self._wfile.flush()
except socket.error:
# A final socket error may have occurred here, such as
# the local error ECONNABORTED.
pass
self._socket.shutdown(socket.SHUT_RDWR)
self._wfile.close()
self._rfile.close()
self._socket.close()
def stop(self):
self._socket.shutdown(socket.SHUT_RD)
if __name__ == "__main__":
p = Parser()
p.start()
sleep(5)
p.stop()
print("start join")
p.join()

Mocking REST APIs with Flask_restful using threading

I'm looking to mock a set of REST APIs for some tests. The following main() function works fine (i.e. it returns {"some-data": 1234} as json to the browser when I GET localhost:8099). The issue is it blocks the main thread:
from gevent import monkey, sleep, pywsgi
monkey.patch_all()
import flask
from flask_restful import reqparse, abort, Api, Resource
import queue
import sys
import threading
STUFFS = {"some-data": 1234}
class Stuff(Resource):
def get(self):
return flask.jsonify(STUFFS)
class ControlThread(threading.Thread):
def __init__(self, http_server, stop_event):
threading.Thread.__init__(self)
self.stop_event = stop_event
self.http_server = http_server
self.running = False
def run(self):
try:
while not self.stop_event.is_set():
if not self.running:
self.http_server.start()
self.running = True
sleep(0.001)
except (KeyboardInterrupt, SystemExit):
pass
self.http_server.stop()
class StuffMock:
def __init__(self, port, name=None):
if name is None:
name = __name__
self.app = flask.Flask(name)
self.api = Api(self.app)
self.api.add_resource(Stuff, "/stuff/")
self.stop_event = threading.Event()
self.http_server = pywsgi.WSGIServer(('', port), self.app)
self.serving_thread = ControlThread(self.http_server,
self.stop_event)
self.serving_thread.daemon = True
def start(self):
self.serving_thread.start()
def stop(self):
self.stop_event.set()
self.serving_thread.join()
def main():
mocker = StuffMock(8099)
mocker.start()
try:
while True:
sleep(0.01)
except (KeyboardInterrupt, SystemExit):
mocker.stop()
sys.exit()
if __name__ == "__main__":
main()
Without the sleep() call in the while loop above, nothing resolves. Here is a more succinct usage to demonstrate:
import time
from stuff_mock import StuffMock
mocker = StuffMock(8099)
mocker.start()
while True:
user_text = input("let's do some work on the main thread: ")
# will only resolve the GET request after user input
# (i.e. when the main thread executes this sleep call)
time.sleep(0.1)
if user_text == "q":
break
mocker.stop()
The gevent threading module seems to work differently from the core one. Does anyone have any tips or ideas about what's going on under the hood?
Found that if I switch out threading for multiprocessing (and threading.Thread for multiprocessing.Process), everything works as expected, and I can spin up arbitrary numbers of mockers without blocking.

ZeroMQ hangs in a python multiprocessing class/object solution

I'm trying to use ZeroMQ in Python (pyzmq) together with multiprocessing. As a minmal (not) working example I have a server- and a client-class which both inherit from multiprocessing.Process. The client as a child-process should send a message to the server-child-process which should print the message:
#mpzmq_class.py
from multiprocessing import Process
import zmq
class Server(Process):
def __init__(self):
super(Server, self).__init__()
self.ctx = zmq.Context()
self.socket = self.ctx.socket(zmq.PULL)
self.socket.connect("tcp://localhost:6068")
def run(self):
msg = self.socket.recv_string()
print(msg)
class Client(Process):
def __init__(self):
super(Client, self).__init__()
self.ctx = zmq.Context()
self.socket = self.ctx.socket(zmq.PUSH)
self.socket.bind("tcp://*:6068")
def run(self):
msg = "Hello World!"
self.socket.send_string(msg)
if __name__ == "__main__":
s = Server()
c = Client()
s.start()
c.start()
s.join()
c.join()
Now if I run this the server-process seems to hang at the receive-call msg = socket.receive_string(). In another (more complicated) case, it even hung at the socket.connect("...")-statement.
If I rewrite the script to use functions instead of classes/objects, it runs just fine:
# mpzmq_function.py
from multiprocessing import Process
import zmq
def server():
ctx = zmq.Context()
socket = ctx.socket(zmq.PULL)
socket.connect("tcp://localhost:6068")
msg = socket.recv_string()
print(msg)
def client():
ctx = zmq.Context()
socket = ctx.socket(zmq.PUSH)
socket.bind("tcp://*:6068")
msg = "Hello World!"
socket.send_string(msg)
if __name__ == "__main__":
s = Process(target=server)
c = Process(target=client)
s.start()
c.start()
s.join()
c.join()
Output:
paul#AP-X:~$ python3 mpzmq_function.py
Hello World!
Can anybody help me with this? I guess it's something I didn't understand concerning the usage of multiprocessing.
Thank you!
I run into the same issue.
I guess the problem is, that the run method has no access to the context object.
Maybe it has something to do with the C implementation and the fact, that processes do not have shared memory.
If instantiate the context in the run method, it works.
Here a working example:
#mpzmq_class.py
from multiprocessing import Process
import zmq
class Base(Process):
"""
Inherit from Process and
holds the zmq address.
"""
def __init__(self, address):
super().__init__()
self.address = address
class Server(Base):
def run(self):
ctx = zmq.Context()
socket = ctx.socket(zmq.PULL)
socket.connect(self.address)
msg = socket.recv_string()
print(msg)
class Client(Base):
def run(self):
ctx = zmq.Context()
socket = ctx.socket(zmq.PUSH)
socket.bind(self.address)
msg = "Hello World!"
socket.send_string(msg)
if __name__ == "__main__":
server_addr = "tcp://127.0.1:6068"
client_addr = "tcp://*:6068"
s = Server(server_addr)
c = Client(client_addr)
s.start()
c.start()
s.join()
c.join()
I added a base class to demonstrate that you can still access normal Python objects from the run method. If you put the context object into the init Method, it won't work.

python manager managed list

I am using pythons multiprocessing module in some of my code. I have a controller class that controls a class and performs some action.
import multiprocessing
from multiprocessing import Queue, Process, Manager
class dosomething(multiprocessing.Process):
def __init__(self,managerList):
self.mlist = managerList
print self.mlist
def run(self):
self.mlist.append((4,5,6))
class doController:
def __init__(self):
mgr = Manager()
self.mlist = mgr.list()
self.mlist.append((1,2,3,4))
t = dosomething(self.mlist)
#t.daemon = True
#t.start()
def printer(self):
return self.mlist
gd = doController()
print gd.printer()
Pring mlist in the init part of dosomething prints [(1, 2, 3, 4)] as expected but the list in the dosomething part does not work giving out IOError 11. Can anyone help if it's right or wrong?
The call to the Process.__init__ is missing.
You don't necessarely need to create a Process subclass you could use functions:
from multiprocessing import Process, Manager
def dosomething(mlist):
mlist.append((4,5,6))
def main():
manager = Manager()
L = manager.list((1,2,3,4))
p = Process(target=dosomething, args=(L,))
p.start()
p.join()
print L
if __name__ == '__main__':
main()

Resources