How to run and cancel a Linux command using Flask Python API?

I am working on a Flask-based Python API. It has two endpoints, run_cmd and stop_cmd. run_cmd executes a command in the terminal; that command keeps running until someone manually cancels it, which is what the stop_cmd endpoint is for. Below is the code:
from flask import Flask, jsonify, request
from threading import Thread
from subprocess import call

app = Flask(__name__)

def RunCmd():
    call('while true; do echo "hello"; sleep 2s; done', shell=True)

@app.route('/run_cmd', methods=['GET'])
def run_cmd():
    Thread(target=RunCmd).start()
    return jsonify({"status": "ok"}), 200

@app.route('/stop_cmd', methods=['GET'])
def stop_cmd():
    # This api will stop the cmd running in RunCmd
As you can see in the code above, if we hit /run_cmd it starts the command, which keeps printing hello in the terminal. I wanted to know how we can cancel this ongoing command so that I can implement the stop_cmd endpoint. Is this possible?

This is how I solved it:
from flask import Flask, jsonify, request
from threading import Thread
import subprocess
import psutil

app = Flask(__name__)
proc = None

def kill(proc_pid):
    # Kill the shell and all of its children (the loop runs as a child of the shell).
    process = psutil.Process(proc_pid)
    for child in process.children(recursive=True):
        child.kill()
    process.kill()

def RunCmd():
    global proc
    proc = subprocess.Popen('while true; do echo "hello"; sleep 2s; done', shell=True)

@app.route('/run_cmd', methods=['GET'])
def run_cmd():
    Thread(target=RunCmd).start()
    return jsonify({"status": "ok"}), 200

@app.route('/stop_cmd', methods=['GET'])
def stop_cmd():
    global proc
    kill(proc.pid)
    return jsonify({"status": True}), 200

if __name__ == '__main__':
    app.run(host='127.0.0.1', port=5000)
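A quick way to exercise the two endpoints from another Python shell, assuming the third-party requests package is installed and the app is running on the host/port above (this is just an illustration, not part of the original solution):

import requests

# Start the endless shell loop; "hello" should appear in the server terminal every 2 seconds.
print(requests.get("http://127.0.0.1:5000/run_cmd").json())

# Stop it again; kill() terminates the shell and its child processes.
print(requests.get("http://127.0.0.1:5000/stop_cmd").json())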

subprocess.call is part of the older high-level API; it blocks until the command completes and doesn't give you a handle to the running process. Instead, you should probably use subprocess.Popen(). Then you could start your command by running
proc = subprocess.Popen(["somecommand", "-somearg", "somethingelse"])
This will return a Popen object, which you can terminate by sending it a signal, for example with proc.terminate() or proc.kill().
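Note that with shell=True, as in the question, terminate() only signals the shell, not the loop it spawned. A minimal sketch of one common workaround on POSIX systems (not the answerer's exact code): start the command in its own session and signal the whole process group.

import os
import signal
import subprocess

# Start the shell in a new session so it becomes a process-group leader.
proc = subprocess.Popen('while true; do echo "hello"; sleep 2s; done',
                        shell=True, start_new_session=True)

# Later, e.g. in stop_cmd: signal the whole group (shell plus its echo/sleep children).
os.killpg(os.getpgid(proc.pid), signal.SIGTERM)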

Related

Starting a Flask Server from Within Test Code?

I'm working through the Flasky tutorial from Miguel Grinberg's book Flask Web Development 2e and I've run into a snag with the end-to-end testing in Chapter 15. When I try to run the code I get a console message
* Ignoring a call to 'app.run()' that would block the current 'flask' CLI command.
Only call 'app.run()' in an 'if __name__ == "__main__"' guard.
followed by the browser reporting "Firefox cannot establish a connection...". This suggests to me that the test server is not starting.
Here's the code, from pages 231-233 of the book (the file is tests/test_selenium.py):
import threading
import unittest
from selenium import webdriver
from app import create_app, db, fake
from app.models import Role, User

class SeleniumTestCase(unittest.TestCase):
    browser = None

    @classmethod
    def setUpClass(cls) -> None:
        try:
            cls.browser = webdriver.Firefox()
        except Exception as e:
            pass
        if cls.browser:
            cls.app = create_app('testing')
            cls.app_context = cls.app.app_context()
            cls.app_context.push()
            import logging
            logger = logging.getLogger('werkzeug')
            logger.setLevel('ERROR')
            db.create_all()
            Role.insert_roles()
            fake.users(10)
            fake.posts(10)
            admin_role = Role.query.filter_by(permissions=0xff).first()
            admin = User(email='john@example.com', password='cat',
                         username='john', role=admin_role, confirmed=True)
            db.session.add(admin)
            db.session.commit()
            cls.server_thread = threading.Thread(
                target=cls.app.run,
                kwargs={
                    'debug': False,
                    'use_reloader': False,
                    'use_debugger': False,
                    'host': '0.0.0.0',
                    'port': 5000
                }
            )
            cls.server_thread.start()

    @classmethod
    def tearDownClass(cls) -> None:
        if cls.browser:
            cls.browser.get('http://localhost:5000/shutdown')
            cls.browser.quit()
            cls.server_thread.join()
            db.drop_all()
            db.session.remove()
            cls.app_context.pop()

    def setUp(self) -> None:
        if not self.browser:
            self.skipTest('Web browser not available')

    def tearDown(self) -> None:
        pass

    def test_admin_home_page(self):
        self.browser.get('http://localhost:5000/')  # fails here
        self.assertRegex(self.browser.page_source, r'Hello,\s+Stranger!')
        self.fail('Finish the test!')
How can I get a test server up and running from within the test code? (I putzed around with Flask-Testing for a few days before giving it up as unmaintained.)
ADDENDUM: Further experimentation has determined that the problem lies in the explicit call to app.run() conflicting with the Flask CLI's implicit call to app.run(), but without the explicit call the test server doesn't start.
I want to run this from the Flask CLI the same as my unit tests. This means I need to find a way to start the test server after the test database is populated, which happens after the test class's code begins to run. The CLI command code is:
@app.cli.command()
@click.argument('test_names', nargs=-1)
def test(coverage, test_names):
    """Run the unit tests"""
    import unittest
    if test_names:
        tests = unittest.TestLoader().loadTestsFromNames(test_names)
    else:
        tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)
So running from __main__ would bypass the tests' load/run sequence.
I found a feasible solution using Timer
import unittest
from threading import Timer
Create two variables at the top of your code:
timer = None
myapp = None
class ApplicationTest(unittest.TestCase):
Now, at the bottom of the file, create a main block and a custom timer callback; I'll use startTest as the method name.
In the main block, call create_app, assign the result to the global myapp variable, and use that myapp variable inside your Selenium test code.
unittest.main() will trigger your test class and run the test cases one after another; unfortunately the tests run twice, and I don't know why.
def startTest():
    timer.cancel()
    unittest.main()

if __name__ == '__main__':
    timer = Timer(6.0, startTest)
    timer.start()
    myapp = create_app()
    myapp.run(debug=True, threaded=True)
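An alternative worth considering (not from the answer above): instead of app.run() plus a Timer, the test's setUpClass can start a werkzeug development server directly, which also gives a clean shutdown in tearDownClass without a /shutdown route. A minimal sketch, assuming the same create_app('testing') factory:

import threading
from werkzeug.serving import make_server

from app import create_app

app = create_app('testing')

# make_server returns a server object that can be shut down explicitly,
# so no /shutdown route or Timer trick is needed.
server = make_server('127.0.0.1', 5000, app)
server_thread = threading.Thread(target=server.serve_forever, daemon=True)
server_thread.start()

# ... run the Selenium tests against http://127.0.0.1:5000/ ...

server.shutdown()   # stops serve_forever()
server_thread.join()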

flask ignoring subprocess.call

I have been trying to create a webpage that allows people to start game servers with a simple GUI. I'm using Flask and have made a very simple page which should pass CLI commands to the OS. Right now Flask runs the whole function except for the subprocess call, which it just seems to ignore. The commands work when I paste them manually.
The script:
from flask import render_template
from subprocess import call

command = 'cd /home/gameserver/PaperMC/ && ./pmcserver start'

def minecraftscript():
    call(command, shell=True)
    return render_template('index.html')
main.py:
from scripts import *

app = Flask(__name__)

@app.route('/')
def index():
    return render_template('index.html')

@app.route('/controlpanel')
def login():
    return render_template('controlpanel.html')

@app.route('/')
def startminecraft():
    minecraftpy.minecraftscript()

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=81, debug=True)
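No answer is quoted for this one, but for context, a route that launches a long-running command normally has to return a response itself and shouldn't block the request. A hedged sketch under those assumptions (the /startminecraft URL is illustrative, not from the question; the command string is the one posted above):

from subprocess import call
from threading import Thread

from flask import Flask, render_template

app = Flask(__name__)
COMMAND = 'cd /home/gameserver/PaperMC/ && ./pmcserver start'

@app.route('/startminecraft')
def startminecraft():
    # Run the server start command in a background thread so the request returns immediately.
    Thread(target=call, args=(COMMAND,), kwargs={'shell': True}).start()
    return render_template('index.html')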

How to Access pywebview.Window Object from Another multiprocessing.Process?

I have a webview that controls a Flask API.
The webview has a button to start the Flask server and a button to stop it, which is why I use multiprocessing.Process to run Flask in a separate process. With that, I can no longer access my pywebview.Window. I want to use pywebview.Window.evaluate_js() to evaluate some JavaScript from within the Flask process (of course it has to be the same pywebview.Window that I created before opening the new process for Flask).
Does anybody know how to accomplish this? I appreciate it!
Some sample code:
from flask import Flask, request, jsonify
from flask_cors import CORS
import multiprocessing
import os, sys, re, json, socket, sqlite3, base64, requests, webview

class ServerFlaskApi:
    def __init__(self):
        self.app = Flask(__name__, root_path=Root_Dir)
        self.app.add_url_rule("/", view_func=self.Default)

    def Default(self):
        return "Welcome to the Python Http Server for your Application!", 200

    def PrintToWebViewConsole(self):
        # Trying to use pywebview.Window here; of course WebviewWindow is not defined!!!
        WebviewWindow.evaluate_js(js_script)

################

class WebviewApi:
    def __init__(self):
        self.server_thread = None

    def StartServer(self):
        self.server_thread = multiprocessing.Process(target=Run_Flask_Server, daemon=True)
        self.server_thread.start()

    def StopServer(self):
        self.server_thread.terminate()

def Run_Flask_Server():
    serverApi = ServerFlaskApi()
    CORS(serverApi.app)
    serverApi.app.run(host=Server_Host, port=Server_Port, debug=True, use_reloader=False)

################

if __name__ == "__main__":
    WebViewApi = WebviewApi()
    WebviewWindow = webview.create_window(title="Server Monitor", url="view/main-gui.html", js_api=WebViewApi,
                                          width=550, height=750, min_size=(550, 750), resizable=False,
                                          on_top=True, confirm_close=False)
    webview.start(debug=False)
I'm still new to Python, so any suggestion is welcome!
Thank you in advance!
I guess I have to use threading instead of multiprocessing, since threads share memory and processes do not.
Also, for anybody who wants to stop a thread, here is a function that does it. I'm not sure whether this is a good way to do it, but it does the job for me:
import ctypes
import threading

def Kill_Thread(thread):
    if not isinstance(thread, threading.Thread):
        raise TypeError("Must be set as threading.Thread type!!!")
    thread_id = thread.ident
    # Asynchronously raise SystemExit inside the target thread.
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, ctypes.py_object(SystemExit))
    if res > 1:
        # More than one thread was affected: undo it and report the failure.
        ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, 0)
        print("Exception raise failure")

Python subprocess for docker-compose

I have an interesting set of requirements that I am trying to meet using the Python subprocess module and docker-compose. This whole setup is possible in a single docker-compose file, but due to requirements this is what I would like to set up:
1. Call docker-compose using a Python subprocess to start the test servers.
2. Print all the stdout of the docker-compose run.
3. As soon as the test server is up and running via docker-compose, call the testing scripts for that server.
This is what my docker-compose.py looks like:
import subprocess
from subprocess import PIPE
import os
from datetime import datetime

class MyLog:
    def my_log(self, message):
        date_now = datetime.today().strftime('%d-%m-%Y %H:%M:%S')
        print("{0} || {1}".format(date_now, message))

class DockercomposeRun:
    log = MyLog()

    def __init__(self):
        dir_name, _ = os.path.split(os.path.abspath(__file__))
        self.dirname = dir_name

    def run_docker_compose(self, filename):
        command_name = ["docker-compose", "-f", self.dirname + filename, "up"]
        popen = subprocess.Popen(command_name, stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)
        return popen
Now, in my test.py, as soon as stdout goes blank I would like to break out of the printing loop and run the rest of the tests in test.py:
docker_compose_run = DockercomposeRun()
rc = docker_compose_run.run_docker_compose('/docker-compose.yml.sas-viya-1')
for line in iter(rc.stdout.readline, ''):
    print(line, end='')
    if line == '':
        break
rc.stdout.close()

# start here actual test cases
.......
But for me the loop never breaks, even though docker-compose's stdout goes quiet after the server is up and running, and the test cases are never executed.
Is this the right approach, or how can I achieve this?
I think the issue here is that you are not running docker-compose in detached mode, so it blocks the application. Can you try adding "-d" to command_name?
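A rough sketch of that suggestion, with an explicit wait added so the tests only start once the server accepts connections. The compose file name is the one from the question, but the path handling is simplified and the host/port being polled are assumptions:

import socket
import subprocess
import time

# Start the stack detached: docker-compose returns once the containers are created
# instead of streaming their logs forever.
subprocess.run(["docker-compose", "-f", "docker-compose.yml.sas-viya-1", "up", "-d"], check=True)

# Poll the service port until it accepts connections (assumed host/port).
deadline = time.time() + 120
while time.time() < deadline:
    try:
        with socket.create_connection(("localhost", 8080), timeout=2):
            break
    except OSError:
        time.sleep(2)
else:
    raise RuntimeError("test server did not come up in time")

# ... run the actual test cases here ...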

Terminate subprocess

I'm curious why the code below freezes. When I kill the python3 interpreter, the "cat" process remains as a zombie. I expect the subprocess to be terminated before the main process finishes.
When I manually send SIGTERM to cat /dev/zero, the process finishes correctly (almost immediately).
#!/usr/bin/env python3
import logging
import subprocess
import re
import os
import sys
import time
from PyQt4 import QtCore

class Command(QtCore.QThread):
    # stateChanged = QtCore.pyqtSignal([bool])

    def __init__(self):
        QtCore.QThread.__init__(self)
        self.__runned = False
        self.__cmd = None
        print("initialize")

    def run(self):
        self.__runned = True
        self.__cmd = subprocess.Popen(["cat /dev/zero"], shell=True, stdout=subprocess.PIPE)
        try:
            while self.__runned:
                print("reading via pipe")
                buf = self.__cmd.stdout.readline()
                print("Buffer:{}".format(buf))
        except:
            logging.warning("Can't read from subprocess (cat /dev/zero) via pipe")
        finally:
            print("terminating")
            self.__cmd.terminate()
            self.__cmd.kill()

    def stop(self):
        print("Command::stop stopping")
        self.__runned = False
        if self.__cmd:
            self.__cmd.terminate()
            self.__cmd.kill()
        print("Command::stop stopped")

def exitApp():
    command.stop()
    time.sleep(1)
    sys.exit(0)

if __name__ == "__main__":
    app = QtCore.QCoreApplication(sys.argv)
    command = Command()
    # command.daemon = True
    command.start()
    timer = QtCore.QTimer()
    QtCore.QObject.connect(timer, QtCore.SIGNAL("timeout()"), exitApp)
    timer.start(2 * 1000)
    sys.exit(app.exec_())
As you noted yourself, the reason for the zombie is that the signal is caught by the shell and doesn't affect the process it created. However, there is a way to kill the shell and all processes it created: you have to use the process group feature. See How to terminate a python subprocess launched with shell=True. Having said that, if you can manage without shell=True, that's always preferable - see my answer here.
I solved this problem in a different way, so here's the result:
I have to call subprocess.Popen with shell=False, because otherwise it creates two processes (the shell and the process), and __cmd.kill() sends the signal to the shell while the process remains as a zombie.
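A minimal sketch of that variant (not the exact code from the answer): passing the command as an argument list means Popen spawns cat directly, so kill() reaches it.

import subprocess

proc = subprocess.Popen(["cat", "/dev/zero"], stdout=subprocess.PIPE)
# ... read from proc.stdout as needed ...
proc.kill()   # the signal goes straight to cat; there is no intermediate shell
proc.wait()   # reap the process so it doesn't linger as a zombie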
