Python Flask for cnn-text-classification tensorflow POST request format - python-3.x

I am working with cnn-text-classification-tf. My aim is to run prediction from a frozen graph.
My question is how prediction can be done from the frozen graph. I found a great tutorial, https://blog.metaflow.fr/tensorflow-how-to-freeze-a-model-and-serve-it-with-a-python-api-d4f3596b3adc, where the author serves a frozen graph with Flask.
I am using the Flask code below to do the prediction.
import json, argparse, time

import tensorflow as tf
from linkedin import load_graph

from flask import Flask, request
from flask_cors import CORS

##################################################
# API part
##################################################
app = Flask(__name__)
cors = CORS(app)


@app.route("/api/predict", methods=['POST'])
def predict():
    start = time.time()

    data = request.data.decode("utf-8")
    if data == "":
        params = request.form
        x_in = json.loads(params['x'])
    else:
        params = json.loads(data)
        x_in = params['x']

    ##################################################
    # Tensorflow part
    ##################################################
    y_out = persistent_sess.run(y, feed_dict={
        x: x_in
        # x: [[3, 5, 7, 4, 5, 1, 1, 1, 1, 1]] # < 45
    })
    ##################################################
    # END Tensorflow part
    ##################################################

    json_data = json.dumps({'y': y_out.tolist()})
    print("Time spent handling the request: %f" % (time.time() - start))

    return json_data

##################################################
# END API part
##################################################

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--frozen_model_filename", default="frozen_model2.pb", type=str, help="Frozen model file to import")
    parser.add_argument("--gpu_memory", default=.2, type=float, help="GPU memory per process")
    args = parser.parse_args()

    ##################################################
    # Tensorflow part
    ##################################################
    print('Loading the model')
    graph = load_graph(args.frozen_model_filename)
    x = graph.get_tensor_by_name('prefix/input_x:0')
    y = graph.get_tensor_by_name('prefix/output/predictions:0')

    print('Starting Session, setting the GPU memory usage to %f' % args.gpu_memory)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory)
    sess_config = tf.ConfigProto(gpu_options=gpu_options)
    persistent_sess = tf.Session(graph=graph, config=sess_config)
    ##################################################
    # END Tensorflow part
    ##################################################

    print('Starting the API')
    app.run()
I am running this script from the terminal on macOS.
After running it, I am using Postman to POST the request.
How do I frame this request correctly to get a proper response? What exactly should the body contain in Postman?
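For reference, the else branch in the script above does params = json.loads(data) followed by x_in = params['x'], so I assume the raw JSON body has to look roughly like the commented example in the code, i.e. a batch of already-tokenized, padded integer ids:

{
    "x": [[3, 5, 7, 4, 5, 1, 1, 1, 1, 1]]
}

But I'm not sure whether this is the right shape, or whether raw text is expected instead.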

In Postman, you are sending a POST request with data in JSON format, so you need to make some changes in the code.
To get the data, first do the validation as follows:

if not 'data' in request.json:
    abort(400)

After that you can access the string as follows:

x_in = request.json["data"]

But if you would rather change the request instead, you can try sending the data as follows:

{
    "data": {
        "x": "good movie it was"
    }
}

This should work; if it doesn't, let me know.
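Putting both pieces together, a minimal sketch of the modified route could look like the following (assuming app, graph, x, y and persistent_sess are set up exactly as in your script, and that whatever you send under "x" is still the tokenized/padded integer input the model expects):

from flask import request, jsonify, abort

# app, graph, x, y and persistent_sess are assumed to come from the
# original script in the question
@app.route("/api/predict", methods=['POST'])
def predict():
    # Reject requests without a JSON body or without the 'data' key
    if not request.json or 'data' not in request.json:
        abort(400)
    x_in = request.json['data']['x']
    # x_in must still be the tokenized/padded input the model was trained on
    y_out = persistent_sess.run(y, feed_dict={x: x_in})
    return jsonify({'y': y_out.tolist()})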

Related

How to make a Mystreamlistener class for tweepy

import tweepy
import stream
import openai
from PIL import Image
from io import BytesIO

# Twitter API credentials
consumer_key = "removed"
consumer_secret = "removed"
access_key = "removed"
access_secret = "removed"

# OpenAI API credentials
openai_key = "removed"

# set up tweepy to authenticate with Twitter
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)

# set up OpenAI
openai.api_key = openai_key

# create a tweepy API object
api = tweepy.API(auth)

# function to handle incoming tweets that mention your account
def handle_mention(tweet):
    # get the text of the tweet
    text = tweet.text

    # generate a response using ChatGPT
    response = openai.Completion.create(
        model="text-davinci-002",
        prompt=text,
        temperature=0.5,
        max_tokens=1024,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )

    # take a screenshot of the ChatGPT response
    img = Image.new("RGB", (1024, 1024), "white")
    d = ImageDraw.Draw(img)
    font = ImageFont.truetype("font.ttf", 36)
    d.text((10, 10), response, font=font, fill=(0, 0, 0))
    img_bytes = BytesIO()
    img.save(img_bytes, format="PNG")
    img_bytes.seek(0)

    # tweet the screenshot
    api.update_with_media(
        filename="response.png",
        file=img_bytes,
        in_reply_to_status_id=tweet.id
    )

# function to listen for tweets that mention your account
def listen_for_mentions():
    # create a tweepy Stream object to listen for mentions
    stream = tweepy.stream(auth, listener=MyStreamListener(), bearer_token="removed", track=["#askAIguy"])

# create a tweepy StreamListener to handle incoming tweets
class MyStreamListener(stream):
    def on_status(self, status):
        # only handle tweets that mention #askaiguy
        if "#" in status.text and "#askaiguy" in status.text:
            # parse the tweet text to extract the mention of #askaiguy
            tweet_text = status.text.lower()
            mention_start = tweet_text.index("#askaiguy")
            mention_end = mention_start + len("#askaiguy")
            mention = tweet_text[mention_start:mention_end]

            # handle the mention by calling the handle_mention() function
            handle_mention(status)

# start listening for mentions of your account
listen_for_mentions()
This is the error response I keep getting:
Traceback (most recent call last):
  File "main.py", line 67, in <module>
    class MyStreamListener(stream):
TypeError: module() takes at most 2 arguments (3 given)
I know there was an update to the tweepy API and it merged StreamListener into Stream. Can someone please give me some guidance as to what I'm missing?
I've tried changing the name of the function, rearranging the structure, removing the arguments, etc. I've spent about four hours researching this problem and have gotten progressively closer, but I can't seem to get the bot to listen for mentions.
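From the Tweepy 4.x docs, my understanding is that you now subclass tweepy.Stream directly instead of passing a listener object, roughly like the sketch below (placeholder credentials, and handle_mention() is the function from my script above), though I haven't been able to wire it into my bot:

import tweepy

# handle_mention() is the function defined earlier in the script
class MentionStream(tweepy.Stream):
    def on_status(self, status):
        # only react to tweets containing the tracked hashtag
        if "#askaiguy" in status.text.lower():
            handle_mention(status)

# In Tweepy 4.x the credentials go straight into the Stream subclass
stream = MentionStream(consumer_key, consumer_secret, access_key, access_secret)
stream.filter(track=["#askAIguy"])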

Python-asyncio and subprocess deployment on IIS: returning HTTP response without running another script completely

I'm facing an issue creating real-time status updates, via a web framework, for merging a new dataset with an old one and for the results of machine learning model creation. The task is simple and follows these steps.
A user/client sends a new dataset as a .CSV file to the server,
on the server side my Windows machine receives the file and sends an acknowledgement,
the new dataset is merged with the old one for new machine learning model creation, and
another Python script is run (the one that creates a new sequential deep-learning model). Only after that script completes successfully should my code return the response to the client.
I have deployed my Python Flask application on IIS 10. To run the other Python script, the main Flask API script has to wait until the model creation script completes. The model creation script contains several steps: loading the datasets, tokenizing, one-hot encoding, padding, training the model for 100 epochs and finally producing prediction results.
My exact goal is for the Flask API to wait until the entire process has finished. The whole script launched via subprocess.run() definitely takes 8-9 minutes to complete. While testing this code in development mode it works excellently, without any issues. But in production mode on IIS it does not wait for the whole process; within 6-7 seconds it returns the response to the client.
For debugging purposes I added logging to record all events in both the Flask script and the model creation script. From that I learned that the model creation script only ran about 10% of the way through. First I tried simple approaches with async def and await around subprocess.run(), which didn't help. Then I added a thread pool, get_event_loop() and run_until_complete() to make the parent code wait until the whole process finished. But I still couldn't find a working solution. Please let me know what I did wrong. Thank you.
Configurations:
Python 3.7.9
Windows server 2019 and
IIS 10.0 Express
My code:
import os
import time
import glob
import subprocess
import pandas as pd
from flask import Flask, request, jsonify
from werkzeug.utils import secure_filename
from datetime import datetime
import logging
import asyncio
from concurrent.futures import ThreadPoolExecutor

ALLOWED_EXTENSIONS = {'csv', 'xlsx'}
_executor = ThreadPoolExecutor(1)

app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = "C:\\inetpub\\wwwroot\\iAssist_IT_support\\New_IT_support_datasets"
currentDateTime = datetime.now()
filenames = None

logger = logging.getLogger(__name__)
app.logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s:%(name)s:%(message)s')
file_handler = logging.FileHandler('model-creation-status.log')
file_handler.setFormatter(formatter)
# stream_handler = logging.StreamHandler()
# stream_handler.setFormatter(formatter)
app.logger.addHandler(file_handler)
# app.logger.addHandler(stream_handler)


def allowed_file(filename):
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS


@app.route('/file_upload')
def home():
    return jsonify("Hello, This is a file-upload API, To send the file, use http://13.213.81.139/file_upload/send_file")


@app.route('/file_upload/status1', methods=['POST'])
def upload_file():
    app.logger.debug("/file_upload/status1 is execution")
    # check if the post request has the file part
    if 'file' not in request.files:
        app.logger.debug("No file part in the request")
        response = jsonify({'message': 'No file part in the request'})
        response.status_code = 400
        return response
    file = request.files['file']
    if file.filename == '':
        app.logger.debug("No file selected for uploading")
        response = jsonify({'message': 'No file selected for uploading'})
        response.status_code = 400
        return response
    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        print(filename)
        print(file)
        app.logger.debug("Spreadsheet received successfully")
        response = jsonify({'message': 'Spreadsheet uploaded successfully'})
        response.status_code = 201
        return response
    else:
        app.logger.debug("Allowed file types are csv or xlsx")
        response = jsonify({'message': 'Allowed file types are csv or xlsx'})
        response.status_code = 400
        return response


@app.route('/file_upload/status2', methods=['POST'])
def status1():
    global filenames
    app.logger.debug("file_upload/status2 route is executed")
    if request.method == 'POST':
        # Get data in json format
        if request.get_json():
            filenames = request.get_json()
            app.logger.debug(filenames)
            filenames = filenames['data']
            # print(filenames)
            folderpath = glob.glob('C:\\inetpub\\wwwroot\\iAssist_IT_support\\New_IT_support_datasets\\*.csv')
            latest_file = max(folderpath, key=os.path.getctime)
            # print(latest_file)
            time.sleep(3)
            if filenames in latest_file:
                df1 = pd.read_csv("C:\\inetpub\\wwwroot\\iAssist_IT_support\\New_IT_support_datasets\\" +
                                  filenames, names=["errors", "solutions"])
                df1 = df1.drop(0)
                # print(df1.head())
                df2 = pd.read_csv("C:\\inetpub\\wwwroot\\iAssist_IT_support\\existing_tickets.csv",
                                  names=["errors", "solutions"])
                combined_csv = pd.concat([df2, df1])
                combined_csv.to_csv("C:\\inetpub\\wwwroot\\iAssist_IT_support\\new_tickets-chatdataset.csv",
                                    index=False, encoding='utf-8-sig')
                time.sleep(2)
                # return redirect('/file_upload/status2')
                return jsonify('New data merged with existing datasets')


@app.route('/file_upload/status3', methods=['POST'])
def status2():
    app.logger.debug("file_upload/status3 route is executed")
    if request.method == 'POST':
        # Get data in json format
        if request.get_json():
            message = request.get_json()
            message = message['data']
            app.logger.debug(message)
            return jsonify("New model training is in progress don't upload new file")


@app.route('/file_upload/status4', methods=['POST'])
def model_creation():
    app.logger.debug("file_upload/status4 route is executed")
    if request.method == 'POST':
        # Get data in json format
        if request.get_json():
            message = request.get_json()
            message = message['data']
            app.logger.debug(message)
            app.logger.debug(currentDateTime)

            def model_run():
                app.logger.debug("model script starts to run")
                subprocess.run("python C:\\.....\\IT_support_chatbot-master\\"
                               "Python_files\\main.py", shell=True)
                # time.sleep(20)
                app.logger.debug("script ran successfully")

            async def subprocess_call():
                # run blocking function in another thread,
                # and wait for it's result:
                app.logger.debug("sub function execution starts")
                await loop.run_in_executor(_executor, model_run)

            asyncio.set_event_loop(asyncio.SelectorEventLoop())
            loop = asyncio.get_event_loop()
            loop.run_until_complete(subprocess_call())
            loop.close()
            return jsonify("Model created successfully for sent file %s" % filenames)


if __name__ == "__main__":
    app.run()
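As a point of reference, subprocess.run() is itself a blocking call: the calling thread only continues once the child process has exited. A stripped-down sketch of that step without any event-loop wrapping (the path below is a placeholder, not the real one from the code above) would be:

import subprocess

# subprocess.run() blocks until the child process exits, so the code after
# this line only runs once the model creation script has finished.
# Placeholder path, not the real one.
result = subprocess.run(
    ["python", r"C:\path\to\model_creation\main.py"],
    capture_output=True,
    text=True,
)
print("model script finished with return code", result.returncode)

The sketch only illustrates the blocking behaviour itself; whether the IIS/FastCGI worker is recycled or times out before the 8-9 minutes elapse is a separate question.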

I am unable to stream the feed from my RBP3's camera and record it to a file at the same time using Python

I know how to save to a file using the code below (and timestamp the feed) and I know how to stream using uv4l, but I simply can't manage to do both at once.
import time
time.sleep(60)

import picamera
import datetime as dt

camera = picamera.PiCamera()
camera.resolution = (640, 480)
# camera.vflip = True
camera.led = False
x = 0

while True:
    bideoname = "/media/pi/cam/" + dt.datetime.now().strftime('%Y-%m-%d-%H') + ".h264"
    camera.annotate_background = picamera.Color('black')
    camera.annotate_text = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    # camera.start_preview()
    camera.start_recording(bideoname)
    start = dt.datetime.now()
    while (dt.datetime.now() - start).seconds < 3600:
        camera.annotate_text = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        camera.wait_recording(0.2)
    camera.stop_recording()
    x = x + 1
I imagine I would use Flask to create a local website to stream the feed to.
I have looked up and down the internet, and this example by Dave Jones seems to be the closest solution, but I don't know if a socket can communicate with a browser:
https://raspberrypi.stackexchange.com/questions/27041/record-and-stream-video-from-camera-simultaneously
There is also this code, which streams the camera feed to a page, but it makes no mention of how to simultaneously record as well:
from flask import Flask, render_template, Response

# Raspberry Pi camera module (requires picamera package, developed by Miguel Grinberg)
from camera_pi import Camera

app = Flask(__name__)


@app.route('/')
def index():
    """Video streaming home page."""
    return render_template('index.html')


def gen(camera):
    """Video streaming generator function."""
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')


@app.route('/video_feed')
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    return Response(gen(Camera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=80, debug=True, threaded=True)
Or maybe this is all wrong and there is a simpler solution to this?
Thanks for the help.
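One thing I came across is that picamera can apparently record on more than one splitter port at once, so something roughly like the sketch below might let the file recording and an MJPEG stream for the Flask generator above run side by side (untested, and the in-memory buffer is just a placeholder for whatever the streaming code actually reads from):

import io
import picamera

camera = picamera.PiCamera(resolution=(640, 480))

# Splitter port 1: H.264 recording to a file, as in my script above
camera.start_recording('/media/pi/cam/recording.h264', splitter_port=1)

# Splitter port 2: MJPEG frames into a stream object that the Flask
# generator could read from (placeholder in-memory buffer here)
mjpeg_buffer = io.BytesIO()
camera.start_recording(mjpeg_buffer, format='mjpeg', splitter_port=2)

try:
    camera.wait_recording(3600, splitter_port=1)
finally:
    camera.stop_recording(splitter_port=2)
    camera.stop_recording(splitter_port=1)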

Tweepy - How to notify when a tweet is deleted

I learnt that Twitter has stopped providing JSON for deleted tweets. I am trying to get past this limitation by using a polling method to see if a tweet has been deleted.
But my code still fails. I would appreciate it if you could help me figure out what I am missing.
import sys
import json
import tweepy
from tweepy import Stream
from tweepy.streaming import StreamListener
import datetime
import time
from polling import TimeoutException, poll

# Go to http://apps.twitter.com and create an app.
# The consumer key and secret will be generated for you after
consumer_key = 'xx'
consumer_secret = 'xx'
access_token = 'xx'
access_token_secret = 'xx'

# Set up the authorisation to use the Twitter API
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)

# Handle the output generated from the stream
class listener(StreamListener):
    tweetcount = 0

    def on_data(self, data):
        # Convert the message to JSON
        json_data = json.loads(data)
        # text_file = open(json_data['id_str'] + ".json", "w")
        # text_file.write(data)
        # text_file.close()
        if 'id_str' not in json_data:
            # If this isn't a status, do nothing.
            print("no ID")
        else:
            # print("Twitter Id ", json_data['id_str'])
            # print("User Id ", json_data['user']['id_str'])
            if json_data['user']['id_str'] == '51241574':  # Associated Press
                tweetcount = json_data['user']['statuses_count']
                tweet = api.get_status(json_data['id'])
                print("Tweet Count ", tweetcount)
                print("Account Name ", json_data['user']['name'])
                print(tweet.text)
            else:
                pass
        # if 'delete' in json_data:
        #     print("DELETED!")
        #     if json_data['delete']['status']['user_id'] == '51241574':
        #         deleted_tweet_id = json_data['delete']['status']['id']
        #         tweetcount -= 1
        #         print("New Count is ", tweetcount)
        #         print(deleted_tweet_id)
        #         deleted_tweet = api.get_status(deleted_tweet_id)
        #         print(deleted_tweet.text)
        #     else:
        #         pass
        return True

    def on_error(self, status):
        print("Error status is ", status)

# Start consuming from the stream. This will get all the Tweets & Deletions from the users the user is following.
twitterStream = Stream(auth, listener())
twitterStream.filter(follow=['51241574'], async=True)

# polling method to check if tweet is deleted
try:
    user = api.get_user('AP')
    poll(lambda: user.statuses_count >= listener.tweetcount > 0, timeout=30, step=1)
    print("Tweet was deleted,New Tweet count is ", user.statuses_count)
except Exception as ex:
    template = "An exception of type {0} occurred. Arguments:\n{1!r}"
    message = template.format(type(ex).__name__, ex.args)
    print(message)
When a listener event is fired, the application records the value in the tweet count variable and checks it against the value retrieved by querying the API.
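For clarity, my understanding of the polling pattern is that polling.poll keeps re-evaluating its target callable until it returns a truthy value or the timeout expires, so the condition presumably has to re-fetch a fresh count on every call. Something in this direction (not verified, just the shape I'm aiming for):

from polling import poll, TimeoutException

try:
    # Re-query the user inside the lambda so each poll sees a fresh count;
    # the condition is truthy once the live count drops below the streamed one.
    poll(lambda: api.get_user('AP').statuses_count < listener.tweetcount,
         timeout=30, step=1)
    print("Tweet count dropped - a tweet was probably deleted")
except TimeoutException:
    print("No deletion detected within the timeout")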

Does tensorflow-serving support multi-threading?

I have a problem using TensorFlow Serving.
I deployed my TensorFlow model as a RESTful API using TensorFlow Serving, but I doubt whether the tf-serving server supports multi-threading. I've done some experiments and it does not seem to.
I also noticed that there is a --tensorflow_session_parallelism option for tensorflow_model_server, but using that option makes my server slower.
Is there any reference for using TensorFlow Serving with multi-threading?
Elaborating on the content of the link provided by @ReInvent_IO, just in case the link doesn't work in the future.
Code for the same is shown below:
"""A client that talks to tensorflow_model_server loaded with mnist model.
The client downloads test images of mnist data set, queries the service with
such test images to get predictions, and calculates the inference error rate.
Typical usage example:
mnist_client.py --num_tests=100 --server=localhost:9000
"""
from __future__ import print_function
import sys
import threading
# This is a placeholder for a Google-internal import.
import grpc
import numpy
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
import mnist_input_data
Setting the value of concurrency to 5 asking the server to run 5 different threads
tf.app.flags.DEFINE_integer('concurrency', 5,
'maximum number of concurrent inference requests')
tf.app.flags.DEFINE_integer('num_tests', 100, 'Number of test images')
tf.app.flags.DEFINE_string('server', '', 'PredictionService host:port')
tf.app.flags.DEFINE_string('work_dir', '/tmp', 'Working directory. ')
FLAGS = tf.app.flags.FLAGS
class _ResultCounter(object):
"""Counter for the prediction results."""
def __init__(self, num_tests, concurrency):
self._num_tests = num_tests
self._concurrency = concurrency
self._error = 0
self._done = 0
self._active = 0
self._condition = threading.Condition()
def inc_error(self):
with self._condition:
self._error += 1
def inc_done(self):
with self._condition:
self._done += 1
self._condition.notify()
def dec_active(self):
with self._condition:
self._active -= 1
self._condition.notify()
def get_error_rate(self):
with self._condition:
while self._done != self._num_tests:
self._condition.wait()
return self._error / float(self._num_tests)
def throttle(self):
with self._condition:
while self._active == self._concurrency:
self._condition.wait()
self._active += 1
def _create_rpc_callback(label, result_counter):
"""Creates RPC callback function.
Args:
label: The correct label for the predicted example.
result_counter: Counter for the prediction result.
Returns:
The callback function.
"""
def _callback(result_future):
"""Callback function.
Calculates the statistics for the prediction result.
Args:
result_future: Result future of the RPC.
"""
exception = result_future.exception()
if exception:
result_counter.inc_error()
print(exception)
else:
sys.stdout.write('.')
sys.stdout.flush()
response = numpy.array(
result_future.result().outputs['scores'].float_val)
prediction = numpy.argmax(response)
if label != prediction:
result_counter.inc_error()
result_counter.inc_done()
result_counter.dec_active()
return _callback
def do_inference(hostport, work_dir, concurrency, num_tests):
"""Tests PredictionService with concurrent requests.
Args:
hostport: Host:port address of the PredictionService.
work_dir: The full path of working directory for test data set.
concurrency: Maximum number of concurrent requests.
num_tests: Number of test images to use.
Returns:
The classification error rate.
Raises:
IOError: An error occurred processing test data set.
"""
test_data_set = mnist_input_data.read_data_sets(work_dir).test
channel = grpc.insecure_channel(hostport)
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
result_counter = _ResultCounter(num_tests, concurrency)
for _ in range(num_tests):
request = predict_pb2.PredictRequest()
request.model_spec.name = 'mnist'
request.model_spec.signature_name = 'predict_images'
image, label = test_data_set.next_batch(1)
request.inputs['images'].CopyFrom(
tf.contrib.util.make_tensor_proto(image[0], shape=[1, image[0].size]))
result_counter.throttle()
result_future = stub.Predict.future(request, 5.0) # 5 seconds
result_future.add_done_callback(
_create_rpc_callback(label[0], result_counter))
return result_counter.get_error_rate()
def main(_):
if FLAGS.num_tests > 10000:
print('num_tests should not be greater than 10k')
return
if not FLAGS.server:
print('please specify server host:port')
return
error_rate = do_inference(FLAGS.server, FLAGS.work_dir,
FLAGS.concurrency, FLAGS.num_tests)
print('\nInference error rate: %s%%' % (error_rate * 100))
if __name__ == '__main__':
tf.app.run()
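For completeness, a typical invocation of this client (as in its own docstring, with the concurrency flag spelled out; use whatever host:port your tensorflow_model_server instance is actually listening on) is:

python mnist_client.py --num_tests=100 --server=localhost:9000 --concurrency=5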
