I am trying to post data to ArangoDB over gRPC using the script below, but when I run the client I get the error above. My server is running fine; I am unable to figure out where the error is.
from typing import Dict, List
import json
import os
import traceback

import grpc
from google.protobuf import json_format

import first_pb2_grpc as pb2_grpc
import first_pb2 as pb2

test_data_file_name = "data.json"
curr_dir = os.path.dirname(os.path.realpath(__file__))
test_data_file = os.path.join(curr_dir, test_data_file_name)

def read_json_file(file):
    with open(file, encoding="utf8") as f:
        data = json.load(f)
    return data

test_data = read_json_file(test_data_file)

channel = grpc.insecure_channel('localhost:31024')
stub = pb2_grpc.UnaryStub(channel)

def test(request):
    try:
        response = stub.GetServerResponse(request)
        print(response, 'AAAAA')
        return response
    except Exception as e:
        return str(e)

def test_add_name(message):
    try:
        request = pb2.Message(
            message=message
        )
        test_response = test(request)
        return test_response
    except Exception as e:
        traceback.print_exc()
        return str(e)

if __name__ == "__main__":
    message = test_data["message"]
    #attribute_val = json_format.Parse(json.dumps(name), message='hi')
    api_response = test_add_name(message)
    print(api_response)
Please tell me how I can resolve this.
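Since the error text itself is not quoted, a first step is ruling out connectivity. Here is a minimal debugging sketch (my addition, not part of the original script) that checks whether the channel can actually reach the server before the stub is called:

import grpc

channel = grpc.insecure_channel('localhost:31024')
try:
    # block until the channel is ready, or give up after 5 seconds
    grpc.channel_ready_future(channel).result(timeout=5)
    print("channel is ready")
except grpc.FutureTimeoutError:
    print("no server reachable on localhost:31024")

If the channel is ready but the call still fails, the grpc.RpcError raised by stub.GetServerResponse carries .code() and .details(), which usually pinpoint whether the service name, method name, or message type disagrees with the server's .proto.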
Related
I want to create a SunRPC client in Python using the xdrlib library; the SunRPC server is already implemented in C. I have implemented an RPC client in Python over UDP by referencing the following link:
https://svn.python.org/projects/stackless/trunk/Demo/rpc/rpc.py
It gives a timeout error as well as a "cannot unpack NoneType object" error.
Can anyone guide me on how this can be done? I could not find any information on this online. Has anyone implemented this kind of code? I have been struggling with this for about a week now.
Here is my client code:
import rpc
import rpc_new
from tq_const import *
from tq_type import *
import tq_pack
import socket
import os

class PartialTQClient:
    def __init__(self):
        pass

    def addpackers(self):
        self.packer = tq_pack.TQPacker(self)
        self.unpacker = tq_pack.TQUnpacker(self, '')

    def unpack_month_temperatures(self):
        return self.unpacker.unpack_array(self.unpacker.unpack_uint)

    def call(self, month):
        res = self.make_call(0, month, self.packer.pack_uint, self.unpack_month_temperatures)
        return res

class UDPTQClient(PartialTQClient, rpc.RawUDPClient):
    def __init__(self, host):
        rpc.RawUDPClient.__init__(self, host, TQ_PROGRAM, TQ_VERSION, TQ_PORT)
        PartialTQClient.__init__(self)

if __name__ == "__main__":
    tqcl = UDPTQClient("127.0.0.1")
    print(tqcl)
    res = tqcl.call(12)
    #print("Got result", res)
Here is my server code:
import rpc
import rpc_new
from tq_const import *
from tq_type import *
import tq_pack
import socket
import os

class TQServer(rpc.UDPServer):
    print("Inside TQServer")

    def handle_0(self):
        print("Got request")
        m = self.unpacker.unpack_uint()
        print("Arguments was", m)
        self.turn_around()
        self.packer.pack_array([1, 2, 3], self.packer.pack_int)
        #res = PFresults(self, status=TRUE, phone="555-12345")
        #res.pack()

if __name__ == "__main__":
    s = TQServer("", TQ_PROGRAM, TQ_VERSION, TQ_PORT)
    print("Service started...", s)
    try:
        print("Trying")
        s.loop()
    finally:
        print("Service interrupted.")
When I run the client and server on localhost, I get the following error: TypeError: cannot unpack non-iterable NoneType object
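One way to narrow this down is to verify the XDR array round-trip independently of the RPC layer. Here is a minimal sketch using only the standard xdrlib module (assuming the server packs ints exactly as in handle_0 and the client unpacks uints as in unpack_month_temperatures):

import xdrlib

# pack an array of ints, as the server's handle_0 does
p = xdrlib.Packer()
p.pack_array([1, 2, 3], p.pack_int)

# unpack it as the client's unpack_month_temperatures does
u = xdrlib.Unpacker(p.get_buffer())
print(u.unpack_array(u.unpack_uint))  # expected: [1, 2, 3]

If the round-trip works in isolation, the NoneType error more likely comes from the RPC framing (for example, a reply that never arrives before the timeout) than from the packing code itself.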
I am having trouble with Flask-SocketIO. Here is the code I use:
import json
import queue
import threading
import time

from flask import Flask, render_template
from flask_socketio import SocketIO, emit
from engineio.async_drivers import gevent
from flask_cors import CORS
from gevent.pywsgi import WSGIServer
from geventwebsocket.handler import WebSocketHandler

app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app, cors_allowed_origins="*")

queue_notification_thread = queue.Queue()

def callback_notification(data):
    print("callback device {}".format(data))
    # args must be a tuple: (data,) rather than (data)
    notification_thread = threading.Thread(target=notification_job, args=(data,))
    notification_thread.start()
    queue_notification_thread.put(notification_thread)

def notification_job(data):
    print("callback device in notification job {}".format(data))
    socketio.emit("notification", data, broadcast=True)

@socketio.on('request')
def handle_message(data):
    Logger.instance().debug('received message: {}'.format(data))
    try:
        if data.__contains__('data'):
            response_message = dict()
            response_message['Devices'] = dict()
            response_message['Devices']['event'] = 'MY_EVENT'
            socketio.emit('notification', response_message, broadcast=True)
        else:
            Logger.instance().error('Can\'t parse data {}'.format(data))
    except OSError as err:
        print('Error: when process {} \n ValueError {}'.format(data, err))

@socketio.on_error_default  # handles all namespaces without an explicit error handler
def default_error_handler(e):
    print('An error occurred:')
    print(e)

if __name__ == '__main__':
    serialReader = SerialReader()
    serialReader.start_reading(callback_notification)
    http_server = WSGIServer(('', 5000), app, handler_class=WebSocketHandler)
    http_server.serve_forever()
And the reader, which invokes the callback asynchronously:
import threading
import time

import serial

class SerialController:
    serial_port: str
    serial_device: serial.Serial
    reading_thread: threading.Thread
    device_name: str

    def __init__(self, serial_port: str = "/dev/ttyACM0", baudrate=115200, read_timeout=0.2, device_name=''):
        self.serial_port = serial_port
        self.device_name = device_name
        self.serial_device = serial.Serial(port=self.serial_port, baudrate=baudrate, timeout=0.2)

    def start_reading(self, callback_function):
        self.reading_callback = callback_function
        # run the reading loop in a background thread
        self.reading_thread = threading.Thread(target=self.read_job)
        self.reading_thread.start()

    def read_job(self):
        available_data = 0
        while True:
            if self.serial_device.in_waiting > available_data:
                available_data = self.serial_device.in_waiting
                print('available_data {}'.format(available_data))
                time.sleep(0.1)
            else:
                if available_data != 0:
                    data = self.serial_device.readall()
                    available_data = 0
                    if data != b'' and data != b'\n':
                        if self.reading_callback is not None:
                            message = dict()
                            message["Reader"] = dict()
                            message["Reader"]["device"] = self.device_name
                            message["Reader"]["data"] = data
                            self.reading_callback(message)
                time.sleep(1)
When I receive a message in @socketio.on('request'), the broadcast emission works properly, with no delay. When I use callback_notification, called from my serial reader, the broadcast emission has a variable delay (from 1 second to 10 seconds).
On the server the message "callback device ..." is printed instantly, but the client receives the message only after a few seconds.
I tried making the emission call in a thread, as in the code shown, but there is no improvement.
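One thing worth trying (an assumption on my part; the question does not confirm the cause): with the gevent async driver, emits issued from a plain threading.Thread may not wake gevent's event loop promptly, which can produce exactly this kind of variable delay. Flask-SocketIO provides start_background_task to schedule work on the same async framework as the server, so a sketch of the change would be:

def callback_notification(data):
    print("callback device {}".format(data))
    # let Flask-SocketIO run the job on its own async framework (gevent here)
    # instead of a raw threading.Thread
    socketio.start_background_task(notification_job, data)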
Python throws a "SyntaxError" when I run the code below.

File "app.py", line 11
    @app.route('/')
    ^
SyntaxError: invalid syntax
I'm not sure what it means.
from flask import Flask, render_template
import urllib.request
import json
import time

app = Flask(__name__, template_folder='template')

namep = "PewDiePie"
namet = "TSeries"
key = "MY_API_KEY"

@app.route("/")
for x in range(5):
    time.sleep(2)
    datat = urllib.request.urlopen("https://www.googleapis.com/youtube/v3/channels?part=statistics&forUsername=" + namep + "&key=" + key).read()
    datap = urllib.request.urlopen("https://www.googleapis.com/youtube/v3/channels?part=statistics&forUsername=" + namet + "&key=" + key).read()
    subt = json.loads(datat)["items"][0]["statistics"]["subscriberCount"]
    subsp = json.loads(datap)["items"][0]["statistics"]["subscriberCount"]

def main():
    return render_template('index.html', pewds_sub=subsp, tseries_sub=subt)

if __name__ == "__main__":
    app.run(debug=True, host="0.0.0.0", port=80)
Any help regarding this is appreciated.
Thanks!
You must define a function immediately after the route decorator, i.e. after @app.route.
Updated code:
#app.route("/")
def function_main():
#all logics here
return render_template('index.html', pewds_sub = subsp, tseries_sub = subt)
Make sure to do your calculations inside the function, or else pass those values as arguments to the function.
from flask import Flask, render_template
import urllib.request
import json
import time

app = Flask(__name__, template_folder='template')

namep = "PewDiePie"
namet = "TSeries"
key = "MY_API_KEY"

@app.route("/")
def main():
    for x in range(5):
        time.sleep(2)
        datat = urllib.request.urlopen("https://www.googleapis.com/youtube/v3/channels?part=statistics&forUsername=" + namep + "&key=" + key).read()
        datap = urllib.request.urlopen("https://www.googleapis.com/youtube/v3/channels?part=statistics&forUsername=" + namet + "&key=" + key).read()
        subt = json.loads(datat)["items"][0]["statistics"]["subscriberCount"]
        subsp = json.loads(datap)["items"][0]["statistics"]["subscriberCount"]
    return render_template('index.html', pewds_sub=subsp, tseries_sub=subt)

if __name__ == "__main__":
    app.run(debug=True, host="0.0.0.0", port=80)
In my case, I had opened a try block just above, for the database connection, and forgot to add the except block; that is why I encountered this error.
So I suggest that anyone facing the same error check the code above @app.route('/'): if you have imported Flask properly, this should work fine. A syntax error reported on this statement usually indicates a problem above the line, not at the line itself.
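For illustration, minimal sketches of both failure modes described above (hypothetical snippets, not the asker's exact files):

# a decorator must be followed by a def (or class) statement;
# following it with a for loop raises SyntaxError here
@app.route("/")
for x in range(5):
    pass

# an unclosed try block above the decorator is reported the same way:
# the SyntaxError points at the decorator line, but the cause is above it
try:
    db = connect_to_database()  # hypothetical connection call
@app.route("/")
def index():
    return "ok"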
from requests import Session
from bs4 import BeautifulSoup
from multiprocessing.dummy import Pool as ThreadPool

s = Session()  # the session used below; missing from the original snippet

def get_total_pages():
    tut = []
    base_url = 'Your group '  # placeholder left by the asker; the real pattern needs a page marker such as %d
    for url in [base_url % i for i in range(1, 27)]:
        tut.append(url)
    print(tut)
    #get_data_from_page(tut)
    pool = ThreadPool(8)
    results = pool.map(get_data_from_page, tut)

def get_data_from_page(tut):
    f = open("emails.txt", 'a')
    email = []
    for a in tut:
        link = s.get(a).text
        soup = BeautifulSoup(link, 'lxml')
        links = soup.find('div', class_="mens").find_all('span', class_="inviz")
        for e in links:
            emails = e.text
            f.write(emails + ', ')
            email.append(emails)
    print(email)

def main():
    get_total_pages()

if __name__ == '__main__':
    main()
Running this results in an error saying it only works with multiprocessing, and then:
raise MissingSchema(error)
requests.exceptions.MissingSchema: Invalid URL 'h': No schema supplied. Perhaps you meant http://h?
The problem was in these lines:

for a in tut:
    link = s.get(a).text

pool.map already calls get_data_from_page once per URL, so inside the function tut is a single URL string, and the for loop iterates over its characters (which is where the invalid URL 'h' comes from). All that was needed, without the for loop, was:

link = s.get(tut).text
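A minimal sketch of the corrected worker under that fix (renaming the parameter to url for clarity, and using a with block so the file is closed properly):

def get_data_from_page(url):
    # pool.map calls this once per URL, so no loop over the argument is needed
    collected = []
    page = s.get(url).text
    soup = BeautifulSoup(page, 'lxml')
    spans = soup.find('div', class_="mens").find_all('span', class_="inviz")
    with open("emails.txt", 'a') as f:
        for e in spans:
            f.write(e.text + ', ')
            collected.append(e.text)
    print(collected)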
I am running Python 3 on an Ubuntu machine and have noticed that the following block of code is fickle. Sometimes it runs fine; other times it produces a segmentation fault, and I don't understand why. Can someone explain what might be going on?
Basically, the code tries to read the list of S&P 500 companies from Wikipedia and write their tickers to a file in the same directory as the script. If no connection to Wikipedia can be established, the script instead tries to read an existing list from the file.
from urllib import request
from urllib.error import URLError
from bs4 import BeautifulSoup
import os
import pickle
import dateutil.relativedelta as dr
import sys

sys.setrecursionlimit(100000)

def get_standard_and_poors_500_constituents():
    fname = (
        os.path.abspath(os.path.dirname(__file__)) + "/sp500_constituents.pkl"
    )
    try:
        # URL request, URL opener, read content.
        req = request.Request(
            "http://en.wikipedia.org/wiki/List_of_S%26P_500_companies"
        )
        opener = request.urlopen(req)
        # Convert bytes to UTF-8.
        content = opener.read().decode()
        soup = BeautifulSoup(content, "lxml")
        # HTML table we actually need is the first.
        tables = soup.find_all("table")
        external_class = tables[0].findAll("a", {"class": "external text"})
        c = [ext.string for ext in external_class if "reports" not in ext]
        with open(fname, "wb") as f:
            pickle.dump(c, f)
    except URLError:
        with open(fname, "rb") as f:
            c = pickle.load(f)
    finally:
        return c

sp500_constituents = get_standard_and_poors_500_constituents()
spdr_etf = "SPY"
sp500_index = "^GSPC"

def main():
    X = get_standard_and_poors_500_constituents()
    print(X)

if __name__ == "__main__":
    main()
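One plausible culprit (an educated guess; the question does not confirm it): ext.string is a BeautifulSoup NavigableString, which keeps a reference to the entire parse tree, so pickle.dump recurses through the whole document. With sys.setrecursionlimit(100000), that deep recursion can overflow the C stack and segfault instead of raising RecursionError, which would also explain why it only happens when the Wikipedia fetch succeeds. A sketch of the workaround is to pickle plain strings:

# convert NavigableString objects to plain str before pickling, so the
# dump no longer recurses through the BeautifulSoup parse tree
c = [str(ext.string) for ext in external_class if "reports" not in ext]
with open(fname, "wb") as f:
    pickle.dump(c, f)

With plain strings pickled, the large recursion limit (and likely the sys import) should no longer be needed.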