I'm trying to connect a wildlife camera to my SMTP server, but it keeps dropping the connection after being asked for its username. I've verified that this server works with other wildlife cameras and email clients, but it always seems to fail with this specific model of wildlife camera. I've tried no authentication, basic authentication and TLS, but none of them work (the camera does work with Gmail's SMTP, though).
This is the simple code I'm using.
It seems like I need to modify the challenge_auth method. My question is how I do that: do I just add another method to the custom handler alongside handle_DATA? (One possible approach is sketched below, after the code.)
import email
from email.header import decode_header
from email import message_from_bytes
from email.policy import default
from aiosmtpd.controller import Controller
from aiosmtpd.smtp import LoginPassword, AuthResult
import os
import sys
import time
import signal
import logging

## setting timezone
os.environ['TZ'] = "Europe/London"
time.tzset()


def onExit(sig, func=None):
    print("*************Stopping program*****************")
    controller.stop()
    exit()


signal.signal(signal.SIGTERM, onExit)


# removes the spaces and replaces with _ so they're valid folder names
def clean(text):
    return "".join(c if c.isalnum() else "_" for c in text)


log = logging.getLogger('mail.log')

auth_db = {
    b"TestCamera1@gmail.com": b"password1",
    b"user2": b"password2",
    b"TestCamera1": b"password1",
}


def authenticator_func(server, session, envelope, mechanism, auth_data):
    # this deliberately lets everything through
    assert isinstance(auth_data, LoginPassword)
    username = auth_data.login
    password = auth_data.password
    return AuthResult(success=True)


def configure_logging():
    file_handler = logging.FileHandler("aiosmtpd.log", "a")
    stderr_handler = logging.StreamHandler(sys.stderr)
    logger = logging.getLogger("mail.log")
    fmt = "[%(asctime)s %(levelname)s] %(message)s"
    datefmt = None
    formatter = logging.Formatter(fmt, datefmt, "%")
    stderr_handler.setFormatter(formatter)
    logger.addHandler(stderr_handler)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.setLevel(logging.DEBUG)


class CustomHandler:
    def handle_exception(self, error):
        print("exception occurred")
        print(error)
        return '542 Internal Server Error'

    async def handle_DATA(self, server, session, envelope):
        peer = session.peer
        data = envelope.content  # type: bytes
        msg = message_from_bytes(envelope.content, policy=default)
        # decode the email subject
        print("Msg:{}".format(msg))
        print("Data:{}".format(data))
        print("All of the relevant data has been extracted from the email")
        return '250 OK'


if __name__ == '__main__':
    configure_logging()
    handler = CustomHandler()
    # update hostname to your IP
    controller = Controller(handler, hostname='0.0.0.0', port=587,
                            authenticator=authenticator_func,
                            auth_required=True, auth_require_tls=False)
    # Run the event loop in a separate thread.
    controller.start()
    while True:
        time.sleep(10)
Here are the logs from a Reolink Go camera that can connect successfully. (I've updated the format in which 'Username' is sent, e.g. from 'User Name:' to 'Username', by editing the library, but that hasn't seemed to help with the Suntek camera. I thought it might be pickier about the format due to cheaper, less robust firmware.)
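For reference, here is a rough, untested sketch of how the AUTH challenge text could be changed without patching the installed library, by subclassing aiosmtpd's SMTP class and plugging it in through the Controller. This assumes a recent aiosmtpd (1.3+), where challenge_auth() sends the "334 <base64 challenge>" line and Controller.factory() builds the SMTP instance from self.SMTP_kwargs; the exact challenge strings and the 'Username' remapping are only guesses at what the Suntek firmware expects.

from aiosmtpd.controller import Controller
from aiosmtpd.smtp import SMTP

class CameraFriendlySMTP(SMTP):
    async def challenge_auth(self, challenge, *args, **kwargs):
        # Remap the default LOGIN prompts to the bare word some firmwares expect
        # (hypothetical mapping; adjust to whatever the camera actually accepts).
        remap = {"User Name": "Username", "User Name:": "Username"}
        challenge = remap.get(challenge, challenge)
        return await super().challenge_auth(challenge, *args, **kwargs)

class CameraFriendlyController(Controller):
    def factory(self):
        # Same construction as the stock Controller, just with the subclass above.
        return CameraFriendlySMTP(self.handler, **self.SMTP_kwargs)

With that in place, the __main__ block would construct CameraFriendlyController(...) with the same arguments instead of Controller(...); handle_DATA stays on the handler as before.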
I'm trying to implement an async RPC client within a Flask server.
The idea is that each request spawns a thread with a uuid, and each request then waits until there is a response with the correct uuid in the RpcClient's queue attribute.
The problem is that one request out of two fails. I think it might be a multi-threading problem, but I don't see where it comes from.
The bug can be seen here.
Using debug prints, it seems that the message with the correct uuid is received in the _on_response callback and updates the queue attribute of that instance correctly, but the queue attribute seen inside the /rpc_call/<payload> endpoint doesn't reflect it (so queue[uuid] holds the response in the RpcClient callback but is still None in the scope of the endpoint).
My code:
from flask import Flask, jsonify
from gevent.pywsgi import WSGIServer
import sys
import os
import pika
import uuid
import time
import threading


class RpcClient(object):
    """Asynchronous Rpc client."""
    internal_lock = threading.Lock()
    queue = {}

    def __init__(self):
        self.connection = pika.BlockingConnection(
            pika.ConnectionParameters(host='rabbitmq'))
        self.channel = self.connection.channel()
        self.channel.basic_qos(prefetch_count=1)
        self.channel.exchange_declare(exchange='kaldi_expe', exchange_type='topic')

        # Create all the queues and bind them to the corresponding routing keys
        self.channel.queue_declare('request', durable=True)
        result = self.channel.queue_declare('answer', durable=True)
        self.channel.queue_bind(exchange='kaldi_expe', queue='request', routing_key='kaldi_expe.web.request')
        self.channel.queue_bind(exchange='kaldi_expe', queue='answer', routing_key='kaldi_expe.kaldi.answer')
        self.callback_queue = result.method.queue

        thread = threading.Thread(target=self._process_data_events)
        thread.setDaemon(True)
        thread.start()

    def _process_data_events(self):
        self.channel.basic_consume(self.callback_queue, self._on_response, auto_ack=True)
        while True:
            with self.internal_lock:
                self.connection.process_data_events()
                time.sleep(0.1)

    def _on_response(self, ch, method, props, body):
        """On response we simply store the result in a local dictionary."""
        self.queue[props.correlation_id] = body

    def send_request(self, payload):
        corr_id = str(uuid.uuid4())
        self.queue[corr_id] = None
        with self.internal_lock:
            self.channel.basic_publish(exchange='kaldi_expe',
                                       routing_key="kaldi_expe.web.request",
                                       properties=pika.BasicProperties(
                                           reply_to=self.callback_queue,
                                           correlation_id=corr_id,
                                       ),
                                       body=payload)
        return corr_id


def flask_app():
    app = Flask("kaldi")

    @app.route('/', methods=['GET'])
    def server_is_up():
        return 'server is up', 200

    @app.route('/rpc_call/<payload>')
    def rpc_call(payload):
        """Simple Flask implementation for making asynchronous Rpc calls."""
        corr_id = app.config['RPCclient'].send_request(payload)
        while app.config['RPCclient'].queue[corr_id] is None:
            # print("queue server: " + str(app.config['RPCclient'].queue))
            time.sleep(0.1)
        return app.config['RPCclient'].queue[corr_id]

    return app


if __name__ == '__main__':
    while True:
        try:
            rpcClient = RpcClient()
            app = flask_app()
            app.config['RPCclient'] = rpcClient
            print("Rabbit MQ is connected, starting server", file=sys.stderr)
            app.run(debug=True, threaded=True, host='0.0.0.0')
        except pika.exceptions.AMQPConnectionError as e:
            print("Waiting for RabbitMq startup" + str(e), file=sys.stderr)
            time.sleep(1)
        except Exception as e:
            print(e, file=sys.stderr)
            exit(e)
I found where the bug came from:
The debug=True in the line app.run(debug=True, threaded=True, host='0.0.0.0') restarts the server right after it starts.
The whole script is then re-run from the beginning. Because of that, another RpcClient is initialized and consumes from the same queue, while the previous thread is still running. This leaves two RpcClient instances consuming from the same queue, one of which is effectively useless.
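For reference, a minimal sketch of one way to keep the debugger without the double initialization: disable Werkzeug's reloader via the standard use_reloader option of app.run(), so the module is only imported once and only one RpcClient consumer is created.

if __name__ == '__main__':
    rpcClient = RpcClient()
    app = flask_app()
    app.config['RPCclient'] = rpcClient
    # use_reloader=False keeps debug tracebacks but stops Werkzeug from
    # re-importing the module and spawning a second RpcClient consumer.
    app.run(debug=True, use_reloader=False, threaded=True, host='0.0.0.0')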
Hello, I am playing around with RabbitMQ and I have a question. I have many parallel nodes running that are connected to the rabbitmq-server. The thing is that the connections are listed as ip:port, and under ip:port there is a '?'. Is there a way to set up a friendly name from Python 3.6?
P.S. I have tried something like this without success:
parameters = pika.ConnectionParameters(
    host='localhost',
    virtual_host='/',
    client_properties={
        'connection_name': 'random name',
    },
)
connection = pika.BlockingConnection(parameters)
P.S. I also found a related question about Golang here: How to set connection friendly name.
Versions:
pika = '1.1.0'
RabbitMq = '3.8.0'
Erlang = '22.1.1'
Python = '3.6.8'
Any ideas? :)
This code sets the connection name as you can see here:
import functools
import logging
import pika

LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
              '-35s %(lineno) -5d: %(message)s')
LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)


def on_message(chan, method_frame, _header_frame, body, userdata=None):
    """Called when a message is received. Log message and ack it."""
    LOGGER.info('Userdata: %s Message body: %s', userdata, body)
    chan.basic_ack(delivery_tag=method_frame.delivery_tag)


def main():
    """Main method."""
    credentials = pika.PlainCredentials('guest', 'guest')
    props = {'connection_name': 'https://stackoverflow.com/q/58275505/1466825'}
    parameters = pika.ConnectionParameters('localhost', credentials=credentials, client_properties=props)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    channel.exchange_declare(
        exchange='test_exchange',
        exchange_type='direct',
        passive=False,
        durable=True,
        auto_delete=False)
    channel.queue_declare(queue='standard', auto_delete=True)
    channel.queue_bind(
        queue='standard', exchange='test_exchange', routing_key='standard_key')
    channel.basic_qos(prefetch_count=1)
    on_message_callback = functools.partial(
        on_message, userdata='on_message_userdata')
    channel.basic_consume('standard', on_message_callback)
    try:
        channel.start_consuming()
    except KeyboardInterrupt:
        channel.stop_consuming()
    connection.close()


if __name__ == '__main__':
    main()
NOTE: the RabbitMQ team monitors the rabbitmq-users mailing list and only sometimes answers questions on StackOverflow.
I have a ChatBot application running and I just want to hook this application up to the Slack API as its interface.
I used Slack RTM and maintained the user session with its Slack user-id.
Finally solved it and wrote a client (API) which can easily connect to any conversation engine.
GitHub repo link:
https://github.com/csemanmohan/Slack_api_client
import time
import re
from slackclient import SlackClient
import requests

# 'url' is the chatbot endpoint and 'slack_token' is the Slack application user access token
url = "http://127.0.0.1:****/*******/v2/api"
slack_token = "xoxb-**********-***********-*************lipO8hoI"

# instantiate Slack client
slack_client = SlackClient(slack_token)
# starterbot's user ID in Slack: value is assigned after the bot starts up
starterbot_id = None

# constants
RTM_READ_DELAY = 1  # 1 second delay between reading from RTM
EXAMPLE_COMMAND = "do"
MENTION_REGEX = "^<@(|[WU].+?)>(.*)"


def parse_bot_commands(slack_events):
    """
    Parses a list of events coming from the Slack RTM API to find bot commands.
    If a bot command is found, this function returns a tuple of command and channel.
    If it's not found, then this function returns None, None.
    """
    # the vars msg and channel_def below are used
    # when no trigger (@app-name) is passed from the application
    msg = ""
    channel_def = ""
    for event in slack_events:
        if event["type"] == "message" and not "subtype" in event:
            msg = event["text"]
            channel_def = event["channel"]
            user_id, message = parse_direct_mention(event["text"])
            print("there is an event here...", user_id, message)
            if user_id == starterbot_id:
                return message, event["channel"]
            channel_def = channel_def
    return msg, channel_def


def parse_direct_mention(message_text):
    """
    Finds a direct mention (a mention that is at the beginning) in message text
    and returns the user ID which was mentioned. If there is no direct mention, returns None.
    """
    matches = re.search(MENTION_REGEX, message_text)
    # the first group contains the username, the second group contains the remaining message
    return (matches.group(1), matches.group(2).strip()) if matches else (None, None)


def handle_command(command, channel):
    """
    Executes bot command if the command is known.
    """
    # Default response is help text for the user
    default_response = "Not sure what you mean. Try *{}*.".format(EXAMPLE_COMMAND)
    # Below code snippet makes the API call to the ChatBot
    input_text = command
    payload = {"text": input_text, "email": "manmohan@m******.com"}
    headers = {'content-type': "application/json"}
    resp = requests.request("POST", url, json=payload, headers=headers)
    result = eval(resp.json())
    print("result is: ", result)
    response = result['text']
    # Sends the response back to the channel
    slack_client.api_call(
        "chat.postMessage",
        channel=channel,
        text=response or default_response
    )


if __name__ == "__main__":
    if slack_client.rtm_connect(with_team_state=False):
        print("Starter Bot connected and running!")
        # Read bot's user ID by calling Web API method `auth.test`
        starterbot_id = slack_client.api_call("auth.test")["user_id"]
        while True:
            command, channel = parse_bot_commands(slack_client.rtm_read())
            if command:
                handle_command(command, channel)
            time.sleep(RTM_READ_DELAY)
    else:
        print("Connection failed. Exception traceback printed above.")
The Python 3 fetch_token method in this library does not check the response status before consuming the response. If the API call it makes fails, the response will be invalid and the script crashes. Is there something I can set so that an exception is raised on a non-success response before the library reads the response?
import requests
from requests.auth import HTTPBasicAuth
from requests_oauthlib import OAuth2Session
from oauthlib.oauth2 import BackendApplicationClient
from oauthlib.oauth2 import OAuth2Error

AUTH_TOKEN_URL = "https://httpstat.us/500"  # For testing
AUTH = HTTPBasicAuth("anID", "aSecret")
CLIENT = BackendApplicationClient(client_id="anID")
SCOPES = "retailer.orders.write"
MAX_API_RETRIES = 4


class MyApp:
    def __init__(self):
        """Initialize ... and obtain initial auth token for request"""
        self.client = OAuth2Session(client=CLIENT)
        self.client.headers.update(
            {
                "Content-Type": "application/json"
            }
        )
        self.__authenticate()

    def __authenticate(self):
        """Obtain auth token."""
        server_errors = 0
        # This needs more work. fetch_token is not raising errors but failing
        # instead.
        while True:
            try:
                self.token = self.client.fetch_token(
                    token_url=AUTH_TOKEN_URL, auth=AUTH, scope=SCOPES
                )
                break
            except (OAuth2Error, requests.exceptions.RequestException) as e:
                server_errors = MyApp.__process_retry(
                    server_errors, e, None, MAX_API_RETRIES
                )

    @staticmethod
    def __process_retry(errors, exception, resp, max_retries):
        # Log and process retries
        # ...
        return errors + 1


MyApp()  # Try it out
You can add a "compliance hook" that will be passed the Response object from requests before the library attempts to parse it, like so:
def raise_on_error(response):
    response.raise_for_status()
    return response

self.client.register_compliance_hook('access_token_response', raise_on_error)
Depending on exactly when you may get errors, you might want to do this with 'refresh_token_response' and/or 'protected_request' as well. See the docstring for the register_compliance_hook method for more info.
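To make the placement concrete, here is a minimal sketch of wiring that hook into the class from the question. register_compliance_hook and the 'access_token_response' hook name come from requests-oauthlib; registering it in __init__ before __authenticate() runs is just one reasonable choice. Since raise_for_status() raises requests.HTTPError, a subclass of requests.exceptions.RequestException, the existing except clause in __authenticate() will catch it and go through the retry path.

class MyApp:
    def __init__(self):
        self.client = OAuth2Session(client=CLIENT)
        self.client.headers.update({"Content-Type": "application/json"})

        def raise_on_error(response):
            # Raise requests.HTTPError on 4xx/5xx before oauthlib tries to
            # parse the body as a token response.
            response.raise_for_status()
            return response

        self.client.register_compliance_hook("access_token_response", raise_on_error)
        self.__authenticate()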
I'm a Flask newbie trying to create a simple app. I'm currently stuck at user registration, where I'm trying to save data in the database but it's not happening. However, the logging I'm doing indicates that the operation succeeded. Could someone tell me what I'm doing wrong?
Here are portions of code that'll help you understand what I'm trying to do:
from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash
from flask.ext.mysqldb import MySQL

# Configuration
MYSQL_HOST = 'localhost'
MYSQL_USER = 'root'
MYSQL_PASSWORD = 'root'
MYSQL_DB = 'up2date'
DEBUG = True
SECRET_KEY = '\xc6)\x0f\\\xc5\x86*\xd7[\x92\x89[\x95\xcfD\xfd\xc1\x18\x8e\xf1P\xf7_\r'

# Create the flask app
app = Flask(__name__)
app.config.from_object(__name__)

# Create instance for working with MySQL
mysql = MySQL(app)


# Function to connect to DB
def connect_db():
    return mysql.connection.cursor()


# define functions that will make the DB available automatically on each request
@app.before_request
def before_request():
    g.db = connect_db()


@app.teardown_request
def teardown_request(exception):
    g.db.close()
And finally, the code that performs user registration:
@app.route('/register', methods=['GET', 'POST'])
def register():
    if request.method == 'POST':
        email = request.form['email']
        password = request.form['password']
        result = g.db.execute('INSERT INTO users (email, password) VALUES (%s, %s)', [email, password])
        print(email, password)
        print(result, " rows affected")
        flash('Registration successful! You may log in now.')
        return redirect(url_for('show_home'))
The two print statements confirm that the email address and password were captured correctly, and the result variable contains 1, indicating 1 row affected. But there's still no row in the DB. I thought earlier that this had something to do with committing, but g.db.commit() throws an error: AttributeError: 'Cursor' object has no attribute 'commit'.
I assume you use MySQL-python.
connect_db() returns the cursor and not the connection. The cursor does not have a commit() function, as the exception says, but the connection has the commit() function you need. I think you need to do this:
def connect_db():
    return mysql.connection
For more info you can take a look at the code.
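As an illustration, here is a minimal sketch of how the register view could then look once g.db holds the connection: open a cursor per request, execute the INSERT, and commit on the connection. The table and column names are taken from the question; whether to keep the explicit close() in teardown_request is a separate choice, since the Flask-MySQLdb extension may already close the connection on teardown.

@app.route('/register', methods=['GET', 'POST'])
def register():
    if request.method == 'POST':
        email = request.form['email']
        password = request.form['password']
        cur = g.db.cursor()  # g.db is now the connection returned by connect_db()
        cur.execute('INSERT INTO users (email, password) VALUES (%s, %s)',
                    (email, password))
        g.db.commit()  # persist the INSERT; without this the row is discarded
        cur.close()
        flash('Registration successful! You may log in now.')
        return redirect(url_for('show_home'))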