I'm trying to run a Python traceroute script via Netmiko, but the trace gets stuck after 5-7 hops (not the 30 it should reach). Here is my script:
#!/usr/bin/env python
from netmiko import ConnectHandler
from auto_encrypter_new import *

ios = {
    'device_type': 'cisco_ios',
    'ip': 'my_ip_device',
    'username': user_encrypt,
    'password': passwd_encrypt,
}

net_connect = ConnectHandler(**ios)
output = net_connect.send_command_timing('trace dest_ip')
print(output)
I only get 6 hops. When I run the same traceroute directly on our router I get more hops. Any suggestion why?
Thanks
This might be due to a timeout issue. Try:
output = net_connect.send_command_timing(f'trace {dest_ip}', delay_factor=4)
delay_factor is explained here.
If it still doesn't work, try driving the channel directly:
import time
from netmiko import ConnectHandler
from auto_encrypter_new import *

ios = {
    'device_type': 'cisco_ios',
    'ip': 'my_ip_device',
    'username': user_encrypt,
    'password': passwd_encrypt,
}

net_connect = ConnectHandler(**ios)
dest_ip = '8.8.8.8'
net_connect.write_channel(f'trace {dest_ip}\n')  # the trailing newline actually submits the command
time.sleep(10)  # give the device time to finish; test it and adjust the timing if needed
output = net_connect.read_channel()
print(output)
I'm trying to connect a wildlife camera to my SMTP server, but it keeps dropping the connection after being asked for its username. I've verified that this server works with other wildlife cameras and email clients, but it always seems to fail with this specific model of wildlife camera. I've tried no authentication, basic authentication, and TLS, but none of them work (the camera works with Gmail's SMTP, though).
This is the simple code I'm using.
It seems like I need to modify the challenge_auth method. My question is how to do that: do I just add another method to the custom handler alongside handle_DATA?
import email
from email.header import decode_header
from email import message_from_bytes
from email.policy import default
from aiosmtpd.controller import Controller
from aiosmtpd.smtp import LoginPassword, AuthResult
import os
import sys
import time
import signal
import logging

## setting timezone
os.environ['TZ'] = "Europe/London"
time.tzset()

def onExit(sig, func=None):
    print("*************Stopping program*****************")
    controller.stop()
    exit()

signal.signal(signal.SIGTERM, onExit)

# removes the spaces and replaces with _ so they're valid folder names
def clean(text):
    return "".join(c if c.isalnum() else "_" for c in text)

log = logging.getLogger('mail.log')

auth_db = {
    b"TestCamera1@gmail.com": b"password1",
    b"user2": b"password2",
    b"TestCamera1": b"password1",
}

def authenticator_func(server, session, envelope, mechanism, auth_data):
    # this deliberately lets everything through
    assert isinstance(auth_data, LoginPassword)
    username = auth_data.login
    password = auth_data.password
    return AuthResult(success=True)

def configure_logging():
    file_handler = logging.FileHandler("aiosmtpd.log", "a")
    stderr_handler = logging.StreamHandler(sys.stderr)
    logger = logging.getLogger("mail.log")
    fmt = "[%(asctime)s %(levelname)s] %(message)s"
    datefmt = None
    formatter = logging.Formatter(fmt, datefmt, "%")
    stderr_handler.setFormatter(formatter)
    logger.addHandler(stderr_handler)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.setLevel(logging.DEBUG)

class CustomHandler:
    def handle_exception(self, error):
        print("exception occurred")
        print(error)
        return '542 Internal Server Error'

    async def handle_DATA(self, server, session, envelope):
        peer = session.peer
        data = envelope.content  # type: bytes
        msg = message_from_bytes(envelope.content, policy=default)
        # decode the email subject
        print("Msg:{}".format(msg))
        print("Data:{}".format(data))
        print("All of the relevant data has been extracted from the email")
        return '250 OK'

if __name__ == '__main__':
    configure_logging()
    handler = CustomHandler()
    # update hostname to your IP
    controller = Controller(handler, hostname='0.0.0.0', port=587,
                            authenticator=authenticator_func,
                            auth_required=True, auth_require_tls=False)
    # Run the event loop in a separate thread.
    controller.start()
    while True:
        time.sleep(10)
Here are the logs from a Reolink Go camera that can connect successfully. (I've updated the format in which 'Username' is sent, e.g. from 'User Name:' to 'Username', by editing the library, but that hasn't seemed to help with the Suntek camera. I thought it might be pickier about the format because of cheaper, less robust firmware.)
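If it is the prompt format, you shouldn't need to edit the installed library. A minimal sketch, assuming aiosmtpd 1.4.x, where the LOGIN prompts live in the SMTP class attributes AuthLoginUsernameChallenge/AuthLoginPasswordChallenge, Controller builds its server in factory(), and extra keyword arguments are kept in SMTP_kwargs; the exact prompt strings below are guesses at what the firmware expects:

# a sketch, not a tested fix: override the LOGIN prompts by subclassing
# aiosmtpd's SMTP class instead of patching the library in place
from aiosmtpd.controller import Controller
from aiosmtpd.smtp import SMTP

class CameraFriendlySMTP(SMTP):
    # assumption: the camera only accepts these exact challenge strings
    AuthLoginUsernameChallenge = "Username:"
    AuthLoginPasswordChallenge = "Password:"

class CameraController(Controller):
    def factory(self):
        # factory() is the hook for substituting the SMTP implementation;
        # SMTP_kwargs carries authenticator/auth_required etc. through
        return CameraFriendlySMTP(self.handler, **self.SMTP_kwargs)

# then build the server exactly as before, just with the subclass:
# controller = CameraController(handler, hostname='0.0.0.0', port=587, ...)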
I have a question about sanic/asyncpg performance.
During testing, a weird thing kept happening (maybe it is by design).
First let me explain the testing procedure. It is simple:
I use locust to push the server as hard as possible by setting the max user count.
The testing script is:
from locust import HttpLocust, TaskSet, task, between

class UserActions(TaskSet):
    @task(1)
    def test_point_1(self):
        self.client.get(
            '/json_1',
            headers={'Content-Type': 'application/json'}
        )

    @task(2)
    def test_point_2(self):
        self.client.get(
            '/json_2',
            headers={'Content-Type': 'application/json'}
        )

class ApplicationUser(HttpLocust):
    task_set = UserActions
    wait_time = between(0, 0)
It is used to test the following code. Notice that asyncpg calls the PostgreSQL sleep function to simulate load:
import asyncio

import uvloop
from asyncpg import create_pool
from sanic import Sanic, response
from sanic.log import logger
import aiotask_context as context

app = Sanic(__name__)

DATABASE = {
    'type': 'postgresql',
    'host': '127.0.0.1',
    'user': 'test_user',
    'port': '5432',
    'password': 'test_password',
    'database': 'test_database'
}

conn_uri = '{0}://{1}:{2}@{3}:{4}/{5}'.format(
    'postgres',
    DATABASE['user'], DATABASE['password'], DATABASE['host'],
    DATABASE['port'], DATABASE['database'])

@app.route("/json_1")
async def handler_json_1(request):
    async with request.app.pg.acquire() as connection:
        await connection.fetchrow('SELECT pg_sleep(0.85);')
    return response.json({"foo": "bar"})

@app.route("/json_2")
async def handler_json_2(request):
    async with request.app.pg.acquire() as connection:
        await connection.fetchrow('SELECT pg_sleep(0.2);')
    return response.json({"foo": "bar"})

@app.listener("before_server_start")
async def listener_before_server_start(*args, **kwargs):
    try:
        pg_pool = await create_pool(
            conn_uri, min_size=2, max_size=10,
            server_settings={'application_name': 'test_backend'})
        app.pg = pg_pool
    except Exception as bss_error:
        logger.error('before_server_start_test erred with :{}'.format(bss_error))
        app.pg = None

@app.listener("after_server_start")
async def listener_after_server_start(*args, **kwargs):
    # print("after_server_start")
    pass

@app.listener("before_server_stop")
async def listener_before_server_stop(*args, **kwargs):
    # print("before_server_stop")
    pass

@app.listener("after_server_stop")
async def listener_after_server_stop(*args, **kwargs):
    # print("after_server_stop")
    pass

if __name__ == '__main__':
    asyncio.set_event_loop(uvloop.new_event_loop())
    server = app.create_server(host="0.0.0.0", port=8282, return_asyncio_server=True)
    loop = asyncio.get_event_loop()
    loop.set_task_factory(context.task_factory)
    task = asyncio.ensure_future(server)
    try:
        loop.run_forever()
    except Exception as lerr:
        logger.error('Loop run error: {}'.format(lerr))
        loop.stop()
The issue is that after a random amount of time the server becomes unresponsive (it does not return 503 or any other code) for roughly 60 seconds. The process also hangs: I can see it with ps aux, and Ctrl+C cannot kill it.
That is problematic because it is hard to detect, and it is difficult to determine a rate at which we can safely send requests to the server.
Could this be a configuration issue (sanic/asyncpg)?
Is setting an nginx/sanic request timeout the only option to circumvent this problem?
Your asyncpg pool is limited to 10 connections, so at most 10 requests are served at a time. If each takes 0.2 sec, your maximum possible throughput is 1 sec / 0.2 sec * 10 pool size = 50 RPS. Beyond that, every incoming request just waits for a connection; the queue of pending requests grows much faster than your ability to serve it, and your server becomes unresponsive.
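One way to make the overload visible instead of silent is to bound how long a handler waits for a pool connection. A minimal sketch against the /json_2 handler above (asyncio, app, and response are already imported in that file); the 0.5 s timeout and the error body are illustrative values, not recommendations:

@app.route("/json_2")
async def handler_json_2(request):
    try:
        # asyncpg's Pool.acquire() accepts a timeout in seconds and raises
        # asyncio.TimeoutError when no connection frees up in time
        async with request.app.pg.acquire(timeout=0.5) as connection:
            await connection.fetchrow('SELECT pg_sleep(0.2);')
    except asyncio.TimeoutError:
        # surface overload as a fast 503 instead of an unbounded queue
        return response.json({"error": "database busy"}, status=503)
    return response.json({"foo": "bar"})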
When I run the script, it only returns output from the first device.
#!/usr/local/bin/python3.6
import netmiko
from netmiko import ConnectHandler
import getpass
from getpass import getpass

exceptions = (netmiko.ssh_exception.NetMikoTimeoutException,
              netmiko.ssh_exception.NetMikoAuthenticationException)

router = {
    'device_type': 'cisco_ios',
    'ip': '10.5.5.1',
    'username': 'admin',
    'password': getpass(),
    'secret': getpass("Enable: "),
    'global_delay_factor': 2,
}

switch = {
    'device_type': 'cisco_ios',
    'ip': '10.5.5.2',
    'username': 'admin',
    'password': getpass(),
    'secret': getpass("Enable: "),
    'global_delay_factor': 2,
}

list_of_devices = [router, switch]

for devices in list_of_devices:
    connector = ConnectHandler(**devices)
connector.enable()
print(connector)
output = connector.find_prompt()
output += connector.send_command('show ip arp', delay_factor=2)
print(output)
connector.disconnect()
You need to have all of the Netmiko actions inside the for loop. With your current code, you establish the connection to the first device, then move on to the second device and do something with it; you never actually do anything with the first device, since the only thing inside the for loop is the ConnectHandler call.
So something like this (for the for-loop section):
list_of_devices = [router, switch]

for devices in list_of_devices:
    connector = ConnectHandler(**devices)
    connector.enable()
    print(connector)
    output = connector.find_prompt()
    output += connector.send_command('show ip arp', delay_factor=2)
    print(output)
    connector.disconnect()
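As an aside, your script defines an exceptions tuple but never uses it. A sketch of putting it to work inside the same loop, so one unreachable device doesn't abort the whole run (the message format is just an illustration):

for devices in list_of_devices:
    try:
        connector = ConnectHandler(**devices)
        connector.enable()
        output = connector.find_prompt()
        output += connector.send_command('show ip arp', delay_factor=2)
        print(output)
        connector.disconnect()
    except exceptions as err:
        # timeout or authentication failure: report it and move on
        print("Failed on {}: {}".format(devices['ip'], err))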
My program gets parameters from a web URL, and in my Tornado API I want to accept them one by one. Right now my code accepts a single tuple. The API needs to fetch the four parameters (southwest-lat, southwest-lng, northeast-lat, northeast-lng) that come from the web URL request; I will use them in the get method of the Tornado handler to find matches in my database. I want to know how to accept them separately in Tornado. This is my current working code:
import socket
import tornado.web
import tornado.netutil
import tornado.process
from datetime import datetime
import pymongo
from pymongo import MongoClient
from pprint import pprint
import tornado.httpserver
import tornado.ioloop
import tornado.options
from tornado.options import define, options

# apicall/v1/geoloc.json?geoquery=True&southwest_lat=''&southwest_lng=''&northeast_lat=''&northeast_lng=''

# tornado port
port = 8088
host = "127.0.0.1"

# make a connection with mongodb
# client = MongoClient()

class MainHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("hello APIs")

# the handler in question is here
class VisualizationHandler(tornado.web.RequestHandler):
    def get(self, *args):  # how to accept the input from the map?
        self.open_args = args
        # self.open_kwargs = kwargs
        print(self.open_args)
        print(type(self.open_args))
        # self.write("Data is at the terminal")
        # client = MongoClient("mongodb://localhost:27017")
        # db = client.test
        # docs = db.neighborhoods.findOne()
        # if docs is None:
        #     print("Not found")
        # else:
        #     print(docs)
        # lat = self.get_argument('lat', True)
        # long = self.get_argument('long', True)
        # self.write(lat)
        # self.write(long)
        # var neighborhood = db.neighborhoods.findOne( { geometry: { $geoIntersects: { $geometry: { type: "Point", coordinates: [ slong, slat] } } } } )

def main():
    application = tornado.web.Application([
        (r"/", MainHandler),
        (r"/geo/(.*)", VisualizationHandler),
    ])
    try:
        sockets = tornado.netutil.bind_sockets(port, address=host)
        print("Socket binding successful to {0}:{1}".format(host, port))
    except socket.error:
        print("Socket binding failed to {0}:{1}".format(host, port))
        return
    try:
        _id = tornado.process.fork_processes(0, max_restarts=3)
        print("Process forked : {}".format(_id))
        server = tornado.httpserver.HTTPServer(application)
        server.add_sockets(sockets)
        tornado.ioloop.IOLoop.current().start()
    except KeyboardInterrupt:  # stop with Ctrl+C from shell
        print("Tornado is stopping")

if __name__ == "__main__":
    print("Tornado is starting")
    main()
The get() method's args/kwargs are retrieved from the URL (from the URL regex's match groups, to be specific). You can create something like this to pass arguments to your function:
(r"/geo/(.*)/(.*)", VisualizationHandler)
or
(r"/geo/(?P<south_lat>.*)/(?P<south_long>.*)", VisualizationHandler)
and the args/kwargs south_lat and south_long would be passed to your function from a URL like /geo/123/456, but it's inconvenient. I'd advise you to pass your arguments as URL params (/geo?south_lat=123&south_long=456) and read them using get_argument().
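A minimal sketch of the get_argument() route, using the four parameter names from the commented URL in your code; the float() casts and the response shape are assumptions about how you want to consume the values:

class VisualizationHandler(tornado.web.RequestHandler):
    def get(self):
        # /geo?southwest_lat=..&southwest_lng=..&northeast_lat=..&northeast_lng=..
        # get_argument() raises a 400 MissingArgumentError if a param is absent
        southwest_lat = float(self.get_argument("southwest_lat"))
        southwest_lng = float(self.get_argument("southwest_lng"))
        northeast_lat = float(self.get_argument("northeast_lat"))
        northeast_lng = float(self.get_argument("northeast_lng"))
        # writing a dict makes Tornado serialize it as JSON
        self.write({"southwest": [southwest_lat, southwest_lng],
                    "northeast": [northeast_lat, northeast_lng]})

# and register it without a capture group:
# (r"/geo", VisualizationHandler)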
I need to play sound on the Raspberry Pi with a Node.js script. Everything is fine when I start the script from the command line myself. When I run the same script at startup from /etc/rc.local, I can see the running process with "ps aux" but I can't hear any sound. I also tried starting the node script after 40 seconds, because I thought there was too little time for some initialization, but within that time I could start the script from the command line and hear sound.
I tried both users, root and pi; both work from the command line (and the user that runs the script after autostart is root).
I linked my program to /usr/bin/node, because otherwise the process wasn't able to start on startup.
I forced the Raspberry Pi to use the stereo jack: amixer cset numid=3 1
My node.js code is:
var fs = require("fs");
var lame = require("lame");
var Speaker = require("speaker");
var SerialPort = require("serialport").SerialPort;

var playing = false;

var stream = [];
stream[0] = "sound1.mp3";
stream[1] = "sound2.mp3";
stream[2] = "sound3.mp3";
stream[3] = "sound4.mp3";

var getCurrentStream = function(){
    var i = Math.round( Math.random() * 3 );
    return stream[i];
}

var serialPort = new SerialPort("/dev/ttyACM0", {
    baudrate: 9600
}, false);

serialPort.open(function(){
    console.log("open");
    serialPort.on("data", function(data){
        console.log("data received" + data);
        if(!playing){
            try{
                var currentStream = fs.createReadStream( getCurrentStream() );
                var speaker = new Speaker();
                speaker.on('finish', function(){
                    playing = false;
                });
                currentStream.pipe(new lame.Decoder()).pipe(speaker);
                playing = true;
            }
            catch(e){
                console.log("Error: " + e);
            }
        }
    });
});
For the startup I tried it as a cronjob; after crontab -e I appended:
@reboot /opt/node/bin/forever start /var/www/node/residenz/server.js
I also tried the same inside the file /etc/rc.local:
/opt/node/bin/forever start /var/www/node/residenz/server.js
Thanks for any help!
I had the same problem, and this question (and analyzing the answer) gave me hope that it was possible, but for me the paths were the problem: I was using a relative path, but the working directory (and perhaps the user?) under cron meant the path to the file needed to be absolute. BTW, I used cron, Python, and pygame (pygame.mixer.music) and was able to make it work.
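For anyone hitting the same thing, a short sketch that derives the absolute path from the script's own location, so the file resolves the same way from a shell and from cron (the filename is the one from my test program below):

import os

# resolve alarm.mp3 next to this script, regardless of cron's working directory
soundFile = os.path.join(os.path.dirname(os.path.abspath(__file__)), "alarm.mp3")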
My testing program (pygame.mixer.Sound did not work, but I believe that was because I was using an MP3 instead of a WAV):
import pygame
import time
import os
import sys

# soundFile = "alarm.mp3"          # BAD: relative path fails under cron
soundFile = "/home/pi/alarm.mp3"   # GOOD: absolute path

channel = None

if len(sys.argv) > 1:
    pygame.mixer.init(44100, -16, 2, 4096)
    if sys.argv[1] == "music":
        print("Testing pygame.mixer")
        pygame.mixer.music.load(soundFile)
        pygame.mixer.music.set_volume(1.0)
        pygame.mixer.music.play()
    elif sys.argv[1] == "sound":
        print("Testing pygame.sound")
        pygame.mixer.init()
        s = pygame.mixer.Sound(soundFile)
        s.set_volume(1.0)
        channel = s.play()
    elif sys.argv[1] == "mpg":
        print("Using mpg321 Player")
        os.system("mpg321 " + soundFile)
    else:
        print("Using OMX Player")
        os.system("omxplayer " + soundFile)
    print("Execution control has returned")
    while pygame.mixer.get_busy() or pygame.mixer.music.get_busy() or \
            (channel is not None and channel.get_busy()):
        continue
    pygame.mixer.quit()
else:
    print("Unknown option. Options are omx, mpg, music, or sound")
In cron, I had @reboot python /home/pi/soundtest.py music & and it played the file on boot up.
Just in case anyone else has the same problem, I want to share my final solution. I simply implemented the functionality in Python. For the startup, I put the line that starts the Python script into the file /etc/rc.local.
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# server.py

import RPi.GPIO as GPIO
import subprocess
import time
import serial
from random import randint

port = serial.Serial("/dev/ttyACM0", baudrate=9600, timeout=0)

PATH = [4]  # index 0 is a placeholder so the sound files sit at indexes 1-4
PATH.append("/var/www/node/Boom1.mp3")
PATH.append("/var/www/node/Boom2.mp3")
PATH.append("/var/www/node/Boom3.mp3")
PATH.append("/var/www/node/Boom4.mp3")

def main():
    count = 0
    while True:
        value = 0
        line = port.readlines()
        if len(line) != 0 and count < 4:
            try:
                job = subprocess.Popen(["mpg321", returnPath()], stdin=subprocess.PIPE)
                time.sleep(float(line[0]) / 10)
            except:
                print("cant play soundfile")

def returnPath():
    x = randint(1, 4)
    return PATH[x]

if __name__ == '__main__':
    main()