pysnmp - writing the agent side that sends the needed traps

I have already written my trap receiver program to get traps, but now I want to write a custom agent for my servers that sends traps to that trap receiver. The only sample I found is too limited and doesn't help me much. The sample is:
from pysnmp.carrier.asyncore.dispatch import AsyncoreDispatcher
from pysnmp.carrier.asyncore.dgram import udp, udp6, unix
from pyasn1.codec.ber import decoder
from pysnmp.proto import api


# noinspection PyUnusedLocal
def cbFun(transportDispatcher, transportDomain, transportAddress, wholeMsg):
    while wholeMsg:
        msgVer = int(api.decodeMessageVersion(wholeMsg))
        if msgVer in api.protoModules:
            pMod = api.protoModules[msgVer]
        else:
            print('Unsupported SNMP version %s' % msgVer)
            return
        reqMsg, wholeMsg = decoder.decode(
            wholeMsg, asn1Spec=pMod.Message(),
        )
        print('Notification message from %s:%s: ' % (
            transportDomain, transportAddress
        ))
        reqPDU = pMod.apiMessage.getPDU(reqMsg)
        if reqPDU.isSameTypeWith(pMod.TrapPDU()):
            if msgVer == api.protoVersion1:
                print('Enterprise: %s' % (pMod.apiTrapPDU.getEnterprise(reqPDU).prettyPrint()))
                print('Agent Address: %s' % (pMod.apiTrapPDU.getAgentAddr(reqPDU).prettyPrint()))
                print('Generic Trap: %s' % (pMod.apiTrapPDU.getGenericTrap(reqPDU).prettyPrint()))
                print('Specific Trap: %s' % (pMod.apiTrapPDU.getSpecificTrap(reqPDU).prettyPrint()))
                print('Uptime: %s' % (pMod.apiTrapPDU.getTimeStamp(reqPDU).prettyPrint()))
                varBinds = pMod.apiTrapPDU.getVarBinds(reqPDU)
            else:
                varBinds = pMod.apiPDU.getVarBinds(reqPDU)
            print('Var-binds:')
            for oid, val in varBinds:
                print('%s = %s' % (oid.prettyPrint(), val.prettyPrint()))
    return wholeMsg


transportDispatcher = AsyncoreDispatcher()
transportDispatcher.registerRecvCbFun(cbFun)

# UDP/IPv4
transportDispatcher.registerTransport(
    udp.domainName, udp.UdpSocketTransport().openServerMode(('localhost', 162))
)

# UDP/IPv6
transportDispatcher.registerTransport(
    udp6.domainName, udp6.Udp6SocketTransport().openServerMode(('::1', 162))
)

## Local domain socket
# transportDispatcher.registerTransport(
#     unix.domainName, unix.UnixSocketTransport().openServerMode('/tmp/snmp-manager')
# )

transportDispatcher.jobStarted(1)

try:
    # Dispatcher will never finish as job#1 never reaches zero
    transportDispatcher.runDispatcher()
except:
    transportDispatcher.closeDispatcher()
    raise
I need my agent to send uptime, temperature, CPU usage, RAM usage, and so on.
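For the sending side, pysnmp's high-level API provides sendNotification(). What follows is a minimal sketch, not code from the original post: it assumes SNMPv2c with community 'public' and a receiver on 127.0.0.1:162, uses the standard coldStart notification only as an example, and the CPU/RAM-style OID and value are hypothetical placeholders you would replace with OIDs from your own enterprise MIB (the actual readings would be collected separately, e.g. with psutil).

from pysnmp.hlapi import (
    SnmpEngine, CommunityData, UdpTransportTarget, ContextData,
    NotificationType, ObjectIdentity, OctetString, Integer32,
    sendNotification,
)

errorIndication, errorStatus, errorIndex, varBinds = next(
    sendNotification(
        SnmpEngine(),
        CommunityData('public', mpModel=1),       # SNMPv2c; use your own community string
        UdpTransportTarget(('127.0.0.1', 162)),   # address/port of your trap receiver
        ContextData(),
        'trap',
        # coldStart is used here only as an example notification OID;
        # the sysUpTime and snmpTrapOID var-binds are filled in by pysnmp itself
        NotificationType(
            ObjectIdentity('1.3.6.1.6.3.1.1.5.1')
        ).addVarBinds(
            ('1.3.6.1.2.1.1.1.0', OctetString('my server')),   # sysDescr, as an example
            # CPU/RAM/temperature readings would go under OIDs of your own enterprise
            # subtree; '1.3.6.1.4.1.99999.1.1' and the value 42 are hypothetical
            ('1.3.6.1.4.1.99999.1.1', Integer32(42)),
        )
    )
)

if errorIndication:
    print('Trap not sent: %s' % errorIndication)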

Related

Function that would push, add/commit a database file to Github using Python 3?

Does anybody know a better way to create a function that would add/commit a database file and push it to GitHub?
The code that I am using has been giving me errors.
TypeError: a bytes-like object is required, not 'str'
Code:
import requests
import base64
import json
import datetime


def push_to_repo_branch(gitHubFileName, fileName, repo_slug, branch, user, token):
    message = "Automated update " + str(datetime.datetime.now())
    path = "https://api.github.com/repos/%s/branches/%s" % (repo_slug, branch)

    r = requests.get(path, auth=(user, token))
    if not r.ok:
        print("Error when retrieving branch info from %s" % path)
        print("Reason: %s [%d]" % (r.text, r.status_code))

    rjson = r.json()
    treeurl = rjson['commit']['commit']['tree']['url']
    # print(treeurl)

    r2 = requests.get(treeurl, auth=(user, token))
    if not r2.ok:
        print("Error when retrieving commit tree from %s" % treeurl)
        print("Reason: %s [%d]" % (r2.text, r2.status_code))

    r2json = r2.json()
    sha = None
    for file in r2json['tree']:
        # Found file, get the sha code
        if file['path'] == gitHubFileName:
            sha = file['sha']

    # if sha is None after the for loop, we did not find the file name!
    if sha is None:
        print("Could not find " + gitHubFileName + " in repos 'tree' ")

    with open(fileName) as data:
        byte_content = data.read()

    content = base64.b64encode(byte_content)

    # gathered all the data, now let's push
    inputdata = {}
    inputdata["path"] = gitHubFileName
    inputdata["branch"] = branch
    inputdata["message"] = message
    inputdata["content"] = content

    if sha:
        inputdata["sha"] = str(sha)

    updateURL = "https://api.github.com/repos/EBISPOT/RDF-platform/contents/" + gitHubFileName
    try:
        rPut = requests.put(updateURL, auth=(user, token), data=json.dumps(inputdata))
        if not rPut.ok:
            print("Error when pushing to %s" % updateURL)
            print("Reason: %s [%d]" % (rPut.text, rPut.status_code))
    except requests.exceptions.RequestException as e:
        print('Something went wrong! I will print all the information that is available so you can figure out what happend!')
        print(rPut)
        print(rPut.headers)
        print(rPut.text)
        print(e)
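As a side note on the error itself (my reading, not part of the original post): base64.b64encode() expects bytes, but the file above is opened in text mode, so data.read() returns a str and raises the TypeError. Opening the file in binary mode and decoding the encoded result back to a str, as the working code below does, is the minimal fix; "database.db" is just a placeholder name.

import base64

# Read the file as bytes (binary mode) so b64encode() accepts it
with open("database.db", "rb") as data:      # placeholder file name
    byte_content = data.read()

# b64encode() returns bytes; decode to str so it can go into the JSON payload
content = base64.b64encode(byte_content).decode("ascii")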
With some changes, I was able to make it work!!
The GitHub doc https://docs.github.com/en/rest/reference/repos#create-or-update-file-contents was very helpful.
I will put my code with other files on GitHub at https://github.com/Kamuzinzi/Auto_push for whoever wants to use it and/or contribute.
But if you are in a rush, check this code:
import base64
import requests
import json
import datetime
from credentials import GITHUB_TOKEN


def push_file(fileName, repo_slug, branch, user, token):
    '''
    Push file update to GitHub repo

    :param fileName: the name of the file on the local branch
    :param repo_slug: the github repo slug, i.e. username/repo
    :param branch: the name of the branch to push the file to
    :param user: github username
    :param token: github user token
    :return None
    :raises Exception: if file with the specified name cannot be found in the repo
    '''
    message = f"Automated backup created for the file {fileName} as of {str(datetime.date.today())}"
    path = "https://api.github.com/repos/%s/branches/%s" % (repo_slug, branch)

    r = requests.get(path, auth=(user, token))
    if not r.ok:
        print("Error when retrieving branch info from %s" % path)
        print("Reason: %s [%d]" % (r.text, r.status_code))

    rjson = r.json()
    treeurl = rjson['commit']['commit']['tree']['url']
    # print(treeurl)

    r2 = requests.get(treeurl, auth=(user, token))
    if not r2.ok:
        print("Error when retrieving commit tree from %s" % treeurl)
        print("Reason: %s [%d]" % (r2.text, r2.status_code))

    r2json = r2.json()
    sha = None
    for file in r2json['tree']:
        # Found file, get the sha code
        if file['path'] == fileName:
            sha = file['sha']

    # if sha is None after the for loop, we did not find the file name!
    if sha is None:
        print("\nThe file " + fileName + " is not in repos 'tree'. \nLet's create a new one .. \n", end=",\n 1 \n 2 \n 3 \n")

    with open(fileName, 'rb') as data:
        byte_content = data.read()

    content = base64.b64encode(byte_content).decode("ascii")

    # gathered all the data, now let's push
    inputdata = {}
    inputdata["branch"] = branch
    inputdata["message"] = message
    inputdata["content"] = content

    if sha:
        inputdata["sha"] = str(sha)

    updateURL = f"https://api.github.com/repos/{repo_slug}/contents/{fileName}"
    try:
        rPut = requests.put(updateURL, auth=(user, token), data=json.dumps(inputdata))
        if not rPut.ok:
            print("Error when pushing to %s" % updateURL)
            print("Reason: %s [%d]" % (rPut.text, rPut.status_code))
        print("Done!!\n")
    except requests.exceptions.RequestException as e:
        print('Something went wrong! I will print all the information that is available so you can figure out what happend!')
        print(rPut)
        print(rPut.headers)
        print(rPut.text)
        print(e)


fileName = "updatedFile.txt"
repositoryName = "username/repository"
branch = "branchName"
username = "Git_username"
token = GITHUB_TOKEN  # please check from credentials.py and remember it has to be confidential

push_file(fileName, repositoryName, branch, user=username, token=token)
Remember to create a file "credentials.py" and save your token as "GITHUB_TOKEN" inside it.
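For completeness, credentials.py only needs to define the token the script imports; a minimal sketch (the value shown is a placeholder, and the file should be kept out of version control, e.g. via .gitignore):

# credentials.py -- keep this file private and out of version control
GITHUB_TOKEN = "ghp_xxxxxxxxxxxxxxxx"  # placeholder, replace with your personal access token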

Resolving the "Can't export GPIO" problem on a Raspberry Pi using a Python script

I am writing a Python application that programs and tests Atmel microcontrollers through an SPI port.
The application runs on a Raspberry Pi model 3B+ and I use the command-line application 'avrdude' to do the job. I call it with subprocess.Popen() from within my Python script and in general this runs just fine.
Sometimes, the SPI port gets into a blocked state. The avrdude application then typically reports something like 'Can't export GPIO 8, already exported/busy?: Device or resource busy'.
The exported GPIOs can be observed with:
pi@LeptestPost:/ $ ls /sys/class/gpio/
export gpio10 gpio11 gpio8 gpiochip0 gpiochip504 unexport
I get out of this situation by invoking:
pi@LeptestPost:/ $ sudo echo 8 > /sys/class/gpio/unexport
resulting in:
pi@LeptestPost:/ $ ls /sys/class/gpio/
export gpio10 gpio11 gpiochip0 gpiochip504 unexport
So I can unexport them all manually and move on, but I would like to automate this in the application with the following code (after detecting the error in the avrdude output):
args = ['sudo', 'echo', '8', '>', '/sys/class/gpio/unexport']
result, error = self.runCommand(args, wait=True)
def runCommand(self, args, wait=False, outputFileStr="", errorFileStr="", workingDir=""):
    # documentation:
    # class subprocess.Popen(args,
    #                        bufsize=-1,
    #                        executable=None,
    #                        stdin=None,
    #                        stdout=None,
    #                        stderr=None,
    #                        preexec_fn=None,
    #                        close_fds=True,
    #                        shell=False,
    #                        cwd=None,
    #                        env=None,
    #                        universal_newlines=None,
    #                        startupinfo=None,
    #                        creationflags=0,
    #                        restore_signals=True,
    #                        start_new_session=False,
    #                        pass_fds=(),
    #                        *,
    #                        encoding=None,
    #                        errors=None,
    #                        text=None)
    print("Working on executing command " + str(args))

    if (outputFileStr != ""):
        stdoutFile = open(outputFileStr, 'w')
    else:
        stdoutFile = None

    if (errorFileStr != ""):
        stderrFile = open(errorFileStr, 'w')
    else:
        stderrFile = None

    if (workingDir != ""):
        cwdStr = workingDir
    else:
        cwdStr = None

    try:
        if (wait):
            p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwdStr)
            print("started subprocess with PID " + str(p.pid))
            p.wait()  # Wait for child process to terminate. This will deadlock when using stdout=PIPE or stderr=PIPE
        else:
            # p = subprocess.Popen(args, stdin=None, stdout=None, stderr=None, close_fds=True)
            p = subprocess.Popen(args, stdin=None, stdout=stdoutFile, stderr=stderrFile, close_fds=True, cwd=cwdStr)
            print("started subprocess with PID " + str(p.pid))
        (result, error) = p.communicate(timeout=15)  # Interact with process: send data to stdin, read data from stdout and stderr, until end-of-file is reached.
    except subprocess.CalledProcessError as e:
        sys.stderr.write("common::run_command() : [ERROR]: output = %s, error code = %s\n" % (e.output, e.returncode))
        return e.output, e.returncode
    except FileNotFoundError as e:
        self.master.printERROR("common::run_command() : [ERROR]: output = %s, error code = " + str(e) + "\n")
        return "error", str(e)
    except subprocess.TimeoutExpired as e:
        self.master.printERROR("Process timeout on PID " + str(p.pid) + ", trying to kill \n")
        p.kill()
        outs, errs = p.communicate()
        return "error", str(e)

    if (outputFileStr != ""):
        stdoutFile.close()
    if (errorFileStr != ""):
        stderrFile.close()

    return result, error
But that does not do the job (no error, but not the wanted result either). I imagine it's related to how the process is started within its shell or environment, but that's beyond my knowledge.
Any idea how to get this working?
Note: the avrdude application is also called through the 'runCommand' method and runs fine.
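One observation, offered as a guess rather than a confirmed fix: with a list of args and shell=False, Popen passes '>' to echo as a literal argument, so no redirection into /sys/class/gpio/unexport ever happens. A minimal sketch of writing the sysfs file directly from Python instead (assuming the script itself runs with sufficient privileges, e.g. under sudo):

def unexport_gpio(pin: int) -> None:
    # Writing the pin number to 'unexport' releases a GPIO that is stuck as exported
    with open("/sys/class/gpio/unexport", "w") as f:
        f.write(str(pin))

unexport_gpio(8)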

Python IP logger taking in and outputting data in CSV format

Hello, I'm looking for some help with a project I recently took up to check PC connectivity in my workplace. I am fairly new to Python and programming in general, so a large portion of this may be wrong. I'm trying to create a simple IP logger that pings the destination address and returns a value to determine the connection state of the machine. It should take the data from a CSV file (excell.xlsx), splice the IP address out of the information provided in cell-style format, and then output the connection state with a simple "Connected" or "Disconnected" after relisting the input file. Here's what I've come up with so far:
import csv, platform, subprocess, os as sys

# Defines filepath locations (Adjustment may be required)
in_path = sys.os.path['C:\Users\User\Documents\IpLog\log1.xlsx']
out_path = sys.os.path['C:\Users\User\Documents\IpLog\ip_log.xlsx']

try:
    ip = 0
    reference = out_path
    host = '?'
    line_count = 0
    with open(in_path, dialect='excel', delimeter=',', newline='') as csvfile:
        for row in csvfile:
            if line_count == 0:
                ip_inp = csv.reader(in_path, dialect='excel')  # adjust filename as needed.
                ip = ip_inp[ip_inp.index('10.21')]
                ip = ip[5::]
                line_count += 1
            for line in ip_inp:
                def ping(ip):
                    """
                    Returns True if host (str) responds to a ping request.
                    Remember that a host may not respond to a ping (ICMP) request even if the host name is valid.
                    """
                    # Option for the number of packets as a function of
                    param = '-n' if platform.system().lower() == 'windows' else '-c'
                    # Building the command. Ex: "ping -c 1 google.com"
                    command = ['ping', param, '1', ip]
                    return subprocess.call(command) == 0

                if subprocess.call(command) == 0:
                    status = subprocess.call(command)
                else:
                    status = 1
                if status == 0:
                    if line_count == 0:
                        net_state = 'Connected'
                        with open(out_path, delimiter=',') as csvfile2:
                            print(csvfile)
                            csv.writer(csvfile2, dialect='excel')
                            print(net_state)
                else:
                    net_state = 'Disconnected'
                    with open(out_path, delimiter=',') as csvfile2:
                        print(csvfile)
                        csv.writer(csvfile2, dialect='excel')
                        print(net_state)
except IOError:
    print('Error opening the input file please check file input name and file location and try again.')
finally:
    raise SystemExit('Operation Completed. Results have been written to (%s)' % out_path)
Right now the error I keep running into is truncation in my global file path variables. Can anyone tell me why this may be happening?
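One likely culprit for the "truncated" paths, noted here as an editorial aside rather than a confirmed answer: backslash sequences such as \U, \n and \t inside ordinary string literals are treated as escapes, so parts of a Windows path get mangled or rejected. Raw strings or forward slashes avoid that; a minimal sketch (the paths are the ones from the post):

import os

# Raw string: backslashes are kept literally
in_path = r'C:\Users\User\Documents\IpLog\log1.xlsx'
# Forward slashes also work fine on Windows
out_path = 'C:/Users/User/Documents/IpLog/ip_log.xlsx'

print(os.path.basename(in_path), os.path.exists(out_path))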

python3 pexpect strange behaviour

I have a single-threaded program that simply executes commands over ssh and looks for output over ssh. But after a while I start getting extraordinarily strange behaviour:
ssh_cmd = 'ssh %s@%s %s' % (user, addr, options)
ssh = pexpect.spawn(ssh_cmd, timeout=60)
lgsuc = ['(?i)(password)']
for item in loginsuccess:
    lgsuc.append(item)
retval = ssh.expect(lgsuc)

for cmd in cmdlist:
    time.sleep(0.1)
    # this is regex to match anything. Essentially clears the buffer so you don't get an invalid match from before
    ssh.expect(['(?s).*'])
    ssh.sendline(cmd)
    foundind = ssh.expect([re.escape("root#")], 30)  # very slow
    # repr-escape all the weird stuff so madness doesn't happen with ctrl chars
    rettxt = repr(ssh.before.decode("us-ascii") + "root:#")
    print("We Found:" + rettxt)
And it will be fine for about 20 commands or so, then madness occurs. Assume the right echo is blablabla each time:
We found 'blablabla \r\n\r\n[edit]\r\nroot#'
We found 'blablabla \r\n\r\n[edit]\r\nroot#'
We found 'blablabla \r\n\r\n[edit]\r\nroot#'
... about 20 commands...
We found 'bl\r\nroot#' # here it just missed part of the string in the middle
We found 'alala\r\nroot#'
Here is the remainder of the echo from the previous command!!! And the echo of the current command will show up some time later!! And it gets worse and worse. The strange thing is that it is in the middle of the returned byte array.
Now there are some weird control codes coming back from this device, so if I replace:
rettxt = repr(ssh.before.decode("us-ascii") + "root:#")
with
rettxt = repr(ssh.before.decode("us-ascii") + "root:#")
then
print("We Found:" + rettxt)
returns:
root#e Found lala
Anyway, there is really strange stuff going on with pexpect and the buffers, and I can't figure out what it is, so any help would be appreciated. I should mention I never get the timeout; the device always responds. Also, the total number of "root:#" occurrences in the log file exceeds the total number of lines sent.
If I go through and remove all control codes, the output looks cleaner but the problem still persists. It's as if pexpect can't handle control codes in its buffer correctly. Any help is appreciated.
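For reference, the kind of control-code filtering described above can be done on the raw bytes before decoding; a minimal sketch (my own, not from the post):

import re

def strip_ctl(raw: bytes) -> str:
    # Drop ASCII control characters (keeping LF and CR) and decode what is left
    cleaned = re.sub(rb'[\x00-\x09\x0b-\x0c\x0e-\x1f\x7f]', b'', raw)
    return cleaned.decode('us-ascii', errors='replace')

# Example use: strip_ctl(ssh.before) before printing or logging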
UPDATE: Minimal verifiable example
OK, I have been able to recreate PART of the problem in an isolated Ubuntu environment sshing into itself.
First I need to create 4 commands that can be run on a host target, so put the following four files in ~/ (I did this on Ubuntu):
~/check.py
#!/usr/bin/python3
import time
import io
#abcd^H^H^H^H^MABC
#mybytes = b'\x61\x62\x63\x64\x08\x08\x08\x0D\x41\x42\x43'
#abcdACB
mybytes = b'\x61\x62\x63\x64\x41\x42\x43'
f = open('test.txt', 'wb')
#time.sleep(1)
f.write(mybytes)
print(mybytes.decode('ascii'))
f.close()
~/check2.py
#!/usr/bin/python3
import time
import io
#0123^H^H^H^H^MABC
mybytes = b'\x30\x31\x32\x33\x08\x0D\x0D\x08\x08\x08\x08\x0D\x41\x42\x43'
f = open('test2.txt', 'wb')
#time.sleep(0.1)
f.write(mybytes)
print(mybytes.decode('ascii'))
f.close()
~/check3.py
#!/usr/bin/python3
import time
import io
#789:^H^H^H^H^DABC
mybytes = b'\x37\x38\x39\x3A\x08\x08\x08\x08\x08\x08\x08\x0D\x0D\x41\x42\x43'
f = open('test3.txt', 'wb')
#time.sleep(0.1)
f.write(mybytes)
print(mybytes.decode('ascii'))
f.close()
And lastly check4.py. Sorry, it took a weird combination for the problem to show back up:
#!/usr/bin/python3
import time
import io
#abcd^H^H^H^HABC
mybytes = b'\x61\x62\x63\x64\x08\x08\x08\x0D\x41\x42\x43'
f = open('test.txt', 'wb')
time.sleep(4)
f.write(mybytes)
print(mybytes.decode('ascii'))
f.close()
Notice that the last one has a bigger sleep; this is there to trigger a pexpect timeout. Although in my actual testing this doesn't occur, I have commands that take over 6 minutes to return any text, so it might be part of it. OK, and here is the final file to run everything. It might look ugly, but I did a massive trim so I could post it here:
#! /usr/bin/python3
#import paramiko
import time
import sys
import xml.etree.ElementTree as ET
import xml
import os.path
import traceback
import re
import datetime
import pexpect
import os
import os.path

ssh = None
respFile = None

# Error Codes:
DEBUG = True
NO_ERROR = 0
EXCEPTION_THROWS = 1
RETURN_TEXT_NEVER_FOUND = 2

LOG_CONSOLE = 1
LOG_FILE = 2
LOG_BOTH = 3


def log(out, dummy=None):
    print(str(log))


def connect(user, addr, passwd):
    global ssh
    fout = open('session.log', 'wb')
    #options = '-q -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oPubkeyAuthentication=no'
    options = ' -oUserKnownHostsFile=/dev/null '
    #options = ''
    # REPLACE WITH YOUR LOCAL USER NAME
    #user = 'user'
    # REPLACE WITH YOUR LOCAL PASSWORD
    #passwd = '123TesT321'
    #addr = '127.0.0.1'
    ssh_cmd = 'ssh %s@%s %s' % (user, addr, options)
    public = None
    private = None
    retval = 0
    try:
        ssh = pexpect.spawn(ssh_cmd, timeout=60)
        ssh.logfile = fout
        # the most common prompts
        loginsuccess = [re.escape("#"), re.escape("$")]
        lgsuc = ['(?i)(password)', re.escape("connecting (yes/no)? ")]
        for item in loginsuccess:
            lgsuc.append(item)
        retval = ssh.expect(lgsuc)
    except pexpect.TIMEOUT as exc:
        log("Server never connected to SSH tunnel")
        return 0
    print('where here ret val = ' + str(retval))
    try:
        if(retval > 1):
            return 1
        elif(retval == 1):
            hostkey = ssh.before.decode("utf-8")
            ssh.sendline("yes")
            log("Warning! new host key was added to the database: " + hostkey.split("\n")[1])
            lgsuc = ['password: ']
            for item in loginsuccess:
                lgsuc.append(item)
            retval = ssh.expect(lgsuc)
            if(retval > 0):
                return 1
            else:
                if(public is not None):
                    log("Warning public key authentication failed trying password if available...")
        else:
            if public is not None:
                log("Warning public key authentication failed trying password if available...")
        if(passwd is None):
            log("No password and certificate authentication failed...")
            return 0
        ssh.sendline(passwd)
        login = ['password: ']
        for item in loginsuccess:
            login.append(item)
        retval = ssh.expect(login)
    except pexpect.TIMEOUT as exc:
        log("Server Never responded with expected login prompt: " + lgstr)
        return 0
    #return 0
    if retval > 0:
        retval = 1
    if retval == 0:
        log("Failed to connect to IP:" + addr + " User:" + user + " Password:" + passwd)
    return retval


def disconnect():
    log("Disconnecting...")
    global ssh
    if ssh is not None:
        ssh.close()
    else:
        log("Something weird happened with the SSH client while closing the session. Shouldn't really matter", False)


def RunCommand(cmd, waitTXT, timeout=5):
    global ssh
    Error = 0
    if DEBUG:
        print('Debugging: cmd: ' + cmd + '. timeout: ' + str(timeout) + '. len of txt tags: ' + str(len(waitTXT)))
    if(type(waitTXT) is str):
        waitTXT = [re.escape(waitTXT)]
    elif(not hasattr(waitTXT, '__iter__')):
        waitTXT = [re.escape(str(waitTXT))]
    else:
        cnter = 0
        for TXT in waitTXT:
            waitTXT[cnter] = re.escape(str(TXT))
            cnter += 1
    #start = time.time()
    #print("type3: "+str(type(ssh)))
    #time.sleep(1)
    # this is regex to match anything. Essentially clears the buffer so you don't get an invalid match from before
    ssh.expect(['(?s).*'])
    ssh.sendline(cmd)
    print("Debugging: sent: " + cmd)
    #GoOn = True
    rettxt = ""
    try:
        foundind = ssh.expect(waitTXT, timeout)
        allbytes = ssh.before
        newbytes = bytearray()
        for onebyte in allbytes:
            if onebyte > 31:
                newbytes.append(onebyte)
        allbytes = bytes(newbytes)
        rettxt = repr(allbytes.decode("us-ascii") + waitTXT[foundind])
        #rettxt = ssh.before + waitTXT[foundind]
        if DEBUG:
            print("Debugging: We found " + rettxt)
    except pexpect.TIMEOUT as exc:
        if DEBUG:
            txtret = ""
            for strtxt in waitTXT:
                txtret += strtxt + ", "
            print("ERROR Debugging: we timed out waiting for text:" + txtret)
        pass
    return (rettxt, Error)


def CloseAndExit():
    disconnect()
    global respFile
    if respFile is not None and '_io.TextIOWrapper' in str(type(respFile)):
        if not respFile.closed:
            respFile.close()


def main(argv):
    try:
        cmds = ['~/check.py', '~/check2.py', '~/check3.py', '~/check2.py', '~/check3.py', '~/check.py', '~/check2.py', '~/check3.py', '~/check2.py', '~/check3.py', '~/check4.py', '~/check3.py', '~/check.py', '~/check2.py']
        ## CHANGE THESE TO MATCH YOUR SSH HOST
        ret = connect('user', '127.0.0.1', 'abcd1234')
        for cmd in cmds:
            cmdtxt = str(cmd)
            #rett = RunCommand(ssh, "ls", "root", 0, 5)
            strlen = (170 - (len(cmdtxt))) / 2
            dashval = ''
            starval = ''
            tcnt = 0
            while(tcnt < strlen):
                dashval += '-'
                starval += '*'
                tcnt += 1
            if DEBUG:
                print(dashval + cmdtxt + dashval)
            #checkval = ['ABC']
            # REPLACE THE FOLLOWING LINE WITH YOUR TARGET PROMPT
            checkval = ['user-virtual-machine:~$']
            rett = RunCommand(cmdtxt, checkval, 2)
            if DEBUG:
                print(starval + cmdtxt + starval)
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print(exc_type, fname, exc_tb.tb_lineno)
        print(traceback.format_exc())
    CloseAndExit()
    #disconnect()
    #respFile.close()


main(sys.argv)
Make sure that all four check scripts and the main Python script are executable (e.g. via sudo chmod 774 or similar). In the main function call, set your username, IP address and password to those of the target that has the check*.py scripts, and make sure they are in your ~/ directory.
Once you run this you can look at session.log, and at least on mine there is some weird stuff going on with the buffer:
~/check4.py^M
~/check3.py
~/check3.py^M
abcd^H^H^H^MABC^M
^[]0;user@user-virtual-machine: ~^Guser@user-virtual-machine:~$ ~/check.py
~/check3.py^M
789:^H^H^H^H^H^H^H^M^MABC^M
^[]0;user@user-virtual-machine: ~^Guser@user-virtual-machine:~$ ~/check.py~/check2.py
And unfortunately it's not as corrupt as my actual problem, but I have several hundred commands on an embedded custom Linux kernel that I obviously can't recreate for everyone. Anyway, any help is greatly appreciated. Hopefully these examples work for you; I am just on Ubuntu 16.04 LTS. Also make sure to replace 'user-virtual-machine:~$' with whatever your target login prompt looks like.

cisco switch enable port pysnmp

I am trying to convert this command line into pysnmp:
snmpset -v 2c -On -r 5 -t 2 -c private ip-address .1.3.6.1.2.1.2.2.1.7.369098771 i 1
I am trying to take a working walk function and modify it, but my limited knowledge of SNMP makes it very hard to understand the pysnmp docs.
This is just part of the code:
from pysnmp.entity.rfc3413.oneliner import cmdgen

device_target = (self.ip, self.port)
res = None
# Create a PYSNMP cmdgen object
cmd_gen = cmdgen.CommandGenerator()

(error_detected, error_status, error_index, snmp_data) = cmd_gen.setCmd(
    cmdgen.CommunityData(community_string),
    cmdgen.UdpTransportTarget(device_target),
    '.1.3.6.1.2.1.2.2.1.7.369098771', 1,
    lookupNames=True, lookupValues=True)
I know I am missing something; can anyone help please?
I'd highly recommend upgrading your pysnmp to the latest released version and using the "hlapi" interface.
from pysnmp.hlapi import *

device_target = (self.ip, self.port)
community_string = 'private'

cmd_gen = setCmd(SnmpEngine(),
                 CommunityData(community_string),
                 UdpTransportTarget(device_target, timeout=2.0, retries=5),
                 ContextData(),
                 ObjectType(ObjectIdentity('1.3.6.1.2.1.2.2.1.7.369098771'), Integer32(1)),
                 lookupMib=False)

res = None  # True denotes a failure

errorIndication, errorStatus, errorIndex, varBinds = next(cmd_gen)

if errorIndication:
    res = errorIndication
elif errorStatus:
    res = '%s at %s' % (errorStatus.prettyPrint(),
                        errorIndex and varBinds[int(errorIndex) - 1][0] or '?')

if res:
    print('SNMP failed: %s' % res)
References: here
