Re-implementation of Secured Lightweight Link Discovery Protocol
Kindly help debug the code below.
The helps provided in the private post have been noted. But i still have new challenges.
Thanks
#Re-implementation of SLDP
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
from ryu.lib.packet import ipv4
from ryu.topology import switches
from ryu.lib.packet import lldp
import logging
import time
from ryu.lib import hub
LOG = logging.getLogger(__name__)
class SimpleSwitch13(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
_CONTEXTS = {
'switches': switches.Switches,
}
def __init__(self, *args, **kwargs):
super(SimpleSwitch13, self).__init__(*args, **kwargs)
self.mac_to_port = {}
self.is_active = True
#set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
# install table-miss flow entry
#
# We specify NO BUFFER to max_len of the output action due to
# OVS bug. At this moment, if we specify a lesser number, e.g.,
# 128, OVS will send Packet-In with invalid buffer_id and
# truncated packet data. In that case, we cannot output packets
# correctly. The bug has been fixed in OVS v2.1.0.
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions)
def add_flow(self, datapath, priority, match, actions, buffer_id=None):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
priority=priority, match=match,
instructions=inst)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst)
datapath.send_msg(mod)
#set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
eth = pkt.get_protocols(ethernet.ethernet)[0]
if eth.ethertype == ether_types.ETH_TYPE_LLDP:
# ignore lldp packet
#print("lldp packet")
return
#dst = eth.dst
dst = 'ff:ff:ff:ff:ff:ff'
src = eth.src
#print("not lldp packet")
dpid = datapath.id
self.mac_to_port.setdefault(dpid, {})
self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)
# learn a mac address to avoid FLOOD next time.
self.mac_to_port[dpid][src] = in_port
if dst in self.mac_to_port[dpid]:
out_port = self.mac_to_port[dpid][dst]
else:
out_port = ofproto.OFPP_FLOOD
actions = [parser.OFPActionOutput(out_port)]
# install a flow to avoid packet_in next time
# eth_type=0xabcd, eth_dst='ff:ff:ff:ff:ff:ff',
if out_port != ofproto.OFPP_FLOOD:
match = parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_src=src)
self.add_flow(datapath, 1, match, actions)
self.logger.info("not Flood")
data = None
if msg.buffer_id == ofproto.OFP_NO_BUFFER:
data = msg.data
match = parser.OFPMatch(in_port=in_port, eth_type=0xabcd, eth_dst='ff:ff:ff:ff:ff:ff', eth_src=src)
self.add_flow(datapath, 1, match, actions)
self.logger.info("Flood")
out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
in_port=in_port, actions=actions, data=data)
datapath.send_msg(out)
#set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
#set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def lldp_packet_in_handler(self, ev):
if not self.link_discovery:
return
msg = ev.msg
try:
src_dpid, src_port_no = LLDPPacket.lldp_parse(msg.data)
except LLDPPacket.LLDPUnknownFormat:
# This handler can receive all the packets which can be
# not-LLDP packet. Ignore it silently
return
dst_dpid = msg.datapath.id
if msg.datapath.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
dst_port_no = msg.in_port
elif msg.datapath.ofproto.OFP_VERSION >= ofproto_v1_2.OFP_VERSION:
dst_port_no = msg.match['in_port']
else:
LOG.error('cannot accept LLDP. unsupported version. %x',
msg.datapath.ofproto.OFP_VERSION)
src = self._get_port(src_dpid, src_port_no)
print("im in here")
if not src or src.dpid == dst_dpid:
return
try:
self.ports.lldp_received(src)
except KeyError:
# There are races between EventOFPPacketIn and
# EventDPPortAdd. So packet-in event can happend before
# port add event. In that case key error can happend.
# LOG.debug('lldp_received error', exc_info=True)
pass
dst = self._get_port(dst_dpid, dst_port_no)
if not dst:
return
old_peer = self.links.get_peer(src)
# LOG.debug("Packet-In")
# LOG.debug(" src=%s", src)
# LOG.debug(" dst=%s", dst)
# LOG.debug(" old_peer=%s", old_peer)
if old_peer and old_peer != dst:
old_link = Link(src, old_peer)
del self.links[old_link]
self.send_event_to_observers(event.EventLinkDelete(old_link))
link = Link(src, dst)
if link not in self.links:
self.send_event_to_observers(event.EventLinkAdd(link))
# remove hosts if it's not attached to edge port
host_to_del = []
for host in self.hosts.values():
if not self._is_edge_port(host.port):
host_to_del.append(host.mac)
for host_mac in host_to_del:
del self.hosts[host_mac]
if not self.links.update_link(src, dst):
# reverse link is not detected yet.
# So schedule the check early because it's very likely it's up
self.ports.move_front(dst)
self.lldp_event.set()
if self.explicit_drop:
self._drop_packet(msg)
I got the protocol setup nicely. The host-switch linking was successful
I have imported all the needed modules, yet it couldnt find link_discovery
The helps provided in the private post have been noted. But i still have new challenges.
Thanks
Related
I using PJSUA2 to make a python3 script for calling, my script is making the registration and calling but i'm getting "zsh: segmentation fault" I wonder if anyone can help me out, here is my code.
python
import time
import pjsua2 as pj
distAddrs = "192.168.1.202"
port = 5060
ext = "101"
passwd = "123456"
# Create my endpoint
endPoint = pj.Endpoint()
endPointConfig = pj.EpConfig()
endPoint.libCreate()
endPoint.libInit(endPointConfig)
# Create SIP transport
sipTransportConfig = pj.TransportConfig()
sipTransportConfig.port = port
endPoint.transportCreate(pj.PJSIP_TRANSPORT_UDP, sipTransportConfig)
# Setting my audio device
endPoint.audDevManager().setNullDev()
for codec in endPoint.codecEnum2():
priority = 0
if "PCMA/8000" in codec.codecId:
priority = 255
endPoint.codecSetPriority(codec.codecId, priority)
# Start the library
endPoint.libStart()
# Subclass to extend the Call and get notifications etc.
class Call(pj.Call):
def onRegState(self, prm):
print("***onRegState == " + prm.reason)
# pjsua2 registration and calling function
def pbxReg(distAddrs, ext, passwd):
accountConfig = pj.AccountConfig()
accountConfig.idUri = "sip:%s#%s" % (ext,distAddrs,)
accountConfig.regConfig.registrarUri = "sip:%s" % (distAddrs,)
credentials = pj.AuthCredInfo("digest","*",ext,0,passwd,)
accountConfig.sipConfig.authCreds.append(credentials)
# Creating the account
account = pj.Account()
account.create(accountConfig)
time.sleep(1)
if account.getInfo().regIsActive == True:
call = Call(account)
prm = pj.CallOpParam(True)
prm.opt.audioCount = 1
prm.opt.videoCount = 0
call.makeCall("sip:103#192.168.1.202", prm)
time.sleep(1)
prm = pj.CallOpParam()
call.hangup(prm)
endPoint.hangupAllCalls()
# Destroy the library
def libDestroy():
endPoint.libDestroy()
def main():
pbxReg(distAddrs, ext, passwd)
libDestroy()
main()
The first obvious thing I can see is that you are using time.sleep(1) instead of endPoint.libHandleEvents(1000) which was an issue for me.
# your code
time.sleep(1)
# possible fix
endPoint.libHandleEvents(1000)
I have a TCP communication where my client continuously sends images as a byte array to a server and receives a response back, my problem is that when the server receives the images they are not done being received even though I've added a flag to indicate the end of the image.
I'd like to know a better way to ensure that the image file is received completely before receiving a new one
EDIT: My new attempt:
Client.py
import numpy as np
import cv2
from PIL import Image
import base64
import socket
def main(data):
s = socket.socket()
s.connect(("127.0.0.1", 999))
decoded_data = base64.b64decode(data)
print("Sending...")
s.sendall(decoded_data)
s.shutdown(s.SHUTWR)
b_data = b''
while True:
txt_data = s.recv(2048)
if not txt_data: break
b_data += txt_data
print('response received from the server: ' + b_data.decode())
return b_data.decode()
Server.py
import socket
from PIL import Image
import io
import numpy as np
import cv2
import uuid
IP = '127.0.0.1'
PORT = 999
with socket.socket() as s:
s.bind((IP,PORT))
s.listen(1)
count = 0
print ('The server is ready')
while True:
con, addr = s.accept()
filename = str(uuid.uuid4())
count = count + 1
img_dir = 'C:/Users/my_user/stream_images/'
img_format = '.png'
with con:
img = b''
while True:
data = con.recv(2048)
if not data:
break
img += data
image_name = img_dir+'frame'+str(count)+img_format
pil_image = io.BytesIO(img)
img = np.array(Image.open(pil_image))
img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
cCode = str('Thank you for connecting')
con.sendall(cCode.encode())
print("called con.sendall")
cv2.imshow('frame', img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
Currently, I am now able to fully send the images and receive them properly at the server, the only problem is that I am no longer sending a response back after the image is received, so there is something wrong with how I am receiving the reply message at the client side.
As user207421 suggested you can shutdown the socket for writing after sending the image on the client-side while still being able to receive an confirmatory answer from the server. Another problem you're facing here is the blocking nature of cv2.waitKey, which essentially halts the server until the user presses q in the cv2 window (the server will not be able to handle any other requests). I'd generally recommend to separate your network/IO logic from user interface logic. To circumvent the blocking behaviour of I've implemented a very basic image_viewer, which waits for incoming images in a thread that runs separately from the server loop by passing images through a Queue.
The client code looks as follows:
import socket
from PIL import Image
def send_image(img: Image, host: str = '127.0.0.1', port: int = 999):
s = socket.socket()
s.connect((host, port))
img_data = img._repr_png_()
s.sendall(img_data)
s.shutdown(socket.SHUT_WR) # close socket for writing, receiving is still possible
print(f'Sent {len(img_data) / 1024:,.1f} kB of image data.')
b_data = b''
while recv_data := s.recv(2048):
b_data += recv_data
print(f'Server response: {b_data.decode()}')
# maybe check server response for server side errors etc. and add return value for this function?
# use like: send_image(Image.open('test.png'))
The server code is:
import io
import queue
import socket
import threading
import cv2
import numpy as np
from PIL import Image
def image_viewer(q: queue.Queue):
while True:
try:
img_name, img = q.get(block=True, timeout=.1) # poll every 0.1 seconds
print(f'Image viewer: displaying `{img_name}`!')
cv2.imshow('Image preview', img)
except queue.Empty:
... # no new image to display
key = cv2.pollKey() # non-blocking
if key & 0xff == ord('q'):
cv2.destroyAllWindows()
print('Image viewer was closed')
return
def serve_forever(host: str, port: int, img_dir: str = 'C:/Users/my_user/stream_images/', img_format: str = '.png'):
q = queue.Queue()
img_viewer = threading.Thread(target=image_viewer, args=(q,))
img_viewer.start()
with socket.socket() as s:
s.bind((host, port))
s.listen(1)
count = 0
print('The server is ready')
while True:
conn, addr = s.accept()
count = count + 1
img_name = img_dir + 'frame' + str(count) + img_format
print (f'Client connected: {addr}')
img = b''
while data := conn.recv(2048):
img += data
conn.sendall('Thank you for connecting'.encode()) # maybe use return codes for success, error etc.?
conn.close()
pil_img = Image.open(io.BytesIO(img)) # might want to save to disk?
np_img = np.asarray(pil_img)
np_img = cv2.rotate(np_img, cv2.ROTATE_90_CLOCKWISE)
q.put((img_name, np_img))
print (f'Client at {addr} disconnected after receiving {len(img) / 1024:,.1f} kB of data.')
if __name__ == '__main__':
serve_forever('127.0.0.1', 999)
I'm been trying to create a csv file from this code, but it fails every time, I have tried different ways to place it inside the code but nothing has work so far.
I'm new to python and to Stack overflow.
If somebody can explain what I'm doing wrong it will be helpful.
Thanks in advance for any help.
from time import sleep
import os
import sys
from bleson import get_provider, Observer, UUID16
import csv
GOVEE_BT_mac_OUI_PREFIX = "A4:C1:38"
H5075_UPDATE_UUID16 = UUID16(0xEC88)
govee_devices = {}
# ###########################################################################
FORMAT_PRECISION = ".2f"
# Decode H5075 Temperature into degrees Fahrenheit
def decode_temp_in_f(encoded_data):
return format((((encoded_data / 10000) * 1.8) + 32), FORMAT_PRECISION)
# Decode H5075 percent humidity
def decode_humidity(encoded_data):
return format(((encoded_data % 1000) / 10), FORMAT_PRECISION)
#focus here
with open('temp.csv','w',newline='') as record:
record = csv.writer(record)
record.writerow(['Device Name','Device Address','Temp','Humidity'])
def print_values(mac):
govee_device = govee_devices[mac]
print(govee_device['name'],govee_device['address'],govee_device['tempInF'],govee_device['humidity'],govee_device['battery'])
record.writerow(govee_device['name'])
# On BLE advertisement callback
def on_advertisement(advertisement):
if advertisement.address.address.startswith(GOVEE_BT_mac_OUI_PREFIX):
mac = advertisement.address.address
if mac not in govee_devices:
govee_devices[mac] = {}
if H5075_UPDATE_UUID16 in advertisement.uuid16s:
# HACK: Proper decoding is done in bleson > 0.10
name = advertisement.name.split("'")[0]
encoded_data = int(advertisement.mfg_data.hex()[6:12], 16)
battery = int(advertisement.mfg_data.hex()[12:14], 16)
govee_devices[mac]["address"] = mac
govee_devices[mac]["name"] = name
govee_devices[mac]["mfg_data"] = advertisement.mfg_data
govee_devices[mac]["data"] = encoded_data
govee_devices[mac]["tempInF"] = decode_temp_in_f(encoded_data)
govee_devices[mac]["humidity"] = decode_humidity(encoded_data)
govee_devices[mac]["battery"] = battery
print_values(mac)
if advertisement.rssi is not None and advertisement.rssi != 0:
govee_devices[mac]["rssi"] = advertisement.rssi
# ###########################################################################
adapter = get_provider().get_adapter()
observer = Observer(adapter)
observer.on_advertising_data = on_advertisement
try:
while True:
observer.start()
sleep(2)
observer.stop()
except KeyboardInterrupt:
try:
observer.stop()
sys.exit(0)
except SystemExit:
observer.stop()
os._exit(0)
Error that Im getting is:
File "/home/pi/GoveeWatcher-master/python/goveeWatcher.py", line 37, in print_values
record.writerow(govee_device['name'])
ValueError: I/O operation on closed file.
I would be tempted to put the CSV writing functionality inside of the print_values function so it opens the file, writes the data, and then closes the file on each value that is found by the observer.
For example:
#focus here
def print_values(mac):
govee_device = govee_devices[mac]
print(govee_device['name'], govee_device['tempInF'])
with open('temp.csv','a',newline='') as record:
writer = csv.DictWriter(record, fieldnames=govee_device.keys())
writer.writerow(govee_device)
I recently had an interview and was asked to write a code that logs into 3 routers and reloads each, one at a time - checks that BGP is established and the interfaces are up before moving to reload the next device. We have been provided an imported module that can SSH into the router and reload it. Thoughts anyone? I'm new to Python
Though a module was provided for SSH, I started by coding it out and here is what I tired; just to give an idea of how the router works and what I am checking for:
import socket
import paramiko
def run_ssh_command(username, password, host, command):
"""
Using paramiko to SSH into remote locations and run commands
"""
port = 22
s = paramiko.SSHClient()
s.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
s.connect(host, port, username, password, look_for_keys=False, timeout=5.0)
except paramiko.AuthenticationException as e:
print("authentication failed: ", e)
s.close()
return
except socket.timeout as e:
print("socket timeout: ", e)
s.close()
return
except paramiko.SSHException as e:
print("SSH Exception: ", e)
s.close()
return
except socket.error as e:
print("socket error: ", e)
s.close()
return
(stdin, stdout, stderr) = s.exec_command(command)
print ("ran command ", command)
# for line in stdout.readlines():
# print(line)
s.close()
if __name__ == "__main__":
while True:
run_ssh_command("CompRouter", "backbonedevice", "10.10.10.25", "show ip bgp summary")
So my line of thought is to SSH to the device, issue a "show ip bgp summary" command - this is what the table typically looks like:
Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down Statd
10.10.10.2 4 65536 7 7 1 0 0 00:03:04 0
The idea is to check that the InQ/OutQ values are zero or non of the BGP neighbor are "Down", then and only then do I move to reload the next router.
This is particularly where I am stuck. Given a table like this, how to I check the entire InQ/OutQ column (there might be more than one neighbor) and then take the necessary action?
Opted to use napalm and netmiko - works like a charm:
from simplecrypt import encrypt, decrypt
from pprint import pprint
from netmiko import ConnectHandler
from napalm import get_network_driver
import json
#from time import time
import time
def read_device_data(devices_filename):
devices = {} #Create a dictionary for each IP's unique info
with open(devices_filename) as device_info:
for lines in device_info:
this_dev = lines.strip().split(',')
dev = {'ipaddr': this_dev[0],
'type': this_dev[1],
'name': this_dev[2]}
devices[dev['ipaddr']] = dev
print('Displaying info for all devices below: ')
pprint(devices)
return devices
#Create a function to decrypt and read file containing credentials encrypted in the format ip,username,password
def read_device_creds(device_cred_filename, key):
print('\n Decoding encrypted device credentials....')
with open(device_cred_filename, 'rb') as device_creds_file:
device_creds_decry = decrypt(key, device_creds_file.read())
device_creds_list = json.loads(device_creds_decry.decode('utf-8'))
pprint(device_creds_list)
print('\n Displaying device credentials in dictionary format:')
"""
Convert device_creds_list to dictionary using list comprehension
"""
device_creds_dict = {this_dev[0]: this_dev for this_dev in device_creds_list}
print(device_creds_dict)
return device_creds_dict
def check_BGP(net_device, cred):
print(f"Connecting to {net_device['ipaddr']} right now to check BGP status.....")
while True:
try:
driver = get_network_driver('ios')
iosv = driver(net_device['ipaddr'], cred[1], cred[2])
iosv.open()
except:
print('Waiting to establish a socket...')
else:
time.sleep(30)
ios_output = iosv.get_bgp_neighbors()
for k,v in ios_output.items():
for y in v.values():
if type(y) == dict:
for z in y.values():
print(f"BGP peer is up? {z['is_up']}")
return z['is_up'] == True
def reload(creds):
iosv_device = {
'device_type': 'cisco_ios',
'ip': creds[0],
'username': creds[1],
'password': creds[2]
}
net_connect = ConnectHandler(**iosv_device)
output = net_connect.send_command_timing('wr mem')
time.sleep(10)
output += net_connect.send_command_timing('reload')
output += net_connect.send_command_timing('y')
print(output)
def is_alive(alive_dev, alive_cred): #check if device is back online after reload
while True:
try:
driver = get_network_driver('ios')
iosvl2 = driver(alive_dev['ipaddr'], alive_cred[1], alive_cred[2])
iosvl2.open()
except:
print(f"Attempting to reconnect to {alive_cred[0]}")
else:
alive_output = iosvl2.is_alive()
print(alive_output)
return alive_output['is_alive'] == True
break
if __name__ == '__main__':
net_devices = read_device_data('devices_data')
net_creds = read_device_creds('encrypted_device_creds', 'cisco')
# starting_time = time()
for ipadd, device_info in net_devices.items():
print(net_devices.items())
while True:
print (f'Connecting to: {ipadd}')
if check_BGP(device_info, net_creds[ipadd]) == True:
print(f'Reloading {ipadd} now')
reload(net_creds[ipadd])
else:
print(f'Re-checking BGP on {ipadd}')
if is_alive(device_info, net_creds[ipadd]) == True and check_BGP(device_info, net_creds[ipadd]) == True:
print(f'{ipadd} back online and BGP OK!')
break
else:
print('Router down or BGP failed to reconverged; exiting script')
break
# print ('\n---- End get config sequential, elapsed time=', time() - starting_time)
In the example below, I wrote a code that detects BGP route limits. Its purpose is to calculate the route limit rate by learning the information under the Interfaces. In this regard, I recommend the TTP module, where you can create your own templates.
from netmiko import ConnectHandler
from getpass import getpass
from pprint import pprint
from ttp import ttp
from genie.testbed import load
from pprint import pprint
import json
import time
from multiprocessing.dummy import Pool as ThreadPool
from netmiko import Netmiko
#**************************************************************************************************************************
with open("user_pass.txt", "r") as f5:
user_pass = f5.readlines()
for list_user_pass in user_pass:
if "username" in list_user_pass:
username = list_user_pass.split(":")[1].strip()
if "password" in list_user_pass:
password = list_user_pass.split(":")[1].strip()
def _ssh_(nodeip):
try:
huawei = {
'device_type': 'huawei', 'ip': nodeip, 'username':
username, 'password': password, }
con = Netmiko(**huawei)
print(nodeip.strip() + " " + "basarili giris")
except Exception as e:
print(e)
f_3.write(nodeip.strip() + "\n")
return
#**************************************************************************************************************************
data_to_parse_0 = con.send_command_timing('display ip vpn-instance | ignore-case i Customer_A')
print(data_to_parse_0)
ttp_template_0 ="""
{{Customer_Name}} {{nodeip}} {{IPV4}}
"""
parser_0 = ttp(data=data_to_parse_0, template=ttp_template_0)
parser_0.parse()
#print result in JSON format
results_0 = parser_0.result(format='json')[0]
print(results_0)
#str to list **convert with json.loads
result_0 = json.loads(results_0)
print(result_0[0]["Customer_Name"])
#**************************************************************************************************************************
data_to_parse = con.send_command_timing("display current-configuration configuration vpn-instance {}".format(result_0[0]["Customer_Name"]))
print(data_to_parse)
ttp_template ="""
{{routing-table}} limit {{ total_number | DIGIT }} {{total_number2}}
"""
parser = ttp(data=data_to_parse, template=ttp_template)
parser.parse()
#print result in JSON format
results = parser.result(format='json')[0]
print(results)
#str to list **convert with json.loads
result = json.loads(results)
print(result)
#**************************************************************************************************************************
data_to_parse_2 = con.send_command_timing('dis ip routing-table vpn-instance' + " " + result_0[0]["Customer_Name"] + " " + " statistics | i Summary Prefixes")
print(data_to_parse_2)
ttp_template_2 ="""
Summary Prefixes : {{ used_number | DIGIT }}
"""
parser2 = ttp(data=data_to_parse_2, template=ttp_template_2)
parser2.parse()
#print result in JSON format
results2 = parser2.result(format='json')[0]
print(results2)
#str to list **convert with json.loads
result2 = json.loads(results2)
print(result2[0]["used_number"])
#**************************************************************************************************************************
result3 = (int(result2[0]["used_number"]) / int(result[0]["total_number"])) * 100
print(int(result3))
with open("vrf_limit_result.txt", "a") as f:
f.write("Customer_Result" +"_" + nodeip +"==>" + str(result3)+ "\n")
f.close()
#**************************************************************************************************************************
f_2 = open("ip_list.txt", "r")
ip_list = f_2.readlines()
f_2.close()
f_3 = open("Ssh_unconnected_2.txt", "w")
# Therading method
myPool = ThreadPool(100)
result = myPool.map(_ssh_, ip_list)
I try to make Async ping process using subprocess.Popen , I try to understand how i implement it in this case
aList = []
async def sn(frm, to):
i = 0
for i in list(range(frm, to)):
aList.append(i)
cmd = "ping -n 1 " + '10.0.0.'
coroutines = [subprocess.Popen(cmd + str(i), stdout=subprocess.PIPE) for i in aList]
results = await asyncio.gather(*coroutines)
print(results)
loop = asyncio.get_event_loop()
loop.run_until_complete(sn(frm, to))
loop.close()
You can find simpler code for pinging host without async-await. But if necessary you can try the following working example to ping with async-await
import platform
import subprocess
import aiohttp
import asyncio
async def getPingedHost(host, netTimeout=3):
""" Description: Function to ping a host and get string of outcome or False
Import: from shared.getPingedHost import getPingedHost
Testing: python -m shared.getPingedHost
"""
args = ['ping']
platformOs = platform.system().lower()
if platformOs == 'windows':
args.extend(['-n', '1'])
args.extend(['-w', str(netTimeout * 1000)])
elif platformOs in ('linux', 'darwin'):
args.extend(['-c', '1'])
args.extend(['-W', str(netTimeout)])
else:
raise NotImplemented('Unsupported OS: {}'.format(platformOs))
args.append(host)
output = ''
try:
outputList = []
if platformOs == 'windows':
output = subprocess.run(args, check=True, universal_newlines=True,
stdout=subprocess.PIPE, # Capture standard out
stderr=subprocess.STDOUT, # Capture standard error
).stdout
outputList = str(output).split('\n')
if output and 'TTL' not in output:
output = False
else:
subprocess.run(args, check=True)
output = outputList[2]
except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
output = False
return output
async def main():
async with aiohttp.ClientSession() as client:
output = await getPingedHost('google.com')
print(output)
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
class rscan(object):
state = {'online': [], 'offline': []} # Dictionary with list
ips = [] # Should be filled by function after taking range
# Amount of pings at the time
thread_count = 8
# Lock object to prevent race conditions
lock = threading.Lock()
# Using Windows ping command
def ping(self, ip):
answer = subprocess.call(['ping','-n','1',ip],stdout = open('1.txt','w'))
return answer == 0 and ip
def pop_queue(self):
ip = None
self.lock.acquire() # lock !!!
if self.ips:
ip = self.ips.pop()
self.lock.release()
return ip
def noqueue(self):
while True:
ip = self.pop_queue()
if not ip:
return None
result = 'online' if self.ping(ip) else 'offline'
self.state[result].append(ip) ### check again
def start(self):
threads = []
for i in range(self.thread_count):
t = threading.Thread(target=self.noqueue)
t.start()
threads.append(t)
# Wait for all threads
[ t.join() for t in threads ]
return self.state
def rng(self, frm, to, ip3):
self.frm = frm
self.to = to
self.ip3 = ip3
for i in range(frm, to):
ip = ip3 + str(i)
self.ips.append(ip)
if __name__== '__main__':
scant = rscan()
scant.thread_count = 8
edited a bit class i have found also used threads instead of Async & await
Credit: http://blog.boa.nu/2012/10/python-threading-example-creating-pingerpy.html