Difficulty inputting training dataset with python code - python-3.x

Please, I need help with this; I have tried almost everything I know and nothing is working.
I am carrying out a performance evaluation of the dendritic cell algorithm, which detects anomalies from its input signals (PAMP, danger signals, safe signals). The project is based on parameter tuning: changing the algorithm's parameters and evaluating each result.
I got the algorithm's Ruby code from the book "Clever Algorithms" and converted it to Python.
My problem is that I am finding it difficult to feed my dataset into the Python code. I am using the NSL-KDD dataset.
Help on using this with MATLAB would also be much appreciated.
The Python code:
from random import randrange, random, randint


def rand_in_bounds(min, max):
    return randrange(min, max)


# def random_vector(search_space):  # unused function
#     arr = np.zeros(5)
#     return arr


def construct_pattern(class_label, domain, p_safe, p_danger):
    set_ = domain[class_label]
    selection = randrange(len(set_))
    pattern = {}
    pattern["class_label"] = class_label
    pattern["input_"] = set_[selection]
    pattern["safe"] = random() * p_safe * 100
    pattern["danger"] = random() * p_danger * 100
    return pattern
def generate_pattern(domain, p_anomaly, p_normal, prob_create_anom=0.5):
    pattern = None
    if random() < prob_create_anom:
        pattern = construct_pattern("Anomaly", domain, 1 - p_normal, p_anomaly)
        print(">Generated Anomaly", pattern["input_"])
    else:
        pattern = construct_pattern("Normal", domain, p_normal, 1 - p_anomaly)
    return pattern
def initialize_cell(thresh):
    cell = {}
    cell["lifespan"] = 1000
    cell["k"] = 0
    cell["cms"] = 0
    cell["migration_threshold"] = thresh[0] + ((thresh[1] - thresh[0]) * random())
    cell["antigen"] = {}
    # print(cell)
    return cell


def store_antigen(cell, input_):
    cell["antigen"].setdefault(input_, 0)
    cell["antigen"][input_] += 1


def expose_cell(cell, cms, k, pattern, threshold):
    cell["cms"] += cms
    cell["k"] += k
    cell["lifespan"] -= cms
    store_antigen(cell, pattern["input_"])
    if cell["lifespan"] <= 0:
        # reinitialise the exhausted cell in place so the caller's list sees the reset
        # (a plain reassignment would only rebind the local name)
        cell.update(initialize_cell(threshold))


def can_cell_migrate(cell):
    return (cell["cms"] >= cell["migration_threshold"]) and (len(cell["antigen"]) != 0)
def expose_all_cells(cells, pattern, threshold):
    migrate = []
    cms = pattern["safe"] + pattern["danger"]
    k = pattern["danger"] - (pattern["safe"] * 2)
    for cell in cells:
        expose_cell(cell, cms, k, pattern, threshold)
        if can_cell_migrate(cell):
            cell["class_label"] = "Anomaly" if cell["k"] > 0 else "Normal"
            migrate.append(cell)
    print("________________________")
    return migrate
def train_system(domain, max_iter, num_cells, p_anomaly, p_normal, thresh):
    immature_cells = [initialize_cell(thresh) for _ in range(num_cells)]
    migrated = []
    c = 0
    for _ in range(max_iter):
        pattern = generate_pattern(domain, p_anomaly, p_normal)
        migrants = expose_all_cells(immature_cells, pattern, thresh)
        for cell in migrants:
            immature_cells = [x for x in immature_cells if x is not cell]
            immature_cells.append(initialize_cell(thresh))
            migrated.append(cell)
        c += 1
        print(f'> iter= {c} new={len(migrants)} migrated={len(migrated)}')
    return migrated
def classify_pattern(migrated, pattern):
    input_ = pattern["input_"]
    num_cells, num_antigen = 0, 0
    for cell in migrated:
        if (cell["class_label"] == "Anomaly") and (input_ in cell["antigen"]):
            num_cells += 1
            num_antigen += cell["antigen"][input_]
    if num_antigen == 0:
        return "Normal"
    mcav = num_cells / num_antigen
    return "Anomaly" if mcav > 0.5 else "Normal"
def test_system(migrated, domain, p_anomaly, p_normal, num_trial=100):
    correct_norm = 0
    for _ in range(num_trial):
        pattern = construct_pattern("Normal", domain, p_normal, 1 - p_anomaly)
        class_label = classify_pattern(migrated, pattern)
        correct_norm += 1 if class_label == "Normal" else 0
    print(f"Finished testing Normal inputs {correct_norm}/{num_trial}")
    correct_anom = 0
    for _ in range(num_trial):
        pattern = construct_pattern("Anomaly", domain, 1 - p_normal, p_anomaly)
        class_label = classify_pattern(migrated, pattern)
        correct_anom += 1 if class_label == "Anomaly" else 0
    print(f"Finished testing Anomaly inputs {correct_anom}/{num_trial}")
    return [correct_norm, correct_anom]
def execute(domain, max_iter, num_cells, p_anom, p_norm, thresh):
    migrated = train_system(domain, max_iter, num_cells, p_anom, p_norm, thresh)
    test_system(migrated, domain, p_anom, p_norm)
    return migrated


if __name__ == "__main__":
    # problem configuration
    domain = {}
    domain["Normal"] = [x for x in range(1, 51)]
    domain["Anomaly"] = [x * 10 for x in range(1, 6)]
    domain["Normal"] = [x for x in domain["Normal"] if x not in domain["Anomaly"]]
    p_anomaly = 0.70
    p_normal = 0.95
    # algorithm configuration
    iterations = 100
    num_cells = 10
    thresh = [5, 15]
    execute(domain, iterations, num_cells, p_anomaly, p_normal, thresh)
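For what it's worth, the domain above is only the toy integer domain from the book. Below is a minimal sketch of how one might build that same domain dict from NSL-KDD records instead, assuming the standard KDDTrain+.txt layout (41 comma-separated features followed by the attack label) and pandas; the file path, the choice of src_bytes as the antigen value, and the helper name load_nsl_kdd_domain are assumptions, not part of the original code.

import pandas as pd

def load_nsl_kdd_domain(path="KDDTrain+.txt"):
    """Hypothetical helper: build the 'domain' dict expected by execute()
    from an NSL-KDD CSV file (41 features, label, optional difficulty)."""
    df = pd.read_csv(path, header=None)
    labels = df.iloc[:, 41]   # attack label column ("normal", "neptune", ...)
    antigen = df.iloc[:, 4]   # src_bytes, used here as the antigen value
    domain = {"Normal": [], "Anomaly": []}
    for value, label in zip(antigen, labels):
        key = "Normal" if label == "normal" else "Anomaly"
        domain[key].append(value)
    return domain

# domain = load_nsl_kdd_domain("KDDTrain+.txt")
# execute(domain, iterations, num_cells, p_anomaly, p_normal, thresh)

With a domain built this way, construct_pattern draws its input_ values from real records rather than from the integers 1 to 50.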

Related

Multicall taking 300 seconds to return result

I am using the python multicall library (https://github.com/banteg/multicall.py) to get the ERC20 balances of multiple wallet addresses at once, with multiprocessing.
When the process starts, multicall returns the result in less than a second, but once the process has been running for hours it starts returning results in more than a minute, sometimes up to 300 seconds.
Can anyone explain the reason behind this latency growing over time?
Below is the code sample:
from web3 import Web3
from web3.middleware import geth_poa_middleware
from multicall import Call, Multicall

# node_provider and addresses are defined elsewhere in the original code

block_number = 11374651
GET_BALANCE_FN = "balanceOf(address)(uint256)"


def call_back_obj(success, value):
    """
    Callback to process results from multicall for a function.
    If the call fails, returns False (if changed to a string it throws an error).
    """
    if success is True and type(value) == bytes:
        return value.decode("utf-8")
    elif success is True:
        return value
    else:
        return False


def get_instance():
    web3_instance = Web3(
        Web3.HTTPProvider(
            node_provider,
            request_kwargs={"timeout": 10},
        )
    )
    web3_instance.middleware_onion.inject(geth_poa_middleware, layer=0)
    return web3_instance


w3 = get_instance()


def token_balance_handler(addresses, block_number=None):
    calls = []
    for address_map in addresses:
        contract_addr = address_map.get("tokenAddress")
        wallet_addr = address_map.get("walletAddress")
        calls.append(
            Call(
                contract_addr,
                [GET_BALANCE_FN, wallet_addr],
                [[f"{wallet_addr}-{contract_addr}", call_back_obj]],
            )
        )
    return Multicall(
        calls, _w3=w3, block_id=block_number, require_success=False
    )


print(token_balance_handler(addresses, block_number)())
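As a first diagnostic step, here is a minimal sketch of how one might time each batch to see whether the slowdown is in the RPC round trip itself; the wrapper name timed_balances and the logging setup are assumptions, not part of the original code.

import logging
import time

logging.basicConfig(level=logging.INFO)

def timed_balances(addresses, block_number=None):
    """Hypothetical wrapper: run the multicall and log how long it took."""
    multi = token_balance_handler(addresses, block_number)
    start = time.monotonic()
    result = multi()
    elapsed = time.monotonic() - start
    logging.info("multicall of %d calls took %.2fs", len(addresses), elapsed)
    return result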

python function with modular variable

Hi,
I'm writing my first big Python program (3.8) and I am trying to use one function for several purposes (the same work, but with different targets taken from existing attributes).
I hope it's clear enough.
Here is the wanted job. It's inside a Qt5 GUI (QApplication):
class GuiSuperQuizz(QWidget, QApplication):
    ...
    ...
    def ajout_pts_blindtest(self, nbr):
        x = nbr
        x = str(x)
        eval("team" + x).ajou_pts(int(self.point_blindtest))
        eval("self.score_equip_" + x).setText(str(eval("team" + x).point))  # bug is here
        eval("self.gest_score_equip_" + x).setText(str(eval("team" + x).point))
        print(eval("team" + x).point)
        self.continu[0] = False
        self.en_pause[0] = False
        self.records_scores()
The interpreter does not recognize the attribute "score_equip_1" and gives me an error:
AttributeError: 'GuiSuperQuizz' object has no attribute 'score_equip_1'
Yet I know that attribute is fine, because this other function works well:
def ajout_pts_rap_team1(self):
    team1.ajou_pts(int(self.point_rap))
    self.score_equip_1.setText(str(team1.point))
    self.gest_score_equip_1.setText(str(team1.point))
    print(team1.point)
    self.continu[0] = False
    self.en_pause[0] = False
    self.aff_ligne4()
    self.records_scores()
To avoid writing 4 functions that target 4 variables which are just incremented (it's a quiz game with 4 players), I am trying to combine them into one function that selects its targets.
If I test the same logic with some very simple lines, it works:
test1 = 456

def test(nbr):
    x = nbr
    x = str(x)
    print(eval("test" + x))

test(1)
456
If anyone has some explanations...
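For reference, here is a minimal sketch of how the same dynamic lookup could be done with getattr instead of eval; the attribute names mirror the question, and the teams dict is an assumption, not something from the original program.

def ajout_pts_blindtest(self, nbr):
    # teams is assumed to be a dict like {1: team1, 2: team2, ...} built elsewhere
    team = teams[nbr]
    team.ajou_pts(int(self.point_blindtest))
    # getattr looks the attribute up on self at runtime, so instance attributes are found
    getattr(self, f"score_equip_{nbr}").setText(str(team.point))
    getattr(self, f"gest_score_equip_{nbr}").setText(str(team.point))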

PaddingFIFOQueue.enqueue_many throwing Value Error: Shapes must be equal rank

I have an RNN model to which I'm trying to feed variable-length input sequences through input pipelines, randomly sampling from multiple TFRecord files containing serialized SequenceExamples, with batch padding and shuffling across multiple batches.
Each SequenceExample has 3 elements: length (a constant), input (a 1-D array), and labels (a 1-D array).
The procedure is as follows:
def read_file_queue(self, filename_queue):
    reader = tf.TFRecordReader()
    key, ex = reader.read(filename_queue)
    context_features = {
        "seq-len": tf.FixedLenFeature([], dtype=tf.int64)
    }
    sequence_features = {
        "tokens": tf.FixedLenSequenceFeature([], dtype=tf.int64),
        "labels": tf.FixedLenSequenceFeature([], dtype=tf.int64)
    }
    context_parsed, sequence_parsed = tf.parse_single_sequence_example(
        serialized=ex,
        context_features=context_features,
        sequence_features=sequence_features)
    return context_parsed["seq-len"], sequence_parsed["tokens"], sequence_parsed["labels"]

def get_batch_data(self):
    fqueue = tf.train.string_input_producer(self.data_filelist,
                                            shuffle=True,
                                            num_epochs=self.num_epochs)
    # read from multiple tf records as defined by read_threads
    ex = [self.read_file_queue(fqueue) for _ in range(self.read_threads)]
    print(ex)
    # ex = self.read_file_queue(fqueue)
    pad_output = self.padding_pipeline(ex)
    shuffle_output = self.shuffle_pipeline(pad_output)
    return shuffle_output
def padding_pipeline(self, input):
    padding_queue = tf.PaddingFIFOQueue(
        capacity=self.pad_capacity,
        dtypes=[tf.int64, tf.int64, tf.int64],
        shapes=[[], [None], [None]])
    # use enqueue_many instead of enqueue because
    # the input is a list of tuples, one from each tf record reader thread
    padding_enqueue_op = padding_queue.enqueue_many(input)  # <<< !!!!! error here !!!!!
    padding_queue_runner = tf.train.QueueRunner(padding_queue, [padding_enqueue_op] * self.pad_threads)
    tf.train.add_queue_runner(padding_queue_runner)
    padding_dequeue_op = padding_queue.dequeue_up_to(self.batch_size)
    return padding_dequeue_op

def shuffle_pipeline(self, input):
    shuffle_queue = tf.RandomShuffleQueue(
        capacity=self.shuffle_capacity,
        min_after_dequeue=self.shuffle_min_after_dequeue,
        dtypes=[tf.int64, tf.int64, tf.int64],
        shapes=None)
    shuffle_enqueue_op = shuffle_queue.enqueue(input)
    shuffle_queue_runner = tf.train.QueueRunner(
        shuffle_queue, [shuffle_enqueue_op] * self.shuffle_threads)
    tf.train.add_queue_runner(shuffle_queue_runner)
    shuffle_dequeue_op = shuffle_queue.dequeue()
    return shuffle_dequeue_op
For which I'm getting the following error:
ValueError: Shapes must be equal rank, but are 0 and 1 From merging shape 0 with other shapes. for 'padding_fifo_queue_EnqueueMany/component_0' (op: 'Pack') with input shapes: [], [?], [?].
I'm sure I'm doing something silly here; however, I could not find what it is that I'm doing wrong.
Taking a hint from here, maybe you should have the following?
padding_queue = tf.PaddingFIFOQueue(
    capacity=self.pad_capacity,
    dtypes=[tf.int64, tf.int64, tf.int64],
    shapes=[None, [None], [None]])
By the way, if you could add some basic script for generating random data in the format you are using, it would be easier to replicate. Thanks.
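In case it helps, here is a minimal sketch of such a generator, assuming the TF 1.x API used in the question and the "seq-len"/"tokens"/"labels" feature names above; the output file name, value ranges, and sequence lengths are arbitrary assumptions.

import random
import tensorflow as tf

def write_random_sequence_examples(path="random_data.tfrecord", num_examples=100):
    """Write SequenceExamples with a scalar 'seq-len' context feature and
    variable-length 'tokens'/'labels' feature lists."""
    with tf.python_io.TFRecordWriter(path) as writer:
        for _ in range(num_examples):
            seq_len = random.randint(5, 20)
            tokens = [random.randint(0, 100) for _ in range(seq_len)]
            labels = [random.randint(0, 5) for _ in range(seq_len)]
            ex = tf.train.SequenceExample()
            ex.context.feature["seq-len"].int64_list.value.append(seq_len)
            fl_tokens = ex.feature_lists.feature_list["tokens"]
            fl_labels = ex.feature_lists.feature_list["labels"]
            for t, l in zip(tokens, labels):
                fl_tokens.feature.add().int64_list.value.append(t)
                fl_labels.feature.add().int64_list.value.append(l)
            writer.write(ex.SerializeToString())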

Assign Class attributes from list elements

I'm not sure if the title accurately describes what I'm trying to do. I have a Python 3.x script that issues a flood warning to my Facebook page when the river near my home has reached its lowest flood stage. Right now the script works, however it only reports data from one measuring station. I would like to be able to process the data from all of the stations in my county (a total of 5), so I was thinking that a class might do the trick, but I'm not sure how to implement it. I've been teaching myself Python since January and feel pretty comfortable with the language for the most part, and while I have a good idea of how to build a class object, I'm not sure how my flow chart should look. Here is the code now:
#!/usr/bin/env python3
'''
Facebook Flood Warning Alert System - this script will post a notification to
Facebook whenever the Sabine River @ Hawkins reaches flood stage (22.3')
'''
import requests
import facebook
from lxml import html

graph = facebook.GraphAPI(access_token='My_Access_Token')
river_url = 'http://water.weather.gov/ahps2/river.php?wfo=SHV&wfoid=18715&riverid=203413&pt%5B%5D=147710&allpoints=143204%2C147710%2C141425%2C144668%2C141750%2C141658%2C141942%2C143491%2C144810%2C143165%2C145368&data%5B%5D=obs'
ref_url = 'http://water.weather.gov/ahps2/river.php?wfo=SHV&wfoid=18715&riverid=203413&pt%5B%5D=147710&allpoints=143204%2C147710%2C141425%2C144668%2C141750%2C141658%2C141942%2C143491%2C144810%2C143165%2C145368&data%5B%5D=all'


def checkflood():
    r = requests.get(river_url)
    tree = html.fromstring(r.content)
    stage = ''.join(tree.xpath('//div[@class="stage_stage_flow"]//text()'))
    warn = ''.join(tree.xpath('//div[@class="current_warns_statmnts_ads"]/text()'))
    stage_l = stage.split()
    level = float(stage_l[2])
    # check if we're at flood level
    if level < 22.5:
        pass
    elif level == 37:
        major_diff = level - 23.0
        major_r = ('The Sabine River near Hawkins, Tx has reached [Major Flood Stage]: @', stage_l[2], 'Ft. ', str(round(major_diff, 2)), ' Ft. \n Please click the link for more information.\n\n Current Warnings and Alerts:\n ', warn)
        major_p = ''.join(major_r)
        graph.put_object(parent_object='me', connection_name='feed', message=major_p, link=ref_url)
    <--snip-->

checkflood()
Each station has different categories for flood stage (Action, Flood, Moderate, Major), each at different depths per station. So for the Sabine River at Hawkins it will be Action - 22', Flood - 24', Moderate - 28', Major - 32'. For the other stations those depths are different. So I know that I'll have to start out with something like:
class River:
    def __init__(self, id, stage):
        self.id = id        # station ID
        self.stage = stage  # river level

    @staticmethod
    def check_flood(stage):
        if stage < 22.5:
            pass
        elif stage.....
but from there I'm not sure what to do. Where should it be added into the code? Should I write a class to handle the Facebook postings as well? Is this even something that needs a class to handle it? Is there any way to clean this up for efficiency? I'm not looking for anyone to write this up for me, but some tips and pointers would sure be helpful. Thanks everyone!
EDIT: Here is what I figured out, and it is working:
class River:
    name = ""
    stage = ""
    action = ""
    flood = ""
    mod = ""
    major = ""
    warn = ""

    def checkflood(self):
        if float(self.stage) < float(self.action):
            pass
        elif float(self.stage) >= float(self.major):
            <--snip-->

mineola = River()
mineola.name = stations[0]
mineola.stage = stages[0]
mineola.action = "13.5"
mineola.flood = "14.0"
mineola.mod = "18.0"
mineola.major = "21.0"
mineola.alert = warn[0]

hawkins = River()
hawkins.name = stations[1]
hawkins.stage = stages[1]
hawkins.action = "22.5"
hawkins.flood = "23.0"
hawkins.mod = "32.0"
hawkins.major = "37.0"
hawkins.alert = warn[1]
<--snip-->
So from here I'm trying to consolidate all the individual river blocks into one block. What I have tried so far is this:
>>> class River:
...     name = ""
...     stage = ""
...     def testcheck(self):
...         return self.name, self.stage
...
>>> for n in range(num_river):
...     stations[n] = River()
...     stations[n].name = stations[n]
...     stations[n].stage = stages[n]
...
>>> for n in range(num_river):
...     stations[n].testcheck()
...
<__main__.River object at 0x7fbea469bc50> 4.13
<__main__.River object at 0x7fbea46b4748> 20.76
<__main__.River object at 0x7fbea46b4320> 22.13
<__main__.River object at 0x7fbea46b4898> 16.08
So this doesn't give me the printed results that I was expecting. How can I get the station name string instead of the object? Will I be able to define the class attributes in this manner, or will I have to list them out individually? Thanks again!
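As a side note, a minimal sketch of why the name shows up as an object: the loop stores the new River in stations[n] before reading stations[n] as the name, so name ends up pointing at the River instance itself. Keeping the objects in a separate list (river_objs is an assumed name, the rest mirrors the question) avoids clobbering the original names:

>>> river_objs = []
>>> for n in range(num_river):
...     r = River()
...     r.name = stations[n]   # still the original station name string
...     r.stage = stages[n]
...     river_objs.append(r)
...
>>> for r in river_objs:
...     print(r.testcheck())
...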
After reading many, many, many articles and tutorials on class objects I was able to come up with a solution for creating the objects using list elements.
class River():
    def __init__(self, river, stage, flood, action):
        self.river = river
        self.stage = stage
        self.flood = flood
        self.action = action

    def alerts(self):
        if float(self.stage) < float(self.flood):
            # alert = "The %s is below Flood Stage (%sFt) @ %s Ft. \n" % (self.river, self.flood, self.stage)
            pass
        elif float(self.stage) > float(self.flood):
            alert = "The %s has reached Flood Stage(%sFt) @ %sFt. Warnings: %s \n" % (self.river, self.flood, self.stage, self.action)
            return alert

'''this is the function that I was trying to create
to build the class objects automagically'''
def riverlist():
    river_list = []
    for n in range(len(rivers)):
        station = River(rivers[n], stages[n], floods[n], warns[n])
        river_list.append(station)
    return river_list

if __name__ == '__main__':
    for x in riverlist():
        print(x.alerts())

scapy - how to display bit fields flags as on off?

scapy provides FlagsField, which shows up as:
flag1+flag2+flag5
Can this be printed instead as:
flag1=on
flag2=on
flag3=off
flag4=off
flag5=on
FlagsField takes an array of names, but the names can also be an enum/dictionary which indexes a bit position.
Worked it out:
class CFlagsField(FlagsField):
    def i2repr(self, pkt, x):
        flgs = []
        for (k, v) in self.names.iteritems():
            tf = 'True' if x & (1 << k) else 'False'
            flgs.append('{:<20}={:^7}'.format(v, tf))
        return "\n".join(flgs)
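For context, a minimal sketch of how such a field might be used in a packet definition (Python 2, matching the iteritems call above); the protocol name, field layout, and bit assignments are made-up assumptions.

from scapy.packet import Packet
from scapy.fields import ByteField

class MyProto(Packet):
    name = "MyProto"
    fields_desc = [
        ByteField("version", 1),
        # 8-bit flags field; the dict maps bit position -> flag name, as assumed by i2repr above
        CFlagsField("flags", 0, 8, {0: "flag1", 1: "flag2", 2: "flag3", 3: "flag4", 4: "flag5"}),
    ]

pkt = MyProto(flags=0b10011)  # flag1, flag2 and flag5 set
pkt.show()                    # each flag rendered as True/False via i2repr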
