I have created a simple threading function, but I am getting an error that 0 arguments were passed even though I passed all 6. I have tried both args and kwargs and I still get the same error.
Below is my code:
import time
import datetime
import threading
def get_time():
    datestart = datetime.datetime.now() - datetime.timedelta(minutes=60)
    dateend = datetime.datetime.now()
    timeprevious = int(time.mktime(datestart.timetuple()) * 1000)
    timenow = int(time.mktime(dateend.timetuple()) * 1000)
    return timeprevious, timenow

def customer(src_ip, dst_ip, host_ip, index_name, timeprevious, timenow):
    print(src_ip)
    print(dst_ip)
    print(host_ip)
    print(index_name)
    print(timeprevious)
    print(timenow)

host_name = ['host_name1', 'host_name2', 'host_name3']
host_ip = ['host_ip1', 'host_ip2', 'host_ip3']
index_name = ['index_name1', 'index_name2', 'index_name3']
src_ip = ['Src_IP', 'source_ip', 'SourceAddress']
dst_ip = ['Dst_IP', 'destination_ip', 'DestinationAddress']

timeprevious, timenow = get_time()

threads = []
for i in range(len(host_name)):
    try:
        # t = threading.Thread(target=customer(), args=(src_ip[i], dst_ip[i], host_ip[i], index_name[i], timeprevious, timenow))
        t = threading.Thread(target=customer(), kwargs={'src_ip': src_ip[i], 'dst_ip': dst_ip[i], 'host_ip': host_ip[i], 'index_name': index_name[i], 'timeprevious': timeprevious, 'timenow': timenow})
        threads.append(t)
        t.start()
    except Exception as e:
        print('error ' + host_name[i])
        print(e)

for t in threads:
    t.join()
This is the error I am getting:
customer() takes exactly 6 arguments (0 given)
You can see in the comment that I have also tried kwargs to solve the error, still with no luck. Also, is this the correct way to pass multiple arguments?
You have an issue on the following line:
t = threading.Thread(target=customer(), kwargs={'src_ip': src_ip[i],'dst_ip':dst_ip[i],'host_ip': host_ip[i],'index_name': index_name[i],'timeprevious': timeprevious,'timenow': timenow })
target=customer() calls customer() immediately, in the main thread, with no arguments (which is exactly the error you see) and sets target to its return value. To pass a reference to the customer function, use target=customer.
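A minimal corrected version of the thread creation, using the same arguments as in the question, would be:

t = threading.Thread(
    target=customer,  # pass the function itself, not the result of calling it
    kwargs={'src_ip': src_ip[i], 'dst_ip': dst_ip[i], 'host_ip': host_ip[i],
            'index_name': index_name[i], 'timeprevious': timeprevious, 'timenow': timenow})
t.start()

The positional form args=(src_ip[i], dst_ip[i], host_ip[i], index_name[i], timeprevious, timenow) works just as well; kwargs is simply more explicit.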
Here I am trying to get only two fields from Cassandra using only(), but when I pass the field names it gives me the error above. I have also tried passing self, but that didn't work. After getting those two fields I need to convert them into two arrays so that I can plot a graph of names and marks.
Here is the code:
from flask import *
from flask_cqlalchemy import CQLAlchemy

app = Flask(__name__)
app.config['CASSANDRA_HOSTS'] = ['127.0.0.1']
app.config['CASSANDRA_KEYSPACE'] = "emp"
db = CQLAlchemy(app)

class Student(db.Model):
    uid = db.columns.Integer(primary_key=True)
    marks = db.columns.Integer(primary_key=True)
    username = db.columns.Text(required=True)
    password = db.columns.Text()

@app.route('/meriting')
def show_meritlist():
    ob = Student.objects().filter().only(Student.marks, Student.username)
    ob = ob.filter(Student.marks >= 65).allow_filtering()
    return render_template('merit.html', ml=ob)

db.sync_db()

if __name__ == '__main__':
    app.run(debug=True)
only() takes a single parameter, which should be an iterable. Try:
ob = Student.objects().filter().only((Student.marks, Student.username))
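With that change, the route from the question becomes (a sketch; everything else stays the same):

@app.route('/meriting')
def show_meritlist():
    # one iterable of columns instead of two separate positional arguments
    ob = Student.objects().filter().only((Student.marks, Student.username))
    ob = ob.filter(Student.marks >= 65).allow_filtering()
    return render_template('merit.html', ml=ob)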
Running the following code, trying to thread a batch of calls through Google Cloud Speech using Python, I get the following error: IndexError: list index out of range.
I cannot figure out where it is falling down; it works in small batches of 2 threads and 10 files, but when I scale beyond that I get the error. Any help would be very appreciated.
import pandas as pd  # used below to build the per-file DataFrame

def transcribe_c_gcs(gcs_uri):
    from google.cloud import speech_v1p1beta1 as speech
    client = speech.SpeechClient()
    audio = speech.types.RecognitionAudio(uri=gcs_uri)
    config = speech.types.RecognitionConfig(
        language_code='en-US',
        enable_word_time_offsets=True,
        enable_automatic_punctuation=True
    )
    operation = client.long_running_recognize(config, audio)
    reading = operation.result(timeout=90000)
    transcript_dict = {'Word': [], 'start_time': [], 'end_time': []}
    for result in reading.results:
        alternative = result.alternatives[0]
        for word_info in alternative.words:
            word = word_info.word
            start_time = word_info.start_time
            end_time = word_info.end_time
            transcript_dict['Word'].append(word)
            transcript_dict['start_time'].append(
                start_time.seconds + start_time.nanos * 1e-9)
            transcript_dict['end_time'].append(
                end_time.seconds + end_time.nanos * 1e-9)
    comp = pd.DataFrame(transcript_dict)
    comp['id'] = [gcs_uri.split('_')[2] for _ in range(len(comp.index))]
    comp['version'] = [gcs_uri.split('_')[3] for _ in range(len(comp.index))]
    return comp
from concurrent.futures import ThreadPoolExecutor

with ThreadPoolExecutor(max_workers=2) as executor:
    results = executor.map(transcribe_c_gcs, filelist)

asyncres = pd.concat(results, axis=0)
asyncres.to_csv('out.csv')
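No answer is attached to this one here, but note that gcs_uri.split('_')[2] and [3] raise exactly this IndexError for any URI with fewer than four underscore-separated parts, and result.alternatives[0] does the same when a result has no alternatives. A debugging sketch (the wrapper name transcribe_safe is mine) that surfaces which file fails instead of losing the exception inside executor.map:

def transcribe_safe(gcs_uri):
    # hypothetical wrapper around the question's function: report the failing
    # URI instead of letting the IndexError surface later out of pd.concat
    try:
        return transcribe_c_gcs(gcs_uri)
    except IndexError as exc:
        print('IndexError for %s: %s' % (gcs_uri, exc))
        return pd.DataFrame()  # empty frame keeps pd.concat happy

with ThreadPoolExecutor(max_workers=2) as executor:
    results = executor.map(transcribe_safe, filelist)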
I wanted to write a small program that would simulate lottery winning chances for me. After that I wanted to make it a bit faster by implementing multiprocessing, like this.
But two weird behaviors started.
import random as r
from multiprocessing.pool import ThreadPool

# winnerSequence = []
# mCombinations = []
howManyLists = 5
howManyTry = 1000000
combinations = 720/10068347520
possbilesNumConstantsConstant = []
for x in range(1, 50):
    possbilesNumConstantsConstant.append(x)

def getTicket():
    possbilesNumConstants = list(possbilesNumConstantsConstant)
    toReturn = []
    possiblesNum = list(possbilesNumConstants)
    for x in range(6):
        choice = r.choice(possiblesNum)
        toReturn.append(choice)
        possiblesNum.remove(choice)
    toReturn.sort()
    return toReturn

def sliceRange(rangeNum, num):
    """returns list of smaller ranges"""
    toReturn = []
    rest = rangeNum % num
    print(rest)
    toSlice = rangeNum - rest
    print(toSlice)
    n = toSlice/num
    print(n)
    for x in range(num):
        toReturn.append((int(n*x), int(n*(x+1)-1)))
    print(toReturn, "<---range")
    return toReturn

def Job(tupleRange):
    """Job returns list of tickets"""
    toReturn = list()
    print(tupleRange, "Start")
    for x in range(int(tupleRange[0]), int(tupleRange[1])):
        toReturn.append(getTicket())
    print(tupleRange, "End")
    return toReturn

result = list()
The first one: when I add Job(tupleRange) to the pool, it looks like the job is done in the main thread before another job is added to the pool.
def start():
    """this fun() starts program"""
    # create pool of threads
    pool = ThreadPool(processes=howManyLists)
    # create list of tuples with smaller piece of range
    lista = sliceRange(howManyTry, howManyLists)
    # create list for storing job objects
    jobList = list()
    for tupleRange in lista:
        # add job to pool
        jobToList = pool.apply_async(Job(tupleRange))
        # add returned object to list for future callback
        jobList.append(jobToList)
        print('Adding to pool', tupleRange)
    # for all jobs in list get returned tickets
    for job in jobList:
        # print(job.get())
        result.extend(job.get())

if __name__ == '__main__':
    start()
Console output:
[(0, 199999), (200000, 399999), (400000, 599999), (600000, 799999), (800000, 999999)] <---range
(0, 199999) Start
(0, 199999) End
Adding to pool (0, 199999)
(200000, 399999) Start
(200000, 399999) End
Adding to pool (200000, 399999)
(400000, 599999) Start
(400000, 599999) End
And the second one: when I want to get data from the thread, I get this exception on this line:
for job in jobList:
    # print(job.get())
    result.extend(job.get())  # <---- this line
File "C:/Users/CrazyUrusai/PycharmProjects/TestLotka/main/kopia.py", line 79, in start
result.extend(job.get())
File "C:\Users\CrazyUrusai\AppData\Local\Programs\Python\Python36\lib\multiprocessing\pool.py", line 644, in get
raise self._value
File "C:\Users\CrazyUrusai\AppData\Local\Programs\Python\Python36\lib\multiprocessing\pool.py", line 119, in worker
result = (True, func(*args, **kwds))
TypeError: 'list' object is not callable
Can somebody explain this to me? (I am new to multiprocessing.)
The problem is here:
jobToList = pool.apply_async(Job(tupleRange))
Job(tupleRange) executes first; then apply_async gets the returned value, which is of list type (as Job returns a list). There are two problems here: this code is synchronous, and apply_async gets a list instead of the callable it expects. So it tries to execute the given list as a job and fails with TypeError: 'list' object is not callable.
This is the signature of pool.apply_async:
def apply_async(self, func, args=(), kwds={}, callback=None,
                error_callback=None):
    ...
So you should pass the function func and its arguments args to apply_async separately, and you shouldn't call the function before sending it to the pool.
I fixed this line and your code worked for me:
jobToList = pool.apply_async(Job, (tupleRange, ))
Or, with explicitly named args,
jobToList = pool.apply_async(func=Job, args=(tupleRange, ))
Don't forget to wrap the function arguments in a tuple (or another iterable).
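Applied to the loop from the question, the corrected submission looks like this (only the apply_async line changes):

for tupleRange in lista:
    # pass the callable and its argument separately; the pool invokes
    # Job(tupleRange) in a worker and returns an AsyncResult immediately
    jobToList = pool.apply_async(Job, (tupleRange,))
    jobList.append(jobToList)
    print('Adding to pool', tupleRange)

With this change the workers' "Start"/"End" prints interleave with the "Adding to pool" prints, instead of each job finishing in the main thread before the next one is submitted.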
I'm new to the asyncio module. I have the following code that queries a service to return IDs. How do I set a variable to the results returned from the findIntersectingFeatures function?
Also, how can I have the print statements execute after run_in_executor has finished? They currently print immediately after the first iteration.
import json, requests, time
import asyncio

startTime = time.clock()
out_json = "UML10kmbuffer.json"
intersections = []

def findIntersectingFeatures(coordinate):
    coordinates = '{"rings":' + str(coordinate) + '}'
    forestCoverURL = 'http://server1.ags.com/server/rest/services/Forest_Cover/MapServer/0/query'
    params = {'f': 'json', 'where': "1=1", 'outFields': '*', 'geometry': coordinates, 'geometryType': 'esriGeometryPolygon', 'returnIdsOnly': 'true'}
    r = requests.post(forestCoverURL, data=params, verify=False)
    response = json.loads(r.content)
    if response['objectIds'] != None:
        intersections.append(response['objectIds'])
    return intersections

with open(out_json, "r") as f_in:
    for line in f_in:
        json_res = json.loads(line)
        coordinates = []
        # Get features
        feat_json = json_res["features"]
        for item in feat_json:
            coordinates.append(item["geometry"]["rings"])

loop = asyncio.get_event_loop()
for coordinate in coordinates:
    loop.run_in_executor(None, findIntersectingFeatures, coordinate)

print("Intersecting Features: " + str(intersections))

endTime = time.clock()
elapsedTime = (endTime - startTime) / 60
print("Elapsed Time: " + str(elapsedTime))
To use asyncio, you shouldn't just get the event loop, you must also run it. You can use run_until_complete to run a coroutine to completion. Since you need to run many coroutines in parallel, you can use asyncio.gather to combine them into a single parallel task:
coroutines = []
for coordinate in coordinates:
    coroutines.append(loop.run_in_executor(
        None, findIntersectingFeatures, coordinate))

intersections = loop.run_until_complete(asyncio.gather(*coroutines))
Also, how can I have the print statements execute after run_in_executor has finished?
You can await the call to run_in_executor and place your print after it; note that await requires the wrapper to be an async def coroutine:

async def find_features(coordinate):
    inter = await loop.run_in_executor(None, findIntersectingFeatures, coordinate)
    print('found', inter)
    return inter
# in the for loop, replace coroutines.append(loop.run_in_executor(...))
# with coroutines.append(find_features(coordinate)).
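Putting the two pieces together, the tail of the script would look roughly like this (a sketch reusing the names from the question):

loop = asyncio.get_event_loop()
coroutines = [find_features(coordinate) for coordinate in coordinates]
# gather combines the coroutines into one awaitable; run_until_complete
# drives the loop until every executor call has finished
all_results = loop.run_until_complete(asyncio.gather(*coroutines))
print("Intersecting Features: " + str(intersections))

Each element of all_results is the return value of one find_features call, which answers the first question about capturing the results in a variable.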
I have a single-threaded program that simply executes commands over ssh and looks for the output. But after a while I start getting extraordinarily strange behaviour:
ssh_cmd = 'ssh %s@%s %s' % (user, addr, options)
ssh = pexpect.spawn(ssh_cmd, timeout=60)
lgsuc = ['(?i)(password)']
for item in loginsuccess:
    lgsuc.append(item)
retval = ssh.expect(lgsuc)

for cmd in cmdlist:
    time.sleep(0.1)
    # this is regex to match anything. Essentially clears the buffer so you
    # don't get an invalid match from before
    ssh.expect(['(?s).*'])
    ssh.sendline(cmd)
    foundind = ssh.expect([re.escape("root#")], 30)  # very slow
    # repr-escape all the weird stuff so madness doesn't happen with ctrl chars
    rettxt = repr(ssh.before.decode("us-ascii") + "root:#")
    print("We Found:" + rettxt)
It will be fine for about 20 commands or so, then madness occurs. Assume the right echo is blablabla each time:
We found 'blablabla \r\n\r\n[edit]\r\nroot#'
We found 'blablabla \r\n\r\n[edit]\r\nroot#'
We found 'blablabla \r\n\r\n[edit]\r\nroot#'
... about 20 commands...
We found 'bl\r\nroot#' # here it just missed part of the string in the middle
We found 'alala\r\nroot#'
Here is the remainder of the echo from the previous command! The echo of the current command will show up some time later, and it gets worse and worse. The strange thing is that it is in the middle of the returned byte array.
Now there are some weird control codes coming back from this device, so if I replace:
rettxt = repr(ssh.before.decode("us-ascii") + "root:#")
with
rettxt = ssh.before.decode("us-ascii") + "root:#"
then
print("We Found:" + rettxt)
returns:
root#e Found lala
Anyway, there is really strange stuff going on with pexpect and the buffers, and I can't figure out what it is, so any help would be appreciated. I should mention I never get the timeout; the device always responds. Also, the total number of "root:#" occurrences in the log file exceeds the total number of lines sent.
If I go through and remove all control codes the output looks cleaner, but the problem still persists; it's as if pexpect can't handle control codes in its buffer correctly. Any help is appreciated.
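(An aside, not from the original post: one common way to keep pexpect's buffer in sync is to drop the catch-all (?s).* flush and instead anchor each command on a unique sentinel, which a stale echo from an earlier command can never contain. A minimal sketch, assuming the remote shell supports ; chaining and echo; the helper name run_synced is hypothetical:)

import re
import uuid

def run_synced(ssh, cmd, timeout=30):
    marker = uuid.uuid4().hex
    # the quotes split the sentinel in the echoed command line, so only the
    # command's real output contains the contiguous string EOC-<marker>
    ssh.sendline('%s ; echo "EOC"-%s' % (cmd, marker))
    ssh.expect('EOC-' + marker, timeout=timeout)
    return ssh.before.decode('us-ascii', errors='replace')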
UPDATE: Minimal verifiable example
OK, I have been able to recreate PART of the problem in an isolated Ubuntu environment sshing into itself.
First I need to create 4 commands that can be run on a host target, so put the following four files in ~/ (I did this in Ubuntu):
~/check.py
#!/usr/bin/python3
import time
import io
#abcd^H^H^H^H^MABC
#mybytes = b'\x61\x62\x63\x64\x08\x08\x08\x0D\x41\x42\x43'
#abcdACB
mybytes = b'\x61\x62\x63\x64\x41\x42\x43'
f = open('test.txt', 'wb')
#time.sleep(1)
f.write(mybytes)
print(mybytes.decode('ascii'))
f.close()
~/check2.py
#!/usr/bin/python3
import time
import io
#0123^H^H^H^H^MABC
mybytes = b'\x30\x31\x32\x33\x08\x0D\x0D\x08\x08\x08\x08\x0D\x41\x42\x43'
f = open('test2.txt', 'wb')
#time.sleep(0.1)
f.write(mybytes)
print(mybytes.decode('ascii'))
f.close()
~/check3.py
#!/usr/bin/python3
import time
import io
#789:^H^H^H^H^DABC
mybytes = b'\x37\x38\x39\x3A\x08\x08\x08\x08\x08\x08\x08\x0D\x0D\x41\x42\x43'
f = open('test3.txt', 'wb')
#time.sleep(0.1)
f.write(mybytes)
print(mybytes.decode('ascii'))
f.close()
And lastly, ~/check4.py. Sorry, it took a weird combination for the problem to show back up:
#!/usr/bin/python3
import time
import io
#abcd^H^H^H^HABC
mybytes = b'\x61\x62\x63\x64\x08\x08\x08\x0D\x41\x42\x43'
f = open('test.txt', 'wb')
time.sleep(4)
f.write(mybytes)
print(mybytes.decode('ascii'))
f.close()
Notice that the last one has a bigger sleep; this is to provoke the pexpect timeout. Though in my actual testing this doesn't occur, I have commands that take over 6 minutes to return any text, so this might be part of it. OK, and the final file to run everything. It might look ugly, but I did a massive trim so I could post it here:
#! /usr/bin/python3
#import paramiko
import time
import sys
import xml.etree.ElementTree as ET
import xml
import os.path
import traceback
import re
import datetime
import pexpect
import os
import os.path

ssh = None
respFile = None

#Error Codes:
DEBUG = True
NO_ERROR = 0
EXCEPTION_THROWS = 1
RETURN_TEXT_NEVER_FOUND = 2

LOG_CONSOLE = 1
LOG_FILE = 2
LOG_BOTH = 3

def log(out, dummy=None):
    print(str(out))

def connect(user, addr, passwd):
    global ssh
    fout = open('session.log', 'wb')
    #options = '-q -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oPubkeyAuthentication=no'
    options = ' -oUserKnownHostsFile=/dev/null '
    #options = ''
    #REPLACE WITH YOUR LOCAL USER NAME
    #user = 'user'
    #REPLACE WITH YOUR LOCAL PASSWORD
    #passwd = '123TesT321'
    #addr = '127.0.0.1'
    ssh_cmd = 'ssh %s@%s %s' % (user, addr, options)
    public = None
    private = None
    retval = 0
    try:
        ssh = pexpect.spawn(ssh_cmd, timeout=60)
        ssh.logfile = fout
        #the most common prompts
        loginsuccess = [re.escape("#"), re.escape("$")]
        lgsuc = ['(?i)(password)', re.escape("connecting (yes/no)? ")]
        for item in loginsuccess:
            lgsuc.append(item)
        retval = ssh.expect(lgsuc)
    except pexpect.TIMEOUT as exc:
        log("Server never connected to SSH tunnel")
        return 0
    print('where here ret val = ' + str(retval))
    try:
        if(retval > 1):
            return 1
        elif(retval == 1):
            hostkey = ssh.before.decode("utf-8")
            ssh.sendline("yes")
            log("Warning! new host key was added to the database: " + hostkey.split("\n")[1])
            lgsuc = ['password: ']
            for item in loginsuccess:
                lgsuc.append(item)
            retval = ssh.expect(lgsuc)
            if(retval > 0):
                return 1
            else:
                if(public is not None):
                    log("Warning public key authentication failed trying password if available...")
        else:
            if public is not None:
                log("Warning public key authentication failed trying password if available...")
        if(passwd is None):
            log("No password and certificate authentication failed...")
            return 0
        ssh.sendline(passwd)
        login = ['password: ']
        for item in loginsuccess:
            login.append(item)
        retval = ssh.expect(login)
    except pexpect.TIMEOUT as exc:
        log("Server never responded with expected login prompt: " + str(lgsuc))
        return 0
    #return 0
    if retval > 0:
        retval = 1
    if retval == 0:
        log("Failed to connect to IP:" + addr + " User:" + user + " Password:" + passwd)
    return retval

def disconnect():
    log("Disconnecting...")
    global ssh
    if ssh is not None:
        ssh.close()
    else:
        log("Something weird happened with the SSH client while closing the session. Shouldn't really matter", False)

def RunCommand(cmd, waitTXT, timeout=5):
    global ssh
    Error = 0
    if DEBUG:
        print('Debugging: cmd: ' + cmd + '. timeout: ' + str(timeout) + '. len of txt tags: ' + str(len(waitTXT)))
    if(type(waitTXT) is str):
        waitTXT = [re.escape(waitTXT)]
    elif(not hasattr(waitTXT, '__iter__')):
        waitTXT = [re.escape(str(waitTXT))]
    else:
        cnter = 0
        for TXT in waitTXT:
            waitTXT[cnter] = re.escape(str(TXT))
            cnter += 1
    #start = time.time()
    #print("type3: "+str(type(ssh)))
    #time.sleep(1)
    #this is regex to match anything. Essentially clears the buffer so you don't get an invalid match from before
    ssh.expect(['(?s).*'])
    ssh.sendline(cmd)
    print("Debugging: sent: " + cmd)
    #GoOn = True
    rettxt = ""
    try:
        foundind = ssh.expect(waitTXT, timeout)
        allbytes = ssh.before
        newbytes = bytearray()
        for onebyte in allbytes:
            if onebyte > 31:
                newbytes.append(onebyte)
        allbytes = bytes(newbytes)
        rettxt = repr(allbytes.decode("us-ascii") + waitTXT[foundind])
        #rettxt = ssh.before + waitTXT[foundind]
        if DEBUG:
            print("Debugging: We found " + rettxt)
    except pexpect.TIMEOUT as exc:
        if DEBUG:
            txtret = ""
            for strtxt in waitTXT:
                txtret += strtxt + ", "
            print("ERROR Debugging: we timed out waiting for text:" + txtret)
        pass
    return (rettxt, Error)

def CloseAndExit():
    disconnect()
    global respFile
    if respFile is not None and '_io.TextIOWrapper' in str(type(respFile)):
        if not respFile.closed:
            respFile.close()

def main(argv):
    try:
        cmds = ['~/check.py', '~/check2.py', '~/check3.py', '~/check2.py', '~/check3.py',
                '~/check.py', '~/check2.py', '~/check3.py', '~/check2.py', '~/check3.py',
                '~/check4.py', '~/check3.py', '~/check.py', '~/check2.py']
        ##CHANGE THESE TO MATCH YOUR SSH HOST
        ret = connect('user', '127.0.0.1', 'abcd1234')
        for cmd in cmds:
            cmdtxt = str(cmd)
            #rett = RunCommand(ssh, "ls", "root", 0, 5)
            strlen = (170 - (len(cmdtxt))) / 2
            dashval = ''
            starval = ''
            tcnt = 0
            while(tcnt < strlen):
                dashval += '-'
                starval += '*'
                tcnt += 1
            if DEBUG:
                print(dashval + cmdtxt + dashval)
            #checkval = ['ABC']
            #REPLACE THE FOLLOWING LINE WITH YOUR TARGET PROMPT
            checkval = ['user-virtual-machine:~$']
            rett = RunCommand(cmdtxt, checkval, 2)
            if DEBUG:
                print(starval + cmdtxt + starval)
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print(exc_type, fname, exc_tb.tb_lineno)
        print(traceback.format_exc())
    CloseAndExit()
    #disconnect()
    #respFile.close()

main(sys.argv)
Make sure that all four check scripts and the main Python script have executable permission via sudo chmod 774 or similar. In the main() call, set your username, IP address, and password for the target that has the check scripts, and make sure they are in your ~/ directory.
Once you run this you can look at session.log, and at least on mine there is some weird stuff going on with the buffer:
~/check4.py^M
~/check3.py
~/check3.py^M
abcd^H^H^H^MABC^M
^[]0;user@user-virtual-machine: ~^Guser@user-virtual-machine:~$ ~/check.py
~/check3.py^M
789:^H^H^H^H^H^H^H^M^MABC^M
^[]0;user@user-virtual-machine: ~^Guser@user-virtual-machine:~$ ~/check.py~/check2.py
And unfortunately it's not as corrupt as my actual problem, but I have several hundred commands on an embedded custom Linux kernel that I obviously can't recreate for everyone. Anyway, any help is greatly appreciated. Hopefully these examples work for you; I am just on Ubuntu 16.04 LTS. Also make sure to replace 'user-virtual-machine:~$' with whatever your target login prompt looks like.