Hello, I am trying to port an Odoo 12 module to Odoo 13 and I am getting the error message shown below.
Can someone explain how to troubleshoot this kind of error?
Maybe there is an easy solution?
I'm really new to Odoo, so please share as much knowledge as possible. That way, maybe one more module developer will grow and develop some free modules :)
Here is the code:
@api.model
def create_from_ui(self, orders):
    submitted_references = [o['data']['name'] for o in orders]
    pos_order = self.search([('pos_reference', 'in', submitted_references)])
    existing_orders = pos_order.read(['pos_reference'])
    existing_references = set([o['pos_reference'] for o in existing_orders])
    orders_to_save = [o for o in orders if o['data']['name'] not in existing_references]
    order_ids = []
    order_to_update = [o for o in orders if o['data']['name'] in existing_references]

    # Keep only new orders
    for tmp_order in orders_to_save:
        to_invoice = tmp_order['to_invoice']
        order = tmp_order['data']
        if to_invoice:
            self._match_payment_to_invoice(order)
        pos_order = self._process_order(order)
        order_ids.append(pos_order.id)
        try:
            pos_order.action_pos_order_paid()
        except psycopg2.OperationalError:
            # do not hide transactional errors, the order(s) won't be saved!
            raise
        except Exception as e:
            _logger.error('Could not fully process the POS Order: %s', tools.ustr(e))
        if to_invoice:
            pos_order.action_pos_order_invoice()
            pos_order.invoice_id.sudo().action_invoice_open()
            pos_order.account_move = pos_order.invoice_id.move_id

    # Update draft orders
    for tmp_order in order_to_update:
        for order in pos_order:
            if order.pos_reference == tmp_order['data']['name']:
                pos_line_ids = self.env['pos.order.line'].search([('order_id', '=', order.id)])
                if pos_line_ids:
                    pos_cids = []
                    new_cids = []
                    for line_id in pos_line_ids:
                        pos_cids.append(line_id.pos_cid)
                        for line in tmp_order['data']['lines']:
                            if line_id.pos_cid == line[2].get('pos_cid'):
                                new_cids.append(line[2].get('pos_cid'))
                                order.write({'lines': [(1, line_id.id, line[2])]})
                    for line in tmp_order['data']['lines']:
                        if line[2].get('pos_cid') not in pos_cids:
                            order.write({'lines': [(0, 0, line[2])]})
                            pos_cids.append(line[2].get('pos_cid'))
                            new_cids.append(line[2].get('pos_cid'))
                    newList = []
                    for item in pos_cids:
                        if item not in new_cids:
                            newList.append(item)
                    order_line_ids = self.env['pos.order.line'].search([('pos_cid', 'in', newList)])
                    if order_line_ids:
                        for each_line in order_line_ids:
                            each_line.unlink()
                to_invoice = tmp_order['to_invoice']
                order = tmp_order['data']
                if to_invoice:
                    self._match_payment_to_invoice(order)
                pos_order = self._process_order(order)
                order_ids.append(pos_order.id)
                try:
                    pos_order.action_pos_order_paid()
                except psycopg2.OperationalError:
                    # do not hide transactional errors, the order(s) won't be saved!
                    raise
                except Exception as e:
                    _logger.error('Could not fully process the POS Order: %s', tools.ustr(e))
                if to_invoice:
                    pos_order.action_pos_order_invoice()
                    pos_order.invoice_id.sudo().action_invoice_open()
                    pos_order.account_move = pos_order.invoice_id.move_id
    self.broadcast_order_data(True)
    return order_ids
And here is the error I am getting:
File "/usr/lib/python3/dist-packages/odoo/api.py", line 356, in _call_kw_model
result = method(recs, *args, **kwargs)
TypeError: create_from_ui() takes 2 positional arguments but 3 were given
As the error log suggests, the signature of the method you are adapting changed in Odoo 13 in terms of its arguments.
You can check the new signature of the method for Odoo 13 here.
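If I remember correctly, Odoo 13 added a draft parameter to create_from_ui, so the override has to accept it as well. A minimal sketch of the adapted signature (the PosOrder class name and the super() call are only illustrative, please verify against your Odoo 13 source):

@api.model
def create_from_ui(self, orders, draft=False):
    # Odoo 13 calls this method with an extra positional argument (draft),
    # which is why the old two-argument override fails with
    # "create_from_ui() takes 2 positional arguments but 3 were given".
    # ... custom logic from the ported module goes here ...
    return super(PosOrder, self).create_from_ui(orders, draft=draft)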
Thanks
import fnmatch
import os

from vpp_papi import VPPApiClient

vpp_json_dir = '/usr/share/vpp/api/'
jsonfiles = []
for root, dirnames, filenames in os.walk(vpp_json_dir):
    for filename in fnmatch.filter(filenames, '*.api.json'):
        jsonfiles.append(os.path.join(vpp_json_dir, filename))
if not jsonfiles:
    print('Error: no json api files found')
    exit(-1)
The jsonfiles list is populated correctly. Then I create the client:
vpp = VPPApiClient(jsonfiles)
I keep getting the error
File "/home/vector/Desktop/vas/py/try3.py", line 29, in <module>
vpp = VPPApiClient(jsonfiles)
TypeError: __init__() takes 1 positional argument but 2 were given
The code for the class VPPApiClient is:

class VPPApiClient:
    """VPP interface. This class provides the APIs to VPP."""

    apidir = None
    VPPApiError = VPPApiError
    VPPRuntimeError = VPPRuntimeError
    VPPValueError = VPPValueError
    VPPNotImplementedError = VPPNotImplementedError
    VPPIOError = VPPIOError

    def __init__(self, apifiles=None, testmode=False, async_thread=True,
                 logger=None, loglevel=None,
                 read_timeout=5, use_socket=False,
                 server_address='/run/vpp/api.sock'):
Can someone kindly tell me how to resolve this error?
Found the answer.
vpp = VPP(apifiles=jsonfiles)
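For reference, the fix works because the argument is passed by keyword: in some vpp_papi releases the constructor parameters are keyword-only (the class source shown above may belong to a different version than the one installed), so a positional jsonfiles is rejected with exactly this TypeError. A hedged sketch using the class name from the question:

from vpp_papi import VPPApiClient

# Pass the API definition files by keyword; a positional argument triggers
# "__init__() takes 1 positional argument but 2 were given" when the
# installed __init__ only accepts keyword arguments.
vpp = VPPApiClient(apifiles=jsonfiles)
rv = vpp.connect('example-client')  # the client name here is arbitrary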
I have an issue converting a chunked list into multiple dictionaries in order to send my requests in batches:
fd = open(filename, 'r')
sqlFile = fd.read()
fd.close()
commands = sqlFile.split(';')

for command in commands:
    try:
        c = conn.cursor()
        c.execute(command)
        # create a list with the query results in batches of size 100
        for batch in grouper(c.fetchall(), 100):
            # This is where the error occurs:
            result = [dict(zip([key[0] for key in c.description], i)) for i in batch]
            # TODO: Send the json with 100 items to API
    except RuntimeError:
        print('Error.')
The issue is that it only iterates through the batches once and gives the following error. The query actually returns 167 rows, so the first request should contain 100 items and the second iteration should send the remaining 67 items in a second request.
TypeError: zip argument #2 must support iteration
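Assuming grouper is the usual itertools recipe, it pads the last chunk with a fill value (None by default), so the final batch of 67 rows is topped up with 33 None entries, and zip(columns, None) fails because None is not iterable. A minimal sketch of a padding-free chunker (the chunks helper is my own name, not from the original code):

def chunks(rows, size=100):
    """Yield successive slices of at most `size` rows, without padding."""
    for start in range(0, len(rows), size):
        yield rows[start:start + size]

columns = [col[0] for col in c.description]
for batch in chunks(c.fetchall(), 100):
    result = [dict(zip(columns, row)) for row in batch]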
I solved the issue by making a dictionary right away with c.rowfactory = makeDictFactory(c):
def makeDictFactory(cursor):
    columnNames = [d[0] for d in cursor.description]
    def createRow(*args):
        return dict(zip(columnNames, args))
    return createRow

def getAndConvertDataFromDatabase(filename):
    fd = open(filename, 'r')
    sqlFile = fd.read()
    fd.close()
    commands = sqlFile.split(';')
    for command in commands:
        try:
            c = conn.cursor()
            c.execute(command)
            c.rowfactory = makeDictFactory(c)
            data = c.fetchall()
            for batch in [data[x:x+100] for x in range(0, len(data), 100)]:
                return postBody(json.dumps(batch, default=myconverter), dataList[filename])
        except RuntimeError:
            print('Error.')
I am trying to create a dataframe with Python's pandas library, using data obtained from a requests response. The problem is that when an item is not available in the API, a KeyError is raised and the program crashes.
The source dataframe is iterated over by product name. For each row, the code takes the product name, finds how many different SKUs exist, and creates a row in a new dataframe for each SKU, adding the quantity and other needed information. The idea is to repeat the row from the first dataframe once per SKU, updated with the quantity and package ID for that SKU.
If the length of the response returned is 0, I still want to append the row from the first dataframe.
def create_additional_rows_needed(comb_data):
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    logging.debug("test")

    new_combined_data = pd.DataFrame(columns=comb_data.columns)
    COVA_DATA_LEN = 2993
    row = 0
    current_item = ''
    while row < len(comb_data):
        number_of_skus = 0
        current_item = comb_data.iloc[row, 1]
        if (len(current_item)) is not None:
            number_of_skus = len(find_gb_product(current_item))
        else:
            number_of_skus = 0
        current_quantity = find_gb_product(current_item).iloc[number_of_skus - 1, find_gb_product(current_item).columns.get_loc('quantity')]
        logger.info('Current Quantity: {}'.format(current_quantity))
        current_package = find_gb_product(current_item)['lot_number'][number_of_skus - 1]
        if number_of_skus == 0:
            pass
        while number_of_skus > 0:
            logger.info('Current Item: {}'.format(current_item))
            logger.info('Number of Skus: {}'.format(number_of_skus))
            logger.info('Appending: {}'.format(comb_data.iloc[row, 1]))
            new_combined_data = new_combined_data.append([comb_data.iloc[row, :]])
            new_combined_data.iloc[-1, new_combined_data.columns.get_loc('TotalOnHand')] = current_quantity
            new_combined_data.iloc[-1, new_combined_data.columns.get_loc('PackageId')] = current_package
            number_of_skus = number_of_skus - 1
        logger.info('Finished index {}'.format(row))
        row = row + 1
        logger.info('Moving to index {}'.format(row))
    return new_combined_data
It goes well for every item with the exception of a few. Here is the error I get.
KeyError
2889 return self._engine.get_loc(casted_key)
2890 except KeyError as err:
-> 2891 raise KeyError(key) from err
2892
2893 if tolerance is not None:
KeyError: 'quantity'
This has taken up my entire weekend and all my sleep, and it is due Monday morning at 10am MST with only two days' notice. Please help me.
Catching the error and continuing should work. Something along the lines of:

while row < len(comb_data):
    ....
    try:
        current_quantity = find_gb_product(current_item).iloc[number_of_skus - 1, find_gb_product(current_item).columns.get_loc('quantity')]
    except KeyError:
        row = row + 1  # advance the index before continuing, otherwise the loop never moves on
        continue
    ....
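Since the question also wants the original row appended when the API has nothing for that item, here is a hedged variant of the loop body. It assumes, as the question's code does, that find_gb_product returns a DataFrame with 'quantity' and 'lot_number' columns when it finds something, and raises KeyError (or returns a frame without those columns) otherwise:

while row < len(comb_data):
    current_item = comb_data.iloc[row, 1]
    try:
        skus = find_gb_product(current_item)
        # KeyError: 'quantity' means the API returned nothing usable for this item
        quantities = skus['quantity']
        packages = skus['lot_number']
        number_of_skus = len(skus)
    except KeyError:
        number_of_skus = 0

    if number_of_skus == 0:
        # No SKU data: still carry the original row over unchanged
        new_combined_data = new_combined_data.append([comb_data.iloc[row, :]])
    else:
        for sku_index in range(number_of_skus):
            new_combined_data = new_combined_data.append([comb_data.iloc[row, :]])
            new_combined_data.iloc[-1, new_combined_data.columns.get_loc('TotalOnHand')] = quantities.iloc[sku_index]
            new_combined_data.iloc[-1, new_combined_data.columns.get_loc('PackageId')] = packages.iloc[sku_index]
    row = row + 1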
I wanted to write a small program that would simulate lottery winning chances for me. After that I wanted to make it a bit faster by implementing multiprocessing, like this.
But two weird behaviours started.
import random as r
from multiprocessing.pool import ThreadPool

# winnerSequence = []
# mCombinations = []
howManyLists = 5
howManyTry = 1000000
combinations = 720/10068347520
possbilesNumConstantsConstant = []
for x in range(1, 50):
    possbilesNumConstantsConstant.append(x)

def getTicket():
    possbilesNumConstants = list(possbilesNumConstantsConstant)
    toReturn = []
    possiblesNum = list(possbilesNumConstants)
    for x in range(6):
        choice = r.choice(possiblesNum)
        toReturn.append(choice)
        possiblesNum.remove(choice)
    toReturn.sort()
    return toReturn

def sliceRange(rangeNum, num):
    """returns list of smaller ranges"""
    toReturn = []
    rest = rangeNum % num
    print(rest)
    toSlice = rangeNum - rest
    print(toSlice)
    n = toSlice / num
    print(n)
    for x in range(num):
        toReturn.append((int(n*x), int(n*(x+1)-1)))
    print(toReturn, "<---range")
    return toReturn

def Job(tupleRange):
    """Job returns list of tickets"""
    toReturn = list()
    print(tupleRange, "Start")
    for x in range(int(tupleRange[0]), int(tupleRange[1])):
        toReturn.append(getTicket())
    print(tupleRange, "End")
    return toReturn

result = list()
The first one: when I add Job(tupleRange) to the pool, it looks like the job is done in the main thread before the next job is added to the pool.
def start():
    """this fun() starts program"""
    # create pool of threads
    pool = ThreadPool(processes=howManyLists)
    # create list of tuples with smaller pieces of the range
    lista = sliceRange(howManyTry, howManyLists)
    # create list for storing job objects
    jobList = list()
    for tupleRange in lista:
        # add job to pool
        jobToList = pool.apply_async(Job(tupleRange))
        # add returned object to list for future callback
        jobList.append(jobToList)
        print('Adding to pool', tupleRange)
    # for all jobs in the list, get the returned tickets
    for job in jobList:
        # print(job.get())
        result.extend(job.get())

if __name__ == '__main__':
    start()
Console output:
[(0, 199999), (200000, 399999), (400000, 599999), (600000, 799999), (800000, 999999)] <---range
(0, 199999) Start
(0, 199999) End
Adding to pool (0, 199999)
(200000, 399999) Start
(200000, 399999) End
Adding to pool (200000, 399999)
(400000, 599999) Start
(400000, 599999) End
and the second one: when I want to get data from the thread, I get this exception on this line:
for job in jobList:
    # print(job.get())
    result.extend(job.get())  # <---- this line
File "C:/Users/CrazyUrusai/PycharmProjects/TestLotka/main/kopia.py", line 79, in start
result.extend(job.get())
File "C:\Users\CrazyUrusai\AppData\Local\Programs\Python\Python36\lib\multiprocessing\pool.py", line 644, in get
raise self._value
File "C:\Users\CrazyUrusai\AppData\Local\Programs\Python\Python36\lib\multiprocessing\pool.py", line 119, in worker
result = (True, func(*args, **kwds))
TypeError: 'list' object is not callable
Can somebody explain this to me? (I am new to multiprocessing.)
The problem is here:
jobToList = pool.apply_async(Job(tupleRange))
Job(tupleRange) executes first, then apply_async receives its return value, which is a list (as Job returns a list). There are two problems here: this code runs synchronously, and apply_async gets a list instead of the callable it expects, so it tries to call the given list as if it were a job and fails.
This is the signature of pool.apply_async:

def apply_async(self, func, args=(), kwds={}, callback=None,
                error_callback=None):
    ...
So you should pass func and its arguments args to this function separately, and you shouldn't call the function yourself before handing it to the pool.
I fixed this line and your code worked for me:
jobToList = pool.apply_async(Job, (tupleRange, ))
Or, with explicitly named args,
jobToList = pool.apply_async(func=Job, args=(tupleRange, ))
Don't forget to wrap the function arguments in a tuple (or a similar sequence).
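As a self-contained illustration of the difference (the square example is mine, not from the question):

from multiprocessing.pool import ThreadPool

def square(n):
    return n * n

if __name__ == '__main__':
    pool = ThreadPool(processes=2)
    # Correct: hand apply_async the callable and its arguments separately;
    # the pool calls square(7) in a worker thread.
    async_result = pool.apply_async(square, (7,))
    print(async_result.get())  # prints 49
    # Incorrect: pool.apply_async(square(7)) would run square(7) immediately
    # in the main thread, and get() would later raise
    # "TypeError: 'int' object is not callable".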
It seems like for some of the tweets with the keyword 'follow' it will follow, and for some of them it won't.
Other than that it works fine (I didn't notice anything else).
Can someone pinpoint where the problem is?
class Listener():
    def search(self, twts):
        global numoftwts
        for i in twts:
            names = ['follow', 'following']
            txt = i.text.lower()
            if not any(k in txt for k in keywords) or any(k in txt for k in bannedwords):
                continue
            if not self.is_user_bot_hunter(str(i.author.screen_name)):
                if not i.retweeted:
                    try:
                        print("Trying to retweet status_id:{}".format(i.id))
                        res = api.retweet(i.id)
                        if res.retweeted:
                            api.create_favorite(i.id)
                            print('retweeted', numoftwts, 'times', '-',
                                  str(datetime.datetime.fromtimestamp(time.time()).strftime('%d-%m-%Y %H:%M:%S')))
                            print(i.text)
                            print('\n')
                        else:
                            print("retweet failed")
                        if any(c in txt for c in names):
                            # print("Trying to follow something")
                            # if hasattr(i, 'retweeted_status'):
                            #     print("trying to fetch user_id")
                            user_id = i.retweeted_status.user.id_str
                            res = api.create_friendship(user_id)
                            res = api.get_user(user_id)
                            if res.following:
                                print("Successfully followed :{}".format(user_id))
                                print('\n')
                    except Exception as e:
                        print("Exception: {}".format(str(e)))
                        continue
        sleep(600)

    def run(self):
        for eachkey in keywords:
            tweets = api.search(q=eachkey, result_type='mixed', lang='en')
            self.search(tweets)

if __name__ == '__main__':
    while True:
        r = Listener()
        r.run()
Where did I go wrong?
AttributeError: 'Status' object has no attribute 'retweeted_status'
> c:\users\x\desktop\twitterbot\twtbotcopy.py(64)search()
-> user_id = i.retweeted_status.user.id_str
(Pdb) n
> c:\users\x\desktop\twitterbot\twtbotcopy.py(70)search()
-> except Exception as e:
(Pdb) n
If you're getting an error where you are unable to get tweets from a particular user, then use:
try:
    specific_tweets = tweepy.Cursor(api.search, tweet_mode='extended', q=<some query>, lang='en').items(500)
except tweepy.error.TweepError:
    pass
And if you want to access the retweeted_status attribute of a tweet, then do this:
if hasattr(tweet, 'retweeted_status'):
    extracted_author = tweet.retweeted_status.user.screen_name
else:
    extracted_author = tweet.user.screen_name
Basically, check whether hasattr(tweet, 'retweeted_status') is true or not: it tells you whether the tweet has an attribute named "retweeted_status".
AttributeError: 'Status' object has no attribute 'retweeted_status'
-> user_id = i.retweeted_status.user.id_str
It means that you are trying to get the user ID of the retweeted author for a tweet that is not a retweet.
If you want to know whether a tweet is a RT, the test is:
if hasattr(tweet, 'retweeted_status'):
    # this tweet is a RT
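Applied to the code in the question, a hedged sketch of the follow branch (variable names taken from the question; guarding with hasattr so plain tweets no longer raise the AttributeError):

if any(c in txt for c in names):
    # Follow the original author when the tweet is a retweet,
    # otherwise fall back to the tweet's own author.
    if hasattr(i, 'retweeted_status'):
        user_id = i.retweeted_status.user.id_str
    else:
        user_id = i.author.id_str
    api.create_friendship(user_id)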