Airflow task_id not found - python-3.x

I am trying to set a unique id for each of the Celery tasks in Airflow, but for some reason it throws the following error. I even tried the uuid module and the same error pops up.
Traceback (most recent call last):
File "/home/mahesh/anaconda3/bin/airflow", line 37, in <module>
args.func(args)
File "/home/mahesh/anaconda3/lib/python3.7/site-packages/airflow/utils/cli.py", line 76, in wrapper
return f(*args, **kwargs)
File "/home/mahesh/anaconda3/lib/python3.7/site-packages/airflow/bin/cli.py", line 547, in run
task = dag.get_task(task_id=args.task_id)
File "/home/mahesh/anaconda3/lib/python3.7/site-packages/airflow/models/dag.py", line 1263, in get_task
raise TaskNotFound("Task {task_id} not found".format(task_id=task_id))
airflow.exceptions.TaskNotFound: Task 893370 not found
Following is my DAG
import random
from datetime import datetime

import requests
from airflow import DAG
from airflow.operators.python_operator import PythonOperator

dag_id = 'test'
dag = DAG(dag_id, description='test DAG',
          schedule_interval=None, start_date=datetime(2018, 11, 1), catchup=False)

uuid_list = []
index = 0

def my_sleeping_function(**context):
    url = context['url']
    r = requests.head(url)
    return {url: r.status_code}

def fetch_final_result(**context):
    task_instance = context['task_instance']
    ans = []
    print("paypal is ", uuid_list)
    for i in uuid_list:
        data = task_instance.xcom_pull(task_ids=i)
        ans.append(data)
    print("uber is ", ans)
    return ans

run_this_bash_last = PythonOperator(
    task_id='last',
    python_callable=fetch_final_result,
    # op_kwargs={'url': 'asd'},
    dag=dag,
    provide_context=True)

urls = [website1, website2, website3, website4, website5, website6]

for i in urls:
    index += 1
    ind_id = str(random.randint(1, 100000000000000))
    uuid_list.append(ind_id)
    task_python = PythonOperator(
        task_id=ind_id,
        python_callable=my_sleeping_function,
        op_kwargs={'url': i},
        dag=dag,
        provide_context=True)
    task_python.set_downstream(run_this_bash_last)
Please suggest how to overcome this.
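One thing that can produce exactly this TaskNotFound error with randomly generated IDs is that the scheduler and the worker each re-parse the DAG file, so random.randint() (or uuid) yields a different set of task_ids in each process and the worker cannot find the id it was asked to run. A minimal sketch, under that assumption, that keeps the ids stable across parses by deriving them from the loop index:

# Hypothetical variant of the loop above: the task_id is deterministic, so every
# parse of the DAG file (scheduler, webserver, worker) produces the same ids.
uuid_list = []
for index, url in enumerate(urls):
    ind_id = 'check_url_{}'.format(index)   # stable id instead of random.randint()
    uuid_list.append(ind_id)
    task_python = PythonOperator(
        task_id=ind_id,
        python_callable=my_sleeping_function,
        op_kwargs={'url': url},
        dag=dag,
        provide_context=True)
    task_python.set_downstream(run_this_bash_last)

With stable ids, fetch_final_result can still xcom_pull(task_ids=i) for each entry in uuid_list.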

Related

An HTTP Client raised an unhandled exception: 'int' object is not callable

I'm seeing this issue in two Python libraries used in my code. It happens randomly in production on some of the pods, but not all of them.
Trace of the error:
Traceback (most recent call last):
  File "/usr/local/lib/python3.10/site-packages/botocore/httpsession.py", line 448, in send
    urllib_response = conn.urlopen(
  File "/usr/local/lib/python3.10/site-packages/urllib3/connectionpool.py", line 703, in urlopen
    httplib_response = self._make_request(
  File "/usr/local/lib/python3.10/site-packages/newrelic/hooks/external_urllib3.py", line 32, in _nr_wrapper_make_request_
    return wrapped(*args, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/urllib3/connectionpool.py", line 386, in _make_request
    self._validate_conn(conn)
  File "/usr/local/lib/python3.10/site-packages/urllib3/connectionpool.py", line 1040, in _validate_conn
    conn.connect()
  File "/usr/local/lib/python3.10/site-packages/urllib3/connection.py", line 416, in connect
    self.sock = ssl_wrap_socket(
  File "/usr/local/lib/python3.10/site-packages/urllib3/util/ssl_.py", line 424, in ssl_wrap_socket
    context.set_alpn_protocols(ALPN_PROTOCOLS)
  File "/usr/local/lib/python3.10/ssl.py", line 566, in set_alpn_protocols
    if len(b) == 0 or len(b) > 255:
TypeError: 'int' object is not callable

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/code/app/aws_utils/dynamo_db.py", line 176, in update_multi_attributes
    response = table.update_item(
  File "/usr/local/lib/python3.10/site-packages/boto3/resources/factory.py", line 580, in do_action
    response = action(self, *args, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/boto3/resources/action.py", line 88, in __call__
    response = getattr(parent.meta.client, operation_name)(*args, **params)
  File "/usr/local/lib/python3.10/site-packages/botocore/client.py", line 514, in _api_call
    return self._make_api_call(operation_name, kwargs)
  File "/usr/local/lib/python3.10/site-packages/botocore/client.py", line 921, in _make_api_call
    http, parsed_response = self._make_request(
  File "/usr/local/lib/python3.10/site-packages/botocore/client.py", line 944, in _make_request
    return self._endpoint.make_request(operation_model, request_dict)
  File "/usr/local/lib/python3.10/site-packages/newrelic/hooks/external_botocore.py", line 108, in _nr_endpoint_make_request_
    result = wrapped(*args, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/botocore/endpoint.py", line 119, in make_request
    return self._send_request(request_dict, operation_model)
  File "/usr/local/lib/python3.10/site-packages/botocore/endpoint.py", line 231, in _send_request
    raise exception
  File "/usr/local/lib/python3.10/site-packages/botocore/endpoint.py", line 281, in _do_get_response
    http_response = self._send(request)
  File "/usr/local/lib/python3.10/site-packages/botocore/endpoint.py", line 377, in _send
    return self.http_session.send(request)
  File "/usr/local/lib/python3.10/site-packages/botocore/httpsession.py", line 493, in send
    raise HTTPClientError(error=e)
botocore.exceptions.HTTPClientError: An HTTP Client raised an unhandled exception: 'int' object is not callable
I'm getting this on all DynamoDB calls. Adding one of the call sites here; as far as I understand, the exception is raised inside the packages and not in my code.
def update_multi_attributes(
    self,
    table,
    partition_key,
    sort_key,
    attr_value_dict,
    delete_attr=[],
):
    try:
        if not partition_key or not sort_key:
            return None
        if not attr_value_dict and len(delete_attr) == 0:
            return None
        if not attr_value_dict and len(delete_attr) > 0:
            return self.delete_attributes(
                table=table,
                partition_key=partition_key,
                sort_key=sort_key,
                delete_attr=delete_attr,
            )
        expression_attribute_values = {}
        expression_attribute_names = {}
        update_expression = "SET "
        remove_attr_expression = "REMOVE "
        index = 0
        attr_value_dict.pop("partition_key", None)
        attr_value_dict.pop("sort_key", None)
        for key, value in attr_value_dict.items():
            index += 1
            var_name = "#var" + str(index)
            var_value_name = ":var" + str(index)
            expression_attribute_values[var_value_name] = value
            expression_attribute_names[var_name] = key
            if index < len(attr_value_dict.keys()):
                update_expression = (
                    update_expression + var_name + "=" + var_value_name + ","
                )
            else:
                update_expression = (
                    update_expression + var_name + "=" + var_value_name
                )
        index = 0
        for attr in delete_attr:
            index += 1
            var_name = "#rvar" + str(index)
            expression_attribute_names[var_name] = attr
            if index < len(delete_attr):
                remove_attr_expression = remove_attr_expression + var_name + ","
            else:
                remove_attr_expression = remove_attr_expression + var_name
        if len(delete_attr) > 0:
            update_expression = update_expression + " " + remove_attr_expression
        response = table.update_item(
            Key=self.get_key(partition_key, sort_key),
            ExpressionAttributeNames=expression_attribute_names,
            UpdateExpression=update_expression,
            ConditionExpression="attribute_exists(partition_key)"
            "and attribute_exists(sort_key)",
            ExpressionAttributeValues=expression_attribute_values,
        )
        return response
    except botocore.exceptions.ClientError as e:
        logger.info(
            "Dynamo: Error Update MultiAttribute boto3{} error{},{}for{},{}".format(
                attr_value_dict,
                e,
                e.response["Error"],
                partition_key,
                traceback.format_exc(),
            )
        )
    except Exception as e:
        logger.info(
            "Dynamo: Error Update MultiAttribute {} error {} for {}, {}".format(
                attr_value_dict, e, partition_key, traceback.format_exc()
            )
        )
    return None
Package versions used:
urllib3-1.26.8
botocore-1.27.77
boto3-1.24.77
I had put in a trace print and am thinking of upgrading the urllib3 version, but I'm not sure that would solve the problem. This error started appearing suddenly; the code had been working fine for a month after integration.
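The last frame of the inner traceback is the plain builtin call "if len(b) == 0 or len(b) > 255:" raising "'int' object is not callable", which can only happen if the name len resolves to an integer at that point, i.e. the builtin has been rebound somewhere in the process (for example a stray "len = len(items)" executed at module scope, or some instrumentation doing the same). That is a guess rather than a confirmed diagnosis, but the mechanism is easy to reproduce:

# Minimal sketch reproducing the mechanism (not the actual root cause): rebinding
# the built-in name `len` to an int makes every later len() call in the process,
# including the one inside ssl.set_alpn_protocols, raise this exact TypeError.
import builtins

original_len = builtins.len
try:
    builtins.len = 42              # simulate an accidental rebinding
    len(b"h2")                     # TypeError: 'int' object is not callable
except TypeError as exc:
    print(exc)
finally:
    builtins.len = original_len    # restore the real builtin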

Why am I getting "rospy.ServiceException"?

I have been working with ROS for about 3 months, on a robot controller.
I get an error in my UI from this client:
import sys
import rospy
from database_helper.srv import get_moves, get_movesRequest, get_movesResponse, get_move

def get_data(name: str):
    """This function can extract model names from DB"""
    try:
        rospy.init_node("get_moves_client")
    except:
        pass
    rospy.wait_for_service('get_moves')
    try:
        moves = rospy.ServiceProxy('get_moves', get_moves)
    except rospy.ServiceException as e:
        print(31)
        print(e)
        return
    id = 0
    try:
        for i in (moves(True).moves):
            if i.name == name:
                id = i.id
                # print(id)
                break
    except:
        print(43)
        return
    rospy.wait_for_service('get_move')
    move = rospy.ServiceProxy('get_move', get_move)
    wps = move(id).waypoints
    list_of_data = []
    try:
        for i in range(len(wps)):
            print(i)
            data = {}
            data['x_traj'] = wps[i].x
            data['y_traj'] = wps[i].y
            data['z_traj'] = wps[i].z
            data['time_tarj'] = wps[i].displacement_time_to_next_waypoint
            data['order_traj'] = wps[i].order
            data['pitch_traj'] = wps[i].pitch
            data['roll_traj'] = wps[i].roll
            data['yaw_traj'] = wps[i].yaw
            data['focus_camer'] = wps[i].camera_focus
            data['iris_camera'] = wps[i].camera_iris
            data['rail_direction'] = wps[i].rail_direction
            data['rail_speed'] = wps[i].rail_speed
            data['zoom_camera'] = wps[i].camera_zoom
            data['rail_time'] = wps[i].rail_time
            data['rail_displacement'] = wps[i].rail_displacement
            list_of_data.append(data)
    except rospy.ServiceException:
        pass
    print(list_of_data)
    return list_of_data
This client gets data from the DB, stores each waypoint in a dict, and collects all the dicts in a list.
I had to wrap it in try/except, and I see the number 43 printed, so I know the exception is raised in the for i in range(len(wps)): block.
The strange part is that I can run this script on its own and get an answer, but when I call it from my UI, after saving a waypoint and then trying to load a waypoint, I get the ServiceException!
my "add_move.py" code:
from typing import List, Dict
import rospy
from database_helper.srv import add_move
from database_helper.msg import move, waypoint

def _add_move(name: str, wp: List[Dict]):
    """This function sends an insert query to the DB"""
    rospy.init_node('add_move_client', anonymous=True)
    rospy.wait_for_service('add_move')
    _move = move()
    _move.name = name
    for i in range(len(wp)):
        _waypoint = waypoint()
        _waypoint.x = wp[i]['x_traj']
        _waypoint.y = wp[i]['y_traj']
        _waypoint.z = wp[i]['z_traj']
        _waypoint.displacement_time_to_next_waypoint = wp[i]['time_tarj']
        _waypoint.pitch = wp[i]['pich_traj']
        _waypoint.roll = wp[i]['roul_traj']
        _waypoint.yaw = wp[i]['ya_traj']
        _waypoint.camera_focus = wp[i]['focus_camer']
        _waypoint.camera_iris = wp[i]['iris_camera']
        _waypoint.camera_zoom = wp[i]['zoom_camera']
        _waypoint.rail_speed = wp[i]['speed_rail']
        _waypoint.rail_displacement = wp[i]['disp_or_time_rail']
        _waypoint.rail_direction = wp[i]['direction_rail']
        _move.waypoints.append(_waypoint)
    add = rospy.ServiceProxy('add_move', add_move)
    return add(_move).id

if __name__ == "__main__":
    from random import randint
    data = {'x_traj': 12, 'y_traj': 12, 'z_traj': 33, 'time_tarj': 11,
            'pich_traj': 13, 'roul_traj': 43, 'ya_traj': 21,
            'focus_camer': 11, 'iris_camera': 55, 'zoom_camera': 32,
            'disp_or_time_rail': 21, 'speed_rail': 109, 'direction_rail': 44,
            'joint1_slider': 12, 'joint2_slider': 666, 'joint3_slider': 567,
            'joint4_slider': 32, 'joint5_slider': 79, 'joint6_spin': 100
            }
    wp = []
    wp.append(data)
    print(_add_move("t1", wp))
My error in the terminal, without the try/except, is:
Traceback (most recent call last):
File "/opt/ros/noetic/lib/python3/dist-packages/rospy/msg.py", line 223, in deserialize_messages
msg_queue.append(data.deserialize(q))
File "/home/ajax/Documents/iotive/devel/lib/python3/dist-packages/database_helper/srv/_get_moves.py", line 247, in deserialize
val2 = database_helper.msg.waypoint()
File "/home/ajax/Documents/iotive/devel/lib/python3/dist-packages/database_helper/msg/_waypoint.py", line 95, in __init__
self.rail_displacement = 0
AttributeError: 'waypoint' object attribute 'rail_displacement' is read-only
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/opt/ros/noetic/lib/python3/dist-packages/rospy/impl/tcpros_base.py", line 735, in receive_once
p.read_messages(b, msg_queue, sock)
File "/opt/ros/noetic/lib/python3/dist-packages/rospy/impl/tcpros_service.py", line 361, in read_messages
rospy.msg.deserialize_messages(b, msg_queue, self.recv_data_class, queue_size=self.queue_size, max_msgs=1, start=1) #rospy.msg
File "/opt/ros/noetic/lib/python3/dist-packages/rospy/msg.py", line 245, in deserialize_messages
raise genpy.DeserializationError("cannot deserialize: %s"%str(e))
genpy.message.DeserializationError: cannot deserialize: 'waypoint' object attribute 'rail_displacement' is read-only
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/opt/ros/noetic/lib/python3/dist-packages/rospy/impl/tcpros_service.py", line 522, in call
responses = transport.receive_once()
File "/opt/ros/noetic/lib/python3/dist-packages/rospy/impl/tcpros_base.py", line 751, in receive_once
raise TransportException("receive_once[%s]: DeserializationError %s"%(self.name, str(e)))
rospy.exceptions.TransportException: receive_once[/get_moves]: DeserializationError cannot deserialize: 'waypoint' object attribute 'rail_displacement' is read-only
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "./benchmark_new_version_4_1.py", line 730, in get_data_query
self.wp_saver = get_data(response)
File "/home/ajax/Documents/iotive/src/gui/ui/scripts/get_moves_clinet.py", line 36, in get_data
for i in (moves(True).moves):
File "/opt/ros/noetic/lib/python3/dist-packages/rospy/impl/tcpros_service.py", line 442, in __call__
return self.call(*args, **kwds)
File "/opt/ros/noetic/lib/python3/dist-packages/rospy/impl/tcpros_service.py", line 532, in call
raise ServiceException("transport error completing service call: %s"%(str(e)))
rospy.service.ServiceException: transport error completing service call: receive_once[/get_moves]: DeserializationError cannot deserialize: 'waypoint' object attribute 'rail_displacement' is read-only
What am I doing wrong?
srv/get_move:
int32 id
---
waypoint[] waypoints
srv/get_moves:
bool add_waypoints
---
move[] moves
msg/move:
int32 id
string name
waypoint[] waypoints
msg/waypoint:
int32 id
int32 order
float64 x
float64 y
float64 z
float64 roll
float64 pitch
float64 yaw
int32 camera_focus
int32 camera_iris
int32 camera_zoom
int32 rail_displacement
int32 rail_time
int32 rail_speed
bool rail_direction
int32 displacement_time_to_next_waypoint
So it turns out we need to delete the service proxy variables once we no longer need them,
like this:
rospy.wait_for_service('get_moves')
moves = rospy.ServiceProxy('get_moves', get_moves)
id = 0
for i in (moves(True).moves):
    if i.name == name:
        id = i.id
        # print(id)
        break
rospy.wait_for_service('get_move')
move = rospy.ServiceProxy('get_move', get_move)
wps = move(id).waypoints
list_of_data = []
for i in range(len(wps)):
    print(i)
    data = {}
    data['order_traj'] = wps[i].order
    data['x_traj'] = wps[i].x
    data['y_traj'] = wps[i].y
    data['z_traj'] = wps[i].z
    data['time_tarj'] = wps[i].displacement_time_to_next_waypoint
    data['pitch_traj'] = wps[i].pitch
    data['roll_traj'] = wps[i].roll
    data['yaw_traj'] = wps[i].yaw
    data['focus_camer'] = wps[i].camera_focus
    data['iris_camera'] = wps[i].camera_iris
    data['rail_direction'] = wps[i].rail_direction
    data['rail_speed'] = wps[i].rail_speed
    data['zoom_camera'] = wps[i].camera_zoom
    data['rail_time'] = wps[i].rail_time
    data['rail_displacement'] = wps[i].rail_displacement
    list_of_data.append(data)
del move
del moves
print(list_of_data)
return list_of_data
I discovered this last night!
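For what it's worth, a variant of the same cleanup that releases the proxy even when the service call raises (a sketch using the names from the question, not tested against this setup) is to close it in a finally block instead of relying on del at the end:

# Sketch: same lookup as in get_data, but the proxy is always released.
import rospy
from database_helper.srv import get_moves

def get_move_id(name: str) -> int:
    rospy.wait_for_service('get_moves')
    moves = rospy.ServiceProxy('get_moves', get_moves)
    try:
        for m in moves(True).moves:
            if m.name == name:
                return m.id
        return 0
    finally:
        moves.close()   # drop the proxy's transport even if deserialization fails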

Spark streaming Twitter API

I start the following .py file, which creates a socket and waits for a connection:
import json
import socket
import sys

import requests

def get_tweets():
    url = 'https://stream.twitter.com/1.1/statuses/filter.json'
    query_data = [('language', 'en'), ('locations', '-130,-20,100,50'), ('track', '#')]
    query_url = url + '?' + '&'.join([str(t[0]) + '=' + str(t[1]) for t in query_data])
    response = requests.get(query_url, auth=my_auth, stream=True)
    print(query_url, response)
    return response

def send_tweets_to_spark(http_resp, tcp_connection):
    for line in http_resp.iter_lines():
        try:
            full_tweet = json.loads(line)
            tweet_text = full_tweet['text']
            print("Tweet Text: " + tweet_text)
            print("------------------------------------------")
            tcp_connection.send(tweet_text + '\n')
        except:
            e = sys.exc_info()[0]
            print("Error: %s" % e)

TCP_IP = '127.0.0.1'
TCP_PORT = 9009
conn = None
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)
print("Waiting for TCP connection...")
conn, addr = s.accept()
print("Connected... Starting getting tweets.")
resp = get_tweets()
print(resp, conn)
send_tweets_to_spark(resp, conn)
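One thing that may be worth checking, assuming this runs under Python 3: socket.send expects bytes, so tcp_connection.send(tweet_text + '\n') raises a TypeError that the bare except silently prints and swallows, which would leave the Spark socket stream with nothing to read. A sketch of the sender loop with the payload encoded:

# Hypothetical fix for Python 3 sockets; everything else matches the loop above.
def send_tweets_to_spark(http_resp, tcp_connection):
    for line in http_resp.iter_lines():
        try:
            full_tweet = json.loads(line)
            tweet_text = full_tweet['text']
            print("Tweet Text: " + tweet_text)
            print("------------------------------------------")
            # sockets take bytes on Python 3, so encode before sending
            tcp_connection.send((tweet_text + '\n').encode('utf-8'))
        except Exception as e:
            print("Error: %s" % e)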
Then I run spark-submit on the following Spark streaming script, which should count the tweets every 2 seconds:
from pyspark import SparkConf, SparkContext
from pyspark.sql import Row, SQLContext
from pyspark.streaming import StreamingContext

def aggregate_tags_count(new_values, total_sum):
    return sum(new_values) + (total_sum or 0)

def get_sql_context_instance(spark_context):
    if ('sqlContextSingletonInstance' not in globals()):
        globals()['sqlContextSingletonInstance'] = SQLContext(spark_context)
    return globals()['sqlContextSingletonInstance']

def process_rdd(time, rdd):
    print("----------- %s -----------" % str(time))
    sql_context = get_sql_context_instance(rdd.context)
    row_rdd = rdd.map(lambda w: Row(hashtag=w[0], hashtag_count=w[1]))
    hashtags_df = sql_context.createDataFrame(row_rdd)
    hashtags_df.registerTempTable("hashtags")
    hashtag_counts_df = sql_context.sql("select hashtag, hashtag_count from hashtags order by hashtag_count desc limit 10")
    hashtag_counts_df.show()
    send_df_to_dashboard(hashtag_counts_df)

conf = SparkConf()
conf.setAppName("TwitterStreamApp")
sc = SparkContext(conf=conf)
sc.setLogLevel("ERROR")
ssc = StreamingContext(sc, 2)
ssc.checkpoint("checkpoint_TwitterApp")
dataStream = ssc.socketTextStream("127.0.0.1", 9009)
words = dataStream.flatMap(lambda line: line.split(" "))
hashtags = words.filter(lambda w: '#' in w).map(lambda x: (x, 1))
tags_totals = hashtags.updateStateByKey(aggregate_tags_count)
tags_totals.foreachRDD(process_rdd)
ssc.start()
ssc.awaitTermination()
The application starts, and I can see it running in the web UI. My problem is that when I run the Spark app, it connects to the first script, which sends the tweets, but the streaming job fails on an empty RDD. The error is below:
20/12/30 08:53:56 INFO StandaloneAppClient$ClientEndpoint: Executor updated: app-20201230085356-0012/0 is now RUNNING
20/12/30 08:53:57 INFO StandaloneSchedulerBackend: SchedulerBackend is ready for scheduling beginning after reached minRegisteredResourcesRatio: 0.0
----------- 2020-12-30 08:54:20 -----------
20/12/30 08:54:24 ERROR JobScheduler: Error running job streaming job 1609318460000 ms.0
org.apache.spark.SparkException: An exception was raised by Python:
Traceback (most recent call last):
File "/opt/spark/python/lib/pyspark.zip/pyspark/streaming/util.py", line 68, in call
r = self.func(t, *rdds)
File "/home/ubuntu/market_risk/utils/spark_twitter_count.py", line 26, in process_rdd
hashtags_df = sql_context.createDataFrame(row_rdd)
File "/opt/spark/python/lib/pyspark.zip/pyspark/sql/context.py", line 320, in createDataFrame
return self.sparkSession.createDataFrame(data, schema, samplingRatio, verifySchema)
File "/opt/spark/python/lib/pyspark.zip/pyspark/sql/session.py", line 605, in createDataFrame
return self._create_dataframe(data, schema, samplingRatio, verifySchema)
File "/opt/spark/python/lib/pyspark.zip/pyspark/sql/session.py", line 628, in _create_dataframe
rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
File "/opt/spark/python/lib/pyspark.zip/pyspark/sql/session.py", line 425, in _createFromRDD
struct = self._inferSchema(rdd, samplingRatio, names=schema)
File "/opt/spark/python/lib/pyspark.zip/pyspark/sql/session.py", line 396, in _inferSchema
first = rdd.first()
File "/opt/spark/python/lib/pyspark.zip/pyspark/rdd.py", line 1467, in first
raise ValueError("RDD is empty")
ValueError: RDD is empty
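Independently of why the batches are empty, process_rdd can be made tolerant of batches that contain no tweets yet. A minimal sketch, assuming it is acceptable to simply skip those batches (it reuses Row, get_sql_context_instance, and send_df_to_dashboard from the script above):

def process_rdd(time, rdd):
    print("----------- %s -----------" % str(time))
    if rdd.isEmpty():        # nothing arrived on the socket during this 2s batch
        return
    try:
        sql_context = get_sql_context_instance(rdd.context)
        row_rdd = rdd.map(lambda w: Row(hashtag=w[0], hashtag_count=w[1]))
        hashtags_df = sql_context.createDataFrame(row_rdd)
        hashtags_df.registerTempTable("hashtags")
        hashtag_counts_df = sql_context.sql(
            "select hashtag, hashtag_count from hashtags "
            "order by hashtag_count desc limit 10")
        hashtag_counts_df.show()
        send_df_to_dashboard(hashtag_counts_df)
    except ValueError:
        # the RDD became empty between the check and createDataFrame's first()
        pass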

IMAP4LIB When using the store command I get the error "BAD [b'Could not parse command']"

I am new to all of this, so I'm sorry if I mess this up or have already made a mess. I have two classes, a GUI class and my MailSorter class. In the GUI class I have a method that logs in, one that fetches all the email IDs, and finally one that fetches all the From addresses and stores them in a dict (mapping each From address to the number of times it appears) and in an array of (From address, ID) pairs.
def fetchFrom(self, emailIDs):
    EmailAmount = dict()
    Email = []
    count = 0
    for emailId in emailIDs:
        # Converts email into string
        result2, email_data = self.mail.fetch(emailId, '(RFC822)')
        try:
            raw_email = email_data[0][1].decode("utf-8")
            email_message = email.message_from_string(raw_email)
            # Fetches email address sent from
            From = email_message["From"]
            Email.append((From, emailId))
            # print(From)
            if From in EmailAmount:
                EmailAmount[From] = EmailAmount[From] + 1
            else:
                EmailAmount[From] = 1
            count += 1
            if count > 10:
                break
        except Exception as e:
            self.log.append((emailId, e))
    # return both structures so the caller can unpack them
    return EmailAmount, Email
def mainScreenInterface(self):
    # Process
    print("Loading program")
    EmailIds = self.Mail.fetchEmailId()
    EmailDict, self.EmailArray = self.Mail.fetchFrom(EmailIds)
    self.master.geometry("750x600")
    self.master.title("Main Screen")
    self.destoryWidget()
    # New Frame
    self.mainScreen = tk.Frame(self.master)
    self.mainScreen.pack()
    # Labels
    mainText = tk.Label(self.mainScreen, text="All Emails")
    mainText.config(font=("Courier", "25"))
    # Buttons
    delete = tk.Button(self.mainScreen, text="Delete", command=self.Delete)
    deleteAll = tk.Button(self.mainScreen, text="Delete All", command=self.DeleteAll)
    Help = tk.Button(self.mainScreen, text="Help", command=self.Help_)
    # Scrollbar
    scrollbar = tk.Scrollbar(root)
    scrollbar.pack(side="right", fill="y")
    # Listbox
    self.listbox = tk.Listbox(root, width=root.winfo_screenwidth(), height=25)
    # Attach a scroll wheel to the listbox
    self.listbox.config(yscrollcommand=scrollbar.set)
    scrollbar.config(command=self.listbox.yview)
    # Add items to the list box
    count = 1
    for x, y in EmailDict.items():
        self.listbox.insert(count, (x, y))
        count += 1
    # Placement
    paddingValue = 40
    mainText.pack(side="top")
    self.listbox.pack(side="top")
    delete.pack(side="left", padx=paddingValue)
    deleteAll.pack(side="left", padx=paddingValue)
    Help.pack(side="left", padx=paddingValue)

def Delete(self):
    emailName = self.listbox.get(tk.ANCHOR)[0]
    self.Mail.deleteEmail(emailName, self.EmailArray)
So fetchFrom is from the MailSorter class and the other two are in the GUI class. When I call deleteEmail I get the error:
Exception in Tkinter callback
Traceback (most recent call last):
File "C:\Python\lib\tkinter\__init__.py", line 1705, in __call__
return self.func(*args)
File "C:\Users\******\Desktop\Email Sorter v3.py", line 197, in Delete
self.Mail.deleteEmail(emailName,self.EmailArray)
File "C:\Users\******\Desktop\Email Sorter v3.py", line 66, in deleteEmail
self.mail.store(Id[1].strip(), '+X-GM-tk.LabelS', '\\Trash')
File "C:\Python\lib\imaplib.py", line 840, in store
typ, dat = self._simple_command('STORE', message_set, command, flags)
File "C:\Python\lib\imaplib.py", line 1196, in _simple_command
return self._command_complete(name, self._command(name, *args))
File "C:\Python\lib\imaplib.py", line 1027, in _command_complete
raise self.error('%s command error: %s %s' % (name, typ, data))
imaplib.IMAP4.error: STORE command error: BAD [b'Could not parse command']
but when I run it as a text-based test with no GUI, using an example email, it all works fine:
test = MailSorter("hamadnassor5@gmail.com", "snerfulbubble1.")
test.login()
EmailIds = test.fetchEmailId()
EmailDict, EmailArray = test.fetchFrom(EmailIds)
test.displayEmails(EmailDict)
test.deleteEmail("Xbox <Xbox@outlook.com>", EmailArray)
test.closeCon()
deleteEmail code:
def deleteEmail(self, emailName, EmailArray):
    for Id in EmailArray:
        if Id[0] == emailName:
            print(Id[0])
            print(emailName)
            print(Id[1])
            self.mail.store(Id[1].strip(), '+X-GM-tk.LabelS', '\\Trash')
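Two hedged guesses rather than a confirmed diagnosis: the flag string '+X-GM-tk.LabelS' looks like an editor find-and-replace mangled Gmail's X-GM-LABELS extension name, and the id that comes back out of the Tkinter listbox may no longer be the raw id string/bytes that imaplib expects; either could make the server reject the STORE as unparsable. A sketch of the call under those assumptions:

def deleteEmail(self, emailName, EmailArray):
    # Sketch only: assumes Gmail's X-GM-LABELS extension and ids stored as bytes or str.
    for Id in EmailArray:
        if Id[0] == emailName:
            msg_id = Id[1].decode() if isinstance(Id[1], bytes) else str(Id[1])
            self.mail.store(msg_id.strip(), '+X-GM-LABELS', '\\Trash')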

How to resolve the error "IndexError: list index out of range"?

I'm using Odoo 11 and I have installed the OpenHRMS module, but every time I click the dashboard menu I get an error in this method:
IndexError: list index out of range
PS: I tried installing this module on a Bitnami VM and it works without any problem, but when I installed it on Ubuntu 18.04 it shows the error.
Any ideas on how to fix it?
def join_resign_trends(self):
    cr = self._cr
    month_list = []
    join_trend = []
    resign_trend = []
    for i in range(11, -1, -1):
        last_month = datetime.now() - relativedelta(months=i)
        text = format(last_month, '%B %Y')
        month_list.append(text)
    for month in month_list:
        vals = {
            'l_month': month,
            'count': 0
        }
        join_trend.append(vals)
    for month in month_list:
        vals = {
            'l_month': month,
            'count': 0
        }
        resign_trend.append(vals)
    cr.execute('''select to_char(joining_date, 'Month YYYY') as l_month, count(id) from hr_employee
                  WHERE joining_date BETWEEN CURRENT_DATE - INTERVAL '12 months'
                  AND CURRENT_DATE + interval '1 month - 1 day'
                  group by l_month;''')
    join_data = cr.fetchall()
    cr.execute('''select to_char(resign_date, 'Month YYYY') as l_month, count(id) from hr_employee
                  WHERE resign_date BETWEEN CURRENT_DATE - INTERVAL '12 months'
                  AND CURRENT_DATE + interval '1 month - 1 day'
                  group by l_month;''')
    resign_data = cr.fetchall()
    for line in join_data:
        match = list(filter(lambda d: d['l_month'].replace(' ', '') == line[0].replace(' ', ''), join_trend))
        match[0]['count'] = line[1]
    for line in resign_data:
        match = list(filter(lambda d: d['l_month'].replace(' ', '') == line[0].replace(' ', ''), resign_trend))
        match[0]['count'] = line[1]
    for join in join_trend:
        join['l_month'] = join['l_month'].split(' ')[:1][0].strip()[:3]
    for resign in resign_trend:
        resign['l_month'] = resign['l_month'].split(' ')[:1][0].strip()[:3]
    graph_result = [{
        'name': 'Join',
        'values': join_trend
    }, {
        'name': 'Resign',
        'values': resign_trend
    }]
    return graph_result

def get_attrition_rate(self):
    month_attrition = []
    monthly_join_resign = self.join_resign_trends()
    month_join = monthly_join_resign[0]['values']
    month_resign = monthly_join_resign[1]['values']
    sql = """
        SELECT (date_trunc('month', CURRENT_DATE))::date - interval '1' month * s.a AS month_start
        FROM generate_series(0,11,1) AS s(a);"""
    self._cr.execute(sql)
    month_start_list = self._cr.fetchall()
    for month_date in month_start_list:
        self._cr.execute("""select count(id), to_char(date '%s', 'Month YYYY') as l_month from hr_employee
                            where resign_date> date '%s' or resign_date is null and joining_date < date '%s'
                            """ % (month_date[0], month_date[0], month_date[0],))
        month_emp = self._cr.fetchone()
        # month_emp = (month_emp[0], month_emp[1].split(' ')[:1][0].strip()[:3])
        match_join = list(filter(lambda d: d['l_month'] == month_emp[1].split(' ')[:1][0].strip()[:3], month_join))[0]['count']
        match_resign = list(filter(lambda d: d['l_month'] == month_emp[1].split(' ')[:1][0].strip()[:3], month_resign))[0]['count']
        month_avg = (month_emp[0]+match_join-match_resign+month_emp[0])/2
        attrition_rate = (match_resign/month_avg)*100 if month_avg != 0 else 0
        vals = {
            # 'month': month_emp[1].split(' ')[:1][0].strip()[:3] + ' ' + month_emp[1].split(' ')[-1:][0],
            'month': month_emp[1].split(' ')[:1][0].strip()[:3],
            'attrition_rate': round(float(attrition_rate), 2)
        }
        month_attrition.append(vals)
    return month_attrition
Traceback:
Traceback (most recent call last):
File "/opt/openhrms/odoo/http.py", line 651, in _handle_exception
return super(JsonRequest, self)._handle_exception(exception)
File "/opt/openhrms/odoo/http.py", line 310, in _handle_exception
raise pycompat.reraise(type(exception), exception, sys.exc_info()[2])
File "/opt/openhrms/odoo/tools/pycompat.py", line 87, in reraise
raise value
File "/opt/openhrms/odoo/http.py", line 693, in dispatch
result = self._call_function(**self.params)
File "/opt/openhrms/odoo/http.py", line 342, in _call_function
return checked_call(self.db, *args, **kwargs)
File "/opt/openhrms/odoo/service/model.py", line 97, in wrapper
return f(dbname, *args, **kwargs)
File "/opt/openhrms/odoo/http.py", line 335, in checked_call
result = self.endpoint(*a, **kw)
File "/opt/openhrms/odoo/http.py", line 937, in __call__
return self.method(*args, **kw)
File "/opt/openhrms/odoo/http.py", line 515, in response_wrap
response = f(*args, **kw)
File "/opt/openhrms/addons/web/controllers/main.py", line 934, in call_kw
return self._call_kw(model, method, args, kwargs)
File "/opt/openhrms/addons/web/controllers/main.py", line 926, in _call_kw
return call_kw(request.env[model], method, args, kwargs)
File "/opt/openhrms/odoo/api.py", line 687, in call_kw
return call_kw_model(method, model, args, kwargs)
File "/opt/openhrms/odoo/api.py", line 672, in call_kw_model
result = method(recs, *args, **kwargs)
File "/opt/openhrms/openhrms/hrms_dashboard/models/hrms_dashboard.py", line 384, in get_attrition_rate
monthly_join_resign = self.join_resign_trends()
File "/opt/openhrms/openhrms/hrms_dashboard/models/hrms_dashboard.py", line 364, in join_resign_trends
match[0]['count'] = line[1]
IndexError: list index out of range
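The failing line is match[0]['count'] = line[1] in join_resign_trends, which raises IndexError whenever the filter finds no entry in join_trend/resign_trend whose label matches the 'Month YYYY' string coming back from PostgreSQL (for example when the two systems format or localize the month names differently, which could explain why the Bitnami VM works and the Ubuntu install does not). A minimal defensive sketch, assuming it is acceptable to skip unmatched months rather than crash:

# Hypothetical guard around the assignment that the traceback points at.
for line in join_data:
    match = list(filter(
        lambda d: d['l_month'].replace(' ', '') == line[0].replace(' ', ''),
        join_trend))
    if match:                      # only update when a matching month label exists
        match[0]['count'] = line[1]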
