IndexError: array index out of range - python-3.x

def __init__(self):
    super().__init__('object_tracking')
    # Declare ROS parameters
    self.declare_parameters(namespace='',
                            parameters=[('qos_length',0),
                                        ('topic.untracked_obj',''),
                                        ('topic.rgb_image',''),
                                        ('topic.tracked_obj',''),
                                        ('obj_class.id',[]),
                                        ('obj_class.name',[]),
                                        ('display',True),
                                        ('frame_id.tracked_obj','')])
    self.nodeParams()
    qos_length = self.get_parameter('qos_length').get_parameter_value().integer_value
    qos_profile = QoSProfile(depth=qos_length,
                             history=QoSHistoryPolicy.KEEP_LAST,
                             reliability=QoSReliabilityPolicy.RELIABLE)
    # Load cv_bridge
    self.bridge = CvBridge()
    # Create instance of SORT
    self.mot_tracker = Sort()
    # Create Subscribers
    obj_topic = self.get_parameter('topic.untracked_obj').get_parameter_value().string_value
    self.obj_sub = mf.Subscriber(self,ObjectArray,obj_topic,qos_profile=qos_profile)
    rgb_topic = self.get_parameter('topic.rgb_image').get_parameter_value().string_value
    self.rgb_sub = mf.Subscriber(self,Image,rgb_topic,qos_profile=qos_profile)
    # Apply message filter
    self.timestamp_sync = mf.TimeSynchronizer([self.obj_sub,self.rgb_sub],queue_size=qos_length)
    self.timestamp_sync.registerCallback(self.objCallback)
    # Create Publishers
    obj_topic = self.get_parameter('topic.tracked_obj').get_parameter_value().string_value
    self.obj_pub = self.create_publisher(ObjectArray,obj_topic,qos_profile)

def nodeParams(self):
    #print('1')
    self.display = self.get_parameter('display').get_parameter_value().bool_value
    class_id = self.get_parameter('obj_class.id').get_parameter_value().integer_array_value
    #print(class_id)
    class_name = self.get_parameter('obj_class.name').get_parameter_value().integer_array_value
    #print(class_name)
    self.class_dict = {}
    #for name in class_name:
    '''#for i,id_ in enumerate(class_id):
    #print('2')
    #self.class_dict = class_name [name]
    #print('3')'''
    for i,id_ in enumerate(class_id):
        self.class_dict[int(id_)] = class_name[i]
I'm not sure what's going on. I'd like to try object tracking in CARLA 0.9.13 with ROS 2 Foxy in Python 3.8. Could you please help me? This is the error output:
[object_tracking.py-3] self.nodeParams()
[object_tracking.py-3] File "/home/smit/ros2_ws/install/carla_simulation/lib/carla_simulation/object_tracking.py", line 64, in nodeParams
[object_tracking.py-3] self.class_dict[int(id_)] = class_name[i]
[object_tracking.py-3] IndexError: array index out of range
[ERROR] [object_tracking.py-3]: process has died [pid 623526, exit code 1, cmd '/home/smit/ros2_ws/install/carla_simulation/lib/carla_simulation/object_tracking.py --ros-args --params-file /home/smit/ros2_ws/install/carla_simulation/share/carla_simulation/config/params.yaml'].
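One pattern that can produce this IndexError (a guess based on the traceback, not a confirmed diagnosis) is the two parameter arrays ending up with different lengths, for example if obj_class.name is read with the wrong accessor or has fewer entries than obj_class.id in params.yaml. A minimal defensive sketch reusing the names from the question:
# Hedged sketch: check that both parameter arrays line up before building the dict.
# Note: class names are strings, so string_array_value is presumably the intended
# accessor here (the question reads them with integer_array_value).
class_id = self.get_parameter('obj_class.id').get_parameter_value().integer_array_value
class_name = self.get_parameter('obj_class.name').get_parameter_value().string_array_value
if len(class_id) != len(class_name):
    raise ValueError('obj_class.id has {} entries but obj_class.name has {}'.format(
        len(class_id), len(class_name)))
self.class_dict = {int(id_): name for id_, name in zip(class_id, class_name)}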

You are probably using the returned hierarchy variable wrong.
According to the specification:
In Python, hierarchy is nested inside a top level array. Use hierarchy[0][i] to access the hierarchical elements of the i-th contour.
https://docs.opencv.org/4.x/d3/dc0/group__imgproc__shape.html#gadf1ad6a0b82947fa1fe3c3d497f260e0
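To illustrate what the linked specification means (a generic OpenCV example, not code from the question): findContours wraps the hierarchy in an extra outer array, so the record for contour i lives at hierarchy[0][i].
import cv2
import numpy as np

# Minimal, self-contained illustration of the hierarchy[0][i] indexing (OpenCV 4.x).
img = np.zeros((100, 100), dtype=np.uint8)
cv2.rectangle(img, (20, 20), (80, 80), 255, -1)
contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for i in range(len(contours)):
    # hierarchy has shape (1, N, 4): [next, previous, first_child, parent]
    nxt, prev, first_child, parent = hierarchy[0][i]
    print(i, nxt, prev, first_child, parent)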

Related

Running MPI python script in MPI azure ml pipeline

I'm trying to run a distributed Python job through Azure ML pipelines using the MpiStep pipeline class, referring to the example below - https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer.ipynb
I tried implementing the same, but even when I change the node count parameter in the MpiStep class, the script always reports the size (i.e. comm.Get_size()) as 1. Can you please help me with what I'm missing here? Is there any specific setup required on the cluster?
Code snippets:
Pipeline code snippet:
model_dir = model_ds.path('./'+saved_model_blob+'/',data_reference_name='saved_model_path').as_mount()
label_dir = model_ds.path('./'+model_label_blob+'/',data_reference_name='model_label_blob').as_mount()
input_images = result_ds.path('./'+score_blob_name+'/',data_reference_name='Input_images').as_mount()
output_container = 'abc'
inti_container = 'xyz'
distributed_batch_score_step = MpiStep(
    name="batch_scoring",
    source_directory=SCRIPT_FOLDER,
    script_name="batch_scoring_script_mpi.py",
    arguments=["--dataset_path", input_images,
               "--model_name", model_dir,
               "--label_dir", label_dir,
               "--intermediate_data_container", inti_container,
               "--output_container", output_container],
    compute_target=gpu_cluster,
    inputs=[input_images, model_dir, label_dir],
    pip_packages=["tensorflow","tensorflow-gpu==1.13.1","pillow","azure-keyvault","azure-storage-blob"],
    conda_packages=["mesa-libgl-cos6-x86_64","mpi4py==3.0.2","opencv=3.4.2","scikit-learn=0.21.2"],
    use_gpu=True,
    allow_reuse=False,
    node_count=nodecount_param,
    process_count_per_node=1
)
Python Script code snippet:
def run(input_dataset, comm):
    rank = comm.Get_rank()
    size = comm.Get_size()
    print("Rank:", rank)
    print("Size:", size)  # shows always 1, even the input node count is >1
    print(MPI.Get_processor_name())
    file_names = get_file_names(args.dataset_path)
    sorted(file_names)
    partition_size = len(file_names) // size
    print("partition_size-->", partition_size)
    partitioned_filenames = file_names[rank * partition_size: (rank + 1) * partition_size]
    print("RANK {} - is processing {} images out of the total {}".format(rank, len(partitioned_filenames),
                                                                         len(file_names)))
    # call to Function 01
    # call to Function 02
    img_names = score_df['image_name'].unique()
    output_batch = pd.DataFrame()
    for i in img_names:
        # call to Function 3
        output_batch = output_batch.append(pp_output, ignore_index=True)
    output_paths_list = comm.gather(output_batch, root=0)
    print("RANK {} - number of pre-aggregated output files {}".format(rank, len(output_batch)))
    print("saved in", currentDT + '\\' + 'data.csv')
    if rank == 0:
        print("RANK {} - number of aggregated output files {}".format(rank, len(output_paths_list)))
        print("RANK {} - end".format(rank))

if __name__ == "__main__":
    with tf.device('/GPU:0'):
        init()
        comm = MPI.COMM_WORLD
        run(args.dataset_path, comm)
Got to know that the issue was due to the package version: earlier it was installed via conda with conda_packages=["mpi4py==3.0.2"]; it worked after changing the install to pip - pip_packages=["mpi4py"].
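For illustration, a minimal sketch of that change against the MpiStep call from the question (the name, script, arguments, compute target and inputs are elided here; everything else matches the call above):
distributed_batch_score_step = MpiStep(
    # ... same name, script, arguments, compute_target and inputs as above ...
    pip_packages=["tensorflow", "tensorflow-gpu==1.13.1", "pillow",
                  "azure-keyvault", "azure-storage-blob",
                  "mpi4py"],  # mpi4py now installed via pip
    conda_packages=["mesa-libgl-cos6-x86_64", "opencv=3.4.2",
                    "scikit-learn=0.21.2"],  # mpi4py removed from conda
    use_gpu=True,
    allow_reuse=False,
    node_count=nodecount_param,
    process_count_per_node=1
)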

XGetWindowProperty and ctypes

Question
I'm trying to find the _NET_WM_NAME property for each of the windows/clients that X11 reports. The problem is that nothing is returned - the number of items is 0 and the returned data results in an empty string. I've looked at multiple code examples throughout GitHub and examples written in C and C++, specifically Why is XGetWindowProperty returning null? as well as Xlib XGetWindowProperty Zero items returned, however I cannot find the problem with my code. Seemingly everything is fine: the order of parameters passed to the XGetWindowProperty function is in accordance with the documentation, and the function returns a success status, but the results are empty. Where is the problem with my code?
Code
Below is the code I am working with. The issue is in the xgetwindowproperty function. The other parts below it work fine and are provided only for completeness.
#! /usr/bin/env python3
import sys
from ctypes import *

def xgetwindowproperty(display, w):
    actual_type_return = c_ulong()
    actual_format_return = c_int()
    nitems_return = c_ulong()
    bytes_after_return = c_ulong()
    prop_return = POINTER(c_ubyte)()
    wm_name = Xlib.XInternAtom(display,'_NET_WM_NAME',False)
    utf8atom = Xlib.XInternAtom(display,'UTF8_STRING',False)
    print('_NET_WM_NAME',wm_name, 'UTF8_STRING',utf8atom)
    # AnyPropertyType = c_long(0)
    status = Xlib.XGetWindowProperty(
        display,
        w,
        wm_name,
        0,
        65536,
        False,
        utf8atom,
        byref(actual_type_return),
        byref(actual_format_return),
        byref(nitems_return),
        byref(bytes_after_return),
        byref(prop_return)
    )
    print(nitems_return.value) # returns 0
    # empty string as result
    print( 'Prop', ''.join([ chr(c) for c in prop_return[:bytes_after_return.value] ]) )
    Xlib.XFree(prop_return)
    print('#'*10)
# -------
Xlib = CDLL("libX11.so.6")
display = Xlib.XOpenDisplay(None)
if display == 0:
sys.exit(2)
w = Xlib.XRootWindow(display, c_int(0))
root = c_ulong()
children = POINTER(c_ulong)()
parent = c_ulong()
nchildren = c_uint()
Xlib.XQueryTree(display, w, byref(root), byref(parent), byref(children), byref(nchildren))
for i in range(nchildren.value):
    print("Child:", children[i])
    xgetwindowproperty(display, children[i])
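No answer is recorded here, but one thing worth checking in a ctypes + Xlib setup like this (an assumption, not a confirmed diagnosis of this question) is that every Xlib function gets explicit restype/argtypes. By default ctypes treats return values as a C int, which on 64-bit systems truncates the Display* pointer, and with c_char_p arguments the atom names must be passed as bytes. A minimal sketch of such declarations:
from ctypes import *

# Hedged sketch: explicit ctypes signatures for the Xlib calls used above.
Xlib = CDLL("libX11.so.6")
Xlib.XOpenDisplay.restype = c_void_p               # Display*, not a truncated int
Xlib.XOpenDisplay.argtypes = [c_char_p]
Xlib.XInternAtom.restype = c_ulong                 # Atom
Xlib.XInternAtom.argtypes = [c_void_p, c_char_p, c_int]
Xlib.XGetWindowProperty.restype = c_int
Xlib.XGetWindowProperty.argtypes = [
    c_void_p, c_ulong, c_ulong, c_long, c_long, c_int, c_ulong,
    POINTER(c_ulong), POINTER(c_int), POINTER(c_ulong),
    POINTER(c_ulong), POINTER(POINTER(c_ubyte))]

display = Xlib.XOpenDisplay(None)
wm_name = Xlib.XInternAtom(display, b'_NET_WM_NAME', False)   # note: bytes, not str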

PaddingFIFOQueue.enqueue_many throwing Value Error: Shapes must be equal rank

I have an RNN model to which I'm trying to feed varying-length input sequences through input pipelines, randomly sampling from multiple TFRecord files containing serialized SequenceExamples, with batch padding and shuffling across multiple batches.
Each sequence example has 3 elements - length: constant, input: 1-D array, labels: 1-D array.
The procedure is as follows:
def read_file_queue(self, filename_queue):
    reader = tf.TFRecordReader()
    key, ex = reader.read(filename_queue)
    context_features = {
        "seq-len": tf.FixedLenFeature([], dtype=tf.int64)
    }
    sequence_features = {
        "tokens": tf.FixedLenSequenceFeature([], dtype=tf.int64),
        "labels": tf.FixedLenSequenceFeature([], dtype=tf.int64)
    }
    context_parsed, sequence_parsed = tf.parse_single_sequence_example(serialized=ex,
                                                                       context_features=context_features,
                                                                       sequence_features=sequence_features)
    return context_parsed["seq-len"], sequence_parsed["tokens"], sequence_parsed["labels"]

def get_batch_data(self):
    fqueue = tf.train.string_input_producer(self.data_filelist,
                                            shuffle=True,
                                            num_epochs=self.num_epochs)
    # read from multiple tf records as defined by read_threads
    ex = [self.read_file_fmt(fqueue) for _ in range(self.read_threads)]
    print(ex)
    # ex = self.read_file_fmt(fqueue)
    pad_output = self.padding_pipeline(ex)
    shuffle_output = self.shuffle_pipeline(pad_output)
    return shuffle_output

def padding_pipeline(self, input):
    padding_queue = tf.PaddingFIFOQueue(
        capacity=self.pad_capacity,
        dtypes=[tf.int64, tf.int64, tf.int64],
        shapes=[[], [None], [None]])
    # use enqueue_many instead enqueue because
    # the input is list of tuples from each tf record reader thread
    padding_enqueue_op = padding_queue.enqueue_many(input)  # <<< !!!!! error here !!!!!
    padding_queue_runner = tf.train.QueueRunner(padding_queue, [padding_enqueue_op] * self.pad_threads)
    tf.train.add_queue_runner(padding_queue_runner)
    padding_dequeue_op = padding_queue.dequeue_up_to(self.batch_size)
    return padding_dequeue_op

def shuffle_pipeline(self, input):
    shuffle_queue = tf.RandomShuffleQueue(
        capacity=self.shuffle_capacity,
        min_after_dequeue=self.shuffle_min_after_dequeue,
        dtypes=[tf.int64, tf.int64, tf.int64],
        shapes=None)
    shuffle_enqueue_op = shuffle_queue.enqueue(input)
    shuffle_queue_runner = tf.train.QueueRunner(
        shuffle_queue, [shuffle_enqueue_op] * self.shuffle_threads)
    tf.train.add_queue_runner(shuffle_queue_runner)
    shuffle_dequeue_op = shuffle_queue.dequeue()
    return shuffle_dequeue_op
For which I'm getting the following error:
ValueError: Shapes must be equal rank, but are 0 and 1
From merging shape 0 with other shapes. for 'padding_fifo_queue_EnqueueMany/component_0' (op: 'Pack') with input shapes: [], [?], [?].
I'm sure I'm doing something silly here; however, I could not find what it is that I'm doing wrong.
Taking a hint from here, maybe you should have the following?
padding_queue = tf.PaddingFIFOQueue(
    capacity=self.pad_capacity,
    dtypes=[tf.int64, tf.int64, tf.int64],
    shapes=[None, [None], [None]])
By the way, if you could add a basic script for generating random data in the format you are using, it would be easier to replicate. Thanks.
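As a related sketch (an assumption about the cause, not part of the original answer): enqueue_many expects each component to carry an extra leading batch dimension, so packing the per-reader tuples that way can produce exactly this rank mismatch between the scalar seq-len and the 1-D tokens/labels. One common pattern instead is to create one enqueue op per reader tuple and hand all of them to the QueueRunner:
# Hedged sketch: one enqueue op per (seq-len, tokens, labels) tuple,
# instead of a single enqueue_many over the list of tuples.
padding_enqueue_ops = [padding_queue.enqueue(e) for e in ex]
padding_queue_runner = tf.train.QueueRunner(padding_queue, padding_enqueue_ops)
tf.train.add_queue_runner(padding_queue_runner)
padding_dequeue_op = padding_queue.dequeue_up_to(self.batch_size)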

Google Matrix API - python return Nonetype error

"Update"
*Finally resolved the issue, changed the try except to include TypeError and also use pass instead of continue in the except.
"End of update"
I wrote code to search for distance between two locations using Google Distance Matrix API. The origin location are fixed, however for the destination, I get it from an xlsx file. I was expecting to get Dictionary with Destination as the Key and the distance as value. When I run the code below, after certain loop I'm stumbled with this error code:
TypeError: Expected a lat/lng dict or tuple, but got NoneType
Can you help me understand the cause of the error? Here is the code (pygmap.py):
import googlemaps
import openpyxl

#get origin and destination locations
def cleanWB(file_path):
    destination = list()
    wb = openpyxl.load_workbook(filename=file_path)
    ws = wb.get_sheet_by_name('Sheet1')
    for i in range(ws.max_row):
        cellValueLocation = ws.cell(row=i+2,column=1).value
        destination.append(cellValueLocation)
    #remove duplicates from destination list
    unique_location = list(set(destination))
    return unique_location

def getDistance(origin, destination):
    #Google distance matrix API key
    gmaps = googlemaps.Client(key='INSERT API KEY')
    distance = gmaps.distance_matrix(origin, destination)
    distance_status = distance['rows'][0]['elements'][0]['status']
    if distance_status != 'ZERO_RESULTS':
        jDistance = distance['rows'][0]['elements'][0]
        distance_location = jDistance['distance']['value']
    else:
        distance_location = 0
    return distance_location
And I run it using this code:
import pygmap

unique_location = pygmap.cleanWB('C:/Users/an_id/Documents/location.xlsx')
origin = 'alam sutera'
result = {}
for i in range(len(unique_location)):
    try:
        result[unique_location[i]] = pygmap.getDistance(origin, unique_location[i])
    except (KeyError, TypeError):
        pass
If I print result, it shows that I have successfully got 46 results:
result
{'Pondok Pinang': 25905, 'Jatinegara Kaum': 40453, 'Serdang': 1623167, 'Jatiasih': 44737, 'Tanah Sereal': 77874, 'Jatikarya': 48399, 'Duri Kepa': 20716, 'Mampang Prapatan': 31880, 'Pondok Pucung': 12592, 'Johar Baru': 46791, 'Karet': 26889, 'Bukit Duri': 34039, 'Sukamaju': 55333, 'Pasir Gunung Selatan': 42140, 'Pinangsia': 30471, 'Pinang Ranti': 38099, 'Bantar Gebang': 50778, 'Sukabumi Utara': 20441, 'Kembangan Utara': 17708, 'Kwitang': 25860, 'Kuningan Barat': 31231, 'Cilodong': 58879, 'Pademangan Barat': 32585, 'Kebon Kelapa': 23452, 'Mekar Jaya': 53810, 'Kampung Bali': 1188894, 'Pajang': 30008, 'Sukamaju Baru': 53708, 'Benda Baru': 19965, 'Sukabumi Selatan': 19095, 'Gandaria Utara': 28429, 'Setia Mulya': 63534, 'Rawajati': 31724, 'Cireundeu': 28220, 'Cimuning': 55712, 'Lebak Bulus': 27361, 'Kayuringin Jaya': 47560, 'Kedaung Kali Angke': 19171, 'Pagedangan': 16791, 'Karang Anyar': 171165, 'Petukangan Selatan': 18959, 'Rawabadak Selatan': 42765, 'Bojong Sari Baru': 26978, 'Padurenan': 53216, 'Jati Mekar': 2594703, 'Jatirangga': 51119}
Resolved the issue by including TypeError in the try/except, and also using pass instead of continue:
import pygmap

unique_location = pygmap.cleanWB('C:/Users/an_id/Documents/location.xlsx')
origin = 'alam sutera'
result = {}
#get getPlace
for i in range(len(unique_location)):
    try:
        result[unique_location[i]] = pygmap.getDistance(origin, unique_location[i])
    except (KeyError, TypeError):
        pass
I skipped some locations using this solution though.
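A guess at why some locations get skipped (not confirmed by the post): openpyxl returns None for empty cells, and the loop in cleanWB reads one row past the data, so a None destination can reach the client and trigger the NoneType TypeError. If that is the cause, filtering the list up front would keep the except clause for genuine API errors only - a minimal sketch of cleanWB with that filter:
import openpyxl

#get origin and destination locations, skipping empty cells
def cleanWB(file_path):
    destination = list()
    wb = openpyxl.load_workbook(filename=file_path)
    ws = wb.get_sheet_by_name('Sheet1')
    for i in range(ws.max_row):
        cellValueLocation = ws.cell(row=i+2, column=1).value
        if cellValueLocation is not None:  # drop empty cells instead of collecting None
            destination.append(cellValueLocation)
    #remove duplicates from destination list
    return list(set(destination))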

QTreeView crashing with no apparent reason

I introduced a treeview in the GUI of the program I'm making, and since then it crashes when I attempt to change its model once it has been set.
The course of action is:
Load the file using a file dialogue.
Clear the models on the interface objects (tables and treeview). The first time, the treeview is not affected since there is no model in it.
Populate the treeview model.
Other stuff not related to the issue.
The problematic functions are:
The file loading procedure:
def open_file(self):
    """
    Open a file
    :return:
    """
    print("actionOpen_file_click")
    # declare the dialog
    # file_dialog = QtGui.QFileDialog(self)
    # declare the allowed file types
    files_types = "Excel 97 (*.xls);;Excel (*.xlsx);;DigSILENT (*.dgs);;MATPOWER (*.m)"
    # call dialog to select the file
    filename, type_selected = QtGui.QFileDialog.getOpenFileNameAndFilter(self, 'Open file',
                                                                         self.project_directory, files_types)
    if len(filename) > 0:
        self.project_directory = os.path.dirname(filename)
        print(filename)
        self.circuit = Circuit(filename, True)
        # set data structures list model
        self.ui.dataStructuresListView.setModel(self.available_data_structures_listModel)
        # set the first index
        index = self.available_data_structures_listModel.index(0, 0, QtCore.QModelIndex())
        self.ui.dataStructuresListView.setCurrentIndex(index)
        # clean
        self.clean_GUI()
        # load table
        self.display_objects_table()
        # draw graph
        self.ui.gridPlot.setTitle(os.path.basename(filename))
        self.re_plot()
        # show times
        if self.circuit.time_series is not None:
            if self.circuit.time_series.is_ready():
                self.set_time_comboboxes()
        # tree view at the results
        self.set_results_treeview_structure()
        # populate editors
        self.populate_editors_defaults()
The treeview model assignment:
def set_results_treeview_structure(self):
    """
    Sets the results treeview data structure
    :return:
    """
    # self.ui.results_treeView.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
    model = QtGui.QStandardItemModel()
    # model.setHorizontalHeaderLabels(['Elements'])
    self.ui.results_treeView.setModel(model)
    # self.ui.results_treeView.setUniformRowHeights(True)

    def pass_to_QStandardItem_list(list_):
        res = list()
        for elm in list_:
            elm1 = QtGui.QStandardItem(elm)
            elm1.setEditable(False)
            res.append(elm1)
        return res

    bus_results = pass_to_QStandardItem_list(['Voltages (p.u.)', 'Voltages (kV)'])
    per_bus_results = pass_to_QStandardItem_list(['Voltage (p.u.) series', 'Voltage (kV) series',
                                                  'Active power (MW)', 'Reactive power (MVar)',
                                                  'Active and reactive power (MW, MVar)', 'Aparent power (MVA)',
                                                  'S-V curve', 'Q-V curve'])
    branches_results = pass_to_QStandardItem_list(['Loading (%)', 'Current (p.u.)',
                                                   'Current (kA)', 'Losses (MVA)'])
    per_branch_results = pass_to_QStandardItem_list(['Loading (%) series', 'Current (p.u.) series',
                                                     'Current (kA) series', 'Losses (MVA) series'])
    generator_results = pass_to_QStandardItem_list(['Reactive power (p.u.)', 'Reactive power (MVar)'])
    per_generator_results = pass_to_QStandardItem_list(['Reactive power (p.u.) series',
                                                        'Reactive power (MVar) series'])
    self.family_results_per_family = dict()
    # nodes
    buses = QtGui.QStandardItem('Buses')
    buses.setEditable(False)
    buses.appendRows(bus_results)
    self.family_results_per_family[0] = len(bus_results)
    names = self.circuit.bus_names
    for name in names:
        bus = QtGui.QStandardItem(name)
        bus.appendRows(per_bus_results)
        bus.setEditable(False)
        buses.appendRow(bus)
    # branches
    branches = QtGui.QStandardItem('Branches')
    branches.setEditable(False)
    branches.appendRows(branches_results)
    self.family_results_per_family[1] = len(branches_results)
    names = self.circuit.branch_names
    for name in names:
        branch = QtGui.QStandardItem(name)
        branch.appendRows(per_branch_results)
        branch.setEditable(False)
        branches.appendRow(branch)
    # generators
    generators = QtGui.QStandardItem('Generators')
    generators.setEditable(False)
    generators.appendRows(generator_results)
    self.family_results_per_family[2] = len(generator_results)
    names = self.circuit.gen_names
    for name in names:
        gen = QtGui.QStandardItem(name)
        gen.appendRows(per_generator_results)
        gen.setEditable(False)
        generators.appendRow(gen)
    model.appendRow(buses)
    model.appendRow(branches)
    model.appendRow(generators)
And the GUI "cleaning":
def clean_GUI(self):
    """
    Initializes the comboboxes and tables
    Returns:
    """
    self.ui.tableView.setModel(None)
    if self.ui.results_treeView.model() is not None:
        self.ui.results_treeView.model().clear()
    self.ui.profile_time_selection_comboBox.clear()
    self.ui.results_time_selection_comboBox.clear()
    self.ui.gridPlot.clear()
The complete code can be seen here
I have seen that this behavior is usually triggered by calls outside the GUI thread, but I don't think this is the case here.
I'd appreciate it if someone could point out the problem. Again, the complete code for testing is here.
The solution to this in my case has been the following:
The QStandardItemModel() variable called model in the code was turned into a class member, self.tree_model.
When I want to replace the treeview object's model, I delete the member with del self.tree_model.
Then I re-create the member model with self.tree_model = QStandardItemModel().
This way the TreeView object's model is effectively replaced without crashing.
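A condensed sketch of that pattern as I read it (assuming the same QtGui.QStandardItemModel usage as in the question; the surrounding class and the row-population code are omitted):
# Hedged sketch of the described fix: keep the model as an attribute,
# delete it, then rebuild it before handing it to the view again.
def set_results_treeview_structure(self):
    if hasattr(self, 'tree_model'):
        del self.tree_model                      # drop the old model instead of clearing it in place
    self.tree_model = QtGui.QStandardItemModel()
    self.ui.results_treeView.setModel(self.tree_model)
    # ... populate self.tree_model with QStandardItem rows as before ...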
