What can cause CPU usage to drop in a moviepy FFMPEG script? - python-3.x

What would cause the export rate or CPU usage to drop between these two scripts?
This one exports at a rate of 15 bit/s:
from moviepy.editor import VideoFileClip, concatenate  # older moviepy; newer versions name it concatenate_videoclips

clip1 = VideoFileClip("C:/TBD/TBD/#Frikkie/T1234567/Export/01.mp4")
clip2 = VideoFileClip("C:/TBD/TBD/#Frikkie/T1234567/Export/02.mp4")
clip3 = VideoFileClip("C:/TBD/TBD/#Frikkie/T1234567/Export/03.mp4")

final = concatenate([clip1,
                     clip2.crossfadein(1),
                     clip3.crossfadein(1)],
                    padding=-1, method="compose")
final.write_videofile('myvideo.mp4')
This one drops to only 3 bit/s:
import glob
import os

path = r"C:\TBD\TBD\#Frikkie\T1234567\Export"  # raw string so the backslashes are not treated as escapes

videolist = []
for clips in glob.glob(os.path.join(path, '**')):  # without recursive=True, '**' matches like '*'
    print(clips)
    if ".mp4" in clips:
        videolist.append(VideoFileClip(clips).crossfadein(1))

print(videolist)

final = concatenate(videolist, padding=-1, method="compose")
final.write_videofile('myvideo.mp4')
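One visible difference between the two scripts: the loop applies crossfadein(1) to every clip, including the first one, while the three-clip script leaves clip1 without a crossfade. Whether that explains the slowdown is not established here, but a loop that mirrors the first script exactly would look like this (a minimal sketch under that assumption):

videolist = []
for i, clips in enumerate(sorted(glob.glob(os.path.join(path, '*.mp4')))):
    clip = VideoFileClip(clips)
    # match the first script: no crossfade on the very first clip
    videolist.append(clip if i == 0 else clip.crossfadein(1))

final = concatenate(videolist, padding=-1, method="compose")
final.write_videofile('myvideo.mp4')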

Related

How to calculate active hours of an employee using face_recognition for attendance tracking

I am working on a face recognition system for my academic project. I want to record the first time an employee is recognized as his first active time, record the next time he is recognized as his last active time, and then calculate the total active hours from those two timestamps.
I tried the following code, but I'm only getting the current system time as the start time. Can someone help me with what I am doing wrong?
Code:
data = pickle.loads(open(args["encodings"], "rb").read())
vs = VideoStream(src=0).start()
writers = None
time.sleep(2.0)

while True:
    frame = vs.read()
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    rgb = imutils.resize(frame, width=750)
    r = frame.shape[1] / float(rgb.shape[1])
    boxes = face_recognition.face_locations(rgb)
    encodings = face_recognition.face_encodings(rgb, boxes)
    names = []
    face_names = []
    for encoding in encodings:
        matches = face_recognition.compare_faces(data["encodings"],
                                                 encoding)
        name = "Unknown"
        if True in matches:
            matchedIdxs = [i for (i, b) in enumerate(matches) if b]
            counts = {}
            for i in matchedIdxs:
                name = data["names"][i]
                counts[name] = counts.get(name, 0) + 1
            name = max(counts, key=counts.get)
        names.append(name)
    if names != []:
        for i in names:
            # both timestamps are taken at the same moment,
            # so the difference below is always zero
            first_active_time = datetime.now().strftime('%H:%M')
            last_active_time = datetime.now().strftime('%H:%M')
            difference = datetime.strptime(first_active_time, '%H:%M') - datetime.strptime(last_active_time, '%H:%M')
            difference = difference.total_seconds()
            total_hours = time.strftime("%H:%M", time.gmtime(difference))
            face_names.append([i, first_active_time, last_active_time, total_hours])
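Both timestamps above are taken in the same loop iteration, so their difference is always zero and the "start time" is always the current system time. One way to get real per-person first/last times is to keep a dictionary across frames, recording a name's first sighting once and updating the last sighting on every recognition. A minimal sketch under that idea; active_times, record_sighting, and total_active_hours are illustrative names, not from the original code:

from datetime import datetime
import time

active_times = {}  # name -> [first_active, last_active] as datetime objects

def record_sighting(name):
    now = datetime.now()
    if name not in active_times:
        active_times[name] = [now, now]   # first time this person is seen
    else:
        active_times[name][1] = now       # update "last seen" on every later match

def total_active_hours(name):
    first_active, last_active = active_times[name]
    seconds = (last_active - first_active).total_seconds()
    return time.strftime("%H:%M", time.gmtime(seconds))

Inside the recognition loop, each recognized name would then be passed to record_sighting(name) instead of recomputing both timestamps every frame.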

Running MPI python script in MPI azure ml pipeline

I'm trying to run a distributed Python job through Azure ML pipelines using the MpiStep pipeline class, referring to the example below - https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/pipeline-style-transfer/pipeline-style-transfer.ipynb
I tried implementing the same, but even when I change the node count parameter in the MpiStep class, the script always reports the size (i.e. comm.Get_size()) as 1. Can you please help me with what I'm missing here? Is there any specific setup required on the cluster?
Code snippets:
Pipeline code snippet:
model_dir = model_ds.path('./'+saved_model_blob+'/', data_reference_name='saved_model_path').as_mount()
label_dir = model_ds.path('./'+model_label_blob+'/', data_reference_name='model_label_blob').as_mount()
input_images = result_ds.path('./'+score_blob_name+'/', data_reference_name='Input_images').as_mount()

output_container = 'abc'
inti_container = 'xyz'

distributed_batch_score_step = MpiStep(
    name="batch_scoring",
    source_directory=SCRIPT_FOLDER,
    script_name="batch_scoring_script_mpi.py",
    arguments=["--dataset_path", input_images,
               "--model_name", model_dir,
               "--label_dir", label_dir,
               "--intermediate_data_container", inti_container,
               "--output_container", output_container],
    compute_target=gpu_cluster,
    inputs=[input_images, model_dir, label_dir],
    pip_packages=["tensorflow", "tensorflow-gpu==1.13.1", "pillow", "azure-keyvault", "azure-storage-blob"],
    conda_packages=["mesa-libgl-cos6-x86_64", "mpi4py==3.0.2", "opencv=3.4.2", "scikit-learn=0.21.2"],
    use_gpu=True,
    allow_reuse=False,
    node_count=nodecount_param,
    process_count_per_node=1
)
Python script code snippet:
def run(input_dataset, comm):
    rank = comm.Get_rank()
    size = comm.Get_size()
    print("Rank:", rank)
    print("Size:", size)  # always shows 1, even when the input node count is > 1
    print(MPI.Get_processor_name())
    file_names = get_file_names(args.dataset_path)
    sorted(file_names)  # note: sorted() returns a new list; this line has no effect as written
    partition_size = len(file_names) // size
    print("partition_size-->", partition_size)
    partitioned_filenames = file_names[rank * partition_size: (rank + 1) * partition_size]
    print("RANK {} - is processing {} images out of the total {}".format(rank, len(partitioned_filenames),
                                                                         len(file_names)))
    # call to Function 01
    # call to Function 02
    img_names = score_df['image_name'].unique()
    output_batch = pd.DataFrame()
    for i in img_names:
        # call to Function 3
        output_batch = output_batch.append(pp_output, ignore_index=True)
    output_paths_list = comm.gather(output_batch, root=0)
    print("RANK {} - number of pre-aggregated output files {}".format(rank, len(output_batch)))
    print("saved in", currentDT + '\\' + 'data.csv')
    if rank == 0:
        print("RANK {} - number of aggregated output files {}".format(rank, len(output_paths_list)))
    print("RANK {} - end".format(rank))

if __name__ == "__main__":
    with tf.device('/GPU:0'):
        init()
        comm = MPI.COMM_WORLD
        run(args.dataset_path, comm)
Got to know the issue was due to the package install method: earlier mpi4py was installed via conda with conda_packages=["mpi4py==3.0.2"], and it worked after changing the install to pip - pip_packages=["mpi4py"].
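Applied to the pipeline snippet above, that amounts to moving mpi4py from conda_packages to pip_packages (a sketch of only the changed arguments; the rest of the MpiStep call stays as before):

distributed_batch_score_step = MpiStep(
    # ... same arguments as above ...
    pip_packages=["tensorflow", "tensorflow-gpu==1.13.1", "pillow",
                  "azure-keyvault", "azure-storage-blob",
                  "mpi4py"],  # mpi4py now installed via pip
    conda_packages=["mesa-libgl-cos6-x86_64", "opencv=3.4.2",
                    "scikit-learn=0.21.2"],  # mpi4py removed from here
    # ...
)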

How to Play Sound in Odoo with Single Device Running

I have designed a method for when a product is not found during barcode scanning; I put this code in the product-not-found handler.
@api.multi
def _product_sound(self):
    PyAudio = pyaudio.PyAudio
    bitrate = 8000
    frq = 500
    LENGTH = 2
    if frq > bitrate:
        bitrate = frq + 100
    numberofframe = int(bitrate * LENGTH)
    restframe = numberofframe % bitrate
    wave = ''
    for x in range(numberofframe):
        wave = wave + chr(int(math.sin(x / ((bitrate / frq) / math.pi)) * 124 + 128))
    for x in range(restframe):
        wave = wave + chr(128)
    p = PyAudio()
    stream = p.open(format=p.get_format_from_width(1), channels=1, rate=bitrate, output=True)
    stream.write(wave)
    stream.stop_stream()
    stream.close()
    p.terminate()
When I try this code on a single system it works perfectly, but when I try it from a different device, no sound is generated.
So how can I play a sound in Odoo on a different system as well as on the current one?
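A likely explanation (an assumption, since the question doesn't say how Odoo is deployed): this method runs on the Odoo server, so PyAudio plays the tone through the server's sound card; a user browsing from a different device never hears it, and playing it on the client would have to happen in the browser instead. Independent of that, the tone generation itself can be written with a bytearray instead of concatenating chr() calls, which also works unchanged on Python 3 (a minimal standalone sketch, not tied to Odoo; it uses the standard 2*pi*f*n/rate phase formula):

import math
import pyaudio

def play_beep(freq=500, seconds=2, rate=8000):
    # one unsigned 8-bit sample per frame, sine scaled into roughly 4..252
    samples = bytearray(
        int(math.sin(2.0 * math.pi * freq * n / rate) * 124 + 128)
        for n in range(int(rate * seconds))
    )
    p = pyaudio.PyAudio()
    stream = p.open(format=p.get_format_from_width(1), channels=1,
                    rate=rate, output=True)
    stream.write(bytes(samples))
    stream.stop_stream()
    stream.close()
    p.terminate()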

tensorflow workers start before the main thread could initialize variables

I am trying to use tf.train.batch to enqueue images in multiple threads. When the number of threads is 1, the code works fine. But when I set a higher number of threads I receive an error:
Failed precondition: Attempting to use uninitialized value Variable
[[Node: Variable/read = Identity[T=DT_INT32, _class=["loc:@Variable"], _device="/job:localhost/replica:0/task:0/cpu:0"](Variable)]]
The main thread has to run for some time (under one second) to index the database of folders and put it into a tensor.
I tried running sess.run([some_image]) before the tf.train.batch loop. In that case the workers fail in the background first with the same error, and after that I receive my images.
I tried using time.sleep(), but it does not seem to be possible to delay the workers.
I tried adding a dependency to the batch:
g = tf.get_default_graph()
with g.control_dependencies([init_one, init_two]):
    example_batch = tf.train.batch([my_image])
where init_one and init_two are tf.initialize_all_variables() and tf.initialize_local_variables()
The most relevant issue I could find is at: https://github.com/openai/universe-starter-agent/issues/44
Is there a way I could synchronize the worker threads with the main thread so that they don't race ahead and die out?
A similar and easy-to-reproduce error with variable initialization happens when the epoch counter is set to anything that is not None. Are there any potential solutions? I've added the code needed to reproduce the error below:
def index_the_database(database_path):
    """indexes av4 database and returns two tensors of filesystem path: ligand files, and protein files"""
    ligand_file_list = []
    receptor_file_list = []
    for ligand_file in glob(os.path.join(database_path, "*_ligand.av4")):
        receptor_file = "/".join(ligand_file.split("/")[:-1]) + "/" + ligand_file.split("/")[-1][:4] + '.av4'
        if os.path.exists(receptor_file):
            ligand_file_list.append(ligand_file)
            receptor_file_list.append(receptor_file)
    index_list = range(len(ligand_file_list))
    return index_list, ligand_file_list, receptor_file_list

index_list, ligand_file_list, receptor_file_list = index_the_database(database_path)

ligand_files = tf.convert_to_tensor(ligand_file_list, dtype=tf.string)
receptor_files = tf.convert_to_tensor(receptor_file_list, dtype=tf.string)

filename_queue = tf.train.slice_input_producer([ligand_files, receptor_files], num_epochs=10, shuffle=True)
serialized_ligand = tf.read_file(filename_queue[0])
serialized_receptor = tf.read_file(filename_queue[1])
image_one = tf.reduce_sum(tf.exp(tf.decode_raw(serialized_receptor, tf.float32)))
image_batch = tf.train.batch([image_one], 100, num_threads=100)

init_two = tf.initialize_all_variables()
init_one = tf.initialize_local_variables()

sess = tf.Session()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
sess.run([init_one])
sess.run([init_two])

while True:
    print "next"
    sess.run([image_batch])
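In this reproduction the queue runners are started before the initializers run, so the worker threads can hit uninitialized variables. The usual TF1 ordering is to run the init ops first and only then start the queue runners; a sketch of the reordered tail of the script (everything above the init ops stays the same):

init_op = tf.group(tf.initialize_all_variables(),
                   tf.initialize_local_variables())

sess = tf.Session()
sess.run(init_op)  # initialize before any worker thread exists

coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)

try:
    while not coord.should_stop():
        sess.run([image_batch])
except tf.errors.OutOfRangeError:
    pass  # num_epochs exhausted
finally:
    coord.request_stop()
    coord.join(threads)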

How to show the current audio volume of my microphone?

I'm completely new to QtMultimedia. At the moment, I'm trying to get the audio stream from the microphone in my webcam for further processing. Right now I just want to continuously show the volume level of the sound "heard" by the mic with a slider. So I googled some code together (I found nearly ten tons of examples of how to play audio, but only a few blocks of C++ code about audio input) and got stuck.
This is my actual code:
import sys, time
from PyQt4 import Qt, QtGui, QtCore, QtMultimedia

class VolumeSlider(QtGui.QSlider):
    def __init__(self, parent=None):
        super(VolumeSlider, self).__init__(parent)
        self.audio = None
        self.volumeSlider = QtGui.QSlider(QtCore.Qt.Horizontal)
        self.volumeSlider.setTickInterval(1)
        self.volumeSlider.setMaximum(100)
        self.volumeSlider.setValue(49)
        self.volumeSlider.show()
        self.openMicStream()
        # THIS IS WHAT I WANT - DOESN'T WORK
        while True:
            self.volumeSlider.setValue(self.audio.volume())
            time.sleep(0.02)

    def openMicStream(self):
        #audioInputDevices = QtMultimedia.QAudioDeviceInfo.availableDevices(QtMultimedia.QAudio.AudioInput)
        #for d in audioInputDevices: d.deviceName()
        info = QtMultimedia.QAudioDeviceInfo(QtMultimedia.QAudioDeviceInfo.defaultInputDevice())
        print "Default audio input device:", info.deviceName()
        audioFormat = QtMultimedia.QAudioFormat()
        audioFormat.setFrequency(8000)
        audioFormat.setChannels(1)
        audioFormat.setSampleSize(8)
        audioFormat.setCodec("audio/pcm")
        audioFormat.setByteOrder(QtMultimedia.QAudioFormat.LittleEndian)
        audioFormat.setSampleType(QtMultimedia.QAudioFormat.UnSignedInt)
        audioDeviceInfo = QtMultimedia.QAudioDeviceInfo.defaultInputDevice()
        if not audioDeviceInfo.isFormatSupported(audioFormat):
            sys.stderr.write("default audioFormat not supported, trying to use nearest\n")
            audioFormat = audioDeviceInfo.nearestFormat(audioFormat)
        self.audioInput = QtMultimedia.QAudioInput(audioFormat)
        fmtSupported = info.isFormatSupported(audioFormat)
        print "Is the selected format supported?", fmtSupported
        if not fmtSupported:
            audioFormat = info.nearestFormat(audioFormat)
            print "Is the nearest format supported?", info.isFormatSupported(audioFormat)
        self.audio = QtMultimedia.QAudioInput(audioFormat, None)
        self.audio.start()

if __name__ == "__main__":
    app = Qt.QApplication(sys.argv)
    x = VolumeSlider()
    sys.exit(app.exec_())
Could anybody poke me in the head about what I have to do at the "# THIS IS WHAT I WANT" place to calculate and show the current volume level?
There is no built-in function for computing the current volume level of the input sound signal when recording with QAudioInput, neither in Qt 4 (QAudioInput documentation) nor in Qt 5.
But you can calculate it yourself. The root mean square (RMS) of the signal in a moving window is often used as a measure of current loudness. See How can I determine how loud a WAV file will sound? for more suggestions.
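As an illustration of that approach, here is a minimal sketch. It assumes the pull mode of QAudioInput, where start() called without arguments returns a QIODevice that has to be kept around (e.g. self.micDevice = self.audio.start()), and the 8-bit unsigned format configured above; device and sample_width are illustrative names:

import audioop

def current_volume_percent(device, sample_width=1):
    """Read whatever has been captured so far and map its RMS to 0..100."""
    data = device.readAll().data()  # QByteArray -> raw byte string
    if not data:
        return 0
    if sample_width == 1:
        # audioop expects signed samples; shift unsigned 8-bit down by 128
        data = audioop.bias(data, 1, -128)
    rms = audioop.rms(data, sample_width)
    max_amplitude = float(2 ** (8 * sample_width - 1))  # 128 for 8-bit
    return int(100 * rms / max_amplitude)

Polling this from a QTimer every 20 ms or so, instead of the blocking while True loop (which never lets the Qt event loop run), would then drive the slider via self.volumeSlider.setValue(current_volume_percent(self.micDevice)).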
Solved it after a while of working on other parts. Now I can at least hear the sound out of the speakers, after I changed openMicStream(self) to this:
def openMicStream(self):
    info = QAudioDeviceInfo(QAudioDeviceInfo.defaultInputDevice())
    print "Default audio input device: ", info.deviceName()
    audioFormat = QAudioFormat()
    audioFormat.setFrequency(44100)
    audioFormat.setChannels(1)
    audioFormat.setSampleSize(16)
    audioFormat.setCodec("audio/pcm")
    audioFormat.setByteOrder(QAudioFormat.LittleEndian)
    audioFormat.setSampleType(QAudioFormat.UnSignedInt)
    audioDeviceInfo = QAudioDeviceInfo.defaultInputDevice()
    if not audioDeviceInfo.isFormatSupported(audioFormat):
        messages.error(__name__, "default audioFormat not supported, trying to use nearest")
        audioFormat = audioDeviceInfo.nearestFormat(audioFormat)
    print audioFormat.frequency()
    print audioFormat.channels()
    print audioFormat.sampleSize()
    print audioFormat.codec()
    print audioFormat.byteOrder()
    print audioFormat.sampleType()
    self.audioInput = QAudioInput(audioFormat)
    audioFmtSupported = info.isFormatSupported(audioFormat)
    messages.info(__name__, "Is the selected format supported?" + str(audioFmtSupported))
    if not audioFmtSupported:
        audioFormat = info.nearestFormat(audioFormat)
        messages.info(__name__, "Is the nearest format supported?" + str(info.isFormatSupported(audioFormat)))
    self.audioInput = QAudioInput(audioFormat, None)
    self.audioOutput = QAudioOutput(audioFormat, None)
    device = self.audioOutput.start()
    self.audioInput.start(device)
