I have designed a method that plays a sound when a product is not found during barcode scanning; I put this code in the product-not-found handler.
import math
import pyaudio

@api.multi
def _product_sound(self):
    bitrate = 8000
    frq = 500       # tone frequency in Hz
    LENGTH = 2      # tone duration in seconds
    if frq > bitrate:
        bitrate = frq + 100
    numberofframes = int(bitrate * LENGTH)
    restframes = numberofframes % bitrate
    # build LENGTH seconds of an 8-bit sine wave centred on 128
    wave = ''
    for x in range(numberofframes):
        wave += chr(int(math.sin(x / ((bitrate / frq) / math.pi)) * 124 + 128))
    # pad with silence so the last buffer is complete
    for x in range(restframes):
        wave += chr(128)
    p = pyaudio.PyAudio()
    stream = p.open(format=p.get_format_from_width(1),
                    channels=1,
                    rate=bitrate,
                    output=True)
    stream.write(wave)
    stream.stop_stream()
    stream.close()
    p.terminate()
When I run this code on a single system it works perfectly, but when I access Odoo from a different device, no sound is generated.
So how can I play a sound in Odoo on the current system as well as on other client systems?
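Note that PyAudio opens the audio device of the machine the Python code runs on, which is the Odoo server; that would explain why only the single-system setup (server and client on the same machine) is audible. To beep on another client, the client's browser has to produce the sound itself. A generic browser-side sketch using the Web Audio API (my own illustration, not Odoo-specific code):

// generic sketch: play a 500 Hz beep for 2 seconds in the client's browser
var ctx = new AudioContext();
var osc = ctx.createOscillator();
osc.frequency.value = 500;                       // same tone as the server-side code
osc.connect(ctx.destination);
osc.start();
setTimeout(function () { osc.stop(); }, 2000);   // LENGTH = 2 seconds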
What would cause the export rate or CPU usage to drop between these two scripts?
This one exports at a rate of 15 bit/s:
from moviepy.editor import VideoFileClip, concatenate

clip1 = VideoFileClip("C:/TBD/TBD/#Frikkie/T1234567/Export/01.mp4")
clip2 = VideoFileClip("C:/TBD/TBD/#Frikkie/T1234567/Export/02.mp4")
clip3 = VideoFileClip("C:/TBD/TBD/#Frikkie/T1234567/Export/03.mp4")

final = concatenate([clip1,
                     clip2.crossfadein(1),
                     clip3.crossfadein(1)],
                    padding=-1, method="compose")
final.write_videofile('myvideo.mp4')
This one drops to only 3 bit/s:
path = "C:\TBD\TBD\#Frikkie\T1234567\Export"
videolist = []
for clips in glob.glob(os.path.join(path, '**')):
print(clips)
if ".mp4" in clips:
videolist.append(VideoFileClip(clips).crossfadein(1))
print(videolist)
final = concatenate(videolist, padding=-1, method="compose")
final.write_videofile('myvideo.mp4')
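One visible difference between the two scripts: the loop applies .crossfadein(1) to every clip, including the first, whereas the faster script leaves clip1 untouched. A minimal sketch of the loop aligned with the first script's behaviour (my own suggestion, not a verified fix for the speed drop; sorted() is added because glob's order is unspecified):

for i, name in enumerate(sorted(glob.glob(os.path.join(path, '*.mp4')))):
    clip = VideoFileClip(name)
    # match the first script: no crossfade on the very first clip
    videolist.append(clip if i == 0 else clip.crossfadein(1))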
How do I overlay audio onto other audio?
I want to make a FadeIO(...)-style feature for the audio channels of clips in AviSynth.
I have this script:
video = DirectShowSource("Z:\video\vvv.mp4", fps=fps_count, audio=true, convertfps=true).AssumeFPS(fps_count, 1).ConvertToYV12()
audio = BlankClip(audio_rate=audio_sample_rate, channels=2, length = logo_timeout).ResampleAudio(audio_sample_rate)
blank = BlankClip(logo_timeout, res_width, res_height, pixel_type = "RGB32", fps = fps_count).ConvertToYV12()
blank = Overlay(blank, blank_logo, mode = "blend", x = 0, y = 0)
blank = AudioDub(blank, blank_logo).AssumeFPS(fps_count, 1)
Smooth overlaying of parts of the videos is done like this:
blank.Trim(0 * fps_count, transparent_overlay_latency * fps_count).Overlay(video.Trim(0 * fps_count, transparent_overlay_latency * fps_count), mode="blend", mask=logo.showalpha(), x = 0, y = 0)
But this works only for the video. The audio starts only once the second clip (video.Trim()) has already faded in. I want the sound of this video to ramp up as the clip appears.
As @SeedManc said in a comment under the question: avisynth.nl/index.php/Dissolve allows for a gradual transition between two clips, in terms of both video and audio.
This method is very simple and answers my question.
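For reference, a minimal sketch of how Dissolve could replace the Overlay-based transition above; the one-second overlap (fps_count frames) is my assumption:

# cross-dissolve blank into video over one second of frames;
# Dissolve blends the audio tracks along with the video
result = Dissolve(blank, video, fps_count)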
I'm completely new to QMultimedia. At the moment I'm trying to get the audio stream from the microphone in my webcam for further processing. Right now I just want to continuously show the volume level of the sound "heard" by the mic with a slider. I googled some code together (I found nearly ten tons of examples of how to play audio, but only a few blocks of C++ code about audio input) and got stuck.
This is my current code:
import sys, time
from PyQt4 import Qt, QtGui, QtCore, QtMultimedia

class VolumeSlider(QtGui.QSlider):
    def __init__(self, parent=None):
        super(VolumeSlider, self).__init__(parent)
        self.audio = None
        self.volumeSlider = QtGui.QSlider(QtCore.Qt.Horizontal)
        self.volumeSlider.setTickInterval(1)
        self.volumeSlider.setMaximum(100)
        self.volumeSlider.setValue(49)
        self.volumeSlider.show()
        self.openMicStream()
        # THIS IS WHAT I WANT - DOESN'T WORK
        while True:
            self.volumeSlider.setValue(self.audio.volume())
            time.sleep(0.02)

    def openMicStream(self):
        #audioInputDevices = QtMultimedia.QAudioDeviceInfo.availableDevices(QtMultimedia.QAudio.AudioInput)
        #for d in audioInputDevices: d.deviceName()
        info = QtMultimedia.QAudioDeviceInfo(QtMultimedia.QAudioDeviceInfo.defaultInputDevice())
        print "Default audio input device:", info.deviceName()
        audioFormat = QtMultimedia.QAudioFormat()
        audioFormat.setFrequency(8000)
        audioFormat.setChannels(1)
        audioFormat.setSampleSize(8)
        audioFormat.setCodec("audio/pcm")
        audioFormat.setByteOrder(QtMultimedia.QAudioFormat.LittleEndian)
        audioFormat.setSampleType(QtMultimedia.QAudioFormat.UnSignedInt)
        fmtSupported = info.isFormatSupported(audioFormat)
        print "Is the selected format supported?", fmtSupported
        if not fmtSupported:
            audioFormat = info.nearestFormat(audioFormat)
            print "Is the nearest format supported?", info.isFormatSupported(audioFormat)
        self.audio = QtMultimedia.QAudioInput(audioFormat, None)
        self.audio.start()

if __name__ == "__main__":
    app = Qt.QApplication(sys.argv)
    x = VolumeSlider()
    sys.exit(app.exec_())
Could anybody point me to what I have to do at the "# THIS IS WHAT I WANT" place to calculate and show the current volume level?
There is no built-in function for computing the current volume level of the input signal recorded with QAudioInput, neither in Qt 4 (QAudioInput documentation) nor in Qt 5.
But you can calculate it yourself. The root mean square of the signal over a moving window is often used as a measure of current loudness. See How can I determine how loud a WAV file will sound? for more suggestions.
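For illustration, a minimal sketch of such an RMS computation over one captured buffer, using the standard-library audioop module (my own example; the 8-bit unsigned format matches the one configured above):

import audioop

def rms_level(buf, sample_width=1):
    # audioop expects signed samples, so shift the 8-bit unsigned data first
    signed = audioop.bias(buf, sample_width, -128)
    max_amplitude = float(2 ** (8 * sample_width - 1))  # 128 for 8-bit audio
    # scale the root mean square to 0..100 for the slider
    return int(audioop.rms(signed, sample_width) / max_amplitude * 100)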
I solved it after a while of working on other parts. Now I can at least hear the sound from the speakers, after changing openMicStream(self) to this:
def openMicStream(self):
    info = QAudioDeviceInfo(QAudioDeviceInfo.defaultInputDevice())
    print "Default audio input device: ", info.deviceName()
    audioFormat = QAudioFormat()
    audioFormat.setFrequency(44100)
    audioFormat.setChannels(1)
    audioFormat.setSampleSize(16)
    audioFormat.setCodec("audio/pcm")
    audioFormat.setByteOrder(QAudioFormat.LittleEndian)
    audioFormat.setSampleType(QAudioFormat.UnSignedInt)
    audioDeviceInfo = QAudioDeviceInfo.defaultInputDevice()
    if not audioDeviceInfo.isFormatSupported(audioFormat):
        messages.error(__name__, "default audioFormat not supported, trying to use nearest")
        audioFormat = audioDeviceInfo.nearestFormat(audioFormat)
    print audioFormat.frequency()
    print audioFormat.channels()
    print audioFormat.sampleSize()
    print audioFormat.codec()
    print audioFormat.byteOrder()
    print audioFormat.sampleType()
    audioFmtSupported = info.isFormatSupported(audioFormat)
    messages.info(__name__, "Is the selected format supported? " + str(audioFmtSupported))
    if not audioFmtSupported:
        audioFormat = info.nearestFormat(audioFormat)
        messages.info(__name__, "Is the nearest format supported? " + str(info.isFormatSupported(audioFormat)))
    # route the microphone input straight to the speakers
    self.audioInput = QAudioInput(audioFormat, None)
    self.audioOutput = QAudioOutput(audioFormat, None)
    device = self.audioOutput.start()
    self.audioInput.start(device)
I want to know how I can integrate vision.VideoPlayer into a GUI axes. I'm using the Computer Vision System Toolbox; I tried to integrate this tool into an axes but didn't manage it. The real-time video frames are 640x480 RGB.
videocam1 = vision.VideoPlayer;
videocam2 = vision.VideoPlayer;
while get(hObject, 'Value')
    frame1 = getdata(cam1, 1, 'uint8');
    frame1 = double(frame1);
    frame2 = getdata(cam2, 1, 'uint8');
    frame2 = double(frame2);
    % normalise each frame to the [0, 1] range
    m1 = min(min(min(frame1))); M1 = max(max(max(frame1)));
    m2 = min(min(min(frame2))); M2 = max(max(max(frame2)));
    frame01 = (frame1 - m1) / (M1 - m1);
    frame02 = (frame2 - m2) / (M2 - m2);
    % showFrameOnAxis(hAxes.axis1, videocam1.step(frame01));
    % showFrameOnAxis(hAxes.axis2, videocam2.step(frame02));
    videocam1.step(frame01);
    videocam2.step(frame02);
    nframe = nframe + 1;
end
Here is an example called Video Display in a Custom User Interface that shows you how to do just that.
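In the meantime, a minimal sketch of the usual approach (my own illustration, not taken from that example): draw into the GUI axes by updating an image object's CData instead of opening a separate vision.VideoPlayer window. handles.axis1 and the 640x480 frame size are assumptions taken from the question.

% create the image object once, then only update its pixel data in the loop
hImage1 = image(zeros(480, 640, 3, 'uint8'), 'Parent', handles.axis1);
while get(hObject, 'Value')
    frame1 = getdata(cam1, 1, 'uint8');
    set(hImage1, 'CData', frame1);  % reuse the image object instead of redrawing
    drawnow limitrate;              % let the GUI repaint and process events
end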
I have successfully written code that records a few seconds of audio and saves it in the selected directory in Python 2.7 using PyAudio, like so:
import pyaudio
import wave
import sys
chunk = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
RECORD_SECONDS = 5
WAVE_OUTPUT_FILENAME = "%d_%d.wav" % (self.get('subject_nr'), self.get('count_inline_script'))
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
                channels=CHANNELS,
                rate=RATE,
                input=True,
                frames_per_buffer=chunk)
Now, I only recently started using Python 3.2, and I am wondering if there is a way to record sound as in the older version?
If you are on Windows and your script only uses wave and PyAudio, it is perfectly possible to run it with Python 3.
wave is a module in the standard library, and Windows binary installers for PyAudio can be obtained from here.
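For illustration, the recording itself works the same under Python 3; mainly print and bytes handling differ. A minimal sketch (my own example, with a plain filename in place of the OpenSesame-specific self.get(...) calls):

import pyaudio
import wave

chunk = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
RECORD_SECONDS = 5

p = pyaudio.PyAudio()
sample_width = p.get_sample_size(FORMAT)
stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE,
                input=True, frames_per_buffer=chunk)

# capture RECORD_SECONDS worth of buffers
frames = []
for _ in range(int(RATE / chunk * RECORD_SECONDS)):
    frames.append(stream.read(chunk))
stream.stop_stream()
stream.close()
p.terminate()

# write the recorded frames to a WAV file
wf = wave.open("recording.wav", 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(sample_width)
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()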