I am trying to print output using sys.stdout but I'm getting nothing on stdout. Please check.
# Read input from STDIN. Print output to STDOUT
import math
import os
import random
import re
import sys
def stdin(s):
    if len(s) >= 2 and len(s) <= 10000:
        ev = [s[i] for i in range(len(s)) if i % 2 == 0]
        od = [s[i] for i in range(len(s)) if i % 2 != 0]
        even = ''.join(map(str, ev))
        odd = ''.join(map(str, od))
        sys.stdout.write("{0} {1}".format(even, odd))  # print output using stdout, but I get an error
        sys.stdout.flush()

if __name__ == '__main__':
    s = input().strip()
    stdin(s)
By default, Python writes to stdout when you use the built-in print() function, and it reads from stdin when you use input(). I would suggest going that route, as it is less error-prone. Otherwise, could you share the exact error message you are getting? The code runs without error for me.
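For example, here is a minimal sketch of that suggestion, reading with input() and writing with print() (the helper name is just for illustration; print() also adds the trailing newline that judges usually expect):

def split_even_odd(s):
    even = s[::2]   # characters at even indices
    odd = s[1::2]   # characters at odd indices
    return even, odd

if __name__ == '__main__':
    s = input().strip()
    even, odd = split_even_odd(s)
    print(even, odd)  # print() writes to stdout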
Hello guys, I am trying to build a desktop voice assistant for Ubuntu. In my program it was working with
os.system("mpg123 audio.mp3")
How can I use the pydub module instead of this line?
I don't want to use os.system to play the audio file that talks back to me; I think it's slower. I want a faster program, and today I was trying the pydub module.
Here is my program:
from gtts import gTTS
import speech_recognition as sr
import os
import webbrowser
import datetime
import time
import sys
import random
from pygame import mixer  # needed for the "play music" / "stop music" commands
from pydub import AudioSegment
from pydub.playback import play

#sound = AudioSegment.from_mp3('hello.mp3')
#play(sound)

def talkToMe(audio):
    print(audio)
    tts = gTTS(text=audio, lang="en")
    tts.save("audio.mp3")
    play("audio.mp3")  # os.system("mpg123 audio.mp3")

def OurCommands():
    r = sr.Recognizer()
    with sr.Microphone() as source:
        os.system("clear")
        print("Ready for next command")
        r.pause_threshold = 1
        r.adjust_for_ambient_noise(source, duration=1)
        audio = r.listen(source)
    try:
        command = r.recognize_google(audio)
        print("You said: " + command + "\n")
    except sr.UnknownValueError:
        print("Your last command was not understood")
        command = str(input("Command: "))
    return command

#if statements..
def asistan(command):
    if "open web browser" in command:
        talkToMe("It's opening")
        webbrowser.open("www.google.com.tr")
    elif "play music" in command:
        mixer.init()
        mixer.music.load('/path/to/music/')
        mixer.music.play()
    elif "stop music" in command:
        mixer.music.stop()
    elif "thank you" in command:
        talkToMe("You're welcome")
    elif "shutdown my computer" in command:
        talkToMe("I will shut down after five seconds")
        time.sleep(5)
        os.system("shutdown now -h")
    elif "open youtube" in command:
        webbrowser.open("youtube.com")

hour = int(datetime.datetime.now().hour)
if hour >= 1 and hour < 12:
    talkToMe("Good morning")
elif hour >= 12 and hour < 16:
    talkToMe("Good afternoon")
else:
    talkToMe("Good night")

while True:
    asistan(OurCommands())
But when I try to use the pydub module I get an error like this:
AttributeError: 'str' object has no attribute 'raw_data'
I tried this module on my computer and it works and plays mp3 files.
So now, how can I use the pydub module in my program so the computer talks to me?
I think I need an AudioSegment, but how do I use it in my program? Do I have to use another module,
or can I make the pydub module work in my program? Thanks in advance :)
From the docs:
from pydub import AudioSegment
from pydub.playback import play
sound = AudioSegment.from_file("mysound.wav", format="wav")
play(sound)
play() doesn't expect a filename; it expects an AudioSegment, as in the example above. You want:
sound = AudioSegment.from_file("audio.mp3", format="mp3")
play(sound)
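Applied to the assistant above, talkToMe would look roughly like this (a sketch, assuming gTTS still saves audio.mp3 in the working directory):

from gtts import gTTS
from pydub import AudioSegment
from pydub.playback import play

def talkToMe(audio):
    print(audio)
    tts = gTTS(text=audio, lang="en")
    tts.save("audio.mp3")
    sound = AudioSegment.from_file("audio.mp3", format="mp3")  # load the file as an AudioSegment
    play(sound)  # play() takes an AudioSegment, not a filename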
I have code which takes videos from an input folder and converts them into audio files (.wav) using ffmpeg.
It then converts each audio file to text by recording 30-second chunks (dura=30) and converting each chunk to text with recognize_google.
The problem is that the code takes a lot of time to convert video to text, and it drops the first two words plus some words after every 30 seconds.
import speech_recognition as sr
import sys
import shutil
from googletrans import Translator
from pathlib import Path
import os
import wave
def audio_to_text(self, video_lst, deploy_path, video_path, audio_path):
    try:
        txt_lst = []
        for video_file in video_lst:
            file_part = video_file.split('.')
            audio_path_mod = audio_path + '/' + '.'.join(file_part[:-1])
            dir_path = video_path + '.'.join(file_part[:-1])
            self.createDirectory(audio_path_mod)
            audio_file = '.'.join(file_part[:-1]) + '.wav'
            command_ffmpeg = 'set PATH=%PATH%;' + deploy_path.replace('config', 'script') + 'audio_video/ffmpeg/bin/'
            command = 'ffmpeg -i ' + video_path + '/' + video_file + ' ' + audio_path_mod + '/' + audio_file
            os.system(command_ffmpeg)
            os.system(command)
            r = sr.Recognizer()
            dura = 30
            lang = 'en'
            wav_filename = audio_path_mod + '/' + audio_file
            f = wave.open(wav_filename, 'r')
            frames = f.getnframes()
            rate = f.getframerate()
            audio_duration = frames / float(rate)
            final_text_lst = []
            counter = 0
            with sr.AudioFile(wav_filename) as source:
                while counter < audio_duration:
                    audio = r.record(source, duration=dura)
                    counter += dura
                    try:
                        text = r.recognize_google(audio)  # renamed from "str" to avoid shadowing the built-in
                        final_text_lst.append(text)
                    except Exception as e:
                        print(e)
            print('Text data generated..')
            text_path = audio_path_mod + '/' + audio_file.replace('.wav', '_audio_text.csv')
            with open(text_path, 'w') as f:
                f.write(' '.join(final_text_lst))
    except Exception as e:
        print(e)
Any help/suggestion would be valuable. Thanks in advance.
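One idea for the words dropped at the 30-second boundaries (a sketch of a suggestion, not tested against your data): re-open the file for each chunk and overlap the recording windows by a couple of seconds, accepting a little repetition at the seams. The chunk and overlap values below are illustrative.

import wave
import speech_recognition as sr

def transcribe_in_chunks(wav_filename, chunk_s=30, overlap_s=2):
    # total duration from the wav header, as in the original code
    with wave.open(wav_filename, 'r') as wf:
        total_s = wf.getnframes() / float(wf.getframerate())
    r = sr.Recognizer()
    pieces = []
    offset = 0
    while offset < total_s:
        # re-open per chunk so record() can seek with offset= and the windows overlap
        with sr.AudioFile(wav_filename) as source:
            audio = r.record(source, duration=chunk_s + overlap_s, offset=offset)
        try:
            pieces.append(r.recognize_google(audio))
        except sr.UnknownValueError:
            pass
        offset += chunk_s
    return ' '.join(pieces)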
I use the Python cv2 module to join jpg frames into a video, but I can't add audio to it. Is it possible to add audio to a video in Python without ffmpeg?
P.S. Sorry for my poor English
Use ffpyplayer to handle the audio part.
import cv2
import numpy as np
#ffpyplayer for playing audio
from ffpyplayer.player import MediaPlayer

video_path = "../L1/images/Godwin.mp4"

def PlayVideo(video_path):
    video = cv2.VideoCapture(video_path)
    player = MediaPlayer(video_path)
    while True:
        grabbed, frame = video.read()
        audio_frame, val = player.get_frame()
        if not grabbed:
            print("End of video")
            break
        if cv2.waitKey(28) & 0xFF == ord("q"):
            break
        cv2.imshow("Video", frame)
        if val != 'eof' and audio_frame is not None:
            #audio
            img, t = audio_frame
    video.release()
    cv2.destroyAllWindows()

PlayVideo(video_path)
The sample code will work, but you need to play around with the cv2.waitKey(28) value depending on the speed of your video.
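A possible refinement (my own suggestion, not part of the answer above): derive the delay from the video's frame rate instead of hard-coding 28 ms.

import cv2

video = cv2.VideoCapture(video_path)
fps = video.get(cv2.CAP_PROP_FPS) or 25  # fall back to 25 fps if the property is unavailable
delay_ms = max(1, int(1000 / fps))
# then call cv2.waitKey(delay_ms) inside the playback loop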
This is how I am reading audio and video frames:
from moviepy.editor import *
from pafy import pafy

if __name__ == '__main__':
    video = pafy.new('https://www.youtube.com/watch?v=K_IR90FthXQ')
    stream = video.getbest(preftype='mp4')
    video = VideoFileClip(stream.url)
    audio = video.audio
    for t, video_frame in video.iter_frames(with_times=True):
        audio_frame = audio.get_frame(t)
        print(audio_frame)
        print(video_frame)
This code downloads a YouTube video and returns the raw frames as numpy arrays.
You can pass a local file path to VideoFileClip instead of the URL.
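For example (the path is illustrative):

from moviepy.editor import VideoFileClip

video = VideoFileClip("my_local_video.mp4")
audio = video.audio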
You can use pygame for audio.
You need to initialize the pygame.mixer module, and in the loop call pygame.mixer.music.play().
But for that, you will need to choose an audio file as well.
However, I have found a better idea! You can use the webbrowser module for playing videos (and because it plays in the browser, you can hear the sound):
import webbrowser
webbrowser.open("video.mp4")
import pygame

pygame.mixer.init()
pygame.mixer.music.load(
    'c:Your_file')
pygame.mixer.music.play()

while True:
    pygame.time.Clock().tick()
    #used to show that you can do other stuff while playing audio
    print("hi")
I tried pygame for playing a wav file like this:
import pygame
pygame.init()
pygame.mixer.music.load("mysound.wav")
pygame.mixer.music.play()
pygame.event.wait()
but it changes the voice and I don't know why!
I read the solutions at this link and couldn't solve my problem with playing the wave file!
For this solution, I don't know what I should import:
s = Sound()
s.read('sound.wav')
s.play()
and for this solution, /dev/dsp doesn't exist in newer versions of Linux:
from sys import byteorder   # needed for the fallback branch below
import ossaudiodev
from wave import open as waveOpen
from ossaudiodev import open as ossOpen

s = waveOpen('tada.wav', 'rb')
(nc, sw, fr, nf, comptype, compname) = s.getparams()
dsp = ossOpen('/dev/dsp', 'w')
try:
    from ossaudiodev import AFMT_S16_NE
except ImportError:
    if byteorder == "little":
        AFMT_S16_NE = ossaudiodev.AFMT_S16_LE
    else:
        AFMT_S16_NE = ossaudiodev.AFMT_S16_BE
dsp.setparameters(AFMT_S16_NE, nc, fr)
data = s.readframes(nf)
s.close()
dsp.write(data)
dsp.close()
and when I tried pyglet, it gave me this error:
import pyglet
music = pyglet.resource.media('mysound.wav')
music.play()
pyglet.app.run()
--------------------------
nima#ca005 Desktop]$ python play.py
Traceback (most recent call last):
File "play.py", line 4, in <module>
music = pyglet.resource.media('mysound.wav')
File "/usr/lib/python2.7/site-packages/pyglet/resource.py", line 587, in media
return media.load(path, streaming=streaming)
File "/usr/lib/python2.7/site-packages/pyglet/media/__init__.py", line 1386, in load
source = _source_class(filename, file)
File "/usr/lib/python2.7/site-packages/pyglet/media/riff.py", line 194, in __init__
format = wave_form.get_format_chunk()
File "/usr/lib/python2.7/site-packages/pyglet/media/riff.py", line 174, in get_format_chunk
for chunk in self.get_chunks():
File "/usr/lib/python2.7/site-packages/pyglet/media/riff.py", line 110, in get_chunks
chunk = cls(self.file, name, length, offset)
File "/usr/lib/python2.7/site-packages/pyglet/media/riff.py", line 155, in __init__
raise RIFFFormatException('Size of format chunk is incorrect.')
pyglet.media.riff.RIFFFormatException: Size of format chunk is incorrect.
AL lib: ReleaseALC: 1 device not closed
You can use PyAudio. Here is an example that works on my Linux:
#!usr/bin/env python
#coding=utf-8
import pyaudio
import wave

#define stream chunk
chunk = 1024

#open a wav format music
f = wave.open(r"/usr/share/sounds/alsa/Rear_Center.wav", "rb")
#instantiate PyAudio
p = pyaudio.PyAudio()
#open stream
stream = p.open(format=p.get_format_from_width(f.getsampwidth()),
                channels=f.getnchannels(),
                rate=f.getframerate(),
                output=True)
#read data
data = f.readframes(chunk)

#play stream
while data:
    stream.write(data)
    data = f.readframes(chunk)

#stop stream
stream.stop_stream()
stream.close()

#close PyAudio
p.terminate()
Works for me on Windows:
https://pypi.org/project/playsound/
>>> from playsound import playsound
>>> playsound('/path/to/a/sound/file/you/want/to/play.wav')
NOTE: This has a bug in Windows where it doesn't close the stream.
I've added a PR for a fix here:
https://github.com/TaylorSMarks/playsound/pull/53/commits/53240d970aef483b38fc6d364a0ae0ad6f8bf9a0
The reason pygame changes your audio is that the mixer defaults to a 22k sample rate:
initialize the mixer module
pygame.mixer.init(frequency=22050, size=-16, channels=2, buffer=4096): return None
Your wav is probably 8k, so when pygame plays it, it plays roughly twice as fast. Specify your wav's frequency in the init.
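For example, a minimal sketch that reads the sample rate from the wav header instead of hard-coding it:

import wave
import pygame

# match the mixer to the file's own sample rate before loading the music
with wave.open('mysound.wav', 'rb') as wf:
    rate = wf.getframerate()

pygame.mixer.init(frequency=rate)
pygame.mixer.music.load('mysound.wav')
pygame.mixer.music.play()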
Pyglet has some problems correctly reading RIFF headers. If you have a very basic wav file (with exactly a 16 byte fmt block) with no other information in the fmt chunk (like 'fact' data), it works. But it makes no provision for additional data in the chunks, so it's really not adhering to the RIFF interface specification.
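One possible workaround (my suggestion, not part of the answer above): rewrite the file with the standard-library wave module, which emits a plain PCM wav with a bare 16-byte fmt chunk. This only helps if the source data is already PCM, since wave cannot read compressed formats.

import wave

with wave.open('mysound.wav', 'rb') as src:
    params = src.getparams()
    frames = src.readframes(src.getnframes())
with wave.open('mysound_fixed.wav', 'wb') as dst:
    dst.setparams(params)  # wave always writes a minimal PCM header
    dst.writeframes(frames)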
PyGame has two different modules for playing sound and music: the pygame.mixer module and the pygame.mixer.music module. The mixer module contains classes for loading Sound objects and controlling playback. The difference is explained in the documentation:
The difference between the music playback and regular Sound playback is that the music is streamed, and never actually loaded all at once. The mixer system only supports a single music stream at once.
If you want to play a single wav file, you have to initialize the module and create a pygame.mixer.Sound() object from the file, then invoke play() to start playing it. Finally, you have to wait for the file to finish playing.
Use get_length() to get the length of the sound in seconds and wait for the sound to finish:
(The argument to pygame.time.wait() is in milliseconds)
import pygame
pygame.mixer.init()
my_sound = pygame.mixer.Sound('mysound.wav')
my_sound.play()
pygame.time.wait(int(my_sound.get_length() * 1000))
Alternatively you can use pygame.mixer.get_busy to test if a sound is being mixed. Query the status of the mixer continuously in a loop:
import pygame

pygame.init()
pygame.mixer.init()
my_sound = pygame.mixer.Sound('mysound.wav')
my_sound.play()

while pygame.mixer.get_busy():
    pygame.time.delay(10)
    pygame.event.poll()
Windows
winsound
If you are a Windows user, the easiest way is to use winsound. You don't even need to install it.
Not recommended, too few functions
import winsound
winsound.PlaySound("Wet Hands.wav", winsound.SND_FILENAME)
# add the winsound.SND_ASYNC flag if you don't want to block while the sound plays,
# like winsound.PlaySound("Wet Hands.wav", winsound.SND_FILENAME | winsound.SND_ASYNC)
mp3play
If you are looking for more advanced functions, you can try mp3play.
Unluckily, mp3play is only available on Python 2 and Windows.
If you want to use it on other platforms, use playsound despite its limited functionality. If you want to use it in Python 3, I give a modified version that works on Python 3 at the bottom of this answer.
Also, mp3play is really good at playing wave files, and it gives you more choices.
import time
import mp3play
music = mp3play.load("Wet Hands.wav")
music.play()
time.sleep(music.seconds())
Cross-platform
playsound
playsound is very easy to use, but it is not recommended because you can't pause playback or get information about the music, and errors occur often. Unless nothing else works at all, you may try this.
import playsound
playsound.playsound("Wet Hands.wav", block=True)
pygame
I'm using this code and it works on Ubuntu 22.04 in my tests.
If it doesn't work on your machine, consider updating your pygame library.
import pygame
pygame.mixer.init()
pygame.mixer.music.load("Wet Hands.wav")
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():
    pass
pyglet
This works on Windows, but it doesn't work on my Ubuntu, so there is nothing I can do about that.
import pyglet
import time
sound = pyglet.media.load("Wet Hands.wav")  # load() only needs the filename
sound.play()
time.sleep(sound.duration)
Conclusion
It seems that you are using Linux, so playsound may be your choice. My pygame and pyglet snippets may not solve your problem, because I always use Windows. If none of the solutions work on your machine, I suggest you run the program on Windows...
To other users seeing my answer: I have done many tests with many libraries, so if you are using Windows, you may try mp3play, which can play both mp3 and wave files; mp3play is the most Pythonic, easy, light-weight and functional library.
mp3play in Python 3
Just copy the code below into a file named mp3play.py in your working directory.
import random
from ctypes import windll, c_buffer


class _mci:
    def __init__(self):
        self.w32mci = windll.winmm.mciSendStringA
        self.w32mcierror = windll.winmm.mciGetErrorStringA

    def send(self, command):
        buffer = c_buffer(255)
        command = command.encode(encoding="utf-8")
        errorcode = self.w32mci(command, buffer, 254, 0)
        if errorcode:
            return errorcode, self.get_error(errorcode)
        else:
            return errorcode, buffer.value

    def get_error(self, error):
        error = int(error)
        buffer = c_buffer(255)
        self.w32mcierror(error, buffer, 254)
        return buffer.value

    def directsend(self, txt):
        (err, buf) = self.send(txt)
        # if err != 0:
        #     print('Error %s for "%s": %s' % (str(err), txt, buf))
        return err, buf


class _AudioClip(object):
    def __init__(self, filename):
        filename = filename.replace('/', '\\')
        self.filename = filename
        self._alias = 'mp3_%s' % str(random.random())
        self._mci = _mci()
        self._mci.directsend(r'open "%s" alias %s' % (filename, self._alias))
        self._mci.directsend('set %s time format milliseconds' % self._alias)
        err, buf = self._mci.directsend('status %s length' % self._alias)
        self._length_ms = int(buf)

    def volume(self, level):
        """Sets the volume between 0 and 100."""
        self._mci.directsend('setaudio %s volume to %d' %
                             (self._alias, level * 10))

    def play(self, start_ms=None, end_ms=None):
        start_ms = 0 if not start_ms else start_ms
        end_ms = self.milliseconds() if not end_ms else end_ms
        err, buf = self._mci.directsend('play %s from %d to %d'
                                        % (self._alias, start_ms, end_ms))

    def isplaying(self):
        return self._mode() == 'playing'

    def _mode(self):
        err, buf = self._mci.directsend('status %s mode' % self._alias)
        return buf

    def pause(self):
        self._mci.directsend('pause %s' % self._alias)

    def unpause(self):
        self._mci.directsend('resume %s' % self._alias)

    def ispaused(self):
        return self._mode() == 'paused'

    def stop(self):
        self._mci.directsend('stop %s' % self._alias)
        self._mci.directsend('seek %s to start' % self._alias)

    def milliseconds(self):
        return self._length_ms

    def __del__(self):
        self._mci.directsend('close %s' % self._alias)


_PlatformSpecificAudioClip = _AudioClip


class AudioClip(object):
    __slots__ = ['_clip']

    def __init__(self, filename):
        self._clip = _PlatformSpecificAudioClip(filename)

    def play(self, start_ms=None, end_ms=None):
        if end_ms is not None and end_ms < start_ms:
            return
        else:
            return self._clip.play(start_ms, end_ms)

    def volume(self, level):
        assert 0 <= level <= 100
        return self._clip.volume(level)

    def isplaying(self):
        return self._clip.isplaying()

    def pause(self):
        return self._clip.pause()

    def unpause(self):
        return self._clip.unpause()

    def ispaused(self):
        return self._clip.ispaused()

    def stop(self):
        return self._clip.stop()

    def seconds(self):
        return int(round(float(self.milliseconds()) / 1000))

    def milliseconds(self):
        return self._clip.milliseconds()


def load(filename):
    """Return an AudioClip for the given filename."""
    return AudioClip(filename)
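With that file saved as mp3play.py next to your script, the earlier snippet should work unchanged under Python 3 (a quick usage sketch, assuming a file named Wet Hands.wav in the same folder):

import time
import mp3play

music = mp3play.load("Wet Hands.wav")
music.play()
time.sleep(music.seconds())
music.stop()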