from gtts import gTTS
import os
import random

with open('symbols.txt', 'r') as file:
    symbols = file.read().replace('\n', '')

def getunique():
    for r, d, f in os.walk(path):
        for file in f:
            if file.endswith('.mp3'):
                unique.add(file.split('.')[0])

def generate(unique):
    chars = symbols
    while True:
        value = "".join(random.choice(chars) for _ in range(8))
        if value not in unique:
            language = 'en'
            myobj = gTTS(text=value, lang=language, slow=True)
            myobj.save("audio_captcha_training_set2/" + value + ".mp3")
            unique.add(value)
            break

path = "C:\\Users\\DNS\\Desktop\\Audio Captcha\\Test Python code\\audio_captcha_training_set\\"
unique = set()
getunique()
for _ in range(20):
    generate(unique)
Here is code that reads a file containing the characters 0-9 and A-Z, then generates audio for a random 8-character string built from them. However, there is no pause or delay between the characters in the generated audio. I want to add a pause of about a second between each character. Any help?
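One way to get such a pause (a sketch, not from the original post) is to synthesize each character separately and join the clips with explicit silence using pydub, which needs ffmpeg installed to decode MP3s. The helper name generate_with_pauses and the one-second default are assumptions:

from gtts import gTTS
from pydub import AudioSegment
import io

def generate_with_pauses(value, out_path, pause_ms=1000):
    # Synthesize each character on its own, then splice the clips
    # together with pause_ms of silence between them.
    combined = AudioSegment.empty()
    silence = AudioSegment.silent(duration=pause_ms)
    for i, ch in enumerate(value):
        buf = io.BytesIO()
        gTTS(text=ch, lang='en', slow=True).write_to_fp(buf)
        buf.seek(0)
        combined += AudioSegment.from_file(buf, format="mp3")
        if i < len(value) - 1:
            combined += silence
    combined.export(out_path, format="mp3")

Inside generate(), the save call could then become, for example:
generate_with_pauses(value, "audio_captcha_training_set2/" + value + ".mp3")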
These days I have asked a couple of questions related to string and file encoding in Python, but the real problem is harder than I supposed, and now I have a clearer understanding of it.
Windows encodes text files in different formats depending on the language or language group. So I need to be able to read an ini (text) file encoded in any of these formats, because some keys contain strings that I have to display on forms and menus on the screen.
So I would like to write something (that has to work with any text file and encoding) similar to this code example:
from configparser import ConfigParser
import magic

def detect(iniFileName):
    return magic.Magic(mime_encoding=True).from_file(iniFileName)

#---------------------------------------------------------------------------
encoding = detect('config.ini')
ini = ConfigParser()
ini.read('config.ini', encoding)
title = ini.get('USERENTRY-FORM', 'TITLE')
'''
then title is passed to the tk form
'''
UserEntryForm.setTitle(title)
if _DEBUG == True:
    print("USERENTRY-FORM title=", title)
This is the new solution, which seems to work better because it recognizes the encoding format more reliably; AIniParser is a wrapper class around ConfigParser.
from chardet import detect

def chardetPrint(filename):
    text = open(filename, 'rb').read()
    print(filename, ": ", detect(text))

#---------------------------------------------------------------------------
def chardet(filename):
    text = open(filename, 'rb').read()
    print(filename, ": ", detect(text)['encoding'])  # only for test
    return detect(text)['encoding']

if __name__ == '__main__':
    from ainiparser import AIniParser

    def testIniRead(filename, section, key):
        with open(filename, "rb") as f:
            line = f.readline()
            print("line 1: ", line)
        encode = chardet(filename)
        ini = AIniParser(filename, encoding=encode)
        ini._open()
        text = ini.getString(section, key)
        print(text)

    def main():
        testIniRead("../cdata/config-cro-16.ini", "CGUIINTF", "game-turn-text")
        testIniRead("../bat/output-lva-16/idata/config-lva-16.ini", "CGUIINTF", "game-turn-text")
        testIniRead("../idata/config-lva-16.ini", "CGUIINTF", "game-turn-text")
        testIniRead("../idata/domande.ini", "D2", "B")

    #---------------------------------------------------------------------------
    main()
#---------------------------------------------------------------------------
This solution seems to recognize the file encodings better, but I am still testing it.
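For reference, here is a minimal sketch of the same idea without the AIniParser wrapper, assuming chardet is installed; it detects the encoding from the raw bytes and hands it to ConfigParser (the helper name read_ini_key is made up here):

from configparser import ConfigParser
from chardet import detect

def read_ini_key(filename, section, key):
    # Detect the encoding from the raw bytes, then let ConfigParser
    # decode the file with it.
    with open(filename, 'rb') as f:
        raw = f.read()
    encoding = detect(raw)['encoding']  # e.g. 'UTF-16' or 'windows-1252'
    ini = ConfigParser()
    ini.read(filename, encoding=encoding)
    return ini.get(section, key)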
I have a csv file of several thousand rows in multiple languages, and I am thinking of using the Google Cloud Translate API to translate the foreign-language text into English. I used the simple code below to check whether everything works properly, and it runs smoothly.
from google.cloud import translate_v2 as translate
from time import sleep
from tqdm.notebook import tqdm
import multiprocessing as mp
import os
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "file path.py"
translate_client = translate.Client()
text = "Good Morning, My Name is X."
target ="ja"
output = translate_client.translate(text, target_language=target)
print(output)
I now want to import the csv file (using pandas), translate the text, and save the output as a csv file, but I don't know how to do that. Most of the examples I found stop at translating sample text, just like the above.
Can anyone suggest how I can do this?
To translate the text in a csv file and save the output to the same CSV file using the Google Cloud Translation API, you can use the code below:
import csv
from pathlib import Path

def translate_text(target, text):
    """Translates text into the target language.

    Target must be an ISO 639-1 language code.
    See https://g.co/cloud/translate/v2/translate-reference#supported_languages
    """
    import six
    from google.cloud import translate_v2 as translate

    translate_client = translate.Client()
    if isinstance(text, six.binary_type):
        text = text.decode("utf-8")
    # Text can also be a sequence of strings, in which case this method
    # will return a sequence of results for each text.
    result = translate_client.translate(text, target_language=target)
    # print(u"Text: {}".format(result["input"]))
    # print(u"Translation: {}".format(result["translatedText"]))
    # print(u"Detected source language: {}".format(result["detectedSourceLanguage"]))
    return result["translatedText"]

def main(input_file, translate_to):
    """
    Translate a text file and save as a CSV file
    using Google Cloud Translation API
    """
    input_file_path = Path(input_file)
    target_lang = translate_to
    output_file_path = input_file_path.with_suffix('.csv')

    with open(input_file_path) as f:
        list_lines = f.readlines()
    total_lines = len(list_lines)

    with open(output_file_path, 'w') as csvfile:
        my_writer = csv.writer(csvfile, delimiter=',', quotechar='"')
        my_writer.writerow(['id', 'original_text', 'translated_text'])
        for i, each_line in enumerate(list_lines):
            line_id = f'{i + 1:04}'
            original_text = each_line.strip('\n')  # Strip for the writer(*).
            translated_text = translate_text(
                target=target_lang,
                text=each_line)
            my_writer.writerow([line_id, original_text, translated_text])  # (*)
            # Progress monitor, non-essential.
            print(f"""
            {line_id}/{total_lines:04}
            {original_text}
            {translated_text}""")

if __name__ == '__main__':
    origin_file = input('Input text file? >> ')
    output_lang = input('Output language? >> ')
    main(input_file=origin_file,
         translate_to=output_lang)
Example:
Translating the text in the input file to target language “es”; the output is stored in the same csv file.
Input:
new.csv
How are you doing,Is everything fine there
Do it today
Output:
new.csv
id,original_text,translated_text
0001,"How are you doing,Is everything fine there",¿Cómo estás? ¿Está todo bien allí?
0002,Do it today,Hazlo hoy
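Since the question mentions pandas, the same idea can also be sketched with a DataFrame; the file names and the column name 'text' below are assumptions, not taken from the question's data:

import pandas as pd
from google.cloud import translate_v2 as translate

translate_client = translate.Client()

df = pd.read_csv('input.csv')  # assumes the text lives in a 'text' column
df['translated_text'] = df['text'].apply(
    lambda s: translate_client.translate(s, target_language='en')['translatedText'])
df.to_csv('translated.csv', index=False)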
I am using the IDLE editor and Python 3.7, and I would like to know why my code is not playing multiple audio files (sequentially) and sometimes not playing audio at all:
import re
import wave
import pyaudio
import _thread
import time

class TextToSpeech:
    CHUNK = 1024

    def __init__(self, words_pron_dict: str = 'cmudict-0.7b.txt'):
        self._l = {}
        self._load_words(words_pron_dict)

    def _load_words(self, words_pron_dict: str):
        with open(words_pron_dict, 'r') as file:
            for line in file:
                if not line.startswith(';;;'):
                    key, val = line.split(' ', 2)
                    self._l[key] = re.findall(r"[A-Z]+", val)

    def get_pronunciation(self, str_input):
        list_pron = []
        for word in re.findall(r"[\w']+", str_input.upper()):
            if word in self._l:
                list_pron += self._l[word]
        print(list_pron)
        delay = 0
        for pron in list_pron:
            _thread.start_new_thread(TextToSpeech._play_audio, (pron, delay,))
            delay += 0.145

    def _play_audio(sound, delay):
        try:
            time.sleep(delay)
            wf = wave.open("sounds/" + sound + ".wav", 'rb')
            p = pyaudio.PyAudio()
            stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                            channels=wf.getnchannels(),
                            rate=wf.getframerate(),
                            output=True)
            data = wf.readframes(TextToSpeech.CHUNK)
            while data:
                stream.write(data)
                data = wf.readframes(TextToSpeech.CHUNK)
            stream.stop_stream()
            stream.close()
            p.terminate()
            return
        except:
            pass

if __name__ == '__main__':
    tts = TextToSpeech()
    while True:
        tts.get_pronunciation(input('Enter a word or phrase: '))
I have a list of audio files that should play in a certain order, depending on what word I type in when running the code. The code raises no errors, but when I type in a word it only plays the first audio file needed. For example, typing "buy" requires the two sounds "b" and "ie" played together, but only the first sound, "b", plays, and sometimes no sound at all.
Why isn't it playing multiple audio files? I know that lots of people have had this issue, but I haven't been able to solve it.
Thank you for your help in advance, it is greatly appreciated :)
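A hedged observation, not from the original post: starting one thread per phoneme opens many PyAudio output streams that compete for the device, the sleep-based offsets drift, and the bare except: pass swallows any error, which would explain the occasional silence. A sketch that instead plays the clips sequentially on a single PyAudio instance (using the same sounds/<phoneme>.wav layout as the question) could look like this:

import wave
import pyaudio

CHUNK = 1024

def play_sequence(prons):
    # Play each phoneme WAV to completion before starting the next,
    # reusing one PyAudio instance for the whole sequence.
    p = pyaudio.PyAudio()
    for pron in prons:
        wf = wave.open("sounds/" + pron + ".wav", 'rb')
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        data = wf.readframes(CHUNK)
        while data:
            stream.write(data)
            data = wf.readframes(CHUNK)
        stream.stop_stream()
        stream.close()
        wf.close()
    p.terminate()

Calling play_sequence(list_pron) from get_pronunciation, instead of spawning threads, keeps the phonemes in order; the program blocks during playback, which is usually acceptable here.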
I am working on an AI, like Jarvis, in Python 3. I am using the Python speech_recognition module, pyaudio, and everything else required according to this page:
https://pypi.python.org/pypi/SpeechRecognition/
I have it on a Raspberry Pi now; before, I was using my Mac, where it worked fine. Now I sometimes get an error when running my Jarvis code on the Raspberry Pi. Not always, but frequently enough to put a wrench in our progress, and not knowing when the error will come is a big problem, so we need to get rid of it. I'm using a Blue Snowball mic. Here are my code and my error; if you could help, that would be great, thanks!
Traceback (most recent call last):
File "/media/pi/TRAVELDRIVE/Jarvis(10.0).py", line 172, in <module>
with m as source: r.adjust_for_ambient_noise(source)
File "/usr/local/lib/python3.4/dist-packages/speech_recognition/__init__.py", line 140, in __enter__
input=True, # stream is an input stream
File "/usr/local/lib/python3.4/dist-packages/pyaudio.py", line 750, in open
stream = Stream(self, *args, **kwargs)
File "/usr/local/lib/python3.4/dist-packages/pyaudio.py", line 441, in __init__
self._stream = pa.open(**arguments)
OSError: [Errno -9999] Unanticipated host error
Jarvis.py
#JARVIS mark 10. python 3.5.1 version
#JUST.A.RATHER.VERY.INTELLIGENT.SYSTEM.
##import speech_recognition
##import datetime
##import os
##import random
##import datetime
##import webbrowser
##import time
##import calendar
from difflib import SequenceMatcher
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.tokenize import PunktSentenceTokenizer
import speech_recognition as sr
import sys
from time import sleep
import os
import random

r = sr.Recognizer()
m = sr.Microphone()

#Brain functions, vocab!
what_i_should_call_someone = [""]
Good_Things = ["love","sweet","nice","happy","fun","awesome","great"]
Bad_Things = ["death","kill","hurt","harm","discomfort","rape","pain","sad","depression","depressed","angry","mad","broken","raging","rage"]
# Words that you might say at the beginning of your input, for example: "um hey where are we!?!"
Slang_Words = ["um","uh","hm","eh"]
# Put all greetings in here
Static_Greetings = ["Hey","Hi","Hello"]
# Put your AI's name and other names, just in case.
Name = ["jarvis"]
posible_answer_key_words = ["becuase","yes","no"]
Chance_that_question_was_asked_1 = 0
Chance_that_question_was_asked_2 = 0
certainty_question_was_asked = 0
Me_statment_keywords = ["you","your","yours"]
You_statment_keywords = ["i","i'm","me"]
global certainty_person_is_talking_to_me
what_i_said = ("")
Just_asked_querstion = False
the_last_thing_i_said = ("")
the_last_thing_person_said = ("")
what_person_said = ("")
what_person_said_means = [""]
what_im_about_to_say = [""]
why_im_about_to_say_it = [""]
who_im_talking_to = [""]
how_i_feel = [""]
why_do_i_feel_the_way_i_do = [""]
what_i_am_thinking = ("")
# ways to describe the nouns last said
it_pronouns = ["it","they","she","he"]
# last person, place, or thing described, spoken, or discussed!
last_nouns = [""]
# Sample of random questions so Jarvis has something to index to know what a question is!
Sample_Questions = ["what is the weather like","where are we today","why did you do that","where is the dog","when are we going to leave","why do you hate me","what is the Answer to question 8",
                    "what is a dinosour","what do i do in an hour","why do we have to leave at 6.00", "When is the apointment","where did you go","why did you do that","how did he win","why won’t you help me",
                    "when did he find you","how do you get it","who does all the shipping","where do you buy stuff","why don’t you just find it in the target","why don't you buy stuff at target","where did you say it was",
                    "when did he grab the phone","what happened at seven am","did you take my phone","do you like me","do you know what happened yesterday","did it break when it dropped","does it hurt everyday",
                    "does the car break down often","can you drive me home","where did you find me",
                    "can it fly from here to target","could you find it for me"]
Sample_Greetings = ["hey","hello","hi","hey there","hi there","hello there","hey jarvis","hey dude"]
Question_Keyword_Answer = []
Int_Question_Keywords_In_Input = []
Possible_Question_Key_Words = ["whats","what","where","when","why","isn't","whats","who","should","would","could","can","do","does","can","can","did"]
Possible_Greeting_Key_Words = ["hey","hi","hello",Name]

# In this function: analyze the user input to find out whether it is a question, answer, command, etc., and what is being asked or commanded.
def Analyze():
    def Analyze_For_Greeting():
        def Greeting_Keyword_Check():
            global Possible_Greeting_Key_Words
            Int_Greeting_Keywords_In_Input = []
            for words in what_person_said_l_wt:
                if words in Possible_Greeting_Key_Words:
                    Int_Greeting_Keywords_In_Input.append(words)
            Amount_Greeting_Keywords = (len(Int_Greeting_Keywords_In_Input))
            if Amount_Greeting_Keywords > 0:
                return True
        def Greeting_Sentence_Match():
            for Ran_Greeting in Sample_Greetings:
                Greeting_Matcher = SequenceMatcher(None, Ran_Greeting, what_person_said_l).ratio()
                if Greeting_Matcher > 0.5:
                    print (Greeting_Matcher)
                    print ("Similar to Greeting: "+Ran_Greeting)
                    return True
        Greeting_Keyword_Check()
        Greeting_Sentence_Match()

    # In this function: determine whether the input is a question or not.
    def Analyze_For_Question():
        # In this function: if there is at least one question keyword in the user input, then return True.
        def Question_Keyword_Check():
            global Possible_Question_Key_Words
            Int_Question_Keywords_In_Input = []
            for words in what_person_said_l_wt:
                if words in Possible_Question_Key_Words:
                    Int_Question_Keywords_In_Input.append(words)
            Amount_Question_keywords = (len(Int_Question_Keywords_In_Input))
            if Amount_Question_keywords > 0:
                return True
        # In this function: if the user's input is similar to other sample questions, return True.
        def Question_Sentence_Match():
            for Ran_Question in Sample_Questions:
                Question_Matcher = SequenceMatcher(None, Ran_Question, what_person_said_l).ratio()
                if Question_Matcher > 0.5:
                    print (Question_Matcher)
                    print ("Similar to Question: "+Ran_Question)
                    return True
        # In this function: if the first word of the user's input is a question keyword and there is a different question keyword in the input, return True.
        def Question_Verb_Noun_Check():
            # If you say "hey jarvis" before something like a question or command, it will still understand.
            try:
                for word in what_person_said_l_wt:
                    if word in Static_Greetings or word in Name:
                        print (word)
                        Minus_Begin_Greet1 = what_person_said_l_wt.remove(word)
                        print (Minus_Begin_Greet1)
                        return True
            except IndexError:
                pass
        Question_Keyword_Check()
        Question_Sentence_Match()
        Question_Verb_Noun_Check()
        if Question_Keyword_Check()==True and Question_Sentence_Match()==True and Question_Verb_Noun_Check()==True:
            return True
        else:
            return False

    # All the functions in Analyze
    Analyze_For_Greeting()
    Analyze_For_Question()

Conversation=True
Conversation_Started=False
while Conversation==True:
    try:
        if Conversation_Started==False:
            #Greeting()
            Conversation_Started=True
        with m as source: r.adjust_for_ambient_noise(source)
        print(format(r.energy_threshold))
        print("Say something!") # just here for now and testing purposes so we know what's happening
        with m as source: audio = r.listen(source)
        print("Got it! Now to recognize it...")
        try:
            # recognize speech using Google Speech Recognition
            value = r.recognize_google(audio)
            # we need some special handling here to correctly print unicode characters to standard output
            if str is bytes:  # this version of Python uses bytes for strings (Python 2)
                print(u"You said {}".format(value).encode("utf-8"))
            else:  # this version of Python uses unicode for strings (Python 3+)
                print("You said {}".format(value))
            what_person_said_l = value.lower()
            what_person_said_l_wt = word_tokenize(what_person_said_l)
            Analyze()
        except sr.UnknownValueError:
            print ("what was that?")
        except sr.RequestError as e:
            print("Uh oh! Sorry sir, couldn't request results from Google Speech Recognition service; {0}".format(e))
    except KeyboardInterrupt:
        pass
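A hedged troubleshooting sketch, not from the original post: on a Raspberry Pi, Errno -9999 from PyAudio often points at the input device being busy or the wrong ALSA device being opened. Listing the devices and pinning the Snowball's index explicitly is a common first step (the index 2 below is purely illustrative):

import speech_recognition as sr

# Print every input device PyAudio can see, with its index.
for index, name in enumerate(sr.Microphone.list_microphone_names()):
    print(index, name)

# Then open the Snowball explicitly instead of the default device:
# m = sr.Microphone(device_index=2)  # index 2 is hypothetical; use yours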
I am working on setting up some usable data for semantic analysis. I have a corpus of raw text data that I am iterating over. I open the data, read it as a string, split into a list, and prepare the data to be built into a dataset in a later function. However, when I build the dataset, my most common words end up being punctuation. I need to remove all punctuation from the list before I process the data further.
import os
import collections
import string
import sys
import tensorflow as tf
import numpy as np
from six.moves import xrange

totalvocab = []

#Loop for: loop through all files in 'Data' directory
for subdir, dirs, files in os.walk('Data'):
    for file in files:
        filepath = subdir + os.sep + file
        print(filepath)

        #Function for: open file, convert input to string, split into list
        def read_data(filepath):
            with open(filepath, 'r') as f:
                data = tf.compat.as_str(f.read()).split()
            return data

        #Run function on data, add file data to full data set.
        filevocab = read_data(filepath)
        totalvocab.extend(filevocab)
        filevocab_size = len(filevocab)
        print('File vocabulary size: %s' % filevocab_size)

totalvocab_size = len(totalvocab)
print('Total vocabulary size: %s' % totalvocab_size)
If I do the following:
def read_data(filepath):
    with open(filepath, 'r') as f:
        data = tf.compat.as_str(f.read())
        data.translate(string.punctuation)
        data.split()
    return data
The words are split into individual letters.
Any other methods I have attempted have errored out.
There are a couple of errors in the code:
str.split() and str.translate() do not modify the string in place; they return new strings, which the code above discards.
str.translate() expects a translation table (a mapping), not the punctuation string itself.
To fix:
def read_data(filepath):
    with open(filepath, 'r') as f:
        data = tf.compat.as_str(f.read())
    data = data.translate(str.maketrans('', '', string.punctuation))
    return data.split()
Removing punctuation may or may not do what you want; for example, hyphenated words will become concatenated. You could alternatively replace each punctuation character with a space, as sketched below.
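A minimal sketch of that replace-with-space alternative (the helper name is made up here):

import string

def read_data_spaces(filepath):
    with open(filepath, 'r') as f:
        data = f.read()
    # Map every punctuation character to a space instead of deleting it,
    # so "foo-bar" splits into two tokens rather than becoming "foobar".
    table = str.maketrans(string.punctuation, ' ' * len(string.punctuation))
    return data.translate(table).split()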