Azure TTS with the Go SDK: audio data stream saved to WAV is 46 bytes short - azure

I used this sample: https://github.com/Microsoft/cognitive-services-speech-sdk-go/blob/1af83b0cf8fb/samples/synthesizer/to_audio_data_stream.go
I wrote the bytes to a file and found that the file can't be opened.
stream.SaveToWavFileAsync("fun.wav")
//use this func can save a file which can open
output
Synthesis started.
Synthesizing, audio chunk size 65582.
Synthesizing, audio chunk size 294510.
Synthesizing, audio chunk size 56446.
Synthesizing, audio chunk size 65582.
Synthesizing, audio chunk size 1710.
Synthesized, audio length 483646.
Read [483600] bytes from audio data stream.
The stream is 46 bytes shorter than the synthesized audio length.

The PCM data needs a WAV header prepended before it can be played as a .wav file.
// PcmToWav prepends a canonical 44-byte RIFF/WAVE header to raw 16-bit
// little-endian PCM samples so the result can be opened by standard players.
//
// dst is the raw PCM payload, numChannel the channel count (1 = mono,
// 2 = stereo) and sampleRate the sample rate in Hz. A fresh slice holding
// header + payload is returned; dst is not modified.
func PcmToWav(dst []byte, numChannel int, sampleRate int) (resDst []byte) {
	const bitsPerSample = 16
	byteRate := sampleRate * numChannel * bitsPerSample / 8
	blockAlign := numChannel * bitsPerSample / 8
	totalAudioLen := len(dst)
	// RIFF chunk size = total file length minus the 8-byte "RIFF"+size prefix.
	totalDataLen := totalAudioLen + 36

	// Reserve capacity for the payload too, so the final append never copies.
	header := make([]byte, 44, 44+totalAudioLen)

	// "RIFF" chunk descriptor.
	copy(header[0:4], "RIFF")
	putLE32(header[4:8], totalDataLen)
	copy(header[8:12], "WAVE")

	// "fmt " sub-chunk: 16-byte uncompressed-PCM format description.
	copy(header[12:16], "fmt ")
	putLE32(header[16:20], 16) // sub-chunk size
	header[20] = 1             // audio format 1 = PCM
	header[22] = byte(numChannel)
	putLE32(header[24:28], sampleRate)
	putLE32(header[28:32], byteRate)
	// Block align = bytes per sample frame across all channels.
	// (The original hard-coded the stereo value 4 here, which corrupted
	// mono files.)
	header[32] = byte(blockAlign)
	header[34] = bitsPerSample

	// "data" sub-chunk: payload length followed by the samples.
	copy(header[36:40], "data")
	putLE32(header[40:44], totalAudioLen)

	resDst = append(header, dst...)
	return
}

// putLE32 writes v into b[0:4] as a 32-bit little-endian integer.
func putLE32(b []byte, v int) {
	b[0] = byte(v)
	b[1] = byte(v >> 8)
	b[2] = byte(v >> 16)
	b[3] = byte(v >> 24)
}

Related

Low of specific period (eg: 20 jan 2015 to 15 nov 2021)

This script draws ATH on chart with date and duration
I want to store low of period as well (like high shown in label) to show (high-low) range in the same label. The period does not end today, but as per script.
// Pine Script indicator: plots the all-time high as it stood `num_years`
// ago and labels the bar where price first breaks above that level.
// NOTE(review): indentation was lost in this paste; the `if`/`for` bodies
// below must be re-indented before the script will compile.
indicator("Previous Year(s) ATH", overlay = true)
num_years = input.int(1, title = "Number of years back", minval = 1)
// Running all-time high and the bar time at which it last changed.
var float ATH = high
var int ATH_time = time
// History of every ATH value with its bar time and its change time,
// newest first (array.unshift inserts at index 0).
var float[] ATH_vals = array.new_float()
var int[] ATH_time_vals = array.new_int()
var int[] ATH_time_change_vals = array.new_int()
ATH := math.max(ATH, high)
// Record a new history entry whenever the ATH advances.
if ta.change(ATH) != 0
ATH_time := time
array.unshift(ATH_vals, ATH)
array.unshift(ATH_time_vals, time)
array.unshift(ATH_time_change_vals, ATH_time)
var float ATH1Y = na
if barstate.isconfirmed
// 31536000000 ms = 365 days; look back num_years from the current bar.
search_time = time - 31536000000 * num_years
// Entries are newest-first, so the first one older than search_time is
// the ATH that was in force num_years ago.
for i = 0 to array.size(ATH_time_vals) - 1
if array.get(ATH_time_vals, i) < search_time
ATH1Y := array.get(ATH_vals, i)
ATH1Y_time = array.get(ATH_time_change_vals, i)
y = year(ATH1Y_time)
m = month(ATH1Y_time)
d = dayofmonth(ATH1Y_time)
// 86400000 ms per day.
days_ago = (time - ATH1Y_time) / 86400000
date_text = str.tostring(y) + "/" + str.tostring(m) + "/" + str.tostring(d) + " : " + str.tostring(ATH1Y) + "\nDays Ago : " + str.tostring(math.round(days_ago, 2))
// Label the breakout: current ATH above the historical level while the
// value 3 bars back was still at or below it.
if ATH > ATH1Y and ATH[3] <= ATH1Y[3]
label.new(x = bar_index[3], y = ATH[3], text = date_text, style = label.style_label_lower_right)
break
// Hide the plotted line once price has broken above the historical ATH.
ATH_val = ATH > ATH1Y ? na : ATH1Y
buy_signal = ATH > ATH1Y and ATH[3] <= ATH1Y[3]
plotshape(buy_signal, color = color.green, location = location.belowbar, size = size.small, style = shape.triangleup)
plot(ATH_val, title = "ATH", style = plot.style_linebr)

How do I print literal bytes in python?

I have this little script to encode and decode a header
HEADER_LENGTH = 4  # header size in bytes: 1 flag/high byte + 3 length bytes


class Header:
    """Encode/decode a 4-byte header: bit 31 carries the request flag and
    the remaining 31 bits carry a payload length, big-endian."""

    @staticmethod
    def encode(length: int, isRequest: bool) -> list:
        """Pack length and the request flag into a list of 4 byte values.

        Raises ValueError if length does not fit in 31 bits.
        """
        if length > 0x7FFFFFFF:
            raise ValueError("length too big to handle")
        # First byte: request flag in bit 7, top 7 bits of the length below it.
        return [
            (isRequest << 7) + ((length >> 24) & 0x7F),
            (length >> 16) & 0xFF,
            (length >> 8) & 0xFF,
            length & 0xFF,
        ]

    @staticmethod
    def decode(head) -> tuple:
        """Inverse of encode: return (isRequest, byteList) where byteList is
        the 4 header bytes with the flag bit stripped from the first one."""
        isRequest = bool(head[0] & 0x80)
        byteList = [head[0] & 0x7F]
        for i in range(1, HEADER_LENGTH):
            byteList.append(head[i] & 0xFF)
        return (isRequest, byteList)
# Round-trip smoke test: read a number and a flag, encode, then decode.
n = int(input("number: "))
# bool(input(...)) is True for ANY non-empty string (even "False"), so the
# answer must be parsed explicitly.
r = input("is request: ").strip().lower() in ("y", "yes", "true", "1")
# The original passed the undefined name `isRequest` here (NameError); the
# variable read from input is `r`.
encoded = Header.encode(n, r)
print(f"encoded: {encoded}; bytes: {bytes(encoded)}")
# Feed the bytes back through decode to simulate receiving the header.
decoded = Header.decode(bytes(encoded))
print(f"is request: {decoded[0]}; decoded: {decoded[1]}")
but when I insert as input the number 123456789 and request to False I get as response the following output:
encoded: [7, 91, 205, 21]; bytes: b'\x07[\xcd\x15'
is request: False; decoded: [7, 91, 205, 21]
which is all correct, except for that b'\x07[\xcd\x15' that should be b'\x07\x5b\xcd\x15' and I don't know why it converts to that [, it doesn't do this only with the value 0x5b but with (apparently) all values that represent some printable character, I tried everything to make it print only the literal byte but nothing, am I doing something wrong? How can I make sure that it will print only the literal byte?

Problem with Serial.read() and Struct.pack / serial communication between Arduino and Python (3.x)

I have a problem while trying to send some values from Python 3.x to Arduino using Serial Communication.
It works fine when the value is smaller than 255, but when it's greater than 255, an error happens.
I'm using Struct.pack in Python and Serial.read() in Arduino
Python code:
# Hand-tracking demo: detect a hand with a Haar cascade and stream the
# detection centre to an Arduino over serial.
import cv2
import numpy as np
from serial import Serial
import struct
# NOTE(review): port name and baud rate are hard-coded; baud must match the
# Arduino's Serial.begin value.
arduinoData = Serial('com6', 9600)
cap = cv2.VideoCapture(0)
hand_cascade = cv2.CascadeClassifier('hand.xml')
# NOTE(review): indentation was lost in this paste — everything down to the
# `break` belongs inside the while loop, and the rectangle/centre/write
# lines inside the for loop.
while(True):
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# detectMultiScale(gray, scaleFactor=1.6, minNeighbors=3)
handdetect = hand_cascade.detectMultiScale(gray, 1.6, 3)
for (x, y, w, h) in handdetect:
cv2.rectangle(frame, (x, y), (x + w, y + h), (127, 127, 0), 2)
xcenter = int(x + w/2)
ycenter = int(y + h/2)
#This is where i send values to serial port
# '>II' = two big-endian unsigned 32-bit ints -> an 8-byte packet.
arduinoData.write(struct.pack('>II',xcenter,ycenter))
cv2.imshow('Webcam', frame)
# Exit when ESC (key code 27) is pressed.
k = cv2.waitKey(25) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
Arduino code:
// Receives one 8-byte packet (two 4-byte big-endian unsigned ints: x, y)
// from the Python script and drives an LED based on the x coordinate.
int SerialData[8];
const int led = 7;
int xcenter;
int ycenter;
void setup(){
Serial.begin(9600);
pinMode(led, OUTPUT);
}
void loop(){
// Wait until a full 8-byte packet has arrived.
if (Serial.available() >= 8){
for (int i = 0; i < 8; i++){
SerialData[i] = Serial.read();
}
// BUG (this is the question's defect): struct.pack sends base-256 bytes,
// not decimal digits, so weighting them by 1000/100/10/1 only happens to
// give a usable value while the upper bytes are zero (value <= 255).
// The fix, as shown in the answer below, is byte shifts, e.g.
//   xcenter = (SerialData[2] << 8) | SerialData[3];
xcenter = (SerialData[0]*1000) + (SerialData[1]*100) + (SerialData[2]*10) + SerialData[3];
ycenter = (SerialData[4]*1000) + (SerialData[5]*100) + (SerialData[6]*10) + SerialData[7];
if (xcenter <= 200){
digitalWrite(led, LOW);
}
else if(xcenter > 200){
digitalWrite(led, HIGH);
}
//Serial.flush();
}
}
Like I said at the beginning of this topic, when xcenter > 200 && xcenter <= 255, the LED is turned ON (it's mean the code working fine).
But when xcenter > 255, the LED is OFF (something wrong here).
I think I already read all 8 bytes in the Arduino code and used unsigned int >II in struct.pack, so what and where is my mistake?
I appreciate all help! Thank you!
EDIT and FIXED.
"It doesn't pack int into digits (0-9), it packs them into bytes (0-255)"_
So here is the mistake:
xcenter = (SerialData[0]*1000) + (SerialData[1]*100) + (SerialData[2]*10) + SerialData[3];
ycenter = (SerialData[4]*1000) + (SerialData[5]*100) + (SerialData[6]*10) + SerialData[7];
Changed to this (for the large values):
long result = long((unsigned long(unsigned char(SerialData[0])) << 24) | (unsigned long(unsigned char(SerialData[1])) << 16)
| (unsigned long(unsigned char(SerialData[2])) << 8) | unsigned char(SerialData[3]));
Or changed to this (for the small values):
xcenter = (SerialData[2]*256) + SerialData[3];
ycenter = (SerialData[6]*256) + SerialData[7];
Or this (for the small values too):
int result = int((unsigned int(unsigned char(SerialData[2])) << 8) | unsigned char(SerialData[3]));
And the code gonna work perfectly!
This code will not work correctly for large values...
int xcenter = (SerialData[0]*256*256*256) + (SerialData[1]*256*256) + (SerialData[2]*256) + SerialData[3];
The problem is that the input is 4 bytes wide, which is a long integer, while the integer size on the arduino is only 2 bytes wide, which means that 256 * 256 = 0x10000 & 0xFFFF = 0 !
To make sure you do not run into problems for values wider than 2 bytes, one must use shift operations.
This gives:
long result = long((unsigned long(unsigned char(SerialData[0])) << 24) | (unsigned long(unsigned char(SerialData[1])) << 16)
| (unsigned long(unsigned char(SerialData[2])) << 8) | unsigned char(SerialData[3]));
Alternatively, if you do not expect large values, only use two bytes from the input. Make sure to do the calculus using unsigned values, or you may run into problems !!!
int result = int((unsigned int(unsigned char(SerialData[2])) << 8) | unsigned char(SerialData[3]));
This is very verbose, but it's safe for all input types. For example, the solution presented by the OP would not work if SerialData[] was a char array, which it should be, to avoid wasting memory.

IOS9 - cannot invoke 'count' with an argument list of type '(String)'

I just migrate to Xcode7/IOS9 and some part of my code are not compatible.
i get the following error from Xcode :
" cannot invoke 'count' with an argument list of type '(String)' "
This is my code :
let index = rgba.startIndex.advancedBy(1)
let hex = rgba.substringFromIndex(index)
let scanner = NSScanner(string: hex)
var hexValue: CUnsignedLongLong = 0
if scanner.scanHexLongLong(&hexValue)
{
if count(hex) == 6
{
red = CGFloat((hexValue & 0xFF0000) >> 16) / 255.0
green = CGFloat((hexValue & 0x00FF00) >> 8) / 255.0
blue = CGFloat(hexValue & 0x0000FF) / 255.0
}
else if count(hex) == 8
{
red = CGFloat((hexValue & 0xFF000000) >> 24) / 255.0
green = CGFloat((hexValue & 0x00FF0000) >> 16) / 255.0
blue = CGFloat((hexValue & 0x0000FF00) >> 8) / 255.0
alpha = CGFloat(hexValue & 0x000000FF) / 255.0
}
In swift2 they did some changes on count
this is the code for swift 1.2:
let test1 = "ajklsdlka"//random string
let length = count(test1)//character counting
since swift2 the code would have to be
let test1 = "ajklsdlka"//random string
let length = test1.characters.count//character counting
In order to be able to find the length of an array.
This behaviour mainly happens in swift 2.0 because String no longer conforms to SequenceType protocol while String.CharacterView does
Keep in mind that it also changed the way you are iterating in an array:
var password = "Meet me in St. Louis"
for character in password.characters {
if character == "e" {
print("found an e!")
} else {
}
}
So be really careful although most likely Xcode is going to give you an error for operations like these one.
So this is how your code should look in order to fix that error you are having (cannot invoke 'count' with an argument list of type '(String)'):
let index = rgba.startIndex.advancedBy(1)
let hex = rgba.substringFromIndex(index)
let scanner = NSScanner(string: hex)
var hexValue: CUnsignedLongLong = 0
if scanner.scanHexLongLong(&hexValue)
{
if hex.characters.count == 6 //notice the change here
{
red = CGFloat((hexValue & 0xFF0000) >> 16) / 255.0
green = CGFloat((hexValue & 0x00FF00) >> 8) / 255.0
blue = CGFloat(hexValue & 0x0000FF) / 255.0
}
else if hex.characters.count == 8 //and here
{
red = CGFloat((hexValue & 0xFF000000) >> 24) / 255.0
green = CGFloat((hexValue & 0x00FF0000) >> 16) / 255.0
blue = CGFloat((hexValue & 0x0000FF00) >> 8) / 255.0
alpha = CGFloat(hexValue & 0x000000FF) / 255.0
}

What is wrong with my function in Octave?

I just tried to create my first function in octave, it looks as follows:
% Piecewise-linear function on [0, 10].
% NOTE(review): this is the question's broken code. No output argument is
% declared (it should be "function retval = hui(x)"), so the computed value
% is never returned to the caller, and the scalar && comparisons do not work
% element-wise when x is a vector — both issues are addressed in the answers.
function hui(x)
if(0 <= x && x <2)
retval = (1.5 * x + 2)
elseif(2<= x && x <4)
retval = (-x + 5)
elseif(4<= x && x < 6)
retval = (0.5 * x)
elseif(6<= x && x < 8)
retval = (x - 3)
elseif(8<= x && x <=10)
retval = (2 * x - 11)
endif
endfunction
but if I try to plot it using: x=0:0.1:10; plot(x, hui(x));
It shows a plot which seems a little bit strange.
What did I do wrong?
Thanks in advance
John
You'll have to pardon my rustiness with the package, but you need to change the code around a bit. Notably, the notation 0<=x is incorrect, and must be x>=0. Since hui is operating on a vector, I believe you need to take that into account when constructing your return value.
I'm sure there are more effective ways of vectorizing this, but basically, While stepping over the input vector, I added the latest value onto the return vector, and at the end lopping off the initial 0 that I had put in. I put in a sentinel value in case the input didn't fulfill one of the criteria (it was always taking the "else" path in your code, so putting something there could have alerted you to something being wrong).
% Vector-aware version of hui: loops over the elements of row vector x and
% appends each piecewise value to the result vector.
function [retval] = hui(x)
% Seed with a placeholder 0 so there is something to concatenate onto;
% it is stripped off at the end.
retval = 0
for i=1:size(x,2)
if(x(i)>=0 && x(i) <2)
retval = [retval (1.5 * x(i) + 2)];
elseif( x(i)>=2 && x(i) <4)
retval = [retval (-1*x(i) + 5)];
elseif(x(i)>=4 && x(i) < 6)
retval = [retval (0.5 * x(i))];
elseif(x(i)>=6 && x(i) < 8)
retval = [retval (x(i) - 3)];
elseif(x(i)>=8 && x(i) <=10)
retval = [retval (2 * x(i) - 11)];
else
% Sentinel for input outside [0, 10]: replaces the whole accumulated
% result with the scalar -999 to make the failure visible.
retval = -999;
endif
endfor
% Drop the leading 0 placeholder.
retval = retval(2:size(retval,2));
endfunction
x is a vector, so you either need to loop through it or vectorise your code to removing the need.
As you're using Octave, it's worth vectorising everything you possibly can. The easiest way I can think of doing this is:
% Fully vectorised evaluation using logical indexing; no loop needed.
x = 0:0.1:10;
y = x;  % pre-allocate; elements not matched below would keep x's value
y(x >= 0 & x < 2) = x(x >= 0 & x < 2) * 1.5 + 2;
y(x >= 2 & x < 4) = x(x >= 2 & x < 4) * -1 + 5;
y(x >= 4 & x < 6) = x(x >= 4 & x < 6) * 0.5;
y(x >= 6 & x < 8) = x(x >= 6 & x < 8) - 3;
% Use <= here: the piecewise definition covers 8 <= x <= 10, so the
% original "x < 10" left the endpoint x == 10 unmapped.
y(x >= 8 & x <= 10) = x(x >= 8 & x <= 10) * 2 - 11;
The y(x >= a & x < b) syntax is logical indexing. Alone, x >= a & x < b gives you a vector of logical values, but combined with another vector you get the values which meet the condition. Octave will also let you do assignments like this.

Resources