Google Cloud Speech service is down? - speech-to-text

The problem is I am using v1 and the latest #0.10.2. It worked perfectly for 6 months (even v1beta) until yesterday when it just randomly (and silently!) stops working. I mean one call of streamingRecognize may go well and recognition starts, but then second or third call of streamingRecognize just don't receive any input (and I'm getting you're streaming to slow error). Using the same code!
And even in recognition starts, the quality is very low. Often leading to completely wrong results.
The problem started occuring after releasing V1 from beta, I suppose, but I'm not sure.
Environment details
OS: amazon linux 4.4.44-39.55.amzn1.x86_64
Node.js version: v7.2.1
npm version: 3.10.10
using google-cloud/speech#0.10.2
The code:
var Speech = require('#google-cloud/speech')({
credentials: require(_base + '/google_cloud_credential.json')
});
self.recognizeStream = Speech.streamingRecognize({
config: {
encoding: 'MULAW',
sampleRateHertz: 8000,
languageCode: "ru-RU",
},
singleUtterance: false,
interimResults: true
});
self.iStream.pipe(self.recognizeStream)
.on('error', function(err) {
logger.error('google-speech error:', err);
self.restartRecognizing(); //GOOGLE BUG: randomly crashes https://github.com/GoogleCloudPlatform/google-cloud-node/issues/1894
})
.on('end', function(err) {
logger.trace('google-speech end:', err);
})
.on('close', function(err) {
logger.error('google-speech close: ', err);
self.restartRecognizing(); //GOOGLE BUG: randomly crashes https://github.com/GoogleCloudPlatform/google-cloud-node/issues/1894
})
.on('data', function(data) {
//logger.warn(data);
require('tracer').setLevel('info');
if(data.results && data.results[0] && data.results[0].alternatives)
logger.warn("isFinal: %s, conf: %s, stab: %s, trans: %s, delay=%s", data.results[0].isFinal, data.results[0].alternatives[0].confidence, data.results[0].stability, data.results[0].alternatives[0].transcript, Number(self.thisDate-self.prevDate));
if(!self.isListeningEmitted && data && data.results && data.results[0] && !data.results[0].isFinal)
{
self.isListeningEmitted = true;
self.emit('started_hearing_speech');
}
if(data && data.results && data.results[0] && data.results[0].alternatives && !data.results[0].isFinal)
{
self.thisDate = Date.now();
if(!self.prevDate)
self.prevDate = self.thisDate;
logger.debug("isFinal: %s, conf: %s, stab: %s, trans: %s, delay=%s", data.results[0].isFinal, data.results[0].alternatives[0].confidence, data.results[0].stability, data.results[0].alternatives[0].transcript, Number(self.thisDate-self.prevDate));
self.prevDate = self.thisDate;
if(self.timer)
clearTimeout(self.timer);
if(!self.isFired)
self.timer = setTimeout(self.onRecognizedText.bind(self), 800, data.results[0].alternatives[0].transcript);
}
});
And also there are a lot of errors in my Google Cloud Console:
30 days
Also posted here: https://github.com/GoogleCloudPlatform/google-cloud-node/issues/2541

Related

OKD request ETIMEDOUT - nodejs # Production

I'm stuck past few days. I've googled but nothings working to fetch me the results. The following code works onto my local m/c all results are fetched in 4ms.
But then in production level, the following code throws gateway time out error.
The application is deployed onto OKD (node module- okd-api) cluster where my application is one of its pods.
Here, I'm fetching all the pods
let fetchListarr=[];
aws_app.get('/List', (req,res) =>
{
try
{
Promise.all(promisesArray).then(values => {
// do stuff with values here
res.send(values)
})
.catch((err)=>{console.log(err)});
}
catch (e){console.log( e);}
});
var WMArr=[];
var prom1 = new Promise(function(resolve, reject) {
let config = {
cluster:'my/url/to/openshift',
user: 'user',
password: 'password',
strictSSL: false
};
login(config)
.then(okd=>{
okd.namespace('namespace').pod.watch_all(pods=>{
pods.map((v)=> {
if(!WMArr.includes(v.object.metadata.labels.app))
{ let obj = {
TargetServiceName: v.object.metadata.labels.app,
Instance:
WMArr.lastIndexOf(v.object.metadata.labels.app) ===
WMArr.indexOf(v.object.metadata.labels.app)
? 1
: WMArr.lastIndexOf(v.object.metadata.labels.app) + 1,
Status: v.object.status.phase
};
fetchListarr.push(obj);
}
WMArr.push(v.object.metadata.labels.app);
});
})
setTimeout(function() {
resolve( fetchListarr);
}, 5000);
})
.catch(err=>{console.log(err)})
});
var promisesArray= [prom1];
Increasing the timeout won't do.
Can anyone please lemme know is the issue with the code?
Or as to where do I need to configure timeout setting (I'm new to using OKD(openshift) to deploy the app)

Google speech to text live stream, single_utterance is not working

I'm trying live stream speech to text using Google. I have installed node into my server.
I have successfully implemented it but I want google to recognize when the user stops to speaking. Google explained how to do that using single_utterance=true but it is not taking effect. Can you please tell what issue is there in the below code. Thank you!
var request = {
config: {
encoding: encoding,
sampleRateHertz: sampleRateHertz,
languageCode: languageCode,
//profanityFilter: false,
enableWordTimeOffsets: true,
//single_utterance: true
// speechContexts: [{
// phrases: ["hoful","shwazil"]
// }] // add your own speech context for better recognition
},
interimResults: true, // If you want interim results, set this to true
singleUtterance: true
};
function startRecognitionStream(client, data) {
console.log(request);
recognizeStream = speechClient.streamingRecognize(request)
.on('error', console.error)
.on('data', (data) => {
process.stdout.write(
(data.results[0] && data.results[0].alternatives[0])
? `Transcription: ${data.results[0].alternatives[0].transcript}\n`
: `\n\nReached transcription time limit, press Ctrl+C\n`);
client.emit('speechData', data);
// if end of utterance, let's restart stream
// this is a small hack. After 65 seconds of silence, the stream will still throw an error for speech length limit
if (data.results[0] && data.results[0].isFinal) {
stopRecognitionStream();
startRecognitionStream(client);
// console.log('restarted stream serverside');
}
})
.on('end_of_single_utterance', (data) => {
process.stdout.write('data ended');
console.log('data ended');
})
;
}
Thank you in advance!

How to send huge data (1 million size array of objects) from one service to another service in Nodejs (microservice)?

I am using microservice architecture. Let say there are two services i.e A and B. I am trying to request from service A to service B which fetch some data from database and give that data as response to service A. But when there is huge amount of data then service B is unable to send in response but it prints on console. I tried many things but none were worked. Please help me on this.
SERVICE A
function makePostRequest(url, data, cb) {
let postContents = {
headers: {
'content-type': 'application/json'
},
url: url,
form: data,
timeout: 1200000
}
console.log('POST ==> ', postContents)
request.post(postContents, function(err, response, body) {
console.log(err)
if (err) {
return cb({
code: httpStatus.serverError,
message: err
})
}
else if (response.statusCode != 200) {
return cb({
code: response.statusCode,
message: body
})
}
else {
console.log(body, 'bodybodybodyPOST')
try {
var data = JSON.parse(body);
if (data && typeof data == 'object')
return cb(null, data)
else
return cb({
code: httpStatus.serverError,
message: 'invalid response'
})
}
catch(Ex) {
console.log(Ex)
return cb({
code: httpStatus.serverError,
message: Ex
})
}
}
})
}
exports.myapi = (req, res) => {
makePostRequest(SERVICE-B-URL, POST-DATA, (e, d) => {
if (e) res.status(500).json({msg: 'please try later'})
else res.status(200).json({msg: 'data fetched', result: d})
})
}
SERVICE B
console.log(result)
console.log('sending response ...', resTotal)
res.status(200).json({
total: resTotal,
result: result,
condition: req.query
});
[ RowDataPacket {
f_stamp: 2019-05-25T05:17:48.000Z,
f_player_id: 33370333,
amount: -0.5,
f_param_notes: null,
f_money_type: 'R',
f_type: 84 },
RowDataPacket {
f_stamp: 2019-05-25T05:14:44.000Z,
f_player_id: 31946955,
amount: 30.9,
f_param_notes: null,
f_money_type: 'R',
f_type: 70 },
RowDataPacket {
f_stamp: 2019-05-25T05:14:41.000Z,
f_player_id: 31035703,
amount: 258,
f_param_notes: null,
f_money_type: 'R',
f_type: 70 },
... 163783 more items ]
sending response .... 163883
A solution is to combine nodejs streams and websockets, here an example:
Streaming data in node.js with ws and websocket-stream
Usually when you have a big amount of data you want to fetch a little portion at time and send it. In the other end you can collect those little pieces and reconstruct the bigger data.
With a single HTTP request, you usually can a 413 error (Payload too large).
In order to avoid this error you should setup the server to accept bigger payloads.
ExpressJS example (more info here: Error: request entity too large)
app.use(express.json({limit: '50mb'}));
Nginx example
server {
client_max_body_size 100M;
...
}

Stream audio to Dialogflow with chunks from browser

We're doing some experimenting with Dialogflow and we've run into a complete stop for the time being. We're trying to set up a browser client that streams audio in chunks to Dialogflow via the node v2beta1 version of the dialogflow npm package. We followed the example to get it running and it works fine when we use the node server to pick up the sound via extra software (sox), but we want to stream from the browser. So we've set up a small code snippet that picks up the MediaStream from the mic.
When the data event is triggerend we get a chunk (an arraybuffer) that we, in chunks, pass to our node server.
On the server we've followed this example: https://cloud.google.com/dialogflow-enterprise/docs/detect-intent-stream#detect-intent-text-nodejs. The only thing we do different is instead of using pump to chain streams, we just write our chunks to the sessionsClient.
streamingDetectIntent().write({ inputAudio: [chunk] })
During experimentation we received several errors that we solved. But at this point we pass our chunks and receive empty responses, during and at the end.
Is this a valid way of passing audio to dialogflow, or do we really need to set up a stream? We do not want to use the node server as an entry, it needs to be the browser. We will have full control.
Client
import getUserMedia from 'get-user-media-promise';
import MicrophoneStream from 'microphone-stream';
export const startVoiceStream = () => {
const microphoneStream = new MicrophoneStream();
getUserMedia({ video: false, audio: true })
.then(function(micStream) {
microphoneStream.setStream(micStream);
socket.emit('startMicStream');
state.streamingMic = true;
setTimeout(() => {
// Just closing the stream on a timer for now
socket.emit('endMicStream');
}, 5000);
})
.catch(function(error) {
console.log(error);
});
microphoneStream.on('data', function(chunk) {
if (state.streamingMic) {
socket.emit('micStreamData', chunk);
}
});
};
Server code is much longer so I think I'll spare the details, but these are the main parts.
const initialStreamRequest = {
session: sessions.sessionPath,
queryParams: {
session: sessions.sessionPath, //TODO: try to delete
},
queryInput: {
audioConfig: {
audioEncoding: 'AUDIO_ENCODING_LINEAR_16',
sampleRateHertz: '16000',
languageCode: 'en-US',
},
singleUtterance: false
},
};
const startRecognitionStream = socketClient => {
streamIntent = sessions.sessionClient
.streamingDetectIntent()
.on('error', error => {
console.error({ error });
socketClient.emit('streamError', error);
})
.on('data', data => {
socketClient.emit('debug', { message: 'STREAM "ON DATA"', data });
if (data.recognitionResult) {
socketClient.emit(
'playerTranscript',
data.recognitionResult.transcript,
);
console.log(
`#Intermediate transcript : ${data.recognitionResult.transcript}`,
);
} else {
socketClient.emit('streamAudioResponse', data);
}
});
streamIntent.write(initialStreamRequest);
};
socket.on('micStreamData', data => {
if (streamIntent !== null) {
stop = true;
streamIntent.write({ inputAudio: data });
}
});

Gphoto2 node ### An error occurred in the io-library ('Could not claim the USB device')

I am Working on Connectivity between RaspberryPi 3 and DSLR Camera (Canon 1300 D). When I run command for capture image , first time is working and when I run again I am having following issue:
An error occurred in the io-library ('Could not claim the USB device'): Could not claim interface 0 (Device or resource busy). Make sure no other program (gvfs-gphoto2-volume-monitor) or kernel module (such as sdc2xx, stv680, spca50x) is using the device and you have read/write access to the device.
Please give me solution for "How to communicate Raspberry Pi 3 with DSLR using NodeJs ?"
Code Example:
app.post('/onDemand', function(req, res) {
GPhoto.list(function (list) {
console.log('List:', list);
if (list.length === 0) return;
var camera = list[0];
camera.takePicture({download: true,keep: true}, function (er, data) {
fs.writeFileSync(__dirname + '/input/picture1.jpg', data);
var filePath = "./input/picture1.jpg";
var params = {
Bucket: 'marzs',
Body : fs.createReadStream(filePath),
Key : "marzs/"+Date.now()+"_"+path.basename(filePath)
};
s3.putObject(params, function (err, data) {
if (err) {
console.log('ERROR MSG: ', err);
res.status(500).send(err);
} else {
console.log('Successfully uploaded data');
res.status(200).send({ imageURL: data.Location });
}
res.status(200).send({ imageURL: data.Location });
});
});
});
});
Thanks in Advance.
Yogesh Waghmare
We need to install libusb on server and run following command.
gphoto2 --get-config=capturetarget
gphoto2 --set-config=capturetarget=1
gphoto2 --set-config shutterspeed=bulb
gphoto2 --wait-event=2s --set-config eosremoterelease=Immediate --wait-event=5s --set-config eosremoterelease=Off --wait-event-and-download=5s
After that we need to exit process with "process.exit();" after completion of process. and run via forever command
now code running properly.
Thanks & Regards,
Yogesh Waghmare

Resources