Amazon Lex SDK only running one command - Node.js

I am attempting to use the AWS SDK version 3 and the Lex Runtime client V2. (Documentation)
What I don't understand is why I can't reuse my client instance to send multiple commands. See the code below: I am attempting a basic flow of putting a session, recognizing text, and then deleting the session. When I comment the code out and run each command individually, it works. Why does my application stop after running only the first client.send command? All the documentation ends up doing is creating a new client for each command, because the examples show multiple services being used rather than the same service (example).
const {
  LexRuntimeV2Client,
  PutSessionCommand,
  RecognizeTextCommand,
  DeleteSessionCommand,
} = require("@aws-sdk/client-lex-runtime-v2");
const AWS = require("aws-sdk");
require("dotenv").config();
const sessionCustomId = "12345678";
const getLexClient = () => {
  return new LexRuntimeV2Client({
    credentials: new AWS.Credentials(
      process.env.AWS_ACCESS_KEY,
      process.env.AWS_SECRET_KEY
    ),
    region: process.env.AWS_REGION,
  });
};
const main = async () => {
  console.log(
    `******* ENV VARS *********\n\n AWS_REGION : ${process.env.AWS_REGION}\n DEWEY_ALIAS_ID : ${process.env.DEWEY_ALIAS_ID}\n DEWEY_BOT_ID : ${process.env.DEWEY_BOT_ID}\n LOCALE_ID : ${process.env.LOCALE_ID}\n\n`
  );
  /******************************* PUT SESSION ***********************************/
  // a client can be shared by different commands.
  const client = getLexClient();
  const putSessionCommand = new PutSessionCommand({
    accept: "text/plain; charset=utf-8",
    botAliasId: process.env.DEWEY_ALIAS_ID,
    botId: process.env.DEWEY_BOT_ID,
    localeId: process.env.LOCALE_ID,
    sessionState: {},
    sessionId: sessionCustomId,
  });
  try {
    const response = await client.send(putSessionCommand);
    console.log("finished putting session\n");
    console.log(response);
    var chatSessionState = response.sessionState;
  } catch (err) {
    console.error(
      "Error sending PutSessionCommand, error message: " + JSON.stringify(err)
    );
  }
  /******************************* RECOGNIZE TEXT ***********************************/
  // get new client
  // var client = await getLexClient();
  const recognizeTextCommand = new RecognizeTextCommand({
    botAliasId: process.env.DEWEY_ALIAS_ID,
    botId: process.env.DEWEY_BOT_ID,
    localeId: process.env.LOCALE_ID,
    sessionState: {},
    sessionId: sessionCustomId,
    text: "Create a case",
  });
  try {
    const responseTwo = await client.send(recognizeTextCommand);
    console.log("finished posting text input\n");
    console.log(responseTwo);
  } catch (err) {
    console.error(
      "Error sending RecognizeTextCommand, error message: " +
        JSON.stringify(err)
    );
  }
  /******************************* DELETE SESSION ***********************************/
  const deleteSessionCommand = new DeleteSessionCommand({
    botAliasId: process.env.DEWEY_ALIAS_ID,
    botId: process.env.DEWEY_BOT_ID,
    localeId: process.env.LOCALE_ID,
    sessionId: sessionCustomId,
  });
  try {
    const responseThree = await client.send(deleteSessionCommand);
    console.log("finished deleting session\n");
    console.log(responseThree);
  } catch (err) {
    console.error(
      "Error sending DeleteSessionCommand, error message: " + JSON.stringify(err)
    );
  }
};
main();
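
For what it's worth, when a Node.js script dies mid-flow without printing an error, global handlers registered before main() will usually surface whatever is being swallowed. A generic diagnostic sketch, not specific to Lex:

// Surface errors that would otherwise end the process silently.
process.on("unhandledRejection", (reason) => {
  console.error("Unhandled rejection:", reason);
});
process.on("uncaughtException", (err) => {
  console.error("Uncaught exception:", err);
});
process.on("exit", (code) => {
  console.log(`Process exiting with code ${code}`);
});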

Related

Delivering image from S3 to React client via Context API and Express server

I'm trying to download a photo from an AWS S3 bucket via an Express server to serve to a React app, but I'm not having much luck. Here are my (unsuccessful) attempts so far.
The workflow is as follows:
1. Client requests the photo after retrieving its key from the database via the Context API
2. Request is sent to an Express server route (important so as to hide the true location from the client)
3. Express server route requests the blob file from the AWS S3 bucket
4. Express server parses the image to base64 and serves it to the client
5. Client updates state with the new image
React Client
const [profilePic, setProfilePic] = useState('');

useEffect(() => {
  await actions.getMediaSource(tempPhoto.key)
    .then(resp => {
      console.log('server resp: ', resp.data.data.newTest) // returns ����\u0000�\u0000\b\u0006\
      const url = window.URL || window.webkitURL;
      const blobUrl = url.createObjectURL(resp.data.data.newTest);
      console.log("blob ", blobUrl);
      setProfilePic({ ...profilePic, image : resp.data.data.newTest });
    })
    .catch(err => errors.push(err));
}
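
As an aside, await is only valid inside an async function, so the effect above needs an inner async wrapper. A minimal sketch of the same call with a valid pattern (assuming the same actions, tempPhoto, errors, and state setter from the question):

useEffect(() => {
  const loadImage = async () => {
    try {
      const resp = await actions.getMediaSource(tempPhoto.key);
      setProfilePic(prev => ({ ...prev, image: resp.data.data.newTest }));
    } catch (err) {
      errors.push(err);
    }
  };
  loadImage();
}, [tempPhoto.key]);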
Context API - just axios wrapped into its own library
getMediaContents = async ( key ) => {
  return await this.API.call(`http://localhost:5000/${MEDIA}/mediaitem/${key}`, "GET", null, true, this.state.accessToken, null);
}
Express server route
router.get("/mediaitem/:key", async (req, res, next) => {
  try {
    const { key } = req.params;

    // Attempt 1 was to try with s3.getObject(downloadParams).createReadStream();
    const readStream = getFileStream(key);
    readStream.pipe(res);

    // Attempt 2 - attempt to convert response to base 64 encoding
    var data = await getFileStream(key);
    var test = data.Body.toString("utf-8");
    var container = '';
    if (data.Body) {
      container = data.Body.toString("utf-8");
    } else {
      container = undefined;
    }
    var buffer = (new Buffer.from(container));
    var test = buffer.toString("base64");
    require('fs').writeFileSync('../uploads', test); // it never wrote to this directory
    console.log('conversion: ', test); // prints: 77+977+977+977+9AO+/vQAIBgYH - this doesn't look like base64 to me.
    delete buffer;
    res.status(201).json({ newTest: test });
  } catch (err) {
    next(ApiError.internal(`Unexpected error > mediaData/:id GET -> Error: ${err.message}`));
    return;
  }
});
AWS S3 Library - I made my own library for using the s3 bucket as I'll need to use more functionality later.
const getFileStream = async (fileKey) => {
  const downloadParams = {
    Key: fileKey,
    Bucket: bucketName
  }

  // This was attempt 1's return, without async in the parameter
  return s3.getObject(downloadParams).createReadStream();

  // Attempt 2's intention was just to wait for the promise to be fulfilled.
  return await s3.getObject(downloadParams).promise();
}

exports.getFileStream = getFileStream;
If you've gotten this far, you may have realised that I've tried a couple of things from different sources and documentation, but I'm not getting any further. I would really appreciate some pointers and advice on what I'm doing wrong and what I could improve on.
If any further information is needed then just let me know.
Thanks in advance for your time!
Maybe it will be useful for you; this is how I get an image from S3 and process it on the server.
Create a temporary directory:
createTmpDir(): Promise<string> {
  return mkdtemp(path.join(os.tmpdir(), 'tmp-'));
}
Get the file:
readStream(path: string) {
  return this.s3
    .getObject({
      Bucket: this.awsConfig.bucketName,
      Key: path,
    })
    .createReadStream();
}
How I process the file:
async MainMethod(fileName) {
  const dir = await this.createTmpDir();
  const serverPath = path.join(
    dir,
    fileName
  );

  await pipeline(
    this.readStream(attachent.key),
    fs.createWriteStream(serverPath + '.jpg')
  );

  const createFile = await sharp(serverPath + '.jpg')
    .jpeg()
    .resize({
      width: 640,
      fit: sharp.fit.inside,
    })
    .toFile(serverPath + '.jpeg');

  const imageBuffer = fs.readFileSync(serverPath + '.jpeg');

  // my manipulations

  fs.rmSync(dir, { recursive: true, force: true }); // delete temporary folder
}
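
If the temp-directory route is more than you need, base64-encoding the raw Body buffer directly also works; the utf-8 round trip in the question is what corrupts the bytes. A sketch assuming the AWS SDK v2 s3 client and bucketName from the question:

router.get("/mediaitem/:key", async (req, res, next) => {
  try {
    const data = await s3
      .getObject({ Bucket: bucketName, Key: req.params.key })
      .promise();
    // Encode the raw bytes directly; converting to utf-8 first destroys binary data.
    const base64 = data.Body.toString("base64");
    res.status(200).json({ image: `data:${data.ContentType};base64,${base64}` });
  } catch (err) {
    next(err);
  }
});

The client can then use the returned string as-is in an <img src=...> without createObjectURL.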

Node.js not behaving the same way on local and Google App Engine

I'm developing an app to upload .las files to Cesium ion.
I have modified this code https://github.com/CesiumGS/cesium-ion-rest-api-examples/blob/main/tutorials/rest-api/index.js
to pass a file from the browser.
It works flawlessly when I run npm start in my local env.
When I try the same thing on App Engine, I do not get the messages about where the process is at. It does upload the file, though; I just can't monitor what is going on.
To explain what is going on below: I send a file from the client, which is caught by app.post("/upload").
It then creates the asset on ion, uploads to S3, tells ion it's finished, and monitors the tiling on ion.
Then I call app.get("/progress") every second, which sends the state of things back to the client.
I think there is probably some logic I'm missing, something basic I've gotten totally wrong. I'm using a single service for both the backend and the frontend. Could that be part of the problem?
const express = require('express');
const app = express();
const port = process.env.PORT || 3001;
const fileUpload = require("express-fileupload");
const cors = require('cors');
var path = require('path');
const AWS = require('aws-sdk');
const fs = require('fs');
const rawdatafr = require('./lang/fr.json');
const rawdataen = require('./lang/en.json');
const axios = require('axios').default;

const accessToken = process.env.REACT_APP_ION_TOKEN;
const environment = process.env.NODE_ENV || 'production';
var urlLang = (environment === 'development') ? 'client/src/lang/' : 'lang/';
console.log('urlLang ? ' + urlLang);

app.use(cors());
app.use(fileUpload({
  useTempFiles: true,
  safeFileNames: false,
  preserveExtension: true,
  tempFileDir: 'temp/'
}));
'use strict';

var messageFromLoc = rawdataen;
var input = null;
var filename = null;
var srcType = 'POINT_CLOUD';
var message = null;
var needMonitoring = false;
var assetMetadata = null;
var finished = null;

function resetGlobalvar() {
  message = null;
  needMonitoring = false;
  assetMetadata = null;
  finished = null;
  input = null;
  filename = null;
  srcType = 'POINT_CLOUD';
}
async function creat_asset() {
  finished = false;
  message = 'create asset';
  axios.post('https://api.cesium.com/v1/assets', {
    name: filename,
    description: '',
    type: '3DTILES',
    options: {
      position: [2.29, 48.85, 0.1],
      sourceType: srcType,
    }
  }, {
    headers: { Authorization: `Bearer ${accessToken}` }
  })
    .then(function (response) {
      message = 'created successfully :> send to s3';
      sendtos3(response.data);
    })
    .catch(function (error) {
      console.log(error);
      message = error;
    });
}
async function sendtos3(response) {
  console.log('Asset created.');
  message = 'send to s3';
  try {
    const uploadLocation = response.uploadLocation;
    const s3 = new AWS.S3({
      apiVersion: '2006-03-01',
      region: 'us-east-1',
      signatureVersion: 'v4',
      endpoint: uploadLocation.endpoint,
      credentials: new AWS.Credentials(
        uploadLocation.accessKey,
        uploadLocation.secretAccessKey,
        uploadLocation.sessionToken)
    });
    let params = {
      Body: fs.createReadStream(input),
      Bucket: uploadLocation.bucket,
      Key: uploadLocation.prefix + filename
    };
    let s3Response = await s3.upload(params).on('httpUploadProgress', function (progress) {
      message = `${messageFromLoc.upload}: ${((progress.loaded / progress.total) * 100).toFixed(2)}%`;
      console.log(`Upload: ${((progress.loaded / progress.total) * 100).toFixed(2)}%`);
    }).promise();
    // request succeeded
    console.log(`File uploaded to S3 at ${s3Response.Bucket} bucket. File location: ${s3Response.Location}`);
    message = `File uploaded to S3 at ${s3Response.Bucket} bucket. File location: ${s3Response.Location}`;
    step3(response);
    // return s3Response.Location;
  }
  // request failed
  catch (ex) {
    console.error(ex);
    message = ex;
  }
}
async function step3(response) {
  const onComplete = response.onComplete;
  assetMetadata = response.assetMetadata;
  message = 'step3';
  axios.post(onComplete.url, onComplete.fields, {
    headers: { Authorization: `Bearer ${accessToken}` }
  })
    .then(function (response) {
      message = 'step3 done';
      monitorTiling(assetMetadata);
    })
    .catch(function (error) {
      console.log(error);
      message = error;
    });
}
async function monitorTiling(assetMetadata) {
  // console.log(response);
  const assetId = assetMetadata.id;
  message = 'monitorTiling';
  axios.get(`https://api.cesium.com/v1/assets/${assetId}`, { headers: { Authorization: `Bearer ${accessToken}` } })
    .then(function (response) {
      // handle success
      console.log('monitorTiling - success');
      var status = response.data.status;
      message = 'Tiling - success';
      if (status === 'COMPLETE') {
        console.log('Asset tiled successfully');
        console.log(`View in ion: https://cesium.com/ion/assets/${assetMetadata.id}`);
        message = 'Asset tiled successfully';
        needMonitoring = false;
        finished = true;
      } else if (status === 'DATA_ERROR') {
        console.log('ion detected a problem with the uploaded data.');
        message = 'ion detected a problem with the uploaded data.';
        needMonitoring = false;
        finished = true;
      } else if (status === 'ERROR') {
        console.log('An unknown tiling error occurred, please contact support@cesium.com.');
        message = 'An unknown tiling error occurred, please contact support@cesium.com.';
        needMonitoring = false;
        finished = true;
      } else {
        needMonitoring = true;
        if (status === 'NOT_STARTED') {
          console.log('Tiling pipeline initializing.');
          message = 'Tiling pipeline initializing.';
        } else { // IN_PROGRESS
          console.log(`Asset is ${assetMetadata.percentComplete}% complete.`);
          message = `Asset is ${assetMetadata.percentComplete}% complete.`;
        }
      }
    })
    .catch(function (error) {
      // handle error
      console.log(error);
      message = error;
    })
}
/*------- LISTEN FOR CALL TO UPLOAD AND START THE UPLOAD PROCESS ----------*/
app.post("/upload", (req, res) => {
  if (!req.files) {
    res.send("File was not found");
    message = 'File was not found';
    return;
  }
  input = req.files.file.tempFilePath;
  filename = req.files.file.name;
  emptyTempFolder('temp', input.replace('temp/', ''));
  var ext = path.extname(filename);
  if (ext == '.zip') {
    srcType = 'CITYGML';
  }
  /*------- START UPLOAD PROCESS ----------*/
  creat_asset();
});

app.listen(port, () => {
  console.log(`Example app listening at http://localhost:${port}`)
})

/*------- LISTEN FOR PROGRESS TO UPLOAD ASSET ----------*/
app.get("/progress", (req, res) => {
  // lang = req.get('Accept-Language').substring(0, 2).toLowerCase();
  // if (lang == 'fr') {
  //   messageFromLoc = rawdatafr;
  // }
  console.log('message = ' + message);
  if (needMonitoring) {
    monitorTiling(assetMetadata);
  }
  res.json({ message: message, done: finished, myAssetMetadata: assetMetadata });
  if (finished) {
    resetGlobalvar();
  }
});

/*--------------STATIC ----------------*/
app.use(express.static(path.join(__dirname, 'build')));
And my app.yaml is like this:
runtime: nodejs14
env: standard
includes:
  - env_variables.yaml
instance_class: B1
service: my-app
basic_scaling:
  max_instances: 25
  idle_timeout: 60m
I think this is from your instance(s). You're using basic scaling with up to 25 instances.
It looks like a combination of the following is happening:
a) When you send a request to /progress, a new instance of your app is created, which means all of the global variables start from their default values (the initial value of message is null).
b) Other times, a request to /progress is handled by an existing instance which was already processing an upload request; that request has completed, and so the message says completed.
You don't have this problem in your local environment because only one instance runs.
To test this theory, modify your app.yaml and set max_instances: 1. This should force the app to use only one instance, which means subsequent requests will reach the existing instance (which has the updated state of your global variables).
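
For reference, the relevant app.yaml change would look something like this, keeping the rest of the file as in the question:

basic_scaling:
  max_instances: 1
  idle_timeout: 60m

Longer term, keeping per-upload state in a shared store (e.g. Cloud Datastore or Memorystore) instead of process globals would let any instance answer /progress.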

Node.js Elastic Beanstalk refused to connect to upstream / upstream prematurely closed

I am getting the following errors when running my application in Elastic Beanstalk: [error] 3636#0: *295 upstream prematurely closed connection while reading response header from upstream and [error] 3636#0: *295 connect() failed (111: Connection refused) while connecting to upstream. It's strange, because if I hit those routes independently it works fine. It only appears to error when firing those routes from my Vuex action.
The following is the log from AWS Elastic Beanstalk.
The following is the network tab when it hits my FFmpeg route:
The following is the generate video action as fired from Vuex.
async [GENERATE_VIDEO]({ state, rootState, dispatch, commit }) {
  const username = rootState.user.currentUser.username;
  const s3Id = rootState.templates.currentVideo.stock_s3_id;
  const type = rootState.dataClay.fileFormat || state.type;
  const vid = new Whammy.fromImageArray(state.captures, 30);
  vid.lastModifiedDate = new Date();
  vid.name = "canvasVideo.webm";
  const data = new FormData();
  const id = `${username}_${new Date().getTime()}`;
  data.append("id", id);
  data.append("upload", vid);
  const projectId = await dispatch(INSERT_PROJECT);
  await dispatch(UPLOAD_TEMP_FILE, data);
  const key = await dispatch(CONVERT_FILE_TYPE, { id, username, type, projectId });
  const role = rootState.user.currentUser.role;
  state.file = `/api/files/${key}`;
  let message;
  if (role != 'banner') {
    message = `<p>Your video is ready.</p> Download`;
  } else {
    message = `<p>Your video is ready. You may download your file from your banner account</p>`;
    const resolution = rootState.dataClay.matrix[0];
    await dispatch(EXPORT_TO_BANNER, { s3Id, fileUrl: key, extension: `.${type}`, resolution });
  }
}
And here are the API routes called in the actions.
async [UPLOAD_TEMP_FILE]({ commit }, data) {
  try {
    const response = await axios.post("/api/canvas-editor/upload-temp", data);
    return response.data;
  } catch (error) {
    console.log(error);
  }
},
async [CONVERT_FILE_TYPE]({ commit }, data) {
  try {
    const response = await axios.post("/api/canvas-editor/ffmpeg", data);
    return response.data;
  } catch (error) {
    console.log(error);
  }
}
}
As I said, all my routes work and the application runs as expected on localhost; however, when uploaded to AWS I receive unexpected errors.
After some digging, I found out that I had not set the ffmpeg path.
Once that was done, it worked great.
const ffmpeg = require('fluent-ffmpeg');
const ffmpegPath = require('@ffmpeg-installer/ffmpeg').path;
ffmpeg.setFfmpegPath(ffmpegPath);
module.exports = ffmpeg;
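
A minimal usage sketch, assuming the snippet above is saved as ffmpeg.js and required elsewhere in the app:

const ffmpeg = require('./ffmpeg');

ffmpeg('input.webm')
  .output('output.mp4')
  .on('end', () => console.log('Conversion finished'))
  .on('error', (err) => console.error('Conversion failed:', err))
  .run();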

Insufficient funds. The account you tried to send transaction from does not have enough funds. Required 892413000000000 and got: 0

I am trying to deploy a Solidity contract using web3 and Node.js, and I get an error on all testnets:
If I try to run on the local testrpc, everything works fine.
Can you spot any error in the code that might cause this, or is there an issue with the testnets?
const path = require('path');
const fs = require('fs');
const solc = require('solc');
var Web3 = require('web3');
// Infura test network (kovan)
var web3 = new Web3(new Web3.providers.HttpProvider('https://kovan.infura.io/v3/3e0f68cb39c64417b15cf55e486479dd'));
var myAddress = '0x362aa2Bf4b6fB733C4EF41F4d2833E8e5aDc54ed';
var myPrivateKey = new Buffer('a288c7c873f09e96b7f0e404759288606e2ffc0edf58874aeb5a0fe4bcd9c262', 'hex')
// Compile contract from file
const contractPath = path.resolve(__dirname, 'contracts', 'HDS.sol');
const contractSourceCode = fs.readFileSync(contractPath, 'UTF-8');
const compiledContract = solc.compile(contractSourceCode, 1).contracts[':HDS']
var newContractAddress = web3.utils.toChecksumAddress(web3.utils.randomHex(20));
// Create a transaction
var rawTx = {
  from: myAddress,
  nonce: web3.utils.toHex('13'),
  gasPrice: web3.utils.toHex(web3.utils.toWei('1', 'gwei')),
  gas: web3.utils.toHex('892413'),
  gasLimit: web3.utils.toHex('892413'),
  data: compiledContract.bytecode
};
// // Unlock account to sign transaction
// web3.eth.personal.unlockAccount(myAddress, myPrivateKey, 600)
//   .then(console.log('Account unlocked!'))
//   .catch((error) => { console.log(error); });

web3.eth.getBalance(myAddress)
  .then(function (balance) { console.log("My balance: ", balance); })
  .catch(function (error) { console.log(error); });

web3.eth.accounts.signTransaction(rawTx, myPrivateKey)
  .then(function (signResult) {
    web3.eth.sendSignedTransaction(signResult.rawTransaction)
      .on('error', function (error) { console.log("Error deploying contract: " + error); })
      .on('transactionHash', function (transactionHash) { console.log("Transaction hash: " + transactionHash); })
      .on('receipt', function (receipt) { console.log("Receipt contract address: " + receipt.contractAddress); })
      .on('confirmation', function (confirmationNumber, receipt) {
        console.log("Confirmation number: " + confirmationNumber);
        console.log("Confirmation receipt: " + receipt);
      })
      .catch(function (error) { console.log(error); });
  });
Here's the account on Kovan testnet, if it helps: https://kovan.etherscan.io/address/0x362aa2bf4b6fb733c4ef41f4d2833e8e5adc54ed
You need to sign the transaction before you send it to the network. The easiest way to do this is to unlock an account using your mnemonic: you can do this when you initialize web3 using truffle-hdwallet-provider, and after that you can send transactions from your account without needing to sign them manually. Another option is to manually sign each transaction with your private key before you send it; you can read here how to do this. There is no difference between the two in terms of functionality, but the first one is a little easier if you are new to this.
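
A minimal sketch of the first approach; the mnemonic and Infura project id are placeholders:

const HDWalletProvider = require('truffle-hdwallet-provider');
const Web3 = require('web3');

// The provider derives keys from the mnemonic and signs transactions locally.
const mnemonic = 'twelve word mnemonic goes here ...';
const provider = new HDWalletProvider(mnemonic, 'https://kovan.infura.io/v3/<project-id>');
const web3 = new Web3(provider);

// Transactions sent from this address are now signed automatically.
const myAddress = provider.getAddress(0);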

Error: 5 NOT_FOUND: Requested entity was not found on LongRunningRecognize

I'm trying to transcribe an audio file with the Node.js client for Google Speech-to-Text and a Google Cloud Function.
Unfortunately I get this error:
Error: 5 NOT_FOUND: Requested entity was not found
I suppose it comes from an authentication problem, but I am not sure.
First, I tried without credentials, assuming that GCF would use ADC (Application Default Credentials).
Then I added client_email and private_key from the service account to the SpeechClient options param, but it didn't work.
I added projectId and keyFilename... no better.
Maybe it isn't the right way... I have no idea!
Here is my code. Thanks for your help.
const audioFilename = 'gs://' + outputBucket.name + '/' + event.data.name;

const request = {
  "config": {
    "enableWordTimeOffsets": false,
    "languageCode": "fr-FR",
    "encoding": "FLAC"
  },
  "audio": {
    "uri": audioFilename
  }
}

const options = {
  credentials: {
    projectId: 'xxxxxx',
    keyFilename: './xxxxx.json',
    client_email: 'xxxx@xxxxx',
    private_key: 'xxxxxxxxx'
  }
};
const client = new speech.SpeechClient(options);

client
  .longRunningRecognize(request)
  .then(data => {
    const response = data[0];
    const operation = response;
    operation.on('progress', (metadata, apiResponse) => {
      console.log(JSON.stringify(metadata))
    });
    // Get a Promise representation of the final result of the job
    return operation.promise();
  })
  .then(data => {
    const [response] = data[0];
    const content = response.results
      .map(result => result.alternatives[0].transcript)
      .join('\n');
    console.log(`Transcription: ${content}`);
    resolve(content);
  })
  .catch(err => {
    reject(err);
  });
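
In case it helps narrow down the authentication angle: with the Node.js client, projectId and keyFilename belong at the top level of the constructor options, not nested inside credentials (which expects only client_email and private_key). A minimal sketch with placeholder values:

const speech = require('@google-cloud/speech');

// Either point at a service-account key file...
const client = new speech.SpeechClient({
  projectId: 'my-project-id',
  keyFilename: './service-account.json',
});

// ...or pass the key material itself (not both).
const clientAlt = new speech.SpeechClient({
  projectId: 'my-project-id',
  credentials: {
    client_email: 'sa-name@my-project-id.iam.gserviceaccount.com',
    private_key: '-----BEGIN PRIVATE KEY-----\n...',
  },
});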
