I am currently developing an Amazon Alexa Skill, which asks the user for a color. The user enters the color by voice and Alexa checks, if the color is in a defined array of values. If that is the case, it returns the color name with an ID. Now this works as intended, but now I would like to put this value in AWS DynamoDB. I read some tutorials on how to connect and to write into DynamoDB using AWS Lambda (Runtime Node.js 8.10)
So, in the following code, you can see my AnswerIntentHandler of my Alexa Skill. I included the exports. handle function to write the value of the ID of the color and the name of the color in the table called "alexa_farbe". But when I simulate the Skill with the Alexa Skill developer, the code only gives out the "speechText" and doesn't seem to run the code to write into the DynamoDB. Perhaps it is not possible to run a exports.handle in the AnswerIntentHanlder or is it? I am very new to this topic so I am not really sure where the mistake in this code is. I provide the code of my AnswerIntentHandler, but I can also provide the whole code of the Alexa Skill.
I hope somebody can give me a hint what to to.
const AnswerIntentHandler = {
canHandle(handlerInput) {
return handlerInput.requestEnvelope.request.type === 'IntentRequest'
&& handlerInput.requestEnvelope.request.intent.name === 'AnswerIntent';
},
handle(handlerInput) {
const slots = handlerInput.requestEnvelope.request.intent.slots;
const number = slots['FarbAuswahl'].value;
var numberid = slots['FarbAuswahl'].resolutions.resolutionsPerAuthority[0].values[0].value.id; /* ID der Farbe auslesen */
var speechText = 0;
speechText = `Prima, ich stelle die Farbe ${number} mit der ID ${numberid} ein!`;
/* Ab hier Versuch in die Datenbank DynamoDB zu schreiben */
exports.handle = function(e, etx, callback) {
var params = {
Item: {
ID: '${numberid}',
Farbname: '${number}'
},
TableName: 'alexa_farbe'
};
docClient.put(params, function(err, data) {
if(err) {
callback(err, null);
} else {
callback(null, data);
}
});
};
return handlerInput.responseBuilder
.speak(speechText)
.withSimpleCard('Ausgewählte Farbe:', speechText)
.getResponse();
},
};
You should not put the code in the exports.handler. Please take a look at this example (the DynamoDB support code is in the helpers.js file). As a coincidence it's also working with colors.
If the data you're storing is associated to each skill user (eg. a user attribute) a much easier way to do DynamoDB persistence is to use ASK persistent attributes (already supported in the ASK SDK).
I tried to edit the code and put the whole DynamoDB posting to another file called dynamodb.js. This is the content:
const AWS = require('aws-sdk');
const docClient = new AWS.DynamoDB.DocumentClient({region: 'us-east-1'});
exports.handle = function(e, ctx, callback) {
var params = {
Item: {
date: Date.now(),
message: "This hopefully works"
},
TableName: 'alexa_farbe'
};
docClient.put(params, function(err, data) {
if (err) {
console.log("Error", err);
} else {
console.log("Success", data);
}
});
};
I just want to write the date and an example phrase into my DynamoDB.
To trigger this function, I tried to implement some kind of "runScript" into my AnswerIntentHandler of my Alexa Skill.
But it doesn't seem to trigger the file. Can you give me some advice whats wrong with this code and how to call another node.js file from the AnswerIntent.
Here is my edited AnswerIntent:
const AnswerIntentHandler = {
canHandle(handlerInput) {
return handlerInput.requestEnvelope.request.type === 'IntentRequest'
&& handlerInput.requestEnvelope.request.intent.name === 'AnswerIntent';
},
handle(handlerInput) {
const slots = handlerInput.requestEnvelope.request.intent.slots;
let number = slots['FarbAuswahl'].value;
let numberid = slots['FarbAuswahl'].resolutions.resolutionsPerAuthority[0].values[0].value.id; /* ID der Farbe auslesen */
var speechText = 0;
speechText = `Prima, ich stelle die Farbe ${number} mit der ID ${numberid} ein!`;
module.runScript('./some-script.js', function (err) {
if (err) throw err;
console.log('finished running some-script.js');
});
return handlerInput.responseBuilder
.speak(speechText)
.withSimpleCard('Ausgewählte Farbe:', speechText)
.getResponse();
},
};
Related
i am new to this , i am having problem , i have to create almost 200 users in cognito after reading data from csv file which is located in S3 bucket
the problem is , if a user already exists in Cognito , my code stop executing and give me an error "An account with the given email already exists." is there a way that i can pass the whole data. if there is user already in the cognito with the same email, it skips that user and checks for the new user data , and at the end which users are already exists in cognito .this the function to create user in cognito
here is the function for creating the cognito user
function RegisterUser(data2) {
console.log(data2[1])
for(let i=0;i<=data2.length;i++){
var attributeList = [];
var cognitoUser;
attributeList.push(new AmazonCognitoIdentity.CognitoUserAttribute({ Name: "name", Value: data2[i][0]}));
attributeList.push(new AmazonCognitoIdentity.CognitoUserAttribute({ Name: "email", Value: data2[i][1] }));
try{
return new Promise((resolve, reject) => {
userPool.signUp(data2[i][1], data2[i][2], attributeList, null, (err, result) => {
if (err) {
console.log(err.message);
reject(err);
return;
}
cognitoUser = result.user;
resolve(cognitoUser);
});
});
}catch(err){
return{
success:false,
message:err
}
}
}
}
here is the lambda handler
exports.handler = async (event, context) => {
try {
// Converted it to async/await syntax just to simplify.
const data = await S3.getObject({Bucket: 'user-data-file', Key: 'SampleCSVFile_2kb.csv'}).promise();
var data1 = Buffer.from(data.Body).toString();
var data2 = data1.split("\r\n"); // SPLIT ROWS
for (let i in data2) { // SPLIT COLUMNS
data2[i] = data2[i].split(",");
}
const userPoolResponse = await RegisterUser(data2);
}
catch (err) {
return {
statusCode: err.statusCode || 400,
body: err.message || JSON.stringify(err.message)
}
}
}
A quick google search brought this up: How to check Email Already exists in AWS Cognito?
Which sure thats Front end but your use case seem to be a quick once in a while run script, not a regular use User System - in which case, this is basic programing 101 to solve. You put another try catch around your call to register the user. You check the exception thrown, and if its 'already registered' you pass and continue in the loop without interruption. The above link can give you some idea of what to look for to determine if it is that exception or not.
iv been setting up a lambda instance, it grabs data from a few different services and then its meant to update a custom Cognito attribute for that user, that works correctly and i get the return response "{}" along and no errors so im assuming that means its working correctly, however when i check the users attributes its not returning anything?
i have triple checked that the app clients have read and write permissions, so i have no idea whats happening, everything, as far as i can tell, is working, just that the attribute isnt changing.
return new Promise((resolve, reject) => {
console.log("TestInside");
// setTimeout(function() {
console.log("TimeoutFunction");
var cognitoidentityserviceprovider = new AWS.CognitoIdentityServiceProvider();
var params = {
UserAttributes: [
{
Name: 'locale',
Value: UserFarms
},
],
UserPoolId: 'UserPoolID',
Username: UserName
};
console.log("Executing call");
const cognitoD = cognitoidentityserviceprovider.adminUpdateUserAttributes(params, function(err, data) {
if (err) {
console.log(err, err.stack); // an error occurred
//passon = err.message;
resolve('Error');
}
else {
// cognitodata = data;
console.log('Returned positive cognito admin update');
//UserName = data.Username;
//passon = data;
console.log(data);
resolve(data);
}
}).promise();
whats wrong, am i missing somthing really simple?
I am playing with AWS Lambda along with Twilio. I have a Lambda function that integrates Lex with Twilio. I also have another Lambda function that does the the validations for my LexBot. Both work fine separately. However, I'm trying to put them together so whenever my LexBot integrates with Twilio, it also calls my validations in the same Lambda function.
Any ideas? Thank you.
Here is the Lambda that integrates Lex with Twilio:
var twilio = require('twilio');
var qs = require('qs');
var AWS = require('aws-sdk');
exports.handler = (event, context, callback) => {
try {
var twilioSMS = qs.parse(event["body-json"]);
// ************************
// validate and filter bad/empty messages
// ************************
if(!twilioSMS.hasOwnProperty('Body')){
var error = new Error("Cannot process message without a Body.");
callback(error);
}
else {
// Message is valid so now we prepare to pass it along to the Lex API.
AWS.config.region = 'us-east-1';
var lexruntime = new AWS.LexRuntime();
var userNumber = twilioSMS.From.replace('+', '');
var params = {
botAlias: process.env.BOT_ALIAS,
botName: process.env.BOT_NAME,
inputText: twilioSMS.Body,
userId: userNumber,
sessionAttributes: {
}
};
lexruntime.postText(params, function(err, data) {
var twimlResponse = new twilio.TwimlResponse();
if (err) {
console.log(err, err.stack); // an error occurred
twimlResponse.message('Sorry, we ran into a problem at our end.');
callback(err, twimlResponse.toString());
} else {
console.log(data); // got something back from Amazon Lex
twimlResponse.message(data.message);
callback(null, twimlResponse.toString());
}
});
}
} catch(e) {
console.log(e);
callback(e);
}
};
And here is my Lambda with the validations:
exports.handler = (event, context, callback) => {
// TODO implement
var numberType =event.currentIntent.slots.number,
response = "is not valid. Try 'One' or 'Two'." ;
if(numberType === "one" ) {
response = "Call: 111 111 1111 "
}
else if(numberType === "two") {
response = "Call: 222 222 2222"
}
callback(null, {
"dialogAction": {
"type": "Close",
"fulfillmentState": "Fulfilled",
"message": {
"contentType": "PlainText",
"content": "Your option: " + event.currentIntent.slots.number + ": " + response
}
}
});
};
Step functions would be your friend. Please have a look at below links,
https://aws.amazon.com/step-functions/
https://cloudacademy.com/blog/aws-step-functions-a-serverless-orchestrator/
I realized didn't need to write a Lambda function to connect Lex with Twilio. All I had to do was go to 'Channels' under my LexBot Console and integrate manually my bot with my Twilio account.
I want to merge data from two tables and then send the result as the response.
I'm new to nodejs and lambda and I'm unable to figure out how I can merge json data from both scan calls and send it as the response.
If I uncomment the callback then response for only one table is sent.
My code is below, can someone please help in completing it
'use strict';
const AWS = require("aws-sdk");
const dynamodb = new AWS.DynamoDB();
const docClient = new AWS.DynamoDB.DocumentClient();
exports.handler = function(event, ctx, callback) {
var params= {
TableName:'x',
FilterExpression:'SessionId = :SessionId',
ExpressionAttributeValues:{ ":SessionId" : 'ca47a131'},
};
var params1= {
TableName:'y',
FilterExpression:'sessionid = :SessionId',
ExpressionAttributeValues:{ ":SessionId" : 'ca47a131'},
};
docClient.scan(params, onScan);
docClient.scan(params1, onScan1);
function onScan(err, data){
if(err){
callback(err, null);
}else{
//callback(null, data);
}
}
function onScan1(err, data){
if(err){
callback(err, null);
}else{
//callback(null, data);
}
}
}
You can use the following modification to the code so that you can send the response in a single callback.
'use strict';
const AWS = require("aws-sdk");
const dynamodb = new AWS.DynamoDB();
const docClient = new AWS.DynamoDB.DocumentClient();
exports.handler = function(event, ctx, callback) {
var params= {
TableName:'x',
FilterExpression:'SessionId = :SessionId',
ExpressionAttributeValues:{ ":SessionId" : 'ca47a131'},
};
var params1= {
TableName:'y',
FilterExpression:'sessionid = :SessionId',
ExpressionAttributeValues:{ ":SessionId" : 'ca47a131'},
};
docClient.scan(params, onScan);
docClient.scan(params1, onScan1);
var firstResultData = false;
function runAfterBothCallbacks(data){
if(!firstResultData){
firstResultData = data;
}else{
// Combine firstResultData with data and return in the callback
callback(null,{ dataX: firstResultData, dataY: data });
// Note: The order of scan and scan1 result coming cannot be guaranteed so, dataX can be the results of scan or scan1. If you can identify the result based on the scan, either pass it as another parameter to the runAfterBothCallbacks method or identify the scan based on data result (If possible).
}
}
function onScan(err, data){
if(err){
callback(err, null);
}else{
runAfterBothCallbacks(data);
}
}
function onScan1(err, data){
if(err){
callback(err, null);
}else{
runAfterBothCallbacks(data);
}
}
}
Welcome to JavaScript asynchronous callbacks (aka callback hell).
Fortunately, the AWS SDK supports promises so you can use Promise.all() to wait for multiple promises to be resolved. When that happens, merge the JSON results and return the merged result via the Lambda function's callback() method.
please bear in mind that I can, at best, be described as a rookie in both node and amazon S3. I have something app that writes to S3 in the background. I want to read from S3 when the file has been written, and only once it's been written. I attempt to check the number of objects and return the result:
function haveFilesBeenWrittenToBucket(bucketName, callback) {
s3.listObjects({ Bucket: bucketName }, function(err, data) {
const items = data.Contents;
callback(items);
});
}
and the readFile function:
OSClient.prototype.readFile = function(params, callback) {
haveFilesBeenWrittenToBucket(params.Bucket, items => {
console.log("Number of items " + items.length);
if (items.length > 0) {
const rl = readline.createInterface({
input: s3.getObject(params).createReadStream()
});
const myArray = [];
rl.on("line", function (line) {
const lineArray = line.split(",");
for (const value of lineArray) {
if (isNaN(value)) {
// line.split creates string elements, adding extraneous quotation marks in a string and converting
// number to string, so there is a need to reverse this process.
const slicedElement = value.slice(1, -1);
myArray.push(slicedElement);
} else {
const valueOfNumber = Number(value);
myArray.push(valueOfNumber);
}
}
})
.on("close", function () {
callback(myArray);
});
}
else{
var myfunction = this.readFile.bind(this, params, callback);
setTimeout(myfunction, 5000);
}
});
};
and lastly:
targetClient.readFile(params, function (arrayResult) {
logger.info("Read file:" + fileName + OS_FILE_SUFFIX);
readArray = arrayResult;
});
If I put a breakpoint on callback(items) (in 'haveFilesBeenWrittenToBucket') everything works fine and I get back the file written in the bucket, but if not, nothing seems to get written to S3. Seems like some race condition, but I'm really clueless and I really would appreciate some help. Is there a conflict between listing objects and writing to S3 (at least not until much later, in some other test, when it shouldn't be (it's part of a mocha test suite - the readFile is in async.waterfall). I have been on this for days and got nowhere. As I said, it's my first exposure to node, so please be patient with me. Thanks.
S3 provides eventual consistency for list after read. So, you might observe the following:
A process writes a new object to Amazon S3 and immediately lists keys within its bucket. Until the change is fully propagated, the object might not appear in the list.
The only situation in which S3 provides immediate consistency is read-after-write for PUTS of new objects (with a minor caveat, documented here).
More details at S3 consistency model.
Here is an example of how you can use async retry to wait for an object and then retrieve its contents (assumed to be text in this example).
var aws = require("aws-sdk");
var async = require("async");
var s3 = new aws.S3();
var bucket = 'mybucket';
var iteration = 0;
function waitForObjects(bucket, callback) {
console.error(`Iteration: ${++iteration}`);
s3.listObjects({Bucket:bucket}, function(err, data) {
if (err) {
callback(err);
} else if (!data.Contents || !data.Contents.length) {
callback(new Error("No objects"))
} else {
callback(null, data);
}
});
}
// Try calling waitForObjects 10 times with exponential backoff
// (intervals of 100, 200, 400, 800, 1600, ... milliseconds)
async.retry({
times: 10,
interval: function(retryCount) {
return 50 * Math.pow(2, retryCount);
}
}, async.apply(waitForObjects, bucket), function(err, data) {
if (err) {
console.error(`Error waitForObjects: ${err}`);
} else {
console.log(`Object count: ${data.Contents.length}`);
data.Contents.forEach(function(item, index) {
console.log(`Object ${index+1} key: ${item.Key}`);
s3.getObject({Bucket:bucket, Key:item.Key}, function(err, data) {
console.log(`Object ${index+1} txt: ${data.Body.toString()}`);
});
});
}
});
Two things. Firstly, it turns out that my issue was not nodeJS related. Sigh
Secondly, the API now provides a 'waitFor' method for polling whether a bucket or objects exists:
http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#waitFor-property