How to get the DisconnectTimestamp from an Amazon Connect call in Node.js

My call recordings are pushed to S3 and stored with contactId_timestamp.wav as the filename.
For now I can get/download the files by providing the exact filename as the key. Now I want to build the filename myself as contactId + DisconnectTimestamp. I can get the contactId through getContactId(), but how do I get the DisconnectTimestamp?
My goal is the same experience as Contact Flow Search: recordings can be played back by contactId.
Here is how I am downloading the recordings from S3.
require("dotenv").config();
const expres = require("express");
const app = expres();
app.listen(3001);
const aws = require("aws-sdk");
aws.config.update({
secretAccessKey: process.env.ACCESS_SECRET,
accessKeyId: process.env.ACCESS_KEY,
region: process.env.REGION
})
const BUCKET = process.env.BUCKET
const s3 = new aws.S3(secretAccessKey = process.env.ACCESS_SECRET, accessKeyId = process.env.ACCESS_KEY);
app.get("/download/filename", async(req, res)=>{
const filename = req.params.filename
let x = await s3.getObject({Bucket:BUCKET, Key:filename}).promise();
res.send(x.Body);
})
And then hitting http://localhost:3001/download/0989c085-16d1-478b-8858-1ccddb2990f4_20220303T16:46_UTC.wav
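As a side note, the key contains ":" characters; a quick sketch of encoding it safely when calling the endpoint (the key below is just the example filename from above):

// Sketch: percent-encode the key when building the request URL, since it
// contains ":" characters.
const key = "0989c085-16d1-478b-8858-1ccddb2990f4_20220303T16:46_UTC.wav";
const url = `http://localhost:3001/download/${encodeURIComponent(key)}`;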

If you have the ContactId for the call, you can use describeContact to get the contact info, which includes the DisconnectTimestamp.
Something along these lines should work.
const AWS = require('aws-sdk');
AWS.config.update({
  secretAccessKey: process.env.ACCESS_SECRET,
  accessKeyId: process.env.ACCESS_KEY,
  region: process.env.REGION
});

const connect = new AWS.Connect({ region: process.env.REGION });

const params = {
  ContactId: 'STRING_VALUE', /* required */
  InstanceId: 'STRING_VALUE' /* required - the Connect instance ID */
};
connect.describeContact(params, function(err, data) {
  if (err) {
    console.log(err, err.stack); // an error occurred
  } else {
    const disconnectTimestamp = data.Contact.DisconnectTimestamp; // successful response
    console.log(disconnectTimestamp);
  }
});
More info here: https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/Connect.html#describeContact-property
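Putting the two pieces together, a minimal sketch of building the key and fetching the recording, assuming the contactId_YYYYMMDDTHH:MM_UTC.wav naming from the question (the recordingKey helper and the INSTANCE_ID environment variable are illustrative, not part of the Connect API):

// Hypothetical helper: rebuild the recording key from the contact ID and
// the DisconnectTimestamp, matching the contactId_YYYYMMDDTHH:MM_UTC.wav
// pattern from the question.
function recordingKey(contactId, disconnectTimestamp) {
  const d = new Date(disconnectTimestamp);
  const pad = (n) => String(n).padStart(2, "0");
  const stamp =
    `${d.getUTCFullYear()}${pad(d.getUTCMonth() + 1)}${pad(d.getUTCDate())}` +
    `T${pad(d.getUTCHours())}:${pad(d.getUTCMinutes())}_UTC`;
  return `${contactId}_${stamp}.wav`;
}

async function downloadRecording(contactId) {
  const { Contact } = await connect.describeContact({
    ContactId: contactId,
    InstanceId: process.env.INSTANCE_ID // assumed env var holding the instance ID
  }).promise();
  const key = recordingKey(contactId, Contact.DisconnectTimestamp);
  return s3.getObject({ Bucket: BUCKET, Key: key }).promise();
}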

Related

Missing region in AWS Rekognition in Node.js

//Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//PDX-License-Identifier: MIT-0 (For details, see https://github.com/awsdocs/amazon-rekognition-developer-guide/blob/master/LICENSE-SAMPLECODE.)
const AWS = require('aws-sdk')
const bucket = 'bucket' // the bucket name, without s3://
const photo_source = 'source.jpg'
const photo_target = 'target.jpg'
const config = new AWS.Config({
  accessKeyId: process.env.AWS_ACCESS_KEY_ID,
  secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
  region: process.env.AWS_REGION
})
const client = new AWS.Rekognition();
const params = {
  SourceImage: {
    S3Object: {
      Bucket: bucket,
      Name: photo_source
    },
  },
  TargetImage: {
    S3Object: {
      Bucket: bucket,
      Name: photo_target
    },
  },
  SimilarityThreshold: 70
}
client.compareFaces(params, function (err, response) {
  if (err) {
    console.log(err, err.stack); // an error occurred
  } else {
    response.FaceMatches.forEach(data => {
      let position = data.Face.BoundingBox
      let similarity = data.Similarity
      console.log(`The face at: ${position.Left}, ${position.Top} matches with ${similarity} % confidence`)
    }) // for response.faceDetails
  } // if
});
The above code is from the official AWS docs (https://docs.aws.amazon.com/rekognition/latest/dg/faces-comparefaces.html) and implements face comparison in Node.js. When I run it, an error occurs saying the region is missing from the config. Looking at the code, a config object is created but never used anywhere. Can someone tell me where the config object must be used? If the error has another cause, please tell me the reason.
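A likely explanation, sketched below: the config object is created but never handed to the SDK, so the Rekognition client is constructed without a region. Either apply the settings globally before creating the client, or pass them directly to the constructor:

const AWS = require('aws-sdk')

// Option 1: apply the settings globally before any clients are created
AWS.config.update({
  accessKeyId: process.env.AWS_ACCESS_KEY_ID,
  secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
  region: process.env.AWS_REGION
})
const client = new AWS.Rekognition()

// Option 2: pass the options directly to this one client instead
const clientWithOwnConfig = new AWS.Rekognition({
  region: process.env.AWS_REGION
})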

S3 triggers Lambda Function twice with unique request id

I use S3 to trigger my Lambda function every time I upload a file to the bucket. I've tested it and it works, but I noticed that S3 triggers my Lambda twice, with unique request IDs. Even if I remove the entire code and just add console.info('something'), it still triggers twice, so clearly it's not Lambda retrying on error. I also set retries to 0.
1st request id : awsRequestId: '9f73e49f-6cc7-454e-a89f-7d88122a7166'
2nd request id : awsRequestId: '1c8572d5-61ee-4b0b-93d9-4f8a3dcd28bf'
Here's my code:
const AWS = require('aws-sdk');
const s3 = new AWS.S3({
  region: process.env.region,
  accessKeyId: process.env.accessKeyId,
  secretAccessKey: process.env.secretAccessKey
});
const axios = require('axios');
const csvtojson = require('csvtojson');
const _ = require('lodash');

exports.handler = async (event, context) => {
  const { normalizeAndHashData } = require('./hash');
  const params = { Bucket: 'myBucket', Key: 'myKey' };
  const data = await s3.getObject(params).promise();
  const eventsJson = await csvtojson().fromString(data.Body.toString());
  const result1 = await axios.post(`https://myurl`, eventsJson);
  if (result1) {
    const sampleDataToUpload = { id: 1, name: 'test' };
    const result2 = await axios.post(`https://myurl2`, sampleDataToUpload);
  }
  return context.succeed();
};
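Worth noting: S3 event notifications are delivered at least once, so occasional duplicate invocations with distinct request IDs can occur even with retries set to 0. A minimal idempotency sketch, assuming a hypothetical DynamoDB table named processed-s3-events with partition key eventId:

const AWS = require('aws-sdk');
const dynamo = new AWS.DynamoDB.DocumentClient();

async function alreadyProcessed(record) {
  // bucket/key/sequencer uniquely identifies one S3 object event
  const eventId = `${record.s3.bucket.name}/${record.s3.object.key}/${record.s3.object.sequencer}`;
  try {
    await dynamo.put({
      TableName: 'processed-s3-events', // assumed table name
      Item: { eventId },
      ConditionExpression: 'attribute_not_exists(eventId)'
    }).promise();
    return false; // first delivery of this event
  } catch (err) {
    if (err.code === 'ConditionalCheckFailedException') {
      return true; // duplicate delivery, skip the work
    }
    throw err;
  }
}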

Node.js AWS S3 deleteObject() returns DeleteMarker true even for a missing key

If I send the request with the right key, the object is deleted, and I thought it was working because DeleteMarker came back true. But if I send it with a key that is not in S3, DeleteMarker also comes back true. How should I validate the deletion?
const AWS = require('aws-sdk');
const s3 = new AWS.S3({
  accessKeyId: awsConfig.accessKeyId,
  secretAccessKey: awsConfig.secretAccessKey,
  region: awsConfig.region,
});
(async function () {
  const params = {
    Bucket: awsConfig.bucket,
    Key: '17f316af9b011f08b68538a8ac76b73f.jpg',
  };
  const deleteS3Data = await s3.deleteObject(params).promise();
  if (deleteS3Data.DeleteMarker !== true) {
    throw new Error("I don't have the key.");
  } else {
    return deleteS3Data;
  }
}());
// deleteS3Data.DeleteMarker is true no matter what key is sent
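A possible workaround, sketched below: on a versioned bucket, deleteObject on a nonexistent key still creates a delete marker, so DeleteMarker true does not prove the object existed. One option is to check existence with headObject first (deleteIfExists is an illustrative name):

// Sketch: headObject rejects with code "NotFound" when the key is missing,
// so check existence before deleting.
async function deleteIfExists(params) {
  try {
    await s3.headObject(params).promise();
  } catch (err) {
    if (err.code === 'NotFound') {
      throw new Error("I don't have the key.");
    }
    throw err;
  }
  return s3.deleteObject(params).promise();
}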

AWS Textract methods in Node.js are not getting invoked

I want to extract text from an image using Node.js, so I created a Lambda in AWS; the code snippet is below. The issue is that the Textract method detectDocumentText is not getting invoked.
As far as permissions go, I have given the Lambda full access to S3 and Textract. Am I missing anything?
var AWS = require("aws-sdk");
var base64 = require("base-64");
var fs = require("fs");

exports.handler = async (event, context, callback) => {
  // Input for Textract can be a byte array or an S3 object
  AWS.config.region = "us-east-1";
  //AWS.config.update({ region: 'us-east-1' });
  var textract = new AWS.Textract({ apiVersion: "2018-06-27" });
  //var textract = new AWS.Textract();
  console.log(textract);
  var params = {
    Document: {
      /* required */
      //'Bytes': imageBase64
      S3Object: {
        Bucket: "717577",
        Name: "Picture2.png"
      }
    }
  };
  textract.detectDocumentText(params, function (err, data) {
    if (err) {
      console.log(err); // an error occurred
    } else {
      console.log(data); // successful response
      callback(null, data);
    }
  });
};
I also don't see any error logs in CloudWatch.
The problem is that you have marked your handler as async, which means it returns a promise. In your case, that promise resolves before the Textract callback ever fires, so Lambda considers the execution complete. You have two choices here:
Remove async, or
(more recommended) convert the callback style to promises. The aws-sdk supports a .promise() method on all service calls, so you can leverage that. The code will look like this:
var AWS = require("aws-sdk");
var base64 = require("base-64");
var fs = require("fs");

exports.handler = async (event, context) => {
  // Input for Textract can be a byte array or an S3 object
  AWS.config.region = "us-east-1";
  //AWS.config.update({ region: 'us-east-1' });
  var textract = new AWS.Textract({ apiVersion: "2018-06-27" });
  //var textract = new AWS.Textract();
  console.log(textract);
  var params = {
    Document: {
      /* required */
      //'Bytes': imageBase64
      S3Object: {
        Bucket: "717577",
        Name: "Picture2.png"
      }
    }
  };
  const data = await textract.detectDocumentText(params).promise();
  return data;
};
Hope this helps.

Lambda randomly misses files, creates dupes in S3

I have a Lambda function to ingest images from an S3 bucket, get some metadata, store it in an AWS RDS instance, and then re-upload the image. It should be simple, but I fear one of the following is causing issues.
Sometimes it creates duplicates
Sometimes it misses files
It seems to happen with larger sets of images. With sub-1000 assets it works reasonably well; with 3000+ it seems unreliable. The function is not set to time out too early (30 seconds should be fine) and it has a good memory allocation (512MB); please tell me if these are false assumptions. I am an amateur at this and a novice with Lambda, so please let me know what you think of what I have done.
const AWS = require('aws-sdk')
const uuidv4 = require('uuid/v4');
AWS.config.update({
  accessKeyId: 'XXX',
  secretAccessKey: 'XXX'
})
const s3 = new AWS.S3({
  signatureVersion: 'v4',
  region: 'eu-west-2'
})
const hasha = require('hasha')
const { Pool, Client } = require('pg')
const pool = new Pool({
  user: 'XXX',
  host: 'XXX',
  database: 'XXX',
  password: 'XXX',
  port: 5432,
})

exports.handler = async (event, context) => {
  const bucket = event.Records[0].s3.bucket.name;
  const key = decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, ' '));
  console.log("Processing: " + key)
  // Get file
  try {
    const file = await s3.getObject({
      Bucket: bucket,
      Key: key
    }).promise()
    const hash = await hasha(file.Body, { algorithm: 'md5' })
    const id = uuidv4()
    const newfile = await s3.putObject({
      Bucket: 'XXX',
      Key: id,
      Body: file.Body,
      ContentType: file.ContentType
    }).promise()
    var fileString = key.split('/')
    var fileName = fileString[fileString.length - 1]
    const text = 'INSERT INTO original(original_filename, mime, file_size, file_path, file_name, hash) VALUES($1, $2, $3, $4, $5, $6) RETURNING *'
    const values = [fileName, file.ContentType, file.ContentLength, key, id, hash]
    const res = await pool.query(text, values)
    console.log(res.rows[0])
    return "Done"
  } catch (err) {
    console.log("####### Error #######: ", err)
    return "Error"
  }
}
I expect that when X files are uploaded, the same number end up in the target bucket and in my DB table. This is not always the case, and it is tricky to unpick where it goes wrong. I am sure there is a more elegant way to do this.
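One thing worth ruling out, sketched below: the handler only reads event.Records[0], but an S3 notification can carry several records, so any siblings in the same event would be silently skipped (processImage is a hypothetical stand-in for the body of the original try block):

exports.handler = async (event) => {
  // Process every record in the notification, not just the first one.
  const results = await Promise.all(event.Records.map((record) => {
    const bucket = record.s3.bucket.name;
    const key = decodeURIComponent(record.s3.object.key.replace(/\+/g, ' '));
    return processImage(bucket, key); // hypothetical helper wrapping the original logic
  }));
  return results;
};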
