Ec2 Instance Data Not pushed in array - node.js

I want to query all EC2 instances across regions.
I can log the data, but I can't push it into an array.
Here is the code.
To loop over regions:
let AWS = require('aws-sdk');

function regionConfig(reg) {
  return new AWS.EC2({
    region: reg
  });
}
To list the allowed regions:
const listRegions = async () => {
  ec2 = regionConfig('eu-west-1')
  let regionsArray = []
  return await new Promise((resolve, reject) => {
    ec2.describeRegions({}, function(err, data) {
      if (err) reject(err, err.stack); // an error occurred
      else {
        for (let region of data.Regions) {
          regionsArray.push(region.RegionName)
        }
        resolve(regionsArray)
      }
    });
  })
}
To describe the EC2 instances:
const describeIntances = async () => {
  regions = await listRegions()
  let instances = [];
  return await new Promise((resolve, reject) => {
    for (let i of regions) {
      let ec2 = regionConfig(i)
      ec2.describeInstances({}, function(err, data) {
        if (err) reject(err, err.stack); // an error occurred
        else {
          let instances = [];
          console.log(data)
          for (let reservation of data.Reservations) {
            instances.push(reservation)
          }
          resolve(instances)
        }
      })
    }
  })
}
To execute the Lambda:
const handler = async (event) => {
  const regions = await listRegions()
  // console.log(regions)
  const describeInstances = await describeIntances()
  console.log(describeInstances)
  const response = {
    statusCode: 200,
    body: JSON.stringify('Hello from Lambda!'),
  };
  return response;
};

handler()
I can see data for as many instances as I have, but when I push, the array stays empty.
Could it be because of the async behaviour?
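The short answer to the async question is yes: resolve(instances) fires inside whichever describeInstances callback returns first, and the inner let instances = [] shadows the outer array, so the outer array never gets filled. A minimal sketch of one way to restructure this (not the original code; it reuses regionConfig and listRegions from above and the aws-sdk v2 .promise() helper):

const describeInstancesAcrossRegions = async () => {
  const regions = await listRegions();

  // One promise per region; Promise.all waits for every region to answer.
  const perRegion = await Promise.all(
    regions.map((region) => regionConfig(region).describeInstances({}).promise())
  );

  // Flatten [{ Reservations: [...] }, ...] into a single array of reservations.
  const instances = [];
  for (const data of perRegion) {
    instances.push(...data.Reservations);
  }
  return instances;
};

Because everything is awaited before the function returns, nothing resolves until every region has been described.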

Related

AWS S3 Angular 14 with Nodejs - Multi Part Upload sending the same ETag for every part

The backend Node.js controller looks like this:
const AWS = require('aws-sdk');

const S3 = new AWS.S3({
  // endpoint: "http://bucket.analysts24x7.com.s3-website-us-west-1.amazonaws.com",
  // accessKeyId: S3_KEY,
  // secretAccessKey: S3_SECRET,
  // region: process.env.POOL_REGION,
  apiVersion: '2006-03-01',
  signatureVersion: 'v4',
  // maxRetries: 10
});
exports.startUpload = (req, res) => {
  try {
    const filesData = JSON.parse(JSON.stringify(req.files));
    const eachFiles = Object.keys(filesData)[0];
    console.log(filesData[eachFiles]);
    let params = {
      Bucket: process.env.STORE_BUCKET_NAME,
      Key: filesData[eachFiles].name,
      // Body: Buffer.from(filesData[eachFiles].data.data, "binary"),
      ContentType: filesData[eachFiles].mimetype
      // ContentType: filesData[eachFiles].data.type
    };
    return new Promise((resolve, reject) => {
      S3.createMultipartUpload(params, (err, uploadData) => {
        if (err) {
          reject(res.send({
            error: err
          }));
        } else {
          resolve(res.send({ uploadId: uploadData.UploadId }));
        }
      });
    });
  } catch (err) {
    res.status(400).send({
      error: err
    })
  }
}
exports.getUploadUrl = async (req, res) => {
  try {
    let params = {
      Bucket: process.env.STORE_BUCKET_NAME,
      Key: req.body.fileName,
      PartNumber: req.body.partNumber,
      UploadId: req.body.uploadId
    }
    return new Promise((resolve, reject) => {
      S3.getSignedUrl('uploadPart', params, (err, presignedUrl) => {
        if (err) {
          reject(res.send({
            error: err
          }));
        } else {
          resolve(res.send({ presignedUrl }));
        }
      });
    })
  } catch (err) {
    res.status(400).send({
      error: err
    })
  }
}
exports.completeUpload = async (req, res) => {
  try {
    let params = {
      Bucket: process.env.STORE_BUCKET_NAME,
      Key: req.body.fileName,
      MultipartUpload: {
        Parts: req.body.parts
      },
      UploadId: req.body.uploadId
    }
    // console.log("-----------------")
    // console.log(params)
    // console.log("-----------------")
    return new Promise((resolve, reject) => {
      S3.completeMultipartUpload(params, (err, data) => {
        if (err) {
          reject(res.send({
            error: err
          }));
        } else {
          resolve(res.send({ data }));
        }
      })
    })
  } catch (err) {
    res.status(400).send({
      error: err
    })
  }
}
The frontend Angular 14 code:
uploadSpecificFile(index) {
  const fileToUpload = this.fileInfo[index];
  const formData: FormData = new FormData();
  formData.append('file', fileToUpload);
  this.shared.startUpload(formData).subscribe({
    next: (response) => {
      const result = JSON.parse(JSON.stringify(response));
      this.multiPartUpload(result.uploadId, fileToUpload).then((resp) => {
        return this.completeUpload(result.uploadId, fileToUpload, resp);
      }).then((resp) => {
        console.log(resp);
      }).catch((err) => {
        console.error(err);
      })
    },
    error: (error) => {
      console.log(error);
    }
  })
}
multiPartUpload(uploadId, fileToUpload) {
  return new Promise((resolve, reject) => {
    const CHUNKS_COUNT = Math.floor(fileToUpload.size / CONSTANTS.CHUNK_SIZE) + 1;
    let promisesArray = [];
    let params = {};
    let start, end, blob;
    for (let index = 1; index < CHUNKS_COUNT + 1; index++) {
      start = (index - 1) * CONSTANTS.CHUNK_SIZE
      end = (index) * CONSTANTS.CHUNK_SIZE
      blob = (index < CHUNKS_COUNT) ? fileToUpload.slice(start, end) : fileToUpload.slice(start);
      // blob.type = fileToUpload.type;
      params = {
        fileName: fileToUpload.name,
        partNumber: index,
        uploadId: uploadId
      }
      console.log("Start:", start);
      console.log("End:", end);
      console.log("Blob:", blob);
      this.shared.getUploadUrl(params).subscribe({
        next: (response) => {
          const result = JSON.parse(JSON.stringify(response));
          // Send part aws server
          const options = {
            headers: { 'Content-Type': fileToUpload.type }
          }
          let uploadResp = axios.put(result.presignedUrl, blob, options);
          promisesArray.push(uploadResp);
          if (promisesArray.length == CHUNKS_COUNT) {
            resolve(promisesArray)
          }
        },
        error: (error) => {
          console.log(error);
          reject(error);
        }
      })
    }
  })
}
async completeUpload(uploadId, fileToUpload, resp) {
  let resolvedArray = await Promise.all(resp)
  let uploadPartsArray = [];
  console.log("I am etag -----");
  console.log(resolvedArray);
  resolvedArray.forEach((resolvedPromise, index) => {
    uploadPartsArray.push({
      ETag: resolvedPromise.headers.etag,
      PartNumber: index + 1
    })
  })
  // Complete upload here
  let params = {
    fileName: fileToUpload.name,
    parts: uploadPartsArray,
    uploadId: uploadId
  }
  return new Promise((resolve, reject) => {
    this.shared.completeUpload(params).subscribe({
      next: (response) => {
        resolve(response);
      },
      error: (error) => {
        reject(error);
      }
    })
  })
}
What I am trying to do:
Initiate a multipart upload (API - /start-upload) to get the uploadId.
Upload the object's parts (API - /get-upload-url) to get a presignedUrl for each part.
Call the presigned URL and PUT the blob as the part body, to get the ETag.
Complete the multipart upload (API - /complete-upload) to send the completed parts.
Sample example code:
Frontend:
https://github.com/abhishekbajpai/aws-s3-multipart-upload/blob/master/frontend/pages/index.js
Backend:
https://github.com/abhishekbajpai/aws-s3-multipart-upload/blob/master/backend/server.js
(A screenshot of what the API calls look like was attached here; not reproduced.)
Now the problem: every time I call the presigned URL using axios (step 3 above), I get the same ETag back for every part. Because of that, the final upload fails with the error:
Your proposed upload is smaller than the minimum allowed size
Note:
Every chunk I upload has size
CHUNK_SIZE: 5 * 1024 * 1024, // 5 MiB
apart from the last part.
Also, all of the APIs return a success response apart from /complete-upload, because every part comes back with the same ETag.
The same question was also asked here, but there is no solution:
https://github.com/aws/aws-sdk-java/issues/2615
Any idea what is going on, and how can it be resolved? It seems like an uncommon problem.
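One thing worth checking (an observation about the posted code, not a confirmed diagnosis): start, end, blob and params are declared once outside the for loop in multiPartUpload, but getUploadUrl(...).subscribe(...) fires asynchronously, so by the time each next callback runs, blob may already hold the last (small) slice. Every part would then upload the same small chunk, which would explain both the identical ETags and the "smaller than the minimum allowed size" error. A sketch with each chunk scoped to its own iteration, reusing the question's shared service, CONSTANTS and axios:

multiPartUpload(uploadId, fileToUpload) {
  const CHUNKS_COUNT = Math.floor(fileToUpload.size / CONSTANTS.CHUNK_SIZE) + 1;
  const partPromises = [];

  for (let index = 1; index <= CHUNKS_COUNT; index++) {
    // const inside the loop: each async callback captures its own chunk,
    // instead of all of them seeing the final value of a shared variable.
    const start = (index - 1) * CONSTANTS.CHUNK_SIZE;
    const blob = index < CHUNKS_COUNT
      ? fileToUpload.slice(start, start + CONSTANTS.CHUNK_SIZE)
      : fileToUpload.slice(start);
    const params = { fileName: fileToUpload.name, partNumber: index, uploadId: uploadId };

    partPromises.push(new Promise((resolve, reject) => {
      this.shared.getUploadUrl(params).subscribe({
        next: (response) => {
          const result = JSON.parse(JSON.stringify(response));
          // Resolve with the axios response so the caller can read each part's ETag header.
          resolve(axios.put(result.presignedUrl, blob, {
            headers: { 'Content-Type': fileToUpload.type }
          }));
        },
        error: (error) => reject(error)
      });
    }));
  }

  // One axios response (with its own ETag) per part, in part-number order.
  return Promise.all(partPromises);
}

Because Promise.all preserves order, the PartNumber: index + 1 numbering in completeUpload still lines up with the responses.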

Socket Hangup error in Nodejs for multiple API calls

I am trying to fetch a list of all companies listed on the stock market from an external API, and after getting the list I fetch all the details for each individual company, including graph data. It was all working fine; however, today I am getting a socket hang up error. I have tried going through other posts here on Stack Overflow, but none of them work.
const request = require('request');

const fetchAPI = apiPath => {
  return new Promise(function (resolve, reject) {
    request(apiPath, function (error, response, body) {
      if (!error && response.statusCode == 200) {
        resolve(body);
      } else {
        reject(error);
      }
    });
  });
}

// get list of all companies listed in
const fetchCompanyDetails = () => {
  return new Promise(function (resolve, reject) {
    let details = [];
    fetchAPI('https://api//')
      .then(res => {
        res = JSON.parse(res)
        details.push(res);
        resolve(details);
      })
      .catch(err => {
        console.log("error at fetchcompany details" + err);
      })
  });
}

const getDateAndPriceForGraphData = (graphData) => {
  let res = []
  graphData.forEach(data => {
    let d = {}
    d["x"] = new Date(data.businessDate).getTime() / 1000
    d["y"] = data.lastTradedPrice
    res.push(d)
  })
  return res
}
// get graph data for individual assets
const getGraphDataForAssets = (assetID) => {
  return new Promise((resolve, reject) => {
    let details = {};
    fetchAPI(`https://api/${assetID}`)
      .then(async (res) => {
        res = JSON.parse(res)
        let data = await getDateAndPriceForGraphData(res)
        details = data
        resolve(details);
      })
      .catch(err => {
        console.log("error at getGraphDataForAssets" + err);
      })
  });
}

// fetch data about individual assets
const fetchAssetDetailsOfIndividualCompanies = (assetID) => {
  return new Promise((resolve, reject) => {
    let details = {"assetData": {}, "graphData": {}};
    fetchAPI(`https://api/${assetID}`)
      .then(async (res1) => {
        res1 = JSON.parse(res1)
        details["assetData"] = res1
        // get graph data
        var graphData = await getGraphDataForAssets(assetID)
        details["graphData"] = graphData
        resolve(details);
      })
      .catch(err => {
        console.log("error at fetchAssetDetailsOfIndividualCompanies" + err);
        reject(err)
      })
  });
}
// returns list of details of all tradeable assets (Active and Suspended but not delisted)
const fetchDetailsForEachCompany = async (companyList) => {
  let result = []
  await Promise.all(companyList.map(async (company) => {
    try {
      // return data for active and suspended assets
      if (company.status != "D") {
        let companyData = await fetchAssetDetailsOfIndividualCompanies(company.id)
        result.push(companyData)
      }
    } catch (error) {
      console.log('error at fetchDetailsForEachCompany' + error);
    }
  }))
  return result
}

exports.fetchAssetDetails = async () => {
  let companyDetails = await fetchCompanyDetails()
  let det = await fetchDetailsForEachCompany(companyDetails[0])
  return det
}
To expand on what I meant with not needing those new Promise()s, this would be an idiomatic async function refactoring for the above code.
I eliminated getGraphDataForAssets, since it was eventually not used; fetchAssetDetailsOfIndividualCompanies fetched the same data (based on URL, anyway), and then had getGraphDataForAssets fetch it again.
const request = require("request");

function fetchAPI(apiPath) {
  return new Promise(function (resolve, reject) {
    request(apiPath, function (error, response, body) {
      if (!error && response.statusCode === 200) {
        resolve(body);
      } else {
        reject(error);
      }
    });
  });
}

async function fetchJSON(url) {
  return JSON.parse(await fetchAPI(url));
}

async function fetchCompanyDetails() {
  return [await fetchJSON("https://api//")];
}

function getDateAndPriceForGraphData(graphData) {
  return graphData.map((data) => ({
    x: new Date(data.businessDate).getTime() / 1000,
    y: data.lastTradedPrice,
  }));
}

// fetch data about individual assets
async function fetchAssetDetailsOfIndividualCompanies(assetID) {
  const assetData = await fetchJSON(`https://api/${assetID}`);
  const graphData = getDateAndPriceForGraphData(assetData);
  return { assetID, assetData, graphData };
}

// returns list of details of all tradeable assets (Active and Suspended but not delisted)
async function fetchDetailsForEachCompany(companyList) {
  const promises = companyList.map(async (company) => {
    if (company.status === "D") return null;
    return fetchAssetDetailsOfIndividualCompanies(company.id);
  });
  const results = await Promise.all(promises);
  return results.filter(Boolean); // drop nulls
}

async function fetchAssetDetails() {
  const companyDetails = await fetchCompanyDetails();
  return await fetchDetailsForEachCompany(companyDetails[0]);
}

exports.fetchAssetDetails = fetchAssetDetails;
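If the hang-ups only appeared once the company list grew, one possibility (a guess, not something the refactoring above addresses) is that mapping straight to Promise.all fires one request per company at the same time and overwhelms the API or the socket pool. A small batching sketch over the same fetchAssetDetailsOfIndividualCompanies, with a hypothetical batchSize:

// Run the per-company fetches in batches instead of all at once.
async function fetchDetailsForEachCompanyBatched(companyList, batchSize = 20) {
  const results = [];
  for (let i = 0; i < companyList.length; i += batchSize) {
    const batch = companyList
      .slice(i, i + batchSize)
      .filter((company) => company.status !== "D");
    // Wait for this batch to finish before starting the next one.
    const batchResults = await Promise.all(
      batch.map((company) => fetchAssetDetailsOfIndividualCompanies(company.id))
    );
    results.push(...batchResults);
  }
  return results;
}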

Call DynamoDb scan recursively when Promisified

I need to get some data from DynamoDB using the scan() method. I have implemented some basic pagination by calling my function recursively n times to get the correct page.
Currently, I call my function and, inside the scan() callback, if the data can be sent back, I use the handler callback to return the data.
CURRENT CODE
const AWS = require('aws-sdk')
const docClient = new AWS.DynamoDB.DocumentClient()
const ORGANISATION_TYPES_TABLE_NAME = process.env.ORGANISATION_TYPES_TABLE_NAME
const DEFAULT_PAGE_SIZE = 500
const DEFAULT_PAGE_NUMBER = 1

const self = {
  handler: (event, context, callback) => {
    const {pageNumber, pageSize} = event.queryStringParameters ? event.queryStringParameters : {pageNumber: DEFAULT_PAGE_NUMBER, pageSize: DEFAULT_PAGE_SIZE}
    const params = {
      TableName: ORGANISATION_TYPES_TABLE_NAME,
      Limit: pageSize ? pageSize : DEFAULT_PAGE_SIZE
    }
    return self.scan(params, pageNumber, 1, callback)
  },
  scan: (params, pageNumber, pageCount, callback) => {
    docClient.scan(params, (err, data) => {
      if (err) {
        callback(null, {
          statusCode: 500,
          body: JSON.stringify(err)
        })
      };
      if (data.LastEvaluatedKey && pageCount < pageNumber) {
        pageCount += 1
        params.ExclusiveStartKey = data.LastEvaluatedKey
        self.scan(params, pageNumber, pageCount, callback)
      } else {
        callback(null, {
          statusCode: 200,
          body: JSON.stringify(data)
        })
      }
    })
  }
}

module.exports = self
The above code does work, allowing me to specify a pageSize and pageNumber query parameter.
However, I want to Promisify self.scan.
I tried the following, but it results in the response being undefined
DESIRED CODE
const AWS = require('aws-sdk')
const docClient = new AWS.DynamoDB.DocumentClient()
const ORGANISATION_TYPES_TABLE_NAME = process.env.ORGANISATION_TYPES_TABLE_NAME
const DEFAULT_PAGE_SIZE = 500
const DEFAULT_PAGE_NUMBER = 1

const self = {
  handler: (event, context, callback) => {
    const {pageNumber, pageSize} = event.queryStringParameters ? event.queryStringParameters : {pageNumber: DEFAULT_PAGE_NUMBER, pageSize: DEFAULT_PAGE_SIZE}
    const params = {
      TableName: ORGANISATION_TYPES_TABLE_NAME,
      Limit: pageSize ? pageSize : DEFAULT_PAGE_SIZE
    }
    return self.scan(params, pageNumber, 1).then((response) => {
      callback(null, {
        statusCode: 200,
        body: JSON.stringify(response)
      })
    }).catch((err) => {
      callback(null, {
        statusCode: 500,
        body: JSON.stringify(err)
      })
    })
  },
  scan: (params, pageNumber, pageCount) => {
    return new Promise((resolve, reject) => {
      docClient.scan(params, (err, data) => {
        if (err) {
          reject(err)
        };
        if (data.LastEvaluatedKey && pageCount < pageNumber) {
          pageCount += 1
          params.ExclusiveStartKey = data.LastEvaluatedKey
          self.scan(params, pageNumber, pageCount, callback)
        } else {
          resolve(data)
        }
      })
    })
  }
}

module.exports = self
I also tried just doing return Promise.resolve(data) inside the docClient.scan() callback, but that doesn't work either. It's as if promises cannot be resolved inside a callback?
I have recently helped someone with this problem, and there is actually quite an elegant solution that we hit upon, which uses the hasNextPage() method on the $response you get back from the SDK. The key is to have your recursive function pass an array that holds your results through the recursive calls, concatenating until you run out of pages, and then just return the array.
const scan = async params => {
  function scanRec(promise, xs) {
    return promise.then(result => {
      const response = result.$response;
      const items = xs.concat(result.Items);
      // Return the recursive call so the accumulated array propagates back up.
      return response.hasNextPage()
        ? scanRec(response.nextPage().promise(), items)
        : items;
    });
  }
  return scanRec(docClient.scan(params).promise(), []);
}
You'd then use the function in the normal way:
const params = { /** params **/ };
scan(params).then(x => {
  // ...
})
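If you prefer to keep the original pageNumber/pageSize semantics, a minimal async sketch (same docClient, params, pageNumber and pageCount names as in the question; aws-sdk v2's .promise() assumed) might look like this:

const scanPage = async (params, pageNumber, pageCount = 1) => {
  // .promise() keeps everything in Promise land instead of mixing in callbacks.
  const data = await docClient.scan(params).promise();
  if (data.LastEvaluatedKey && pageCount < pageNumber) {
    // Return (not just call) the recursive result so it reaches the caller.
    return scanPage(
      { ...params, ExclusiveStartKey: data.LastEvaluatedKey },
      pageNumber,
      pageCount + 1
    );
  }
  return data;
};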

Getting infinite loop in firebase cloud function

I am using Firestore to store the data in Firebase. To maintain the counts I am using a Cloud Function. When I try to add/update/delete an entry in one collection, it starts an infinite loop involving another collection.
Example:
I have a users table and an agents table; when I add/update/delete a user, it should also be updated in the agents table.
Though I have used separate functions for users and agents, I am still getting an infinite loop. Can anyone tell me how to resolve it?
Query to update the user in the users and agents tables:
export const addUser = (values) =>
  db
    .collection('users')
    .add(values)
    .then((docRef) => {
      let customer = { customer: {} };
      customer.customer[docRef.id] = {
        id: docRef.id,
        name: values.name,
        commission: values.agent.commission
      };
      let agentId = values.agent.id;
      db.collection('agents')
        .doc(agentId)
        .set(customer, { merge: true });
    });
Cloud function for user:
const functions = require("firebase-functions");
const admin = require("firebase-admin");

exports = module.exports = functions.firestore
  .document("users/{userUid}")
  .onWrite(
    (change, context) =>
      new Promise((resolve, reject) => {
        let dashboardId;
        getDashboardId();
      })
  );

getDashboardId = () => {
  admin.firestore().collection('dashboard').get().then((snapshot) => {
    if (snapshot.size < 1) {
      dashboardId = admin.firestore().collection('dashboard').doc().id;
    } else {
      snapshot.docs.forEach((doc) => {
        dashboardId = doc.id;
      });
    }
    return updateUser(dashboardId);
  }).catch((error) => {
    console.log('error is', error);
  });
}

updateUser = (id) => {
  admin.firestore().collection('users').where('isDeleted', '==', false).get().then((snap) => {
    let usersData = { users: snap.size };
    return admin.firestore().collection('dashboard').doc(id).set(usersData, { merge: true });
  }).catch((error) => {
    console.log('error is', error);
  });
}
Cloud function for agent:
const functions = require("firebase-functions");
const admin = require("firebase-admin");

exports = module.exports = functions.firestore
  .document("agents/{agentUid}")
  .onWrite(
    (change, context) =>
      new Promise((resolve, reject) => {
        let dashboardId;
        getDashboardId();
      })
  );

getDashboardId = () => {
  admin.firestore().collection('dashboard').get().then((snapshot) => {
    if (snapshot.size < 1) {
      dashboardId = admin.firestore().collection('dashboard').doc().id;
    } else {
      snapshot.docs.forEach((doc) => {
        dashboardId = doc.id;
      });
    }
    return updateAgent(dashboardId);
  }).catch((error) => {
    console.log('error is', error);
  });
}

updateAgent = (id) => {
  admin.firestore().collection('agents').where('isDeleted', '==', false).get().then((snap) => {
    let agentsData = { agents: snap.size };
    return admin.firestore().collection('dashboard').doc(id).set(agentsData, { merge: true });
  }).catch((error) => {
    console.log('error is', error);
  });
}
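Whatever is triggering the loop, one issue is visible in both functions as posted: the new Promise((resolve, reject) => { ... }) returned from onWrite never resolves or rejects, so Cloud Functions cannot tell when the work has finished. A sketch of the users trigger returning its promise chain directly (the export name is illustrative; admin and updateUser are the helpers from the question, and a single dashboard document is assumed):

exports.onUserWrite = functions.firestore
  .document("users/{userUid}")
  .onWrite((change, context) => {
    // Return the whole chain so the function only completes when the writes do.
    return admin.firestore().collection("dashboard").get()
      .then((snapshot) => {
        const dashboardId = snapshot.size < 1
          ? admin.firestore().collection("dashboard").doc().id
          : snapshot.docs[0].id; // assumes a single dashboard document
        return updateUser(dashboardId);
      })
      .catch((error) => console.log("error is", error));
  });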

Delete all items in Dynamodb using Lambda?

Using Lambda (Node.js), how do I delete all the items in a DynamoDB table?
There are 500K rows in the table.
I have tried using the scan method, then looping through each item and calling the delete method, but it only handles about 3,000 rows.
Code
const AWS = require('aws-sdk');
// DocumentClient instance (not shown in the original snippet)
const client = new AWS.DynamoDB.DocumentClient();

exports.handler = function(context, callback) {
  getRecords().then((data) => {
    data.Items.forEach(function(item) {
      deleteItem(item.Id).then((data1) => {
      });
    });
  });
};

var deleteItem = function(id) {
  var params = {
    TableName: "TableName",
    Key: {
      "Id": id
    },
  };
  return new Promise(function(resolve, reject) {
    client.delete(params, function(err, data) {
      if (err) {
        reject(err);
      } else {
        resolve();
      }
    });
  });
}

function getRecords() {
  var params = {
    TableName: 'TableName',
    IndexName: 'Type-index',
    KeyConditionExpression: 'Type = :ty',
    ExpressionAttributeValues: {
      ':ty': "1"
    },
    ProjectionExpression: "Id",
  };
  return new Promise(function(resolve, reject) {
    client.query(params, function(err, data) {
      if (err) {
        reject(err);
      } else {
        resolve(data);
      }
    });
  });
}
There is already one right answer, but here is another code snippet to delete all records from DynamoDB.
const AWS = require("aws-sdk");

AWS.config.update({
  region: "us-east-1",
});

const docClient = new AWS.DynamoDB.DocumentClient();

const getAllRecords = async (table) => {
  let params = {
    TableName: table,
  };
  let items = [];
  let data = await docClient.scan(params).promise();
  items = [...items, ...data.Items];
  while (typeof data.LastEvaluatedKey != "undefined") {
    params.ExclusiveStartKey = data.LastEvaluatedKey;
    data = await docClient.scan(params).promise();
    items = [...items, ...data.Items];
  }
  return items;
};

const deleteItem = (table, id) => {
  var params = {
    TableName: table,
    Key: {
      id: id,
    },
  };
  return new Promise(function (resolve, reject) {
    docClient.delete(params, function (err, data) {
      if (err) {
        console.log("Error Deleting ", id, err);
        reject(err);
      } else {
        console.log("Success Deleting ", id, err);
        resolve();
      }
    });
  });
};

exports.handler = async function (event, context, callback) {
  try {
    const tableName = "<table>";
    // scan and get all items
    const allRecords = await getAllRecords(tableName);
    // delete one by one
    for (const item of allRecords) {
      await deleteItem(tableName, item.id);
    }
    callback(null, {
      msg: "All records are deleted.",
    });
  } catch (e) {
    callback(null, JSON.stringify(e, null, 2));
  }
};
A Scan operation consumes read capacity and returns at most 1 MB of data per request. When that limit is reached, the Scan returns only what it has found so far. If you need more, you need to issue another Scan request.
Thus, you'll need two loops: 1) a loop to delete all records returned by each Scan; 2) a loop to keep scanning, multiple times, until you reach the end of the table.
Make sure you use consistent reads, or wait a second or two before issuing another Scan, otherwise you may get repeated items across different Scans.
exports.handler = function(context, callback) {
  clearRecords();
};

clearRecords = function() {
  getRecords().then((data) => {
    data.Items.forEach(function(item) {
      deleteItem(item.Id).then((data1) => {});
    });
    clearRecords(); // Will call the same function over and over
  });
}
Observe that Lambda has a timeout limit of 15 minutes. Since you have 500K items in your table, it's likely that your Lambda will timeout and you'll need to trigger it more than once. You could also make your Lambda call itself after 14:50, for example, just take a look at the AWS SDK documentation for triggering Lambda functions. For this matter, you might also want to check the getRemainingTimeInMillis() method from the context object.
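Deleting 500K items one DeleteItem call at a time will also be slow. A hedged sketch (the table name and an "id" primary-key attribute are assumptions) that uses the DocumentClient's batchWrite, which accepts up to 25 delete requests per call:

const AWS = require("aws-sdk");
const docClient = new AWS.DynamoDB.DocumentClient();

// Delete items in batches of 25 (the BatchWriteItem per-request limit).
const deleteInBatches = async (tableName, items) => {
  for (let i = 0; i < items.length; i += 25) {
    const batch = items.slice(i, i + 25);
    const params = {
      RequestItems: {
        [tableName]: batch.map((item) => ({
          DeleteRequest: { Key: { id: item.id } }, // "id" assumed to be the table's key
        })),
      },
    };
    const result = await docClient.batchWrite(params).promise();
    // A real implementation should retry anything DynamoDB reports as unprocessed.
    if (result.UnprocessedItems && Object.keys(result.UnprocessedItems).length > 0) {
      console.log("Unprocessed items:", result.UnprocessedItems);
    }
  }
};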
