Data returned from promises with AWS-SDK for node and bluebird - node.js

I'm trying to fetch for each region in AWS all their Elastic IPs registered. The piece of code that I'm currently handling is the following:
// Fetch all EC2 regions, then query the Elastic IPs of every region in parallel.
logger.info('About to fetch Regions');
ec2.describeRegionsPromised({}).then(function (data) {
var addressesPromises = [];
logger.info('Fetched Regions');
logger.info(data);
_.forEach(data.Regions, function (region) {
// NOTE(review): _.create() only builds a plain object whose prototype is the
// shared `ec2` client — it does NOT construct a new AWS EC2 client, so these
// requests may all use the original client's configuration; verify against
// the aws-sdk docs before reusing this pattern.
var ec2Addresses = _.create(ec2, {region: region.RegionName});
addressesPromises.push(ec2Addresses.describeAddressesPromised());
});
logger.info('About to fetch addresses per region');
// Resolves once every per-region describeAddresses call has completed.
return Promise.all(addressesPromises);
}).then(function (data) {
// `data` is an array with one describeAddresses result per region.
logger.info('Fetched addresses per region');
logger.debug(data);
}).catch(function (err) {
logger.error('There was an error when fetching regions and addresses');
logger.error(err);
});
This works ok, but my problem is that I'm looking at the second .then promised-callback function data parameter and its data is an array with the same length of the regions returned on the first request.
I know that I'm only using 1 Elastic IP in one region. For all the other regions I don't have any associated.
The Regions returned are the following (it's actually a formatted JSON):
Regions=[RegionName=eu-west-1, Endpoint=ec2.eu-west-1.amazonaws.com, RegionName=ap-southeast-1, Endpoint=ec2.ap-southeast-1.amazonaws.com, RegionName=ap-southeast-2, Endpoint=ec2.ap-southeast-2.amazonaws.com, RegionName=eu-central-1, Endpoint=ec2.eu-central-1.amazonaws.com, RegionName=ap-northeast-1, Endpoint=ec2.ap-northeast-1.amazonaws.com, RegionName=us-east-1, Endpoint=ec2.us-east-1.amazonaws.com, RegionName=sa-east-1, Endpoint=ec2.sa-east-1.amazonaws.com, RegionName=us-west-1, Endpoint=ec2.us-west-1.amazonaws.com, RegionName=us-west-2, Endpoint=ec2.us-west-2.amazonaws.com]
In JSON it would be:
{ Regions: [] } //and so on
And the Elastic IP returned are the following:
[ { Addresses: [ [PublicIp=XX.XX.XXX.XXX, AllocationId=eipalloc-XXXXXXXX, Domain=vpc] ] },
{ Addresses: [ [PublicIp=XX.XX.XXX.XXX, AllocationId=eipalloc-XXXXXXXX, Domain=vpc] ] },
{ Addresses: [ [PublicIp=XX.XX.XXX.XXX, AllocationId=eipalloc-XXXXXXXX, Domain=vpc] ] },
{ Addresses: [ [PublicIp=XX.XX.XXX.XXX, AllocationId=eipalloc-XXXXXXXX, Domain=vpc] ] },
{ Addresses: [ [PublicIp=XX.XX.XXX.XXX, AllocationId=eipalloc-XXXXXXXX, Domain=vpc] ] },
{ Addresses: [ [PublicIp=XX.XX.XXX.XXX, AllocationId=eipalloc-XXXXXXXX, Domain=vpc] ] },
{ Addresses: [ [PublicIp=XX.XX.XXX.XXX, AllocationId=eipalloc-XXXXXXXX, Domain=vpc] ] },
{ Addresses: [ [PublicIp=XX.XX.XXX.XXX, AllocationId=eipalloc-XXXXXXXX, Domain=vpc] ] },
{ Addresses: [ [PublicIp=XX.XX.XXX.XXX, AllocationId=eipalloc-XXXXXXXX, Domain=vpc] ] } ]
In the response, I get an array of objects whose key-values are identical for every region request, which is incorrect.
I would have expected in the second response the values resolution per each region, having the rest of them set to null, undefined, or similar.
To sum up. I don't get why resolving the values of an array of promises (using .all) will get an array of identical values in each spot - not being the expected result.
What's going on here? Thanks in advance!

I found the issue.
As @Roamer-1888 indicated, the creation of the ec2Addresses object was incorrect. Instead, I should have created a new instance using the AWS SDK constructor for EC2 objects. However, the key point of the issue was a different one. First, the code...
// Corrected version: build a fresh promisified EC2 client per region instead of
// inheriting from the shared `ec2` instance via _.create().
logger.info('About to fetch Regions');
ec2.describeRegionsPromised({}).then(function (data) {
var addressesPromises = [];
logger.info('Fetched Regions');
_.forEach(data.Regions, function (region) {
// aws-promised factory: returns a promisified AWS.EC2 bound to this region.
// Note it is called without `new` — the factory constructs the client itself.
var ec2Addresses = AWS.ec2(
{
region: region.RegionName,
endpoint: region.Endpoint
}
);
addressesPromises.push(ec2Addresses.describeAddressesPromised());
});
logger.info('About to fetch addresses per region');
return Promise.all(addressesPromises);
}).then(function (data) {
logger.info(arguments);
logger.info('Fetched addresses per region');
logger.debug(data);
}).catch(function (err) {
logger.error('There was an error when fetching regions and addresses');
logger.error(err);
});
As you can notice here, ec2Addresses is created invoking AWS.ec2() and not new AWS.ec2(), this is because the AWS-promised module creates the object and return it back promisifying it. (https://github.com/davidpelayo/aws-promised/blob/master/ecs.js):
'use strict';
var AWS = require('aws-sdk');
var memoize = require('lodash/function/memoize');
var promisifyAll = require('./lib/util/promisifyAll');
// Factory: builds an AWS.ECS client and decorates it with *Promised methods.
function ecs(options) {
return promisifyAll(new AWS.ECS(options));
}
/**
* Returns an instance of AWS.ECS which has Promise methods
* suffixed by "Promised"
*
* e.g.
* createService => createServicePromised
*
* @param options
*/
// NOTE(review): memoize caches results keyed on the options argument, so a
// client created with an earlier configuration can be returned for later calls
// — this is the caching behavior that caused the identical-region responses.
// TODO confirm lodash memoize key semantics for object arguments.
module.exports = memoize(ecs);
The issue was the last line of code:
module.exports = memoize(ecs)
This line of code was caching the previous execution including its previous configuration.
It turns out that, after debugging the application, I realised the regions and endpoints against which the array of promises was being executed were all the same — and that was the error.
By deleting the memoize(ecs) the expected result is the one I'm getting:
info: Fetched addresses per region
debug: Addresses=[], Addresses=[], Addresses=[], Addresses=[], Addresses=[], Addresses=[PublicIp=XX.XXX.XXX.XXX, AllocationId=eipalloc-XXXXXX, Domain=vpc], Addresses=[], Addresses=[], Addresses=[]
Thanks for reading and helping.

I found out a way of requesting addresses of different regions without creating a new EC2 object. In other words, by reusing the existing EC2 instance, by switching the endpoint like follows:
// Alternative: reuse the single EC2 client and repoint it at each region.
_.forEach(data.Regions, function (region) {
//var ec2Addresses = AWS.ec2(
// {
// region: region.RegionName,
// endpoint: region.Endpoint
// }
//);
var ep = new AWS.Endpoint(region.Endpoint);
ec2.config.region = region.RegionName;
ec2.endpoint = ep;
// NOTE(review): this mutates the shared client inside a loop while earlier
// requests may still be in flight; it relies on the SDK capturing the endpoint
// when each request is built — verify before reusing this pattern.
addressesPromises.push(ec2.describeAddressesPromised());
});
logger.info('About to fetch addresses per region');

Related

Error: Expected one matching request for criteria no parameters

I've found several answers to this problem, but all of them involve having parameters and not including them on the call. But my case is different because I do not have parameters.
According to all the material, I have consulted, I cannot find what I'm doing wrong.
Basically, it is this:
in the service I'm testing, I have a call like this:
/** GETs a two-dimensional string array from `<baseUrl>/data/` — note the trailing slash. */
public getData(): Observable<string[][]> {
const url = `${this.config.baseUrl}/data/`;
return this.http.get<string[][]>(url);
}
in providers of the spec file
{ provide: ConfigService, useValue: { baseUrl: '/base-url', tokenAuth: true } },
and
httpMock = TestBed.inject(HttpTestingController);
service = TestBed.inject(DataService);
and the test
it('should return the values of data when calling the API,' done => {
const mockValue: [['data1','data2'],['data3','data4'],['data5','data6']];
const sub = service.getData().subscribe(
value => {
expect(value).toHaveLength(3);
httpMock.verify();
done();
},
error => {
throw error;
},
);
subs.add(sub);
httpMock.expectOne('/base-url/data').flush(mockValue);
});
But when I ran the test, I received the following:
Expected one matching request for criteria "Match URL: /base-url/data/", found none.

Elasticsearch node js point in time search_phase_execution_exception

// Geo query restricted to a polygon, paged via an existing point-in-time (PIT).
const body = {
query: {
geo_shape: {
geometry: {
relation: 'within',
shape: {
type: 'polygon',
coordinates: [$polygon],
},
},
},
},
// NOTE(review): when a `pit` is supplied the search must NOT also target an
// index (the PIT already pins the indices) — passing an index alongside a pit
// is a common cause of search_phase_execution_exception; verify the client
// call that sends this body.
pit: {
id: "t_yxAwEPZXNyaS1wYzYtMjAxN3IxFjZxU2RBTzNyUXhTUV9XbzhHSk9IZ3cAFjhlclRmRGFLUU5TVHZKNXZReUc3SWcAAAAAAAALmpMWQkNwYmVSeGVRaHU2aDFZZExFRjZXZwEWNnFTZEFPM3JReFNRX1dvOEdKT0hndwAA",
keep_alive: "1m",
},
};
Query fails with search_phase_execution_exception at onBody
Without pit query works fine but it's needed to retrieve more than 10000 hits
Well, using PIT in NodeJS ElasticSearch's client is not clear, or at least is not well documented. You can create a PIT using the client like:
// Open a point-in-time on the index; the returned id is used for paging.
const pitRes = await elastic.openPointInTime({
index: index,
keep_alive: "1m"
});
// NOTE(review): implicit global — prefer `const pit_id = pitRes.body.id;`.
pit_id = pitRes.body.id;
But there is no way to use that pit_id in the search method, and it's not documented properly :S
BUT, you can use the scroll API as follows:
// Scroll helper: lazily pages through all matching documents, 10000 at a time.
const scrollSearch = await elastic.helpers.scrollSearch({
index: index,
body: {
"size": 10000,
"query": {
"query_string": {
"fields": [ "vm_ref", "org", "vm" ],
"query": organization + moreQuery
},
// NOTE(review): `sort` appears nested inside `query` here; Elasticsearch
// expects `sort` as a sibling of `query` in the request body — confirm this
// against the ES search API reference.
"sort": [
{ "utc_date": "desc" }
]
}
}});
And then read the results as follows:
// Drain the scroll iterator, collecting every hit into `res`.
let res = [];
try {
  for await (const page of scrollSearch) {
    for (const hit of page.body.hits.hits) {
      res.push(hit);
    }
  }
} catch (err) {
  // Surface iteration/transport failures without aborting the script.
  console.log(err);
}
I know that's not the exact answer to your question, but I hope it helps ;)
The usage of point-in-time for pagination of search results is now documented in ElasticSearch. You can find more or less detailed explanations here: Paginate search results
I prepared an example that may give an idea about how to implement the workflow, described in the documentation:
/**
 * Reads all documents from `index` in chunks using the point-in-time (PIT)
 * + search_after pagination workflow described in the Elasticsearch docs.
 *
 * Fixes over the original snippet:
 *  - `remained` was referenced but never declared (ReferenceError on the
 *    first loop iteration); the vestigial size computation is removed.
 *  - `for (hit of hits)` created an implicit global; now `const hit`.
 *  - the `finally` block referenced the undefined name `pointInTime`
 *    instead of `pointInTimeId`, so the PIT was never closed.
 *
 * @param {string} cluster     Elasticsearch node URL for the Client.
 * @param {string} index       Index to open the point-in-time on.
 * @param {number} [chunkSize=5000]  Documents fetched per request.
 * @param {string} [keepAlive="1m"]  PIT keep-alive between requests.
 */
async function searchWithPointInTime(cluster, index, chunkSize, keepAlive) {
  if (!chunkSize) {
    chunkSize = 5000;
  }
  if (!keepAlive) {
    keepAlive = "1m";
  }
  const client = new Client({ node: cluster });
  let pointInTimeId = null;
  let searchAfter = null;
  try {
    // Open point in time
    pointInTimeId = (await client.openPointInTime({ index, keep_alive: keepAlive })).body.id;
    // Query next chunk of data
    while (true) {
      const response = await client.search({
        // Pay attention: no index here (because it will come from the point-in-time)
        body: {
          size: chunkSize,
          track_total_hits: false, // This will make query faster
          query: {
            // (1) TODO: put any filter you need here (instead of match_all)
            match_all: {},
          },
          pit: {
            id: pointInTimeId,
            keep_alive: keepAlive,
          },
          // Sorting should be by _shard_doc or at least include _shard_doc
          sort: [{ _shard_doc: "desc" }],
          // The next parameter is very important - it tells Elastic to bring us next portion
          ...(searchAfter !== null && { search_after: [searchAfter] }),
        },
      });
      const { hits } = response.body.hits;
      if (!hits || !hits.length) {
        break; // No more data
      }
      for (const hit of hits) {
        // (2) TODO: Do whatever you need with results
      }
      // A short page means we just read the final chunk.
      if (hits.length < chunkSize) {
        break; // We finished reading all data
      }
      // Get next value for the 'search after' position
      // by extracting the _shard_doc from the sort key of the last hit
      searchAfter = hits[hits.length - 1].sort[0];
    }
  } catch (ex) {
    console.error(ex);
  } finally {
    // Close point in time
    if (pointInTimeId) {
      await client.closePointInTime({ body: { id: pointInTimeId } });
    }
  }
}

ecs.runTask not executing in Lambda

I have a lambda function that is supposed to start an ecs task when invoked. It gets all the way down to the "Starting execution..." log then it logs "done.". It seems to just skip right over ecs.runTask(). I have tried getting the returned json output by setting the runtask function to a variable, but that has not helped. I have also tried changing some of my parameters and that has not worked as well.
const AWS = require('aws-sdk');
var ecs = new AWS.ECS()
// Lambda entry point: kicks off a one-off Fargate task on ec2-cluster.
exports.handler = async (event) => {
var params = {
cluster: "ec2-cluster",
enableECSManagedTags: true,
launchType: "FARGATE",
count: 1,
platformVersion: 'LATEST',
networkConfiguration: {
awsvpcConfiguration: {
assignPublicIp: "ENABLED",
securityGroups: [ "sg" ],
subnets: [ "subnet" ]
}
},
startedBy: "testLambda",
taskDefinition: "definition"
}
console.log("Starting execution...");
// NOTE(review): this callback-style call is never awaited, so the async
// handler returns (and Lambda may freeze the execution environment) before
// the RunTask request completes — see the answer below for the awaited form.
ecs.runTask(params, function(err, data) {
console.log(err, data)
});
// console.log(myReturn)
console.log("done.")
}
When I run this locally everything works great. When I run this in lambda however it does not start my task.
In your case, you will need to add Promise to ecs.runTask(). In the AWS Lambda documentation, they mentioned that we have to
Make sure that any background processes or callbacks in your code are complete before the code exits.
meaning that we need to await for the ecs.runTask() process to resolve. Here is an example of how we can apply async/await on aws-sdk. From the references above, the way to make your code work would be:
const AWS = require('aws-sdk');
var ecs = new AWS.ECS()
exports.handler = async (event) => {
var params = {
cluster: "ec2-cluster",
enableECSManagedTags: true,
launchType: "FARGATE",
count: 1,
platformVersion: 'LATEST',
networkConfiguration: {
awsvpcConfiguration: {
assignPublicIp: "ENABLED",
securityGroups: [ "sg" ],
subnets: [ "subnet" ]
}
},
startedBy: "testLambda",
taskDefinition: "definition"
}
console.log("Starting execution...");
// Added promise here
await ecs.runTask(params).promise();
// console.log(myReturn)
console.log("done.")
This is a common mistake that we might make when we first get into Lambda, especially when we try to make our Lambda functions work with some other AWS services via aws-sdk.

How to assign serviceAccount using gcloud compute nodejs client?

I'm trying to create a new virtual machine using gcloud compute nodejs client:
// Fixed: the module specifier must be '@google-cloud/compute' — the '#' was a
// formatting artifact and would make require() throw MODULE_NOT_FOUND.
const Compute = require('@google-cloud/compute');
const compute = new Compute();
/**
 * Creates the VM 'vm-9' in us-central1-a, running a startup script from GCS.
 * Resolves after the create operation completes; logs and rejects on failure.
 */
async function createVM() {
  try {
    const zone = await compute.zone('us-central1-a');
    const config = {
      os: 'ubuntu',
      http: true,
      https: true,
      metadata: {
        items: [
          {
            key: 'startup-script-url',
            value: 'gs://<path_to_startup_script>/startup_script.sh'
          },
        ],
      },
    };
    const data = await zone.createVM('vm-9', config);
    const operation = data[1];
    // Wait for the long-running create operation to finish.
    await operation.promise();
    return console.log(' VM Created');
  } catch (err) {
    console.error(err);
    return Promise.reject(err);
  }
}
I have a serviceAccount with the needed roles for this VM to call other resources, but I can't figure out where to assign the serviceAccount when creating the new VM. Any pointers are greatly appreciated; I haven't been able to find any documentation and I'm stuck.
You can specify the service account to use in the new VM by adding a serviceAccounts field within the options for config passed into createVM. Here is an example snippet:
// Attach a service account (with OAuth scopes) to the VM at creation time.
zone.createVM('name', {
serviceAccounts: [
{
// Email of the IAM service account the instance should run as.
email: '...',
// OAuth2 access scopes granted to that account on this instance.
scopes: [
'...'
]
}
]
})
Reference:
Service Account and Access Scopes or Method: instances.insert
createVM - The config object can take all the parameters of the instance resource.

How to get EC2 public ip using aws-sdk Javascript

I want to get the EC2 instance's public ip using aws-sdk for Javascript. Upon executing the code below, the return gives { Reservations: [] }.
'use strict';
const AWS = require('aws-sdk');
AWS.config.loadFromPath('./aws.json');
AWS.config.update({ region: 'ap-northeast-1' });
const ec2 = new AWS.EC2({ apiVersion: '2016-11-15' });
const params = {
// NOTE(review): this filter requires the instance's public IP to literally
// equal the string 'ip-address'; combined with InstanceIds it matches
// nothing — hence the empty Reservations. Drop the filter or use a real IP.
Filters: [
{
Name: 'ip-address',
Values: [
'ip-address'
]
}
],
InstanceIds: [
"i-0acf483a5cbdfdbeb"
]
};
// On success the public IP is at
// data.Reservations[0].Instances[0].PublicIpAddress (when a match is found).
ec2.describeInstances(params, function (err, data) {
if (err) {
console.log(err);
}
console.log(data);
});
The credentials used has been verified on IAM and is allowed access to the EC2 instance. Why can't its public ip be retrieved?
Node: 7.1.0
OS: CentOS 7.3/Windows 10
I think that you don't need 'Filters' part in your params object.
Use this:
// Query by instance id only; no Filters needed.
const params = {
InstanceIds: [
"i-0acf483a5cbdfdbeb"
]
};
To get public ip use data.Reservations[0].Instances[0].PublicIpAddress
These are all the parameters that you need:
// Equivalent lookup using a filter on instance-id instead of InstanceIds.
var params = {
Filters: [
{
Name: 'instance-id',
Values: [
'i-0492dce5669fd6d22'
]
},
],
};
Documentation

Resources