Not able to connect to AWS through aws-sdk in Node.js

I am new to the AWS SDK and trying to follow the getting-started guide here: https://aws.amazon.com/developers/getting-started/nodejs/. I am trying to create a bucket and upload a text file to it, but I am getting the error below.
{ Error: connect ETIMEDOUT 52.216.206.51:443
    at TCPConnectWrap.afterConnect [as oncomplete] (net.js:1104:14)
  message: 'connect ETIMEDOUT 52.216.206.51:443',
  errno: 'ETIMEDOUT',
  code: 'NetworkingError',
  syscall: 'connect',
  address: '52.216.206.51',
  port: 443,
  region: 'us-east-1',
  hostname: 'node-sdk-sample-c58bc12a-70be-4578-9c18-1c815962653b.s3.amazonaws.com',
  retryable: true,
  time: 20
I have tried putting the credentials in the credentials file under userid/.aws/credentials:
[default]
aws_access_key_id = my cred...
aws_secret_access_key = my cred....
I have also tried setting the credentials in the code itself:
AWS.config.update({accessKeyId: 'my cred', secretAccessKey: 'my secret key', region: 'us-east-1'});
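As a sanity check, the v2 SDK can be asked to resolve its credential chain explicitly; something along these lines shows whether any credentials load at all (note that ETIMEDOUT is a network-level failure, so bad credentials alone would normally surface as an authorization error rather than a timeout):
var AWS = require('aws-sdk');
// Resolve credentials from the default provider chain
// (environment variables, shared credentials file, instance metadata, ...).
AWS.config.getCredentials(function(err) {
  if (err) {
    console.log('No credentials loaded:', err.message);
  } else {
    console.log('Loaded access key:', AWS.config.credentials.accessKeyId);
  }
});
For reference, the full sample being run: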
// Load the SDK and UUID
var AWS = require('aws-sdk');
var uuid = require('node-uuid');

AWS.config.update({accessKeyId: '......', secretAccessKey: '.....', region: 'us-east-1'});

// Create an S3 client and a unique bucket name
var s3 = new AWS.S3();
var bucketName = 'node-sdk-sample-' + uuid.v4();
var keyName = 'hello_world.txt';

s3.createBucket({Bucket: bucketName}, function(err) {
  if (err) {
    console.log(err);
    return;
  }
  var params = {Bucket: bucketName, Key: keyName, Body: 'Hello World!'};
  s3.putObject(params, function(err, data) {
    if (err)
      console.log(err);
    else
      console.log("Successfully uploaded data to " + bucketName + "/" + keyName);
  });
});

I was able to resolve this issue. It turned out to be proxy-related: I was trying to access AWS from behind a firewall. After configuring the proxy settings, I am able to connect to AWS.
var proxy = require('proxy-agent');
AWS.config.update({ httpOptions: { agent: proxy('your proxy url') } });
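For reference, a slightly fuller sketch of the proxy setup (the proxy URL is a placeholder and is assumed here to come from an HTTPS_PROXY environment variable):
var AWS = require('aws-sdk');
var proxy = require('proxy-agent');

// Route all SDK HTTPS traffic through the corporate proxy.
// HTTPS_PROXY is assumed to hold something like http://proxy.example.com:8080
AWS.config.update({
  region: 'us-east-1',
  httpOptions: { agent: proxy(process.env.HTTPS_PROXY) }
});

var s3 = new AWS.S3(); // subsequent S3 calls now go through the proxy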

A related pattern: wrap the upload in a Promise so it can be awaited, then generate a signed URL for downloading the uploaded file:
const AWS = require('aws-sdk'); // require aws-sdk library
const s3bucket = new AWS.S3();

async function uploadAndGetSignedUrl(awsBucketName, key, body) {
  const params = {
    Bucket: awsBucketName,
    Key: key,
    Body: body
  };
  // s3 upload start: upload() reports back through a callback,
  // so wrap it in a Promise to make it awaitable
  const uploadedKey = await new Promise((resolve, reject) => {
    s3bucket.upload(params, (err, data) => {
      if (err) {
        return reject(err);
      }
      return resolve(data.Key);
    });
  });
  // get a signed url for downloading the uploaded file (valid for 3 minutes)
  const url = await s3bucket.getSignedUrlPromise('getObject', {
    Bucket: awsBucketName,
    Key: uploadedKey,
    Expires: 60 * 3
  });
  return url;
}
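A hypothetical call site for the function above (bucket name, key, and body are placeholders):
// Upload a small text body, then log a download link that expires in 3 minutes.
uploadAndGetSignedUrl('my-bucket', 'uploads/hello_world.txt', 'Hello World!')
  .then(url => console.log('Download link:', url))
  .catch(console.error);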

Related

Making a POST call outside of the GCP environment to create a workload identity

I need to access GCP resources from AWS, outside of the GCP environment, using an AWS Lambda. I found this document, [accessing GCP resources from AWS][1], which describes a way to access GCP resources and asks you to create a workload identity pool.
I need to create the workload identity pool in GCP using a REST API call. The REST call has to run outside of the GCP environment, in this case from the AWS environment. My GCP IAM user doesn't have the privileges to create a workload identity pool (for org-policy reasons), but I have a service account that has admin privileges to create one, plus all the permissions required to access the resources once the pool is created.
I'm a newbie to GCP and am figuring out how to make a POST REST call using my service account credentials. Any help is much appreciated.
Edit: here is the sample code I've been using to make the REST call.
const {google} = require('googleapis');
const util = require('util');
const https = require('https');
const aws4 = require('aws4');

const auth = new google.auth.GoogleAuth({
  keyFile: 'serviceAccountCreds.json',
  scopes: ['https://www.googleapis.com/auth/cloud-platform'],
});

async function createSignedRequestParams(jsonBodyParams) {
  const getAccessToken = await auth.getAccessToken();
  console.log(`createSignedRequestParams() - this.credentials:${getAccessToken !== null}`);
  // Set up the request params object that we'll sign
  const requestParams = {
    path: '/v1beta/projects/serviceAccountdev/locations/global/workloadIdentityPools?workloadIdentityPoolId=12345',
    method: 'POST',
    host: 'iam.googleapis.com',
    headers: { 'Content-Type': 'application/json' },
    body: jsonBodyParams
  };
  console.log(`createSignedRequestParams() - (signed) requestParams:${util.inspect(requestParams)}`);
  return requestParams;
}

const jsonBodyParams = {
  "description": "createWorkloadIdentityPool",
  "display-name": "devAccount"
};

async function request(requestParams, jsonBodyParams) {
  console.log(`request() requestParams:${util.inspect(requestParams)} jsonBodyParams:${jsonBodyParams}`);
  // return new pending promise
  return new Promise((resolve, reject) => {
    const req = https.request(requestParams);
    if (['POST', 'PATCH', 'PUT'].includes(requestParams.method)) {
      req.write(jsonBodyParams);
    }
    req.end();
    // Stream handlers for the request
    req.on('error', (err) => {
      console.log(`request() req.on('error') err:${util.inspect(err)}`);
      return reject(err);
    });
    req.on('response', (res) => {
      let dataJson = '';
      res.on('data', chunk => {
        dataJson += chunk;
      });
      res.on('end', () => {
        const statusCode = res.statusCode;
        const statusMessage = res.statusMessage;
        const data = JSON.parse(dataJson);
        console.log(`request() res.on('end')`, { statusCode, statusMessage, data });
        resolve({ statusMessage, statusCode, data });
      });
    });
  });
}

async function postTheRequest(reqParams, jsonBodyParams) {
  try {
    const response = await request(reqParams, jsonBodyParams);
    return response;
  } catch (error) {
    console.log(error);
  }
}

reqParams = createSignedRequestParams(jsonBodyParams);
postTheRequest(reqParams, jsonBodyParams);
Output of the above code:
[Running] node "c:\Users\av250044\.aws\GCP_Code_examples\registerTheWorkloadIdentifier.js"

request() requestParams:Promise { <pending> } jsonBodyParams:[object Object]
request() req.on('error') err:{ Error: connect ECONNREFUSED 127.0.0.1:443
    at TCPConnectWrap.afterConnect [as oncomplete] (net.js:1106:14)
  errno: 'ECONNREFUSED',
  code: 'ECONNREFUSED',
  syscall: 'connect',
  address: '127.0.0.1',
  port: 443 }
{ Error: connect ECONNREFUSED 127.0.0.1:443
    at TCPConnectWrap.afterConnect [as oncomplete] (net.js:1106:14)
  errno: 'ECONNREFUSED',
  code: 'ECONNREFUSED',
  syscall: 'connect',
  address: '127.0.0.1',
  port: 443 }
I'm wondering whether the path and host I'm passing are correct. Please let me know your thoughts on my code sample.
[1]: https://cloud.google.com/iam/docs/access-resources-aws#iam-workload-pools-add-aws-rest
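Note: judging by the log line request() requestParams:Promise { <pending> }, one likely issue is that createSignedRequestParams() is async but is never awaited, so https.request() receives a pending Promise instead of an options object and falls back to connecting to localhost, which matches the ECONNREFUSED 127.0.0.1:443 error. A minimal sketch of a corrected call site, assuming the rest of the code stays as-is (the body is also serialized to a string before being written to the socket, since the log shows it arriving as [object Object]):
// Hypothetical corrected call site: await the params before using them,
// and pass the body as a JSON string rather than a plain object.
(async () => {
  const body = JSON.stringify(jsonBodyParams);
  const reqParams = await createSignedRequestParams(body);
  const response = await postTheRequest(reqParams, body);
  console.log(response);
})();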

AWS Lambda function to delete files from an S3 folder

I import some data from Funnel into an S3 bucket. After that, a Lambda function copies the data to a table in Redshift, and I then try to delete all the copied objects from the bucket folder, but I keep getting a timeout.
This is my code:
const Promise = require('bluebird');
const {Pool} = require('pg');
const AWS = require('aws-sdk');

async function emptyS3Directory(bucket, dir) {
  const listParams = {
    Bucket: bucket,
    Prefix: dir
  };
  var s3 = new AWS.S3();
  s3.listObjectsV2(listParams, function(err, data) { // Here I always get a timeout
  });
  .....
}
EDIT: This is the code of the function.
async function DeleteAllDataFromDir(bucket, dir) {
  const listParams = {
    Bucket: bucket,
    Prefix: dir
  };
  var s3 = new AWS.S3();
  const listedObjects = await s3.listObjects(listParams).promise();
  console.log("response", listedObjects);
  if (listedObjects.Contents.length === 0) return;
  const deleteParams = {
    Bucket: bucket,
    Delete: { Objects: [] }
  };
  listedObjects.Contents.forEach(({ Key }) => {
    deleteParams.Delete.Objects.push({ Key });
  });
  await s3.deleteObjects(deleteParams).promise();
  if (listedObjects.IsTruncated) await DeleteAllDataFromDir(bucket, dir);
}
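(For context, listObjects returns at most 1,000 keys per call, which is why the function recurses while IsTruncated is set.) A hypothetical invocation from the Lambda handler, with placeholder bucket and prefix:
// Hypothetical usage inside the Lambda handler.
exports.handler = async () => {
  await DeleteAllDataFromDir('my-data-bucket', 'funnel-export/');
};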
I first set the timeout to 2 minutes, then changed it to 10 minutes, and I get the same error:
{
  "errorType": "NetworkingError",
  "errorMessage": "connect ETIMEDOUT IP:port",
  "code": "NetworkingError",
  "message": "connect ETIMEDOUT IP:port",
  "errno": "ETIMEDOUT",
  "syscall": "connect",
  "address": "IP",
  "port": port,
  "region": "eu-west-2",
  "hostname": "hostName",
  "retryable": true,
  "time": "2020-12-10T08:36:29.984Z",
  "stack": [
    "Error: connect ETIMEDOUT 52.95.148.74:443",
    "    at TCPConnectWrap.afterConnect [as oncomplete] (net.js:1107:14)"
  ]
}
Based on the nature of the error, it appears that your bucket may reside in a different region than your Lambda function.
Provide the bucket's region code as an option when constructing your S3 client:
var s3 = new AWS.S3({region: 'bucket-region-code'});
To find the region code, go to the S3 Management Console and click "Buckets" in the sidebar; the resulting bucket list shows each bucket's region.
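If you'd rather look the region up programmatically, S3 exposes it through getBucketLocation; a small sketch, with a placeholder bucket name (an empty LocationConstraint is S3's legacy way of saying us-east-1):
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

// GetBucketLocation returns the region the bucket resides in.
s3.getBucketLocation({ Bucket: 'my-bucket' }, (err, data) => {
  if (err) return console.error(err);
  console.log('Bucket region:', data.LocationConstraint || 'us-east-1');
});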

AWS Elasticsearch returns "No Living connections"

Hello everyone, I'm using the AWS Elasticsearch service and access it using Lambda. When I try to connect to Elasticsearch from Lambda, it throws an error:
{ Error: No Living connections
    at sendReqWithConnection (/var/task/node_modules/elasticsearch/src/lib/transport.js:226:15)
    at next (/var/task/node_modules/elasticsearch/src/lib/connection_pool.js:214:7)
    at /var/task/node_modules/async-listener/glue.js:188:31
    at process._tickCallback (internal/process/next_tick.js:61:11)
  message: 'No Living connections',
  body: undefined,
  status: undefined }
I'm using Node.js to connect to the ES domain:
const elasticsearch = require('elasticsearch');
const httpAwsEs = require('http-aws-es');
const AWS = require('aws-sdk');

const esClient = new elasticsearch.Client({
  host: 'endpointAddress',
  connectionClass: httpAwsEs,
  httpAuth: 'userName:Password',
  amazonES: {
    region: 'us-east-1',
    credentials: new AWS.EnvironmentCredentials('AWS')
  }
});

module.exports = esClient;
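To separate connectivity problems from query problems, the legacy elasticsearch client's ping() can serve as a quick sanity check; a sketch, assuming the client module above is saved as esClient.js:
const esClient = require('./esClient'); // the client exported above

// ping() succeeds only if a live connection to the cluster can be established.
esClient.ping({ requestTimeout: 3000 })
  .then(() => console.log('Cluster is reachable'))
  .catch(err => console.error('Still no living connections:', err.message));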
I've tested the same code on another account and it works fine there. What could be the issue?
Thanks for reading.

AWS Credentials error: Could not load credentials from any providers (Elasticsearch Service, Node.js)

I ran into a problem that I think is worth writing up. I may not understand the internal issue, but I have a solution. Anyway, the problem:
I'm hosting an Elasticsearch Service domain on AWS, and I'm trying to access that service locally and through my EC2 instance on AWS.
When I try locally, I get this error: Request Timeout after 30000ms
When I try it on my EC2 instance, I get this error: AWS Credentials error: Could not load credentials from any providers
Here is how I set up the credentials and made the query:
const AWS = require('aws-sdk');
const connectionClass = require('http-aws-es');
const elasticsearch = require('elasticsearch');

try {
  var elasticClient = new elasticsearch.Client({
    host: "https://some-elastic.us-east-1.es.amazonaws.com/",
    log: 'error',
    connectionClass: connectionClass,
    amazonES: {
      region: 'us-east-1',
      credentials: new AWS.Credentials('id', 'key')
    }
  });

  elasticClient.indices.delete({
    index: 'foo',
  }).then(function (resp) {
    console.log("Successful query!");
    console.log(JSON.stringify(resp, null, 4));
  }, function (err) {
    console.trace(err.message);
  });
} catch (err) {
  console.log(err);
}
As stated, I kept getting this error, and I tried many other variations of passing the credentials.
My vague understanding of the problem is that the credentials set in the amazonES object are being ignored, or that the region isn't being passed along with the credentials, so AWS doesn't know where to look for them.
Anyway, here is the solution:
AWS.config.update({
  secretAccessKey: 'key',
  accessKeyId: 'id',
  region: 'your region, e.g. us-east-1'
});

var elasticClient = new elasticsearch.Client({
  host: "https://some-elastic.us-east-1.es.amazonaws.com/",
  log: 'error',
  connectionClass: connectionClass,
  amazonES: {
    credentials: new AWS.EnvironmentCredentials('AWS'),
  }
});
It's a bit of a buggy situation. I couldn't find this solution anywhere online, and I hope it helps someone who runs into the same errors in the future.

Why is the Coinbase API returning an error when the code was working fine?

I was using the code below to interact with the Coinbase API. It was working fine, but lately it has started returning an error. Here is the code:
var coinbase = require('coinbase');
var mysecret = 'apisecret';
var mykey = 'apikey';
var client = new coinbase.Client({'apiKey': mykey, 'apiSecret': mysecret});

client.getAccounts({}, function(err, accounts) {
  if (err) throw err;
  console.log(accounts);
});
Here is the error I get:
if (err) throw err
         ^
Error: unable to get local issuer certificate
    at TLSSocket.onConnectSecure (_tls_wrap.js:1058:34)
    at TLSSocket.emit (events.js:198:13)
    at TLSSocket._finishInit (_tls_wrap.js:636:8)
Coinbase has updated its SSL certificates. To work around this, you can set strictSSL to false when you create a new client (note that this disables certificate verification, so it should only be a temporary measure):
const coinbase = require('coinbase')
const mysecret = 'apisecret'
const mykey = 'apikey'

const client = new coinbase.Client({ apiKey: mykey, apiSecret: mysecret, strictSSL: false })

client.getAccounts({}, function(err, accounts) {
  if (err) throw err
  console.log(accounts)
})
Or pass in the new certificates:
const fs = require('fs')
const coinbase = require('coinbase')
const mysecret = 'apisecret'
const mykey = 'apikey'
const caFile = fs.readFileSync('./caFileLocation')

const client = new coinbase.Client({ apiKey: mykey, apiSecret: mysecret, caFile })

client.getAccounts({}, function(err, accounts) {
  if (err) throw err
  console.log(accounts)
})
