I'm using Amazon S3 as photo storage and I'm constantly getting this error:
RequestTimeout: Your socket connection to the server was not read from or written to within the timeout period. Idle connections will be closed.
Here's the code:
function(source, name, callback) {
    var awsUtils = this;
    fs.stat(source, function(err, file_info) {
        console.log(file_info);
        var bodyStream = fs.createReadStream(source);
        var params = {
            Key: name,
            ContentLength: file_info.size,
            Body: bodyStream
        };
        awsUtils.s3bucket.putObject(params, function(err, data) {
            if (err) {
                console.error('AWSDriverUtils-unable to upload: ' + err);
                callback(false);
            } else {
                console.log('AWSDriverUtils-upload success:', data);
                callback(true);
            }
        });
    });
}
The image file being uploaded is small, only about 44.0 KB. While researching, I found someone saying:
Amazon S3 will send that error response after 20 seconds of inactivity. The error indicates that Amazon S3 was attempting to read the request body, but no new data arrived over a period of 20 seconds.
What should I do to fix this, maybe a flush() function?
I have also tried commenting out ContentLength in params.
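One workaround I'm considering (just a sketch inside the same function as above, not verified) is to read the whole file into a buffer first, so the socket never sits idle waiting on the stream:
fs.readFile(source, function(err, buffer) {
    if (err) {
        return callback(false);
    }
    // Body is a Buffer, so the SDK knows the exact length and sends it in one go
    awsUtils.s3bucket.putObject({ Key: name, Body: buffer }, function(err, data) {
        callback(!err);
    });
});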
I have an RDS SQL Server instance for which I need to collect all successful/failed login attempts, create a report out of them (a CSV file), and upload it to an S3 bucket.
I am using the queries below to get both successful and failed logins:
-- Fetch failed login attempts
SELECT *
FROM msdb.dbo.rds_fn_get_audit_file('D:\rdsdbdata\SQLAudit\*.sqlaudit', default, default)
WHERE action_id = 'LGIF';

-- Fetch successful login attempts
SELECT *
FROM msdb.dbo.rds_fn_get_audit_file('D:\rdsdbdata\SQLAudit\*.sqlaudit', default, default)
WHERE action_id = 'LGIS';
I need to do this through a Node.js-based Lambda function in AWS. How can I do it? I am totally new to Node.js and could not find any example.
You'll need to write a Lambda that can talk to SQL Server; REF: AWS Lambda NodeJS call to SQL Server returns no data and no errors
var sql = require("mssql");

// config for your database
var config = {
    user: 'xxuser',
    password: 'xxxx',
    server: 'mydns',
    database: 'tavier'
};

module.exports.rdsquery = async event => {
    console.log('called rdsquery')
    try {
        // connect to your database
        await sql.connect(config, function (err) {
            console.log('connected')
            if (err)
                console.log('rdsquery: ' + err)

            // create Request object
            var request = new sql.Request();

            // query the database and get the records
            var query = "SELECT * " +
                "FROM msdb.dbo.rds_fn_get_audit_file('D:\\rdsdbdata\\SQLAudit\\*.sqlaudit', default, default) " +
                "WHERE action_id = 'LGIF'";
            request.query(query, function (err, recordset) {
                if (err)
                    console.log('rdsquery-sql: ' + err)

                // send records as a response
                console.log('logging recordset')
                console.log(recordset);
                return { statusCode: 200, headers: { 'Access-Control-Allow-Origin': '*' }, body: JSON.stringify(recordset) };
            });
        });
    }
    catch (e) {
        console.log('rdsquery-catch: ' + e)
        return { statusCode: 200, headers: { 'Access-Control-Allow-Origin': '*' }, body: JSON.stringify('ERR: ' + e) };
    }
    //return {statusCode: 200, headers: {'Access-Control-Allow-Origin': '*'},body: JSON.stringify('test here')};
};
Instead of returning the recordset, save it to S3, REF: https://stackoverflow.com/a/40188305/495455
var AWS = require('aws-sdk');

function putObjectToS3(bucket, key, data) {
    var s3 = new AWS.S3();
    var params = {
        Bucket: bucket,
        Key: key,
        Body: data
    };
    s3.putObject(params, function(err, data) {
        if (err) console.log(err, err.stack); // an error occurred
        else console.log(data);               // successful response
    });
}
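To produce the CSV report asked about in the question, one option (a sketch; the bucket, key, and the uploadRecordsetAsCsv helper name are placeholders, and it assumes the mssql rows arrive as plain objects in recordset) is to flatten the recordset before handing it to putObjectToS3:
// Convert an array of row objects into CSV text and upload it.
// The column order is taken from the first row; adjust as needed.
function uploadRecordsetAsCsv(recordset) {
    if (!recordset || recordset.length === 0) return;

    var columns = Object.keys(recordset[0]);
    var lines = [columns.join(',')];
    recordset.forEach(function (row) {
        lines.push(columns.map(function (col) {
            // quote each value so commas inside values don't break the CSV
            return JSON.stringify(row[col] == null ? '' : String(row[col]));
        }).join(','));
    });

    // 'my-audit-bucket' and the key are placeholders
    putObjectToS3('my-audit-bucket', 'reports/login-attempts.csv', lines.join('\n'));
}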
I assume your file is generated on your own server, not in the cloud. IMHO there is no reason to use a Lambda when the goal is just to upload a file to a location, and here is how it could be done without one:
Lambda and your SQL Server will possibly not be in the same cloud.
It is extra effort to set up a Lambda and post data that is only being moved to S3.
What you need is:
1) a script to generate the report
2) a shell script, upload-report-s3.sh, that uses the AWS CLI to upload the generated file: aws s3 cp ./path-to-report-file.csv s3://your-bucket/extra-path/target-location.csv
3) a cron job to tie points 1) and 2) together
Note: this assumes the S3 bucket is already created.
I have created a Lambda function in AWS that exports logs from CloudFront to Elasticsearch.
In the AWS console, I still see a warning for invocation errors, although the metrics show there have been none for more than 24 hours.
A typical workflow of logs looks like this:
START RequestId: 302f0b95-7856-11e8-9486-55b3f10e7d4e Version: $LATEST
Request complete
END RequestId: 302f0b95-7856-11e8-9486-55b3f10e7d4e
REPORT RequestId: 302f0b95-7856-11e8-9486-55b3f10e7d4e Duration: 794.93 ms Billed Duration: 800 ms Memory Size: 128 MB Max Memory Used: 52 MB
There is no error in the logs, and the only thing I can think of that could trigger this invocation error is that sometimes two requests start at the same time:
09:01:47
START RequestId: 63cd00e1-7856-11e8-8f96-1f900def8e65 Version: $LATEST
09:01:47
START RequestId: 63e1e7f3-7856-11e8-97e6-3792244f6ab0 Version: $LATEST
Apart from this, I don't understand where this error comes from.
Am I missing something? Or do I have to wait more than 24 hours before the status changes? Maybe there is a way to pinpoint the error with the AWS console/API that I did not find?
I would be happy to hear what you think about this.
Edit: in case you'd like to take a look at the code itself:
var aws = require('aws-sdk');
var zlib = require('zlib');
var async = require('async');
const CloudFrontParser = require('cloudfront-log-parser');
var elasticsearch = require('elasticsearch');

var s3 = new aws.S3();
var client = new elasticsearch.Client({
    host: process.env.ES_HOST,
    log: 'trace',
    keepAlive: false
});

exports.handler = function(event, context, callback) {
    var srcBucket = event.Records[0].s3.bucket.name;
    var srcKey = event.Records[0].s3.object.key;

    async.waterfall([
        function fetchLogFromS3(next) {
            console.log('Fetching compressed log from S3...');
            s3.getObject({
                Bucket: srcBucket,
                Key: srcKey
            }, next);
        },
        function uncompressLog(response, next) {
            console.log("Uncompressing log...");
            zlib.gunzip(response.Body, next);
        },
        function publishNotifications(jsonBuffer, next) {
            console.log('Filtering log...');
            var json = jsonBuffer.toString();
            var records;
            CloudFrontParser.parse(json, { format: 'web' }, function (err, accesses) {
                if (err) {
                    console.log(err);
                } else {
                    records = accesses;
                }
            });

            var bulk = [];
            records.forEach(function(record) {
                bulk.push({ "index": {} });
                bulk.push(record);
            });

            client.bulk({
                index: process.env.ES_INDEXPREFIX,
                type: 'log',
                body: bulk
            }, function(err, resp, status) {
                if (err) {
                    console.log('Error: ', err);
                }
                console.log(resp);
                next();
            });
            console.log('CloudFront parsed:', records);
        }
    ], function (err) {
        if (err) {
            console.error('Failed to send data: ', err);
        } else {
            console.log('Successfully send data.');
        }
        callback(null, 'success');
    });
};
You need to explicitly return information back to the caller.
Here's the related documentation:
The Node.js runtimes v6.10 and v8.10 support the optional callback parameter. You can use it to explicitly return information back to the caller. The general syntax is:
callback(Error error, Object result);
Where:
error – is an optional parameter that you can use to provide results of the failed Lambda function execution. When a Lambda function succeeds, you can pass null as the first parameter.
result – is an optional parameter that you can use to provide the result of a successful function execution. The result provided must be JSON.stringify compatible. If an error is provided, this parameter is ignored.
If you don't use callback in your code, AWS Lambda will call it implicitly and the return value is null.
When the callback is called (explicitly or implicitly), AWS Lambda continues the Lambda function invocation until the event loop is empty.
The following are example callbacks:
callback();                // Indicates success but no information returned to the caller.
callback(null);            // Indicates success but no information returned to the caller.
callback(null, "success"); // Indicates success with information returned to the caller.
callback(error);           // Indicates an error with error information returned to the caller.
https://docs.aws.amazon.com/lambda/latest/dg/nodejs-prog-model-handler.html
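Applied to the handler above, that means the final waterfall callback should pass the error on instead of always reporting success; roughly (a sketch based on the question's own code):
async.waterfall([
    /* ...same steps as above... */
], function (err) {
    if (err) {
        console.error('Failed to send data: ', err);
        callback(err);             // surface the failure so the invocation is recorded as an error
    } else {
        console.log('Successfully sent data.');
        callback(null, 'success'); // report success to the caller
    }
});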
I'm trying to make a call to the Amazon Rekognition service with Node.js. The call goes through, but I receive an InvalidImageFormatException error which says:
Invalid Input, input image shouldn't be empty.
I'm basing my code off an S3 example:
var AWS = require('aws-sdk');
var rekognition = new AWS.Rekognition({ region: 'us-east-1' });

// Create a bucket and upload something into it
var params = {
    Image: {
        S3Object: {
            Bucket: "MY-BUCKET-NAME",
            Name: "coffee.jpg"
        }
    },
    MaxLabels: 10,
    MinConfidence: 70.0
};

var request = rekognition.detectLabels(params, function(err, data) {
    if (err) {
        console.log(err, err.stack); // an error occurred
    } else {
        console.log(data); // successful response
    }
});
The documentation states that the service only accepts PNG or JPEG images, but I can't figure out what is going on.
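One thing I plan to try (just a diagnostic sketch, using the same bucket and key as above) is checking that the object Rekognition is pointed at actually exists and isn't zero bytes:
var s3 = new AWS.S3({ region: 'us-east-1' });

// HEAD the object to confirm it exists and has a non-zero size
s3.headObject({ Bucket: "MY-BUCKET-NAME", Key: "coffee.jpg" }, function(err, meta) {
    if (err) {
        console.log('Object not reachable:', err);
    } else {
        console.log('ContentLength:', meta.ContentLength, 'ContentType:', meta.ContentType);
    }
});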
I've developed a Node.js API (using Express) that allows users to log in and get a list of files they have stored on a remote server. As you'd expect, the code must be non-blocking so the web server can still respond to login requests even while some users are fetching their file lists.
Every time a user makes a request to get his file list, the listOfFiles function is called.
This is the code:
exports.listOfFiles = function(req, res) {
    db.Account.find({ where: { id: 1 } }).then(function(newAcc) {
        console.log("found the account");
        getFiles('/', newAcc.accessToken, '0', newAcc, function(error) {
            if (error) {
                log.error('Error getting files');
            } else {
                console.log("callback!");
            }
        });
    });
};
The getFiles function: this function is responsible for fetching the file list from the remote server and storing it in a Postgres database:
function getFiles(path, accessToken, parentID, newAcc, callback) {
    var client = new ExternalAPI.Client({
        key: config.get("default:clientId"),
        secret: config.get("default:clientSecret")
    });
    client._oauth._token = accessToken;

    var options = {
        removed: false,
        deleted: false,
        readDir: true
    };

    // this is the instruction that fetches an array of items
    // (metadata only) from a remote server
    client.stat(path, options, function(error, entries) {
        if (error) {
            if (error.status == 429) {
                console.log(accessToken + ' timeout');
                // retry after a minute (wrapped in a function so it isn't invoked immediately)
                setTimeout(function() {
                    getFiles(path, accessToken, parentID, newAcc, callback);
                }, 60000);
            } else {
                log.error(error);
                callback(error, null);
            }
        } else {
            // when the array of items arrives:
            console.log("RECEIVED FILES");
            var inserted = 0;
            var items = entries._json.contents;
            for (var file in items) {
                var skyItemID = uuid.v1();
                var name = items[file].path.split('/').pop();
                var itemType;
                if (items[file].is_dir) {
                    itemType = 'folder';
                } else {
                    itemType = 'file';
                }
                newAcc.createItem({
                    name: name,
                    lastModified: items[file].modified,
                    skyItemID: skyItemID,
                    parentID: parentID,
                    itemSize: items[file].bytes,
                    itemType: itemType,
                    readOnly: items[file].read_only,
                    mimeType: items[file].mime_type
                }).then(function(item) {
                    console.log(item.name);
                    if (++inserted == items.length) {
                        console.log(inserted);
                        console.log(items.length);
                        console.log("callsback");
                        callback();
                    }
                }).catch(function(error) {
                    log.error('[DROPBOX] - Filename with special characters');
                    callback(new Error());
                    return;
                });
            }
        }
    });
}
The problem here is that the moment the web server prints console.log("RECEIVED FILES") to the console, it stops responding to all other requests, such as login or file-list requests from other users.
It starts responding again when it prints console.log("callback!"). So I'm assuming that somehow Node.js is blocking itself until the getFiles function has finished and called back.
I think this is not normal behaviour. Shouldn't Node.js keep responding to other requests even if some operations are running in the background? Shouldn't the getFiles function run in the background without affecting or blocking all the other requests? What am I doing wrong?
Thanks!
I am facing the same kind of problem: a long-running HTTP request to another server blocks the service from responding to other client requests. This is my topic: What is the correct behavior for Node.js while Node.js is requesting a long time http request for other servers. Currently I have no answer for it. If you find the answer, please reply. Thanks.
I'm trying to upload a PDF to an S3 bucket using the Knox library, but I keep getting 505 errors and the PDFs won't save. My code:
// all of this works well
var knox = require('knox');
var client = knox.createClient(require('../path/to/config.js').knox);

client.putFile('tmp/file', '/prefix/key', function(err, res) {
    if (err) {
        console.log("Error PUTing file in S3:", err);
    }
    console.log("S3 RESPONSE:", res.statusCode); // returns 505
});
Anyone have any insight into what I'm doing wrong? I've also tried setting my own headers using client.put(..), but I got the same 505 response.
Two possible reasons:
1) If this is your complete code, then you forgot to enter the key, secret, and bucket.
var client = knox.createClient({
    key: '<api-key-here>',
    secret: '<secret-here>',
    bucket: 'learnboost'
});
2) There is a space in the file name that you are trying to upload.
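If it is the second case, one workaround (a sketch I haven't verified against knox; the key below is just an example) is to encode the key's path segments before uploading so the space doesn't end up raw in the request:
// Encode each path segment so a space becomes %20 in the key
var rawKey = '/prefix/my report.pdf'; // example key containing a space
var safeKey = rawKey.split('/').map(encodeURIComponent).join('/');

client.putFile('tmp/file', safeKey, function(err, res) {
    if (err) {
        console.log("Error PUTing file in S3:", err);
    } else {
        console.log("S3 RESPONSE:", res.statusCode);
    }
});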
This isn't an answer per se, and I'm still unsure about the 505 response above, but the AWS SDK that Amazon puts out works great if anyone is having similar issues with Knox. The above just becomes:
var aws = require('aws-sdk');
var fs = require('fs'); // needed for readFileSync below

aws.config.loadFromPath('./path/to/config.json');
var s3 = new aws.S3();

var params = {
    Bucket: 'your-bucket',
    Key: 'your-key',
    Body: fs.readFileSync('/path/to/file.pdf')
};

s3.putObject(params, function(err, data) {
    if (err) {
        console.log("Error PUTing file:", err);
    }
    console.log("S3 RESPONSE:", data);
});
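For larger files you may not want to read everything into memory; a streaming variant using the SDK's managed uploader could look like this (a sketch with the same placeholder bucket and key as above):
var params = {
    Bucket: 'your-bucket',
    Key: 'your-key',
    Body: fs.createReadStream('/path/to/file.pdf') // stream instead of a buffer
};

// s3.upload handles multipart uploads and retries when given a stream
s3.upload(params, function(err, data) {
    if (err) {
        console.log("Error uploading file:", err);
    } else {
        console.log("Upload location:", data.Location);
    }
});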