I am trying to write a function that takes the mp3 URL of the recording and uploads it to S3. However, I keep getting a runtime error and the callback is never reached. If I move the callback below s3.upload(...), the statement "attempting to upload mp3" is never logged.
exports.handler = function(context, event, callback) {
const twiml = new Twilio.twiml.VoiceResponse();
var AWS = require('aws-sdk');
var s3 = new AWS.S3();
var getUri = require('get-uri');
AWS.config.update({
accessKeyId: "...",
secretAccessKey: "..."
});
var client = context.getTwilioClient();
const recording_id = event.RecordingSid;
const uri = event.RecordingUrl + ".mp3";
getUri(uri, function (err, rs) {
if (err) {
console.log(err.message);
throw err;
}
var params = {
ACL: "public-read",
Body: rs,
Bucket: "...",
Key: "audio.mp3",
ContentType: 'audio/mp3'
};
s3.upload(params, function(err,data) {
console.log("attempting to upload mp3");
if (err) {
console.log("there is an error");
console.log(err.status);
throw err.message;
}
else {
console.log("Your upload has been successful.");
}
callback(null, twiml);
});
});
console.log("at the end");
};
Is there any other way to access the recordings and put them in my public S3 bucket? Why does s3.upload(...) never execute?
Any insight into this would be helpful! Thanks in advance!
app.get('/uploadsong',function(req,res){
console.log("Hi there")
var URI = 'http://sensongsmp3download.info/Kaala%20(2018)%20-%20Sensongsmp3.info/Thanga%20Sela%20--%20Sensongsmp3.Info.mp3';
var buffer = [];
request
.get(URI)
.on('error', function(err) {
console.log("error")
}).on('data',function(data){
buffer.push(data);
}).on('end',function(){
var completeSong = Buffer.concat(buffer);
var data = {
Body:completeSong,
Key: 'sample.mp3',
ContentType: 'audio/mp3'
}
s3Bucket.putObject(data, function(err, data){
if (err)
{
console.log('Error uploading data: ', err);
} else
{
console.log('upload successful')
res.send('done');
}
})
})
})
Here are the modules I have used:
var request = require('request');
I contacted Twilio about this, and they responded that Twilio Functions have a strict 5-second timeout and that the upload from the Twilio Function to the S3 bucket takes more than 5 seconds. My workaround was to send a string with all the mp3 URLs separated by a comma and a space. The Lambda function then parses the links and stores them all in an array, which is used for audio playback.
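A minimal sketch of that parsing step, assuming the comma-and-space separated URL string arrives on the Lambda event (the field name mp3Urls is hypothetical):
exports.handler = async (event) => {
    // Split the ", "-separated string into an array of mp3 URLs
    const urls = (event.mp3Urls || '').split(', ').filter(Boolean);
    console.log('Recordings queued for playback:', urls);
    return { urls };
};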
Related
I am trying to upload a file to AWS S3 using putObject, but it results in files of 0 bytes.
I do get a successful response back from the putObject call.
Node.js code:
const aws = require("aws-sdk");
const s3 = new aws.S3();
module.exports = {
upload: function(req, res, next) {
console.log("Going to upload");
console.log(req.files);
let uploadFile = req.files.file;
const s3PutParams = {
Bucket: process.env.S3_BUCKET_NAME,
Key: uploadFile.name,
Body: uploadFile.data,
ACL: "public-read"
};
const s3GetParams = {
Bucket: process.env.S3_BUCKET_NAME,
Key: uploadFile.name
};
console.log(s3PutParams);
s3.putObject(s3PutParams, function(err, response) {
if (err) {
console.error(err);
} else {
console.log("Response is", response);
var url = s3.getSignedUrl("getObject", s3GetParams);
console.log("The URL is", url);
res.json({
returnedUrl: url,
publicUrl: `https://${process.env.S3_BUCKET_NAME}.s3.amazonaws.com/${uploadFile.name}`
});
}
});
}
};
I tested through Postman and checked the backend console log (screenshots omitted).
Can anyone help me in figuring out what is wrong?
EDIT on 11/20:
@EmmanuelNK helped spot the fact that Buffer.byteLength(req.files.file.data) is 0. He had the following questions:
Are you trying to write the whole buffer into memory or are you trying to stream it to s3?
Sorry if the answer is not to the point; I'm still getting my feet wet. Basically, I want to upload an image to S3 and then later use that URL to show it on a webpage; in other words, something like Photobucket.
How are you using upload?
For now I am just testing my backend code (posted in the question) using Postman. Once I get that going, I will have a file-upload form on the front end calling this route.
Is that helpful? Thanks in advance for your help.
If you're using express-fileupload as the file-upload middleware and you've set the useTempFiles option to true, keep in mind that the file's data buffer will be empty (check the usage docs), which is exactly the issue you're facing. To get around this, simply read the temp file again to get the intended file buffer:
import fs from 'fs';
// OR
const fs = require('fs');

// in your route
let uploadFile = req.files.file;

// THIS: read the temp file that express-fileupload wrote to disk
fs.readFile(uploadFile.tempFilePath, (err, uploadedData) => {
    if (err) { throw err; }
    const s3PutParams = {
        Bucket: process.env.S3_BUCKET_NAME,
        Key: uploadFile.name,
        Body: uploadedData, // <--- THIS: the buffer read from the temp file
        ACL: "public-read"
    };
    const s3GetParams = {
        Bucket: process.env.S3_BUCKET_NAME,
        Key: uploadFile.name
    };
    console.log(s3PutParams);
    s3.putObject(s3PutParams, function(err, response) {
        if (err) {
            console.error(err);
            throw err;
        } else {
            console.log("Response is", response);
            var url = s3.getSignedUrl("getObject", s3GetParams);
            console.log("The URL is", url);
            res.json({
                returnedUrl: url,
                publicUrl: `https://${process.env.S3_BUCKET_NAME}.s3.amazonaws.com/${uploadFile.name}`
            });
        }
    });
});
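For context, a minimal sketch of the middleware setup this answer assumes, using the documented express-fileupload options (the temp directory path is just an example):
const express = require('express');
const fileUpload = require('express-fileupload');

const app = express();
// Store incoming uploads as temp files on disk instead of buffering them in memory
app.use(fileUpload({ useTempFiles: true, tempFileDir: '/tmp/' }));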
I am trying to write a lambda script that can pull an image from a site and store it in S3. The problem I'm having is what kind of object to pass as the Body attribute into the S3.putObject method. In the documentation here it says it should be either new Buffer('...') || 'STRING_VALUE' || streamObject, but I'm not sure how to convert the https response into one of those. Here is what I've tried:
var AWS = require('aws-sdk');
var https = require('https');
var Readable = require('stream').Readable;
var s3 = new AWS.S3();
var fs = require('fs');
var url = 'https://upload.wikimedia.org/wikipedia/commons/thumb/1/1d/AmazonWebservices_Logo.svg/500px-AmazonWebservices_Logo.svg.png';
exports.handler = function(event, context) {
https.get(url, function(response) {
var params = {
Bucket: 'example',
Key: 'aws-logo.png',
Body: response // fs.createReadStream(response); doesn't work, arg should be a path to a file...
// just putting response errors out with "Cannot determine length of [object Object]"
};
s3.putObject(params, function(err, data) {
if (err) {
console.error(err, err.stack);
} else {
console.log(data);
}
});
});
};
As indicated in the comments, Lambda lets you save files in /tmp, but you don't really need it.
response does not contain the content of the file; it is the HTTP response (with its status code and headers).
You could try something like this:
var AWS = require('aws-sdk');
var https = require('https');
var s3 = new AWS.S3();
var url = 'https://upload.wikimedia.org/wikipedia/commons/thumb/1/1d/AmazonWebservices_Logo.svg/500px-AmazonWebservices_Logo.svg.png';

exports.handler = function(event, context) {
    https.get(url, function(res) {
        var chunks = [];
        res.on('data', function(chunk) {
            // Aggregate chunks as Buffers so binary data is not corrupted
            chunks.push(chunk);
        });
        res.on('end', function() {
            // Once all chunks have been received, send to S3
            var params = {
                Bucket: 'example',
                Key: 'aws-logo.png',
                Body: Buffer.concat(chunks)
            };
            s3.putObject(params, function(err, data) {
                if (err) {
                    console.error(err, err.stack);
                } else {
                    console.log(data);
                }
            });
        });
    });
};
Try the request package: https://www.npmjs.com/package/request
var AWS = require('aws-sdk');
var request = require('request');
var s3 = new AWS.S3();
var url = 'https://upload.wikimedia.org/wikipedia/commons/thumb/1/1d/AmazonWebservices_Logo.svg/500px-AmazonWebservices_Logo.svg.png';

exports.handler = function (event, context) {
    s3.putObject({
        Bucket: 'example',
        Key: 'aws-logo.png',
        Body: request.get(url, { followRedirect: false })
    }, function (err, data) {
        if (err) console.error(err, err.stack);
        else console.log(data);
    });
};
I am attempting to read a file that is in an AWS S3 bucket using
fs.readFile(file, function (err, contents) {
var myLines = contents.Body.toString().split('\n')
})
I've been able to download and upload a file using the node aws-sdk, but I am at a loss as to how to simply read it and parse the contents.
Here is an example of how I am reading the file from s3:
var s3 = new AWS.S3();
var params = {Bucket: 'myBucket', Key: 'myKey.csv'}
var s3file = s3.getObject(params)
You have a couple of options. You can include a callback as a second argument, which will be invoked with any error message and the object. This example is straight from the AWS documentation:
s3.getObject(params, function(err, data) {
if (err) console.log(err, err.stack); // an error occurred
else console.log(data); // successful response
});
Alternatively, you can convert the output to a stream. There's also an example in the AWS documentation:
var s3 = new AWS.S3({apiVersion: '2006-03-01'});
var params = {Bucket: 'myBucket', Key: 'myImageFile.jpg'};
var file = require('fs').createWriteStream('/path/to/file.jpg');
s3.getObject(params).createReadStream().pipe(file);
This will do it:
new AWS.S3().getObject({ Bucket: this.awsBucketName, Key: keyName }, function(err, data)
{
if (!err)
console.log(data.Body.toString());
});
Since you seem to want to process an S3 text file line by line, here is a Node version that uses the standard readline module and AWS's createReadStream():
const readline = require('readline');
const rl = readline.createInterface({
input: s3.getObject(params).createReadStream()
});
rl.on('line', function(line) {
console.log(line);
})
.on('close', function() {
});
If you are looking to avoid callbacks, you can take advantage of the SDK's .promise() function like this:
const s3 = new AWS.S3();
const params = {Bucket: 'myBucket', Key: 'myKey.csv'}
const response = await s3.getObject(params).promise() // await the promise
const fileContent = response.Body.toString('utf-8'); // can also do 'base64' here if desired
I'm sure the other ways mentioned here have their advantages but this works great for me. Sourced from this thread (see the last response from AWS): https://forums.aws.amazon.com/thread.jspa?threadID=116788
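For reference, a minimal sketch of the same call wrapped in an async function (await is only valid inside one), using the same placeholder bucket and key as above:
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

async function readCsvFromS3() {
    const params = { Bucket: 'myBucket', Key: 'myKey.csv' };
    const response = await s3.getObject(params).promise(); // await the promise
    return response.Body.toString('utf-8');
}

readCsvFromS3()
    .then((text) => console.log(text))
    .catch((err) => console.error(err));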
Here is an example I used to retrieve and parse JSON data from S3:
var params = {Bucket: BUCKET_NAME, Key: KEY_NAME};
new AWS.S3().getObject(params, function(err, json_data)
{
if (!err) {
var json = JSON.parse(Buffer.from(json_data.Body).toString("utf8"));
// PROCESS JSON DATA
......
}
});
I couldn't figure out why yet, but the createReadStream/pipe approach didn't work for me. I was trying to download a large CSV file (300 MB+) and I got duplicated lines. It seemed like a random issue; the final file size varied with each download attempt.
I ended up using another way, based on AWS JS SDK examples:
var s3 = new AWS.S3();
var params = {Bucket: 'myBucket', Key: 'myImageFile.jpg'};
var file = require('fs').createWriteStream('/path/to/file.jpg');
s3.getObject(params).
on('httpData', function(chunk) { file.write(chunk); }).
on('httpDone', function() { file.end(); }).
send();
This way, it worked like a charm.
I prefer Buffer.from(data.Body).toString('utf8'). It supports encoding parameters. With other AWS services (ex. Kinesis Streams) someone may want to replace 'utf8' encoding with 'base64'.
new AWS.S3().getObject(
{ Bucket: this.awsBucketName, Key: keyName },
function(err, data) {
if (!err) {
const body = Buffer.from(data.Body).toString('utf8');
console.log(body);
}
}
);
I had exactly the same issue when downloading very large files from S3.
The example solution from AWS docs just does not work:
var file = fs.createWriteStream(options.filePath);
file.on('close', function(){
if(self.logger) self.logger.info("S3Dataset file download saved to %s", options.filePath );
return callback(null,done);
});
s3.getObject({ Bucket: this._options.s3.Bucket, Key: documentKey }).createReadStream().on('error', function(err) {
    if(self.logger) self.logger.error("S3Dataset download error key:%s error:%#", options.fileName, err);
    return callback(err);
}).pipe(file);
While this solution will work:
var file = fs.createWriteStream(options.filePath);
s3.getObject({ Bucket: this._options.s3.Bucket, Key: documentKey })
.on('error', function(err) {
    if(self.logger) self.logger.error("S3Dataset download error key:%s error:%#", options.fileName, err);
    return callback(err);
})
.on('httpData', function(chunk) { file.write(chunk); })
.on('httpDone', function() {
file.end();
if(self.logger) self.logger.info("S3Dataset file download saved to %s", options.filePath );
return callback(null,done);
})
.send();
The createReadStream attempt just does not fire the end, close or error callback for some reason. See here about this.
I'm also using that solution to download gzipped archives and unpack them, since the first one (the AWS example) does not work in this case either:
var gunzip = zlib.createGunzip();
var file = fs.createWriteStream( options.filePath );
s3.getObject({ Bucket: this._options.s3.Bucket, Key: documentKey })
.on('error', function (error) {
if(self.logger) self.logger.error("%#",error);
return callback(error);
})
.on('httpData', function (chunk) {
file.write(chunk);
})
.on('httpDone', function () {
file.end();
if(self.logger) self.logger.info("downloadArchive downloaded %s", options.filePath);
fs.createReadStream( options.filePath )
.on('error', (error) => {
return callback(error);
})
.on('end', () => {
if(self.logger) self.logger.info("downloadArchive unarchived %s", options.fileDest);
return callback(null, options.fileDest);
})
.pipe(gunzip)
.pipe(fs.createWriteStream(options.fileDest))
})
.send();
With the new version of the SDK, the accepted answer does not work: it does not wait for the object to be downloaded. The following code snippet will help with the new version:
// dependencies
const AWS = require('aws-sdk');
// get reference to S3 client
const s3 = new AWS.S3();
exports.handler = async (event, context, callback) => {
var bucket = "TestBucket"
var key = "TestKey"
try {
const params = {
Bucket: bucket,
Key: key
};
var theObject = await s3.getObject(params).promise();
} catch (error) {
console.log(error);
return;
}
}
If you want to save memory and obtain each row as a JSON object, you can use fast-csv to create a read stream and read each row as a JSON object, as follows:
const csv = require('fast-csv');
const AWS = require('aws-sdk');
const credentials = new AWS.Credentials("ACCESSKEY", "SECRETEKEY", "SESSIONTOKEN");
AWS.config.update({
credentials: credentials, // credentials required for local execution
region: 'your_region'
});
const dynamoS3Bucket = new AWS.S3();
const stream = dynamoS3Bucket.getObject({ Bucket: 'your_bucket', Key: 'example.csv' }).createReadStream();
var parser = csv.fromStream(stream, { headers: true }).on("data", function (data) {
parser.pause(); //can pause reading using this at a particular row
parser.resume(); // to continue reading
console.log(data);
}).on("end", function () {
console.log('process finished');
});
var fileStream = fs.createWriteStream('/path/to/file.jpg');
var s3Stream = s3.getObject({Bucket: 'myBucket', Key: 'myImageFile.jpg'}).createReadStream();
// Listen for errors returned by the service
s3Stream.on('error', function(err) {
// NoSuchKey: The specified key does not exist
console.error(err);
});
s3Stream.pipe(fileStream).on('error', function(err) {
// capture any errors that occur when writing data to the file
console.error('File Stream:', err);
}).on('close', function() {
console.log('Done.');
});
Reference: https://docs.aws.amazon.com/sdk-for-javascript/v2/developer-guide/requests-using-stream-objects.html
Below is the code I am using to upload a file to S3 with Node.js and aws-sdk. Everything seems to work fine; however, the browser just hangs. Also, when I try to open the image on S3, it shows up broken.
module.exports = function(app) {
app.post('/upload', function(req, res){
var AWS = require('aws-sdk');
var accessKeyId = process.env.AWS_ACCESS_KEY;
var secretAccessKey = process.env.AWS_SECRET_KEY;
//pull var from heroku else send to dev
AWS.config.update({
accessKeyId: accessKeyId,
secretAccessKey: secretAccessKey
});
var s3 = new AWS.S3();
var file = req.files.filechooser;
if (file) {
var params = {
Bucket: 'mybucket',
Body: new Buffer(file, 'binary'),
ACL:'public-read',
ContentType: file.type,
Key: "ugc/" + file.name,
ContentLength: file.size
};
s3.putObject(params, function(err, data){
if (err) {
console.log("Error uploading data: ", err);
} else {
console.log("Successfully uploaded data to myBucket/myKey");
}
}).
on('httpUploadProgress', function(chunk) {
console.log("Uploaded", chunk.loaded, "of", chunk.total, "bytes");
}).
on('httpDone', function() {
console.log("done");
});
console.log("called");
} else {
console.log('Nothing to upload.');
}
});
}
You need to complete the call to putObject, after registering the listeners, by actually starting the upload with send(). If no callback is supplied to an S3 method, it returns an AWS.Request that you must initiate manually with send():
on('httpUploadProgress', function(chunk) {
console.log("Uploaded", chunk.loaded, "of", chunk.total, "bytes");
}).
on('httpDone', function() {
console.log("done");
}).
send(); // Actually initiate the request
See
http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/Request.html
http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/Request.html#send-property
for details.
I'm trying to upload an image using Node.js, Express, and the request module, but I keep getting a 415 from the Amazon S3 instance.
fs.createReadStream(req.files.image.path).pipe(request.post(defaults.url, {
form: {
param_1: '',
param_2: ''
}
}, function(error, response, body){
if(error){
callback(error, null);
}else{
if(response.statusCode === 200){
callback({}, body);
}else{
callback(body, response);
}
}
}));
I think the image is not getting appended to the request, but I'm not 100% sure. Any advice?
pipe expects a Writable stream as its parameter; you can use the Express res object directly as the pipe destination. But if you'd like to upload to S3, you can read the uploaded file and use putObject to write it to S3:
var fs = require('fs')
fs.readFile(req.files.image.path, function (err, data) {
  var AWS = require('./aws_config')
  var s3 = new AWS.S3()
  var bucket = ''
  var file_name = req.files.image.name // use the uploaded file's name as the S3 key
  s3.putObject({
    ACL: 'public-read', // by default private access
    Bucket: bucket,
    Key: file_name,
    Body: data
  }, function (err, data) {
    if (err) {
      console.log(err)
      res.send(500, {msg: 'image upload failed', error: err})
    } else {
      console.log('S3 upload Successful')
      res.send({})
    }
  })
})
If you'd like to download, you can use pipe to send the retrieved object back to the client; here the object is first written to disk and then streamed out (a direct-pipe variant is sketched after this example):
app.get('/download/:file', function(req, res, next) {
var AWS = require('./aws_config')
var s3 = new AWS.S3()
s3.getObject({
Bucket: '',
Key: req.params.file
}, function (err, data) {
if (err) console.log (err)
var fs = require ('fs')
var filePath = __dirname + "/downloads/" + req.params.file
fs.writeFile(filePath, data.Body, function (err) {
if (err) console.log(err)
else {
res.attachment(filePath)
var filestream = fs.createReadStream(filePath);
filestream.pipe(res);
// TODO: delete file from server ?
}
});
})
})
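As mentioned above, here is a minimal sketch of the direct-pipe variant, which streams the S3 object straight into the Express response without writing a temp file. It assumes the same aws_config module and empty bucket placeholder as the code above; the /stream/:file route name is just for illustration:
app.get('/stream/:file', function (req, res) {
    var AWS = require('./aws_config')
    var s3 = new AWS.S3()
    var stream = s3.getObject({
        Bucket: '', // same bucket placeholder as above
        Key: req.params.file
    }).createReadStream()
    stream.on('error', function (err) {
        console.log(err)
        // Only send an error status if nothing has been written to the response yet
        if (!res.headersSent) res.sendStatus(500)
        else res.end()
    })
    res.attachment(req.params.file) // sets Content-Disposition so the browser downloads the file
    stream.pipe(res)
})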