Unsupported body payload object - node.js

I want to upload a large file using Node.js.
I'm doing it as described in the AWS documentation, like this:
var fs = require('fs');
var zlib = require('zlib');
var body = fs.createReadStream('bigfile').pipe(zlib.createGzip());
var s3obj = new AWS.S3({params: {Bucket: 'myBucket', Key: 'myKey'}});
s3obj.upload({Body: body}).
  on('httpUploadProgress', function(evt) {
    console.log('Progress:', evt.loaded, '/', evt.total);
  }).
  send(function(err, data) { console.log(err, data) });
Or as shown in the aws-sdk example
var AWS = require('aws-sdk')
var uploadParams = {Bucket: process.argv[2], Key: '', Body: ''}
var file = process.argv[3]
var fs = require('fs')
var fileStream = fs.createReadStream(file)
fileStream.on('error', function(err) {
  console.log('File Error', err)
})
uploadParams.Body = fileStream
var path = require('path')
uploadParams.Key = path.basename(file)
s3.upload(uploadParams, function (err, data) {
  if (err) {
    console.log("Error", err)
  }
  if (data) {
    console.log("Upload Success", data.Location)
  }
})
But either way I get the error:
Unsupported body payload object
I've searched all over and haven't found an answer. Does anyone have any idea why this doesn't work?
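One note on what I've tried so far (this is just my own guess, not from the docs): as far as I can tell, the v2 SDK throws this error when Body ends up being something other than a string, Buffer, or readable stream, for example undefined because the file path is wrong. A minimal sanity check before calling upload, assuming the second example above, would be:

// Hypothetical diagnostic: confirm the Body really is a readable stream
// (or a string/Buffer) before handing it to s3.upload().
var stream = require('stream')
console.log('Body type:', typeof uploadParams.Body,
  'is stream:', uploadParams.Body instanceof stream.Readable)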

Related

Can't upload files from Lambda to S3

I tested on localhost, then checked S3 and saw that a new file had been created.
But when testing on Lambda, although there is no error, no file shows up on S3, and the log from s3.upload(params).promise() is never printed.
var fs = require('fs');
var AWS = require('aws-sdk');
exports.handler = async (event, context, callback) => {
  context.callbackWaitsForEmptyEventLoop = false
  try {
    AWS.config.update({
      accessKeyId: accessKeyId,
      secretAccessKey: secretAccessKey
    });
    var s3 = new AWS.S3();
    var path = 'myfile.txt';
    var file_buffer = fs.readFileSync(path);
    console.log(file_buffer);
    var params = {
      Bucket: 'bucket-dev',
      Key: '2222.txt',
      Body: file_buffer
    };
    console.log("1111");
    s3.upload(params).promise()
      .then(function(data) {
        console.log("Successfully uploaded to");
        callback(null, "All Good");
      })
      .catch(function(err) {
        console.error(err, err.stack);
        callback(err);
      });
    console.log("2222");
    return context.logStreamName
  } catch (err) {
    console.log(err);
    callback(err);
  }
}
Thanks
Try not to mix and match async and callback. Because your handler is async, it returns (with context.logStreamName) before the upload promise resolves, so the invocation can end before the .then or callback() ever runs. Something like this might be closer to what you want...
var fs = require("fs");
var AWS = require("aws-sdk");
exports.handler = async (event, context) => {
  AWS.config.update({
    accessKeyId,
    secretAccessKey,
  });
  const s3 = new AWS.S3();

  const path = "myfile.txt";
  const file_buffer = fs.readFileSync(path);

  const params = {
    Bucket: "bucket-dev",
    Key: "2222.txt",
    Body: file_buffer,
  };

  console.log("1111");
  const res = await s3.upload(params).promise();
  console.log("Successfully uploaded", res);
  return "All good";
};
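If you prefer the promise chain over await, returning that chain from the handler should also work, since the invocation then waits for it. A sketch, using the same bucket/key placeholders as above, replacing the await line:

// Returning the promise keeps the async handler alive until the upload finishes.
return s3.upload(params).promise()
  .then(function(data) {
    console.log("Successfully uploaded to", data.Location);
    return "All good";
  });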

Upgrade aws-sdk to version 3 - streaming S3 upload

I am trying to upgrade a program to aws-sdk version 3, but I am getting the error:
NotImplemented: A header you provided implies functionality that is not implemented
The function get_s3_stream needs to return a stream to the main program.
SDK version 2, this works:
var AWS = require("aws-sdk");
var s3 = new AWS.S3({apiVersion: '2006-03-01'});
const stream = require('stream');
function get_s3_stream() {
  var pass = new stream.PassThrough();
  var params = {Bucket: "bucketname", Key: "testfile1.txt", Body: pass};
  s3.upload(params, function(err, data) {
    console.log(err, data);
  });
  return pass;
}
const inputStream = stream.Readable.from(["input string1"])
const outStream = get_s3_stream()
inputStream.pipe(outStream);
SDK version 3, doesn't work:
const { S3Client, PutObjectCommand } = require("@aws-sdk/client-s3");
const s3Client = new S3Client({ region: "us-east-1" });
const stream = require('stream')
function get_s3_stream() {
  const pass = new stream.PassThrough();
  var params = {Bucket: "bucketname", Key: "testfile2.txt", Body: pass};
  s3Client.send(new PutObjectCommand(params, function(err, data) {
    console.log(err, data);
  }));
  return pass;
};
const inputStream = stream.Readable.from(["input string2"])
const outStream = get_s3_stream()
inputStream.pipe(outStream);
How can a stream be sent to S3 with the new version?
Apparently this is a known issue: https://github.com/aws/aws-sdk-js-v3/issues/1920.
A workaround that keeps the passthrough-stream approach is to use Upload from @aws-sdk/lib-storage:
const { S3Client } = require("@aws-sdk/client-s3");
const { Upload } = require('@aws-sdk/lib-storage');
const stream = require('stream')

const s3Client = new S3Client({ region: "us-east-1" });

function get_s3_stream() {
  const pass = new stream.PassThrough();

  const upload = new Upload({
    client: s3Client,
    params: {
      Bucket: 'bucketname',
      Key: 'testfile2.txt',
      Body: pass,
      ContentType: 'text/plain',
    },
  });

  upload.done().then((res) => {
    console.log(res);
  });

  return pass;
}
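The returned passthrough can then be fed the same way as in the question's v2 example:

const inputStream = stream.Readable.from(["input string2"])
const outStream = get_s3_stream()
inputStream.pipe(outStream);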

An issue with reading a gzipped file (.gz) with IBM Cloud Function (Action: Node.js 12)

I can read the data.json.gz file on my local machine with the code below (node --version: v14.15.0). But when I try to do the same in IBM Cloud with an Action (Node.js 12), reading the same file from an Object Store bucket, I get the error below:
["stderr: ERROR: undefined - input_buf.on is not a function"].
I am very new to Node.js; can someone help identify the issue here?
I'd appreciate your support.
Code that works on Local machine (Windows 10):
function decompressFile(filename) {
  var fs = require("fs"),
      zlib = require("zlib");

  var input = fs.createReadStream(filename);
  var data = [];

  input.on('data', function(chunk){
    data.push(chunk);
  }).on('end', function(){
    var buf = Buffer.concat(data);
    zlib.gunzip(buf, function(err, buffer) {
      if (!err) {
        var dataString = buffer.toString()
        console.log(dataString, dataString+'\n');
        var dataJSON = JSON.parse(dataString.toString('utf8'));
      } else {
        console.log(err);
      }
    });
  });
}
decompressFile("data.json.gz");
Code that does not work on IBM Cloud Function and Object Store Bucket:
// Get file contents of gzipped item
async function getGzippedItem(cosClient, bucketName, itemName) { // <<< async keyword added
  const fs = require('fs');
  const zlib = require('zlib');

  return await cosClient.getObject({ // <<< turned into assignment with await
    Bucket: bucketName,
    Key: itemName
  }).promise()
    .then((instream=fs.createReadStream(itemName)) => {
      if (instream != null) {
        var data = [];
        var input_buf = instream.Body
        input_buf.on('data', function(chunk){
          data.push(chunk);
        }).on('end', function() {
          var buf = Buffer.concat(data);
          zlib.gunzip(buf, function (err, buffer) {
            if (!err) {
              var dataString = buffer.toString()
              var dataJSON = JSON.parse(dataString.toString('utf8'));
            } else {
              console.log(err);
            }
          });
        });
        return buf
      }
    })
    .catch((e) => {
      console.error(`ERROR: ${e.code} - ${e.message}\n`);
    });
};
async function main(params) {
  bucketName = 'bucket'
  itemName = 'data.json.gz'

  var ibm = require('ibm-cos-sdk');
  var util = require('util');
  var fs = require('fs');

  // Initializing configuration
  const myCOS = require('ibm-cos-sdk');

  var config = {
    endpoint: 'endpoint',
    apiKeyId: 'apiKeyId',
    ibmAuthEndpoint: 'ibmAuthEndpoint',
    serviceInstanceId: 'serviceInstanceId',
  };

  var cosClient = new myCOS.S3(config);

  gzippedItemContent = await getGzippedItem(cosClient, bucketName, itemName) // <<< await keyword added
  console.log(">>>>>>>>>>>>>>>: ", typeof gzippedItemContent, gzippedItemContent)
}
The message is telling you that your input_buf object is not of the type you expect it to be. The result of a createReadStream() call is just a stream:
[Stream] the readable stream object that can be piped or read from (by registering 'data' event listeners).
So you should be able to access the value directly
(not declaring var input_buf = instream.Body):
var getObjectStream = cosClient.getObject({
  Bucket: 'BUCKET',
  Key: 'KEY'
}).createReadStream();

getObjectStream.on('data', function(c) {
  data += c.toString();
});
Have a look at the test section of the ibm-cos-sdk-js project; it describes how to use the API.
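Since the object is gzipped JSON, a minimal sketch along those lines (placeholder Bucket/Key values, assuming the cosClient from your main function, and reusing the zlib handling from your local version) could look like this:

var zlib = require('zlib');

var chunks = [];
cosClient.getObject({ Bucket: 'bucket', Key: 'data.json.gz' })
  .createReadStream()
  .on('data', function(chunk) {
    chunks.push(chunk); // collect the raw gzip bytes
  })
  .on('error', function(err) {
    console.log(err);
  })
  .on('end', function() {
    zlib.gunzip(Buffer.concat(chunks), function(err, buffer) {
      if (err) { return console.log(err); }
      var dataJSON = JSON.parse(buffer.toString('utf8'));
      console.log(dataJSON);
    });
  });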

Amazon Polly Character Limit Increase

According to a recent AWS announcement, the new character limit for Polly is 3,000 -- double the previous limit of 1,500.
Previously, my application had a character limit of 1,450 to account for characters I add programmatically to the user's input. In view of the above announcement, I thought increasing my limit to 2,950 would be safe. However, my audio files come out empty (0 KB) any time I exceed roughly 2,450 characters. I'm baffled by this, and of course I'd like to use the extra 500 characters if possible.
Here is my code:
var AWS = require('aws-sdk'),
fs = require('fs');
const Fs = require('fs')
const path = require('path');
AWS.config.loadFromPath(path.join(__dirname, 'config.json'));
var mysql = require('mysql');
var localfile = path.join(__dirname, 'myverse.mp3');
var connection = mysql.createConnection({
  connectionLimit : 10,
  host : '...',
  user : '...',
  password : '...',
  database: '...',
});
[some irrelevant code omitted here.]
connection.query('SELECT versetext, book, mp3, id, reference, userid FROM publicverses where mp3 = "empty" limit 1',
  function (error, results, fields) {
    console.log(error);

    var scripture = results[0].versetext + ".";
    var userid = results[0].userid;
    var book = results[0].book;
    var reference = results[0].reference.replace(":", " verse ").replace(",", " and ");
    if (reference.includes("-")) {
      var reference = reference.replace("verse", "verses");
    }
    console.log(scripture + " " + book.replace("1", "first").replace("2", "second").replace("3", "third") + " " + reference);

    var myverse = "<speak><prosody volume='x-loud'><break time='1s'/>" + scripture + " " + book.replace("1", "first").replace("2", "second").replace("3", "third") + " " + reference + "<break time='1s'/></prosody></speak>";
    var link = "https://s3.amazonaws.com/publicverses/" + book.replace(/ /g, "")+reference.replace(/ /g, "")+"_user"+userid+"_"+randomnumber+".mp3";

    writeit();
    fs.createWriteStream(localfile);
    var myvalue = fs.createReadStream(localfile);
    setTimeout(uploadit, 2000)

    function linkit() {
      'use strict';
      connection.query('update publicverses set mp3 = ? where mp3 = "empty" limit 1', [link],
        function (error, results, fields) {
          console.log(error)
        })
    }

    function writeit() {
      'use strict';
      const Polly = new AWS.Polly({
        signatureVersion: 'v4',
        region: 'us-east-1'
      })
      let params = {
        'Text': myverse.replace(" Job ", " Jobe "),
        'LexiconNames': [ 'kjv' ],
        'TextType': 'ssml',
        'OutputFormat': 'mp3',
        'VoiceId': 'Matthew'
      }
      Polly.synthesizeSpeech(params, (err, data) => {
        if (err) {
          console.log(err.code)
        } else if (data) {
          if (data.AudioStream instanceof Buffer) {
            Fs.writeFile("./myverse.mp3", data.AudioStream, function(err) {
              if (err) {
                return console.log(err)
              }
              console.log("Verse recorded successfully!")
            })
          }
        }
      })
    }

    function uploadit() {
      'use strict';
      console.log('Preparing to upload the verse.')
      var s3 = new AWS.S3({apiVersion: '2006-03-01'});
      var uploadParams = {Bucket: 'publicverses', key: '/test.mp3', Body: myvalue, ACL: 'public-read'};
      var file = 'MyVerse.mp3';
      var fs = require('fs');
      var fileStream = fs.createReadStream(file);
      fileStream.on('error', function(err) {
        console.log('File Error', err);
      });
      uploadParams.Body = fileStream;
      var path = require('path');
      uploadParams.Key = book.replace(/ /g, "")+reference.replace(/ /g, "")+"_user"+userid+"_"+randomnumber+".mp3";
      // call S3 to retrieve upload file to specified bucket
      s3.upload(uploadParams, function (err, data) {
        if (err) {
          console.log("Error", err);
        }
        if (data) {
          console.log("Upload Success", data.Location);
          linkit();
          addanother();
        }
      });
    }
  });
}

untar/decompress to a stream in node

I am trying to write an AWS Lambda that will take a tar.gz from a S3 bucket, inflate it and then unpack it whilst streaming the files back to another S3 bucket.
I have this code:
var AWS = require('aws-sdk');
var fs = require('fs');
var zlib = require('zlib');
var uuid = require('uuid/v4');
var tar = require('tar-stream')
var pack = tar.pack()
var s3 = new AWS.S3();
exports.handler = (event, context, callback) => {
  var bucket = event.Records[0].s3.bucket.name;
  var key = event.Records[0].s3.object.key;
  var file = 'S3://' + bucket + '/' + key;
  console.log(bucket)
  console.log(key)

  var readParams = {
    Bucket: bucket,
    Key: key
  };

  var dataStream = s3.getObject(readParams).createReadStream();

  var extract = tar.extract()
  extract.on('entry', function(header, stream, next) {
    console.log(header.name)
    var writeParams = {
      Bucket: process.env.JOB_PROCESSING_BUCKET,
      Key: uuid() + '-' + header.name,
      Body: stream
    };
    s3.upload(writeParams).
      on('httpUploadProgress', function(evt) {
        console.log('Progress:', evt.loaded, '/', evt.total);
      }).
      send(function(err, data) {
        if (err) console.log("An error occurred", err);
        console.log("Uploaded the file at", data.Location);
      });
    stream.on('end', function() {
      next() // ready for next entry
    })
    stream.resume() // just auto drain the stream
  })
  extract.on('finish', function() {
    // all entries read
  })

  dataStream.pipe(zlib.createGunzip()).pipe(extract);
  callback(null, 'Gunzip Lambda Function');
};
It pulls the file, handles the gunzipping, and I can see each file being extracted on entry. The code then tries to stream each file to S3, which creates a 0 KB file; it hangs around as if it's reading the stream, then continues on to the next entry.
Why can't it read/process the stream body?
Is there a better way of doing this?
Thanks
I don't know if it's the best solution but the following code works for me.
const AWS = require('aws-sdk');
const s3 = new AWS.S3();
const tar = require('tar-stream');
const zlib = require('zlib');
const stream = require('stream');
const uuid = require('uuid');
exports.get = (event, context) => {
  var params = {
    Bucket: event.Records[0].s3.bucket.name,
    Key: event.Records[0].s3.object.key
  };

  var dataStream = s3.getObject(params).createReadStream();

  var extract = tar.extract();
  extract.on('entry', function(header, inputStream, next) {
    inputStream.pipe(uploadFromStream(s3, header));
    inputStream.on('end', function() {
      next(); // ready for next entry
    });
    inputStream.resume(); // just auto drain the stream
  });
  extract.on('finish', function() {
    // all entries read
  });

  dataStream.pipe(zlib.createGunzip()).pipe(extract);
}

function uploadFromStream(s3, header) {
  var pass = new stream.PassThrough();

  var writeParams = {
    Bucket: process.env.JOB_PROCESSING_BUCKET,
    Key: uuid.v1() + '-' + header.name,
    Body: pass
  };

  s3.upload(writeParams, function(err, data) {
    context.done(err, data);
  });

  return pass;
}
Tried for a couple of hours to get this to work; it turns out the 'end' event has been replaced by 'finish'. So the answer above works great, with just one small change:
inputStream.on('end', function() {
  next(); // ready for next entry
});
should be:
inputStream.on('finish', function() {
  next(); // ready for next entry
});
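For clarity, the entry handler from the answer above with that change applied would then look like this (same uploadFromStream helper as before):

extract.on('entry', function(header, inputStream, next) {
  inputStream.pipe(uploadFromStream(s3, header));
  inputStream.on('finish', function() {
    next(); // ready for next entry
  });
  inputStream.resume(); // just auto drain the stream
});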
