node.js save http response as pdf file

I have an AWS Lambda function that returns a PDF file as an array of bytes.
I want to call the function and save the PDF file, but after saving I can't open it; it's broken. I can't understand why. I tried different ways to create the PDF. I can get the array and convert it to a file using an online converter, and that works, but when I create the PDF file using Node code it's always broken. I tried internal Node modules like fs, and external ones like pdfkit.
const https = require('https');
const fs = require('fs');
const PDFDocument = require('pdfkit');

const options = {
  host: 'uek9w0hztc.execute-api.eu-north-1.amazonaws.com',
  path: '/pdfmaker',
  method: 'POST',
  headers: {
    url: 'https://www.linkedin.com'
  }
};

const req = https.request(options, res => {
  let data = [];
  const headerDate = res.headers && res.headers.date ? res.headers.date : 'no response date';
  console.log('Status Code:', res.statusCode);
  console.log('Date in Response header:', headerDate);
  res.on('data', chunk => {
    data.push(chunk);
  });
  res.on('end', () => {
    console.log('Response ended: ');
    // fs.writeFileSync('index.pdf', Buffer.from(data));
    // fs.writeFileSync("index_v2.pdf", Buffer.from(data), 'binary', (err) => {
    //   if (err) {
    //     return console.log(err);
    //   }
    //   console.log("The file was saved!");
    // });
    // const doc = new PDFDocument();
    // doc.pipe(fs.createWriteStream('output.pdf'));
    let writeStream = fs.createWriteStream('pdf123.pdf');
    writeStream.once('open', (fd) => {
      writeStream.write(new Buffer.from(data, 'binary'));
      writeStream.on('finish', () => {
        console.log('wrote all data to file');
      });
      writeStream.end();
    });
  });
}).on('error', err => {
  console.log('Error: ', err.message);
});
req.end();
I'd appreciate a hint about where the problem is.
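A likely culprit, mirroring the Buffer.concat fix in the related answer below: data is an array of Buffer chunks, and new Buffer.from(data, 'binary') does not concatenate them. A minimal sketch of the end handler rewritten with Buffer.concat (assuming the Lambda really does return the raw PDF bytes):

res.on('end', () => {
  // data is an array of Buffer chunks; Buffer.concat merges them into
  // one contiguous Buffer instead of misinterpreting the array.
  const pdfBuffer = Buffer.concat(data);
  fs.writeFile('pdf123.pdf', pdfBuffer, (err) => {
    if (err) return console.log(err);
    console.log('wrote all data to file');
  });
});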

Related

Download file from third party server and upload to S3

I have a Lambda Node function which is called by a webhook from a third-party server. The TP server sends a file download URL and some other data.
The download URL is temporary, so I need to push the file to S3 for long-term storage.
The rudimentary function below downloads the file and then tries to upload it to S3.
This works when the file is plain text, but images/PDFs etc. are corrupted when they reach S3.
const AWS = require("aws-sdk");
const https = require('https');
const path = require('path');

const s3 = new AWS.S3({ apiVersion: '2006-03-01' });

exports.handler = async (event, context, callback) => {
  var payload = event.body;
  const url_host = payload.host;
  const url_path = payload.path; // URL of file which needs to be downloaded

  const get_params = {
    host: url_host,
    path: url_path,
    port: 443,
    method: 'GET',
    headers: { }
  };

  var resp = await https_get_processor(get_params); // File downloaded here

  var uploadParams = {
    Bucket: "bucket_name",
    Key: '',
    Body: resp // Preparing to upload the received file
  };
  uploadParams.Key = path.basename(url_path); // Generating filename

  s3.upload(uploadParams, function (err, data) {
    if (err) {
      console.log("Error", err);
    }
    if (data) {
      console.log("Upload Success", data.Location);
    }
  });

  response = {...} // Generic Response
  return response;
};
async function https_get_processor(get_params) {
  return await new Promise((resolve, reject) => {
    var data = "";
    const req = https.request(get_params, res => {
      res.on('data', chunk => { data += chunk });
      res.on('end', () => {
        resolve(data);
      });
    });
    req.on('error', (err) => {
      reject(err);
    });
    req.end();
  });
}
The response is binary data in this case, so change the request processing to push each chunk into an array, then merge the Buffer chunks with Buffer.concat and pass the result.
Try this:
var data = [];
const req = https.request(get_params, res => {
  res.on('data', chunk => data.push(chunk));
  res.on('end', () => {
    resolve(Buffer.concat(data));
  });
});
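With the chunks merged this way, https_get_processor resolves with a single Buffer, and s3.upload accepts a Buffer as the Body for binary content.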

How to make express download link with GridFsBucket?

As the title says, how do you make a direct download link for a file from MongoDB (GridFSBucket) using Express?
The file should be downloadable from memory, as I don't want to save it temporarily on the server.
I have this method:
async function downloadFileFromDB(fileId) {
  var gridfsbucket = new mongoose.mongo.GridFSBucket(mongoose.connection.db, {
    chunkSizeBytes: 1024,
    bucketName: 'filesBucket'
  });
  try {
    const stream = gridfsbucket.openDownloadStream(fileId)
    const fileBuffer = Buffer.from(stream)
    return fileBuffer
  } catch (err) {
    stream.on('error', () => {
      console.log("Some error occurred in download:" + error);
    })
    console.log(err);
  }
}
And this route:
router.get('/download-file', async (req, res) => {
  const fileId = req.query.fileId
  const ObjectFileId = new ObjectId(fileId)
  const fileBuffer = await fileFacade.downloadFileFromDB(ObjectFileId)
  res.download(fileBuffer)
})
But res.download wants a path, not a buffer. Also, I'm not sure I can make a buffer directly from the openDownloadStream method.
Can anyone help?
I believe you need to write the data to your res object. I accomplished this like so:
const readStream = gridfs.openDownloadStreamByName(filename);
readStream.on("data", (chunk) => {
  res.write(chunk);
});
readStream.on("end", () => {
  res.status(200).end();
  mongoClient.close();
});
readStream.on("error", (err) => {
  console.log(err);
  res.status(500).send(err);
});
So, you may just have to do:
res.write(fileBuffer);
res.end();
//// Instead of doing:
// res.download(fileBuffer);
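Alternatively, a minimal sketch that avoids buffering the whole file in memory, assuming the route has access to the same GridFSBucket setup as the question: pipe the download stream straight into the response, with a Content-Disposition header so the browser treats it as a download.

router.get('/download-file', (req, res) => {
  const fileId = new ObjectId(req.query.fileId);
  const gridfsbucket = new mongoose.mongo.GridFSBucket(mongoose.connection.db, {
    bucketName: 'filesBucket'
  });
  // 'attachment' prompts a file download instead of inline rendering
  res.set('Content-Disposition', 'attachment');
  gridfsbucket.openDownloadStream(fileId)
    .on('error', (err) => res.status(500).send(err.message))
    .pipe(res);
});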

cloud function reads file but it's not returning the content on the client side

I am using the code below to read a JSON file in Google Firebase Storage and then return the content of the file. The code works, but all I am getting on the client side is null.
exports.updateProductCatalogue = functions.https.onCall(async (data, context) => {
  const filepath = data.filepath
  const bucketname = data.bucket
  const remoteFile = bucket.file("storeid.json");
  let buffer = '';
  remoteFile.createReadStream()
    .on('error', function(err) { console.log(err) })
    .on('data', function(response) {
      buffer += response
      console.log(buffer)
    })
    .on('end', function() {
      //console.log(buffer);
      console.log("FINISHED!!")
    })
  return buffer
})
This is my client-side JS call:
function getUpdatedCatalogue() {
  var getStorageData = firebase.functions().httpsCallable('updateProductCatalogue');
  var callData = {
    "bucket": "test"
  }
  getStorageData(callData).then(function(result) {
    console.log(result)
  }).catch(function(error) {
    console.log(error)
  })
}
The Cloud Functions console.log shows that the content is read and appears in the log, but the client-side console.log returns null. Here is the file I am reading.
Why am I not getting the file content returned and displayed on the client side? How can I fix this?
The problem is that you're returning the buffer before the stream finishes reading the file.
Try this (not tested):
exports.updateProductCatalogue = functions.https.onCall(async (data, context) => {
  const filepath = data.filepath;
  const bucketname = data.bucket;
  const remoteFile = bucket.file("storeid.json");
  return new Promise((resolve, reject) => {
    let buffer = '';
    remoteFile.createReadStream()
      .on('error', function(err) {
        console.log(err);
        reject(err);
      })
      .on('data', function(response) {
        buffer += response;
        console.log(buffer);
      })
      .on('end', function() {
        console.log("FINISHED!!");
        resolve(buffer);
      });
  });
});
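Also worth checking on the client: a callable function's return value arrives on the data property of the result, so log result.data rather than result to see the returned content.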

NodeJS String to Buffer pdf file

Hi guys, I'm trying to download a PDF file and save it on my disk. The API sends me a string, but the following code is not working.
axios.get('https://myapi.com/download', config).then((res) => {
  var buff = Buffer.from(res.data, 'binary');
  fs.writeFile('file.pdf', buff, function (err) {
    if (err) throw err;
    console.log('Saved!');
  });
}).catch((e) => {
  console.log(e);
})
I've tried this, and it works:
fs.readFile('./download.pdf', 'binary', function (err, data) {
  var str = data.toString();
  var buff = Buffer.from(str, 'binary');
  fs.writeFile('novopdf.pdf', buff, () => {
    console.log('ok');
  })
});
You need to configure the axios GET request as follows:
const response = await Axios({
  method: 'GET',
  url: url,
  responseType: 'stream'
})
response.data.pipe(Fs.createWriteStream(path)) // path is the location where you want to write the file
Then check for the end event on the response object.
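Putting that together, a minimal sketch that resolves only after the write stream has flushed everything to disk (downloadPdf is a hypothetical wrapper; url and path are whatever you pass in):

const Axios = require('axios');
const Fs = require('fs');

async function downloadPdf(url, path) {
  const response = await Axios({ method: 'GET', url: url, responseType: 'stream' });
  // Wait for the write stream's 'finish' event so the file is complete on disk.
  await new Promise((resolve, reject) => {
    response.data
      .pipe(Fs.createWriteStream(path))
      .on('finish', resolve)
      .on('error', reject);
  });
}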

Downloading images with node.js [closed]

I'm trying to write a script to download images using node.js. This is what I have so far:
var http = require('http')
var fs = require('fs')

var maxLength = 10 // 10mb

var download = function(uri, callback) {
  http.request(uri)
    .on('response', function(res) {
      if (res.headers['content-length'] > maxLength * 1024 * 1024) {
        callback(new Error('Image too large.'))
      } else if (!~[200, 304].indexOf(res.statusCode)) {
        callback(new Error('Received an invalid status code.'))
      } else if (!res.headers['content-type'].match(/image/)) {
        callback(new Error('Not an image.'))
      } else {
        var body = ''
        res.setEncoding('binary')
        res
          .on('error', function(err) {
            callback(err)
          })
          .on('data', function(chunk) {
            body += chunk
          })
          .on('end', function() {
            // What about Windows?!
            var path = '/tmp/' + Math.random().toString().split('.').pop()
            fs.writeFile(path, body, 'binary', function(err) {
              callback(err, path)
            })
          })
      }
    })
    .on('error', function(err) {
      callback(err)
    })
    .end();
}
I, however, want to make this more robust:
Are there libraries that do this and do this better?
Is there a chance that response headers lie (about length, about content type)?
Are there any other status codes I should care about? Should I bother with redirects?
I think I read somewhere that binary encoding is going to be deprecated. What do I do then?
How can I get this to work on Windows?
Any other ways you can make this script better?
Why: for a feature similar to imgur where users can give me a URL, I download that image, and rehost the image in multiple sizes.
I'd suggest using the request module. Downloading a file is as simple as the following code:
var fs = require('fs'),
    request = require('request');

var download = function(uri, filename, callback) {
  request.head(uri, function(err, res, body) {
    console.log('content-type:', res.headers['content-type']);
    console.log('content-length:', res.headers['content-length']);
    request(uri).pipe(fs.createWriteStream(filename)).on('close', callback);
  });
};

download('https://www.google.com/images/srpr/logo3w.png', 'google.png', function() {
  console.log('done');
});
I ran into this problem some days ago. For a pure Node.js answer, I would suggest using a Stream to merge the chunks together.
var http = require('http'),
    Stream = require('stream').Transform,
    fs = require('fs');

var url = 'http://www.google.com/images/srpr/logo11w.png';

http.request(url, function(response) {
  var data = new Stream();
  response.on('data', function(chunk) {
    data.push(chunk);
  });
  response.on('end', function() {
    fs.writeFileSync('image.png', data.read());
  });
}).end();
The newest Node versions don't work well with binary strings, so merging chunks with strings is not a good idea when working with binary data.
Just be careful when using data.read(): it empties the stream, so the next read() operation returns nothing. If you want to use the data more than once, store it somewhere.
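For example (imageBuffer is a hypothetical name):

response.on('end', function() {
  // read() drains the stream, so capture the result once and reuse it
  var imageBuffer = data.read();
  fs.writeFileSync('image.png', imageBuffer);
  // a second data.read() here would return null
});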
You can use Axios (a promise-based HTTP client for Node.js) to download images in the order of your choosing in an asynchronous environment:
npm i axios
Then, you can use the following basic example to begin downloading images:
const fs = require('fs');
const axios = require('axios');

/* ============================================================
   Function: Download Image
   ============================================================ */
const download_image = (url, image_path) =>
  axios({
    url,
    responseType: 'stream',
  })
    .then(
      response =>
        new Promise((resolve, reject) => {
          response.data
            .pipe(fs.createWriteStream(image_path))
            .on('finish', () => resolve({ status: true, error: '' }))
            .on('error', e => reject(e));
        }),
    )
    // Resolve with a status object so failed downloads don't throw
    .catch(e => ({ status: false, error: String(e) }));

/* ============================================================
   Download Images in Order
   ============================================================ */
(async () => {
  let example_image_1 = await download_image('https://example.com/test-1.png', 'example-1.png');
  console.log(example_image_1.status); // true
  console.log(example_image_1.error); // ''

  let example_image_2 = await download_image('https://example.com/does-not-exist.png', 'example-2.png');
  console.log(example_image_2.status); // false
  console.log(example_image_2.error); // 'Error: Request failed with status code 404'

  let example_image_3 = await download_image('https://example.com/test-3.png', 'example-3.png');
  console.log(example_image_3.status); // true
  console.log(example_image_3.error); // ''
})();
var fs = require('fs'),
    http = require('http'),
    https = require('https');
var Stream = require('stream').Transform;

var downloadImageToUrl = (url, filename, callback) => {
  var client = http;
  if (url.toString().indexOf("https") === 0) {
    client = https;
  }
  client.request(url, function(response) {
    var data = new Stream();
    response.on('data', function(chunk) {
      data.push(chunk);
    });
    response.on('end', function() {
      fs.writeFileSync(filename, data.read());
    });
  }).end();
};

downloadImageToUrl('https://www.google.com/images/srpr/logo11w.png', 'public/uploads/users/abc.jpg');
If you want download progress, try this:
var fs = require('fs');
var request = require('request');
var progress = require('request-progress');

module.exports = function (uri, path, onProgress, onResponse, onError, onEnd) {
  progress(request(uri))
    .on('progress', onProgress)
    .on('response', onResponse)
    .on('error', onError)
    .on('end', onEnd)
    .pipe(fs.createWriteStream(path))
};
How to use it:
var download = require('../lib/download');

download("https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_150x54dp.png", "~/download/logo.png", function (state) {
  console.log("progress", state);
}, function (response) {
  console.log("status code", response.statusCode);
}, function (error) {
  console.log("error", error);
}, function () {
  console.log("done");
});
Note: you should install both the request and request-progress modules using:
npm install request request-progress --save
This is an extension to Cezary's answer. If you want to download the file to a specific directory, use this. Also, use const instead of var; it's safer this way.
const fs = require('fs');
const request = require('request');

const download = function(uri, filename, callback) {
  request.head(uri, function(err, res, body) {
    request(uri).pipe(fs.createWriteStream(filename)).on('close', callback);
  });
};

download('https://www.google.com/images/srpr/logo3w.png', './images/google.png', function() {
  console.log('done');
});
Building on the above, if anyone needs to handle errors in the write/read streams, I used this version. Note the stream.read() in case of a write error; it's required so we can finish reading and trigger close on the read stream.
var download = function(uri, filename, callback) {
  request.head(uri, function(err, res, body) {
    if (err) callback(err, filename);
    else {
      var stream = request(uri);
      stream.pipe(
        fs.createWriteStream(filename)
          .on('error', function(err) {
            callback(err, filename);
            stream.read();
          })
      )
      .on('close', function() {
        callback(null, filename);
      });
    }
  });
};
