I want to download a zip file in MEAN stack - node.js

Here is my frontend code (Angular):
download(user) {
  this.clientAPI.get(Endpoints.DOWNLOAD).toPromise()
    .then(res => {
      let blob = new Blob([new Uint8Array(res.file)], { type: 'application/zip' });
      console.log('blob', blob);
      let a = document.createElement('a');
      a.href = URL.createObjectURL(blob);
      a.download = res.filename;
      document.body.appendChild(a);
      a.click();
      a.remove();
      // this.downloadComplete(user);
    })
    .catch(err => console.error("download error = ", err));
}
Here is my backend code (Node.js):
exports.download = function (req, res) {
  let file = process.env.NOCE_ENV === 'local' ? `${process.cwd()}/server/downloads/eFAST-Release-20.5.17.19.zip` :
    `${process.cwd()}/server/downloads/eFAST-Release-20.5.17.19.zip`;
  let filename = path.basename(file);
  res.setHeader('Content-disposition', 'attachment; filename=' + filename);
  res.setHeader('Content-type', "application/zip");
  let filestream = fs.createReadStream(file);
  // console.log('filestream', filestream);
  res.jsonp({ filename: filename, file: filestream });
};
I am able to download the file, but it is not in zip format; it comes down as a zero-byte .txt file.
Please have a look and let me know how I can fix this.

You can use res.download("path_to_file") in Express to download it.
More info here.
I used res.download, but how could I manage this in Angular?
I'm not sure about Angular, but I think you can use plain JS here to navigate to a new tab where the download will start.
window.open('https://YOUR-LINK-TO-FILE/download','_blank')
By the way, you have a typo: process.env.NOCE_ENV (presumably NODE_ENV was intended).
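For reference, a minimal sketch of that res.download() suggestion on the server side (the hard-coded path is taken from the question); on the Angular side you could then simply window.open() the route as suggested above:
exports.download = function (req, res) {
  const file = `${process.cwd()}/server/downloads/eFAST-Release-20.5.17.19.zip`;
  // res.download() sets Content-Disposition and streams the file for you
  res.download(file);
};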

You need to pipe the filestream (from fs.createReadStream()) into res (the response). If you read the Node.js docs about stream and http, you will notice that res is actually also a Writable stream that you can pipe into like:
// set the status code (res.status is the Express API; plain Node would be res.statusCode = 200)
res.status(200); // 200 means OK
// set headers beforehand
// octet-stream means arbitrary binary data
res.setHeader('Content-Type', 'application/octet-stream');
filestream.pipe(res);
Streams are what make Node.js powerful and efficient. A stream is an event-based emitter that passes chunks of data, with chunk size bounded by a configurable highWaterMark property (16 KB per chunk by default for ordinary streams; fs read streams use 64 KB). You normally pipe a Readable into a Writable stream (in your case, a readable file stream into a writable HTTP response stream). You can read more about them here: https://nodejs.org/api/stream.html
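Putting the pieces together, here is a minimal sketch of the fixed endpoint plus the matching Angular call. Note the frontend must request a Blob rather than JSON; this.http below is assumed to be an injected HttpClient, since the question's clientAPI wrapper may not expose a responseType option:
// backend: stream the zip straight into the response
exports.download = function (req, res) {
  const file = `${process.cwd()}/server/downloads/eFAST-Release-20.5.17.19.zip`;
  res.setHeader('Content-disposition', 'attachment; filename=' + path.basename(file));
  res.setHeader('Content-type', 'application/zip');
  fs.createReadStream(file).pipe(res);
};
// frontend: ask for a Blob instead of JSON, then trigger the save
download(user) {
  this.http.get(Endpoints.DOWNLOAD, { responseType: 'blob' }).toPromise()
    .then(blob => {
      const a = document.createElement('a');
      a.href = URL.createObjectURL(blob);
      a.download = 'eFAST-Release-20.5.17.19.zip';
      document.body.appendChild(a);
      a.click();
      a.remove();
    })
    .catch(err => console.error('download error = ', err));
}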

Related

How to save my cam stream in my server realtime node js?

How can I save my chunks of streams, which are converted into blobs, on my Node.js server in real time?
client.js | I am sending my cam stream as binary to my Node.js server
handleBlobs = async (blob) => {
  let arrayBuffer = await new Response(blob).arrayBuffer()
  let binary = new Uint8Array(arrayBuffer)
  this.postBlob(binary)
};
postBlob = blob => {
  axios.post('/api', { blob })
    .then(res => {
      console.log(res)
    })
};
server.js
app.post('/api', (req, res) => {
  console.log(req.body)
});
How can I store the incoming blobs or binary into one video file when the recording completes?
This appears to be a duplicate of How to concat chunks of incoming binary into video (webm) file node js?, but it doesn't currently have an accepted answer. I'm copying my answer from that post into this one as well:
I was able to get this working by converting to base64 encoding on the front-end with the FileReader api. On the backend, create a new Buffer from the data chunk sent and write it to a file stream. Some key things with my code sample:
I'm using fetch because I didn't want to pull in axios.
When using fetch, you have to make sure you use bodyParser on the backend
I'm not sure how much data you're collecting in your chunks (i.e. the duration value passed to the start method on the MediaRecorder object), but you'll want to make sure your backend can handle the size of the data chunk coming in. I set mine really high to 50MB, but this may not be necessary.
I never close the write stream explicitly... you could potentially do this in your /final route. Otherwise, createWriteStream defaults to autoClose: true, so the node process will do it automatically.
Full working example below:
Front End:
const mediaSource = new MediaSource();
mediaSource.addEventListener('sourceopen', handleSourceOpen, false);
let mediaRecorder;
let sourceBuffer;

function customRecordStream(stream) {
  // should actually check to see if the given mimeType is supported on the browser here.
  let options = { mimeType: 'video/webm;codecs=vp9' };
  mediaRecorder = new MediaRecorder(stream, options);
  mediaRecorder.ondataavailable = postBlob;
  mediaRecorder.start(INT_REC); // INT_REC: chunk duration in ms, defined elsewhere
};

function postBlob(event) {
  if (event.data && event.data.size > 0) {
    sendBlobAsBase64(event.data);
  }
}

function handleSourceOpen(event) {
  sourceBuffer = mediaSource.addSourceBuffer('video/webm; codecs="vp8"');
}

function sendBlobAsBase64(blob) {
  const reader = new FileReader();
  reader.addEventListener('load', () => {
    const dataUrl = reader.result;
    const base64EncodedData = dataUrl.split(',')[1];
    console.log(base64EncodedData);
    sendDataToBackend(base64EncodedData);
  });
  reader.readAsDataURL(blob);
};

function sendDataToBackend(base64EncodedData) {
  const body = JSON.stringify({
    data: base64EncodedData
  });
  fetch('/api', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
    },
    body
  }).then(res => {
    return res.json()
  }).then(json => console.log(json));
};
Back End:
const fs = require('fs');
const path = require('path');
const express = require('express');
const bodyParser = require('body-parser');
const app = express();
const server = require('http').createServer(app);

app.use(bodyParser.urlencoded({ extended: true }));
app.use(bodyParser.json({ limit: "50MB", type: 'application/json' }));

app.post('/api', (req, res) => {
  try {
    const { data } = req.body;
    const dataBuffer = Buffer.from(data, 'base64'); // new Buffer() is deprecated
    const fileStream = fs.createWriteStream('finalvideo.webm', { flags: 'a' });
    fileStream.write(dataBuffer);
    console.log(dataBuffer);
    return res.json({ gotit: true });
  } catch (error) {
    console.log(error);
    return res.json({ gotit: false });
  }
});
Without attempting to implement this (sorry, no time right now), I would suggest the following:
Read into Node's Stream API; the Express request object is an http.IncomingMessage, which is a Readable stream and can be piped into another stream-based API. https://nodejs.org/api/stream.html#stream_api_for_stream_consumers
Read into Node's Filesystem API; it contains functions such as fs.createWriteStream that can handle the stream of chunks and append them into a file at a path of your choice. https://nodejs.org/api/fs.html#fs_class_fs_writestream
After the stream to file completes, as long as the filename has the correct extension, the file should be playable, because the data sent across from the browser is just a binary stream. Further reading into Node's Buffer API will be worth your time; a minimal sketch follows the link below.
https://nodejs.org/api/buffer.html#buffer_buffer
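A minimal sketch of those suggestions, under the assumption that the client POSTs each raw binary chunk as the request body (instead of base64-wrapped JSON):
const fs = require('fs');
const express = require('express');
const app = express();

app.post('/api', (req, res) => {
  // req is a Readable stream; append this chunk's bytes to the file
  const fileStream = fs.createWriteStream('finalvideo.webm', { flags: 'a' });
  req.pipe(fileStream);
  fileStream.on('finish', () => res.json({ gotit: true }));
  fileStream.on('error', () => res.status(500).json({ gotit: false }));
});
The client side would then send the Blob directly, e.g. fetch('/api', { method: 'POST', body: blob }).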

Stream node requests to the cloud with file metadata

I'm using Koa in order to build a web app, and I want to allow users to upload files to it. The files need to be streamed to the cloud, but I would like to avoid saving the file locally.
The problem is that I need some file metadata before I pipe the upload stream to the writeable stream. I want to have the mime-type and optionally attach other data like the original file name etc.
I tried sending the binary data with the request's "content-type" header set to the file's type, but I would like the request to have the content type application/octet-stream so I can know in the back-end how to handle the request.
I read somewhere that the better option would be to use multipart/form-data but I'm not sure how to structure the request, and how to parse the metadata in order to notify the cloud before I pipe to its write stream.
Here is the code I'm currently using. Basically, it just pipes the request as is, and I use the request header to know the type of the file:
module.exports = async ctx => {
  // Generate a random id that will be part of the filename.
  const id = pushid();
  // Get the content type from the header.
  const contentType = ctx.header['content-type'];
  // Get the extension for the file from the content type.
  const ext = contentType.split('/').pop();
  // This is the configuration for the upload stream to the cloud.
  const uploadConfig = {
    // I must specify a content type, or know the file extension.
    contentType
    // there is some other stuff here but it's not relevant.
  };
  // Create an upload stream for the cloud storage.
  const uploadStream = bucket
    .file(`assets/${id}/original.${ext}`)
    .createWriteStream(uploadConfig);
  // Here is what took me hours to get to work... dev life is hard
  ctx.req.pipe(uploadStream);
  // Return a promise so Koa doesn't shut down the request before it's finished uploading.
  return new Promise((resolve, reject) =>
    uploadStream.on('finish', resolve).on('error', reject)
  );
};
Please assume I don't know much about the uploading protocols and managing streams.
OK, so after a lot of searching I found out that there is a parser that works with streams called busboy. It is pretty easy to use, but before jumping into the code I highly suggest that everyone dealing with multipart/form-data requests read this article.
Here is how I solved it:
const Busboy = require('busboy');

module.exports = async ctx => {
  // Init busboy with the headers of the "raw" request.
  const busboy = new Busboy({ headers: ctx.req.headers });
  busboy.on('file', (fieldname, stream, filename, encoding, contentType) => {
    const id = pushid();
    const ext = path.extname(filename);
    const uploadStream = bucket
      .file(`assets/${id}/original${ext}`)
      .createWriteStream({
        contentType,
        resumable: false,
        metadata: {
          cacheControl: 'public, max-age=3600'
        }
      });
    stream.pipe(uploadStream);
  });
  // Pipe the request to busboy.
  ctx.req.pipe(busboy);
  // Return a promise that resolves to whatever you want.
  ctx.body = await new Promise(resolve => {
    busboy.on('finish', () => {
      resolve('done');
    });
  });
};
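For completeness, the request the busboy handler above expects is plain multipart/form-data. A hedged browser-side sketch (the /upload path and the fileInput element are assumptions):
const form = new FormData();
// the field name here is what busboy reports as "fieldname" in its 'file' event
form.append('file', fileInput.files[0], fileInput.files[0].name);
// do not set Content-Type yourself; the browser adds the multipart boundary
fetch('/upload', { method: 'POST', body: form });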

Parse Remote CSV File using Nodejs / Papa Parse?

I am currently working on parsing a remote csv product feed from a Node app and would like to use Papa Parse to do that (as I have had success with it in the browser in the past).
Papa Parse Github: https://github.com/mholt/PapaParse
My initial attempts and web searching haven't turned up exactly how this would be done. The Papa README says that Papa Parse is now compatible with Node, and as such Baby Parse (which used to provide some of the Node parsing functionality) has been deprecated.
Here's a link to the Node section of the docs for anyone stumbling on this issue in the future: https://github.com/mholt/PapaParse#papa-parse-for-node
From that doc paragraph it looks like Papa Parse in Node can parse a readable stream instead of a File. My question is:
Is there any way to utilize Readable Streams functionality to use Papa to download / parse a remote CSV in Node, somewhat similar to how Papa in the browser uses XMLHttpRequest to accomplish that same goal?
For Future Visibility
For those searching on the topic (and to avoid repeating a similar question) attempting to utilize the remote file parsing functionality described here: http://papaparse.com/docs#remote-files will result in the following error in your console:
"Unhandled rejection ReferenceError: XMLHttpRequest is not defined"
I have opened an issue on the official repository and will update this Question as I learn more about the problems that need to be solved.
After lots of tinkering I finally got a working example of this using asynchronous streams and with no additional libraries (except fs/request). It works for remote and local files.
I needed to create a data stream, as well as a PapaParse stream (using papa.NODE_STREAM_INPUT as the first argument to papa.parse()), then pipe the data into the PapaParse stream. Event listeners need to be implemented for the data and finish events on the PapaParse stream. You can then use the parsed data inside your handler for the finish event.
See the example below:
const papa = require("papaparse");
const request = require("request");

const options = { /* options */ };
const dataStream = request.get("https://example.com/myfile.csv");
const parseStream = papa.parse(papa.NODE_STREAM_INPUT, options);

dataStream.pipe(parseStream);

let data = [];
parseStream.on("data", chunk => {
  data.push(chunk);
});

parseStream.on("finish", () => {
  console.log(data);
  console.log(data.length);
});
The data event for the parseStream happens to run once for each row in the CSV (though I'm not sure this behaviour is guaranteed). Hope this helps someone!
To use a local file instead of a remote file, you can do the same thing except the dataStream would be created using fs:
const dataStream = fs.createReadStream("./myfile.csv");
(You may want to use path.join and __dirname to specify a path relative to where the file is located rather than relative to where it was run)
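One caveat: the request package used above has since been deprecated. Here is a sketch of the same piping approach using only Node's built-in https module (the URL is a placeholder):
const https = require('https');
const papa = require('papaparse');

const parseStream = papa.parse(papa.NODE_STREAM_INPUT, { /* options */ });
https.get('https://example.com/myfile.csv', res => res.pipe(parseStream));

let data = [];
parseStream.on('data', chunk => data.push(chunk));
parseStream.on('finish', () => console.log(data.length, 'rows'));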
OK, so I think I have an answer to this. But I guess only time will tell. Note that my file is .txt with tab delimiters.
var fs = require('fs');
var Papa = require('papaparse');

var file = './rawData/myfile.txt';
// For a local file, we need to read its contents into a string first.
// This step may not be necessary when uploading via a UI.
var content = fs.readFileSync(file, "utf8");
var rows;
Papa.parse(content, {
  header: false,
  delimiter: "\t",
  complete: function(results) {
    //console.log("Finished:", results.data);
    rows = results.data;
  }
});
Actually you could use a lightweight stream transformation library called scramjet - parsing CSV straight from http stream is one of my main examples. It also uses PapaParse to parse CSVs.
All you wrote above, with any transforms in between, can be done in just a couple of lines:
const { StringStream } = require("scramjet");
const request = require("request");

request.get("https://srv.example.com/main.csv")   // fetch the csv
  .pipe(new StringStream())                       // pass it to a scramjet stream
  .CSVParse()                                     // parse it into objects
  .consume(object => console.log("Row:", object)) // do whatever you like with the objects
  .then(() => console.log("all done"))
In your own example you're saving the file to disk, which is not necessary even with PapaParse.
I am adding this answer (and will update it as I progress) in case anyone else is still looking into this.
It seems like previous users have ended up downloading the file first and then processing it. This SHOULD NOT be necessary since Papa Parse should be able to process a read stream and it should be possible to pipe 'http' GET to that stream.
Here is one instance of someone discussing what I am trying to do and falling back to downloading the file and then parsing it: https://forums.meteor.com/t/processing-large-csvs-in-meteor-js-with-papaparse/32705/4
Note: in the above, Baby Parse is discussed; now that Papa Parse works with Node, Baby Parse has been deprecated.
Download File Workaround
While downloading and then Parsing with Papa Parse is not an answer to my question, it is the only workaround I have as of now and someone else may want to use this methodology.
My code to download and then parse currently looks something like this:
// Papa Parse for parsing CSV Files
var Papa = require('papaparse');
// HTTP and FS to enable Papa Parse to download remote CSVs via node streams.
var http = require('http');
var fs = require('fs');

var destinationFile = "yourdestination.csv";

var download = function(url, dest, cb) {
  var file = fs.createWriteStream(dest);
  var request = http.get(url, function(response) {
    response.pipe(file);
    file.on('finish', function() {
      file.close(cb); // close() is async, call cb after close completes.
    });
  }).on('error', function(err) { // Handle errors
    fs.unlink(dest, function() {}); // Delete the file async. (But we don't check the result.)
    if (cb) cb(err.message);
  });
};

// Parse the file once the download has finished. Note that in Node,
// Papa.parse() expects file contents (or a stream), not a file path,
// so the file is read in first.
var parseMe = function() {
  var content = fs.readFileSync(destinationFile, "utf8");
  Papa.parse(content, {
    header: true,
    dynamicTyping: true,
    step: function(row) {
      console.log("Row:", row.data);
    },
    complete: function() {
      console.log("All done!");
    }
  });
};

download(feedURL, destinationFile, parseMe);
http(s).get actually passes a readable stream to its callback, so here is a simple solution:
// Note: this must run inside an async function, since it uses await.
try {
  var streamHttp = await new Promise((resolve, reject) =>
    https.get("https://example.com/yourcsv.csv", (res) => {
      resolve(res);
    })
  );
} catch (e) {
  console.log(e);
}
Papa.parse(streamHttp, config);
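The config object above is left undefined in the snippet; a sketch of what it might contain, using Papa Parse's documented callbacks:
// a possible config for the call above; step fires once per parsed row
const config = {
  header: true,
  step: (row) => console.log('Row:', row.data),
  complete: () => console.log('All done!'),
};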
const Papa = require("papaparse");
const { StringStream } = require("scramjet");
const request = require("request");

const req = request
  .get("https://example.com/yourcsv.csv")
  .pipe(new StringStream());

Papa.parse(req, {
  header: true,
  complete: (result) => {
    console.log(result);
  },
});
David Liao's solution worked for me; I tweaked it a little since I am using a local file. He did not include an example of how to solve file access in Node if you get an Error: ENOENT: no such file or directory message in your console.
To check your actual working directory and understand where you must point your path, log the following; it gave me a better understanding of the file location: console.log(process.cwd()).
const fs = require('fs');
const papa = require('papaparse');
const path = require('path');

const options = {
  /* options */
};

const fileName = path.resolve(__dirname, 'ADD YOUR ABSOLUTE FILE LOCATION HERE');
const dataStream = fs.createReadStream(fileName);
const parseStream = papa.parse(papa.NODE_STREAM_INPUT, options);

dataStream.pipe(parseStream);

let data = [];
parseStream.on('data', chunk => {
  data.push(chunk);
});

parseStream.on('finish', () => {
  console.log(data);
  console.log(data.length);
});

Efficient way to read file in NodeJS

I am receiving an image file sent from an Ajax request:
var data = canvas.toDataURL('image/jpg', 1.0);
$.post({
  url: "/upload-image",
  data: {
    file: data
  }
}).done(function(response) {
  ....
})
And on the server side, I want to transmit the image file to an API
function getOptions(buffer) {
  return {
    url: '.../face_detection',
    headers: headers,
    method: 'POST',
    formData: {
      filename: buffer
    }
  }
}

router.post('/upload-image', function(req, res, next) {
  console.log('LOG 0' + Date.now());
  var data_url = req.body.file;
  var matches = data_url.match(/^data:.+\/(.+);base64,(.*)$/);
  var ext = matches[1];
  var base64_data = matches[2];
  var buffer = new Buffer(base64_data, 'base64');
  console.log('LOG 1' + Date.now());
  request(getOptions(buffer), function(error, response, body) {
    res.json(body);
    console.log(Date.now());
  });
});
The problem I have is that the lines between LOG 0 and LOG 1 are very slow, taking a few seconds, even though the image is only 650 KB. Is there a way to speed this up?
Perhaps another method to read the header, avoiding the buffer, or changing the uploading process. I don't know, but I'd like it to be faster.
Thank you very much.
I would suggest using a library to handle some of this logic. If you would prefer to keep a lean dependency list, you can take a look at the source of some of these modules and base your own solution off of them.
For converting a data URI to a buffer: data-uri-to-buffer
For figuring out a file type: file-type
I would especially recommend the file-type solution. A safer (I can't say safest) way to determine what kind of file a Buffer contains is to inspect aspects of the file itself. file-type seems to at least look at the magic number of the file to check its type. Not foolproof, but if you are accepting files from users, you have to accept the risks involved.
Also have a look at Security Stack Exchange questions for good practices. Although the following say PHP, all server software runs the risk of being vulnerable to user input:
Hacker used picture upload to get PHP code into my site
Can simply decompressing a JPEG image trigger an exploit?
Risks of a PHP image upload form
"use strict";
const dataUriToBuffer = require('data-uri-to-buffer'),
fileType = require("file-type"),
express = require("express"),
router = express.Router(),
util = require("util"),
fs = require("fs"),
path = require("path");
const writeFileAsync = util.promisify(fs.writeFile);
// Keep track of file types you support
const supportedTypes = [
"png",
"jpg",
"gif"
];
// Handle POSTs to upload-image
router.post("/upload-image", function (req, res, next) {
// Did they send us a file?
if (!req.body.file) {
// Unprocessable entity error
return res.sendStatus(422);
}
// Get the file to a buffer
const buff = dataUriToBuffer(req.body.file);
// Get the file type
const bufferMime = fileType(buff); // {ext: 'png', mime: 'image/png'}
// Is it a supported file type?
if (!supportedTypes.contains(bufferMime.ext)) {
// Unsupported media type
return res.sendStatus(415);
}
// Save or do whatever with the file
writeFileAsync(path.join("imageDir", `userimage.${bufferMime.ext}`), buff)
// Tell the user that it's all done
.then(() => res.sendStatus(200))
// Log the error and tell the user the save failed
.catch((err) => {
console.error(err);
res.sendStatus(500);
});
});
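On the original speed question: much of the cost is base64-encoding a large image and then running a regex over a multi-megabyte string in the request body. A hedged alternative sketch that avoids base64 entirely by posting the canvas content as multipart/form-data (multer and the 'file' field name are assumptions, not part of the answer above):
// client: send the canvas as a binary Blob instead of a data URL
canvas.toBlob(function (blob) {
  var form = new FormData();
  form.append('file', blob, 'image.jpg');
  $.ajax({ url: '/upload-image', method: 'POST', data: form,
           processData: false, contentType: false });
}, 'image/jpeg', 1.0);

// server: multer parses the multipart body and hands over a Buffer directly
const multer = require('multer');
const upload = multer({ storage: multer.memoryStorage() });
router.post('/upload-image', upload.single('file'), function (req, res) {
  // req.file.buffer is the raw image; no base64 decode or regex needed
  request(getOptions(req.file.buffer), function (error, response, body) {
    res.json(body);
  });
});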

Node Express.js - Download file from memory - 'filename must be a string'

I'm trying to package data in memory into a text file and send it to the user, triggering a file download.
I have the following code:
app.get('/download', function(request, response){
  fileType = request.query.fileType;
  fileName = ( request.query.fileName + '.' + fileType ).toString();
  fileData = request.query.fileData;
  response.set('Content-disposition', 'attachment; filename=' + fileName );
  response.set('Content-type', 'text/plain');
  var fileContents = new Buffer(fileData, "base64");
  response.status(200).download( fileContents );
});
It keeps throwing an error saying that Content-disposition's filename parameter must be a string. fileName is most certainly a string, so I'm not sure what is going on.
Update:
Thanks to @jfriend00's advice, it is better and more efficient to send the Buffer directly to the client as a file, instead of first saving it to the server's disk.
To implement, stream.PassThrough() and pipe() can be used, here is an example:
var stream = require('stream');
//...
app.get('/download', function(request, response){
  //...
  var fileContents = Buffer.from(fileData, "base64");
  var readStream = new stream.PassThrough();
  readStream.end(fileContents);
  response.set('Content-disposition', 'attachment; filename=' + fileName);
  response.set('Content-Type', 'text/plain');
  readStream.pipe(response);
});
According to Express document, res.download() API is:
res.download(path [, filename] [, fn])
Transfers the file at path as an “attachment”. Typically, browsers will prompt the user for download. By default, the Content-Disposition header “filename=” parameter is path (this typically appears in the browser dialog). Override this default with the filename parameter.
Please note the first parameter of res.download() is a "path", which indicates the path of the file on the server that will be downloaded. In your code, the first parameter is a Buffer, which is why Node.js complains that the "filename parameter must be a string" -- by default, the Content-Disposition header "filename=" parameter is the path.
To make your code work using res.download(), you need to save the fileData in server as a file, and then invoke res.download() with that file's path:
var fs = require('fs');
//...
app.get('/download', function(request, response){
  //...
  var fileContents = Buffer.from(fileData, "base64");
  var savedFilePath = '/temp/' + fileName; // in some convenient temporary file folder
  fs.writeFile(savedFilePath, fileContents, function() {
    response.status(200).download(savedFilePath, fileName);
  });
});
Also, please note new Buffer(string[, encoding]) is deprecated now. It is better to use Buffer.from(string[, encoding]).
app.get('/download', (request, response) => {
  const fileData = 'SGVsbG8sIFdvcmxkIQ=='
  const fileName = 'hello_world.txt'
  const fileType = 'text/plain'
  response.writeHead(200, {
    'Content-Disposition': `attachment; filename="${fileName}"`,
    'Content-Type': fileType,
  })
  const download = Buffer.from(fileData, 'base64')
  response.end(download)
})
I found this question via google. My problem was, that I wanted to send a generated index.html file directly from the memory, instead of writing it to the file system first.
This is how it works:
app.get('/', function (req, res) {
  // set the appropriate HTTP header
  res.setHeader('Content-Type', 'text/html');
  // send it to the client
  res.send('<h1>This is the response</h1>');
});
Source: https://stackoverflow.com/a/55032538
