Node.js Request + Express Pipe

I have a problem streaming video files from one server to another.
I wrote this script:
var request = require("request"),
    express = require("express");

var app = express();

app.get("/cast", function (req, res) {
    var url = req.query.video;
    res.writeHead(200, {
        'Content-Type': 'video/mp4'
    });
    request({
        url: url,
        headers: {
            Referer: "http://example.com/1706398/" + url
        }
    })
    .on('response', function (response) {
        response.on('data', function (data) {
            console.log("data chunk received: " + data.length);
        });
        response.on('end', function () {
            console.log('Video completed');
        });
    })
    .pipe(res);
});

app.listen(8080);
But the video response sometimes works and is sometimes corrupted, whereas if the request's data is written to a writable buffer and saved as a video file, it works with any URL.
I cannot find any error or problem in my code. Here are some of the URLs I tried:
https://gist.github.com/FrancisCan/f2bb86f8ff73b45fa192
Thanks :)

Remove the writeHead(200). When you are streaming, you should return HTTP 206 (Partial Content) responses, not HTTP 200. I have the same scenario as you (streaming a video file from a blob container in the cloud to an Angular application), and there is no need for the HTTP 200 response.
Update: adding some code on how I do it:
AzureStorageHelper.prototype.streamBlob = function streamBlob(req, res, blob, params) {
    if (!params) {
        params = { container: container };
    }
    blob_service.getBlobProperties(params.container, blob, function (error, result, response) {
        if (!result) return res.status(404).end();
        if (error) return res.status(500).end();

        var blobLength = result.contentLength;
        var range = req.headers.range ? req.headers.range : "bytes=0-";
        var positions = range.replace(/bytes=/, "").split("-");
        var start = parseInt(positions[0], 10);
        var end = positions[1] ? parseInt(positions[1], 10) : blobLength - 1;
        var chunksize = (end - start) + 1;

        // This is what's interesting for your scenario. I used to set this up myself,
        // but it is now handled by the Azure Storage Node.js SDK:
        /*res.writeHead(206, {
            'Accept-Ranges': 'bytes',
            'Content-Range': "bytes " + start + "-" + end + "/" + blobLength,
            'Content-Type': result.contentType,
            'Content-Length': chunksize,
            'Content-MD5': result.contentMD5,
        });*/

        var options = {
            rangeStart: start,
            rangeEnd: end
        };

        // This is an API from the Azure Storage Node.js SDK. You might want to check the
        // source of this method on GitHub to see how the lib deals with HTTP 206 responses
        // and the piping.
        blob_service.getBlobToStream(params.container, blob, res, options, function (error, result, response) {
            if (error) {
                return res.status(500).end();
            }
        });
    });
};
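For the original question's proxy scenario, a rough sketch of the same idea with request and Express follows. It assumes (as the request module documents) that piping a request to the Express response copies the upstream status code and headers, so upstream 206 Partial Content replies reach the browser; the error handling is illustrative, not the answerer's code:

var request = require("request"),
    express = require("express");

var app = express();

app.get("/cast", function (req, res) {
    var url = req.query.video;

    // Forward the browser's Range header (if any) so the upstream server
    // can answer with 206 Partial Content; do not call res.writeHead here.
    var upstreamHeaders = { Referer: "http://example.com/1706398/" + url };
    if (req.headers.range) {
        upstreamHeaders.Range = req.headers.range;
    }

    request({ url: url, headers: upstreamHeaders })
        .on('error', function (err) {
            if (!res.headersSent) res.status(502).end();
        })
        .pipe(res); // request copies the upstream status code and headers onto res
});

app.listen(8080);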

Related

NodeJS - prompt download CSV file

I'm trying to get, after some promises have been executed, a CSV result back together with a status response containing details.
The response does return the data for the CSV, but I cannot seem to get the browser to download this data as a CSV file.
router.post('/getSalesOrders', function (request, response) {
    var data = request.body;
    salesOrderActions.retrieveSalesOrders(data)
        .then(function (result) {
            response.setHeader('Content-disposition', 'attachment; filename=testing.csv');
            response.set('Content-Type', 'text/csv');
            response.json(result[0].message).send(result[0].file);
        })
        .catch(function (err) {
            console.log(err);
            if (err.statusCode) {
                response.json(err);
            }
            else {
                var error = output.getCriticalErrorResult(c.titles.SERVICE_CRITICAL_ERROR, c.messages.UNKNOWN_ERROR, err.message);
                response.json(error);
            }
        });
});
My result object gets created in the salesOrderActions; I am using the npm package json2csv here:
var fields = ['id', .....];
var csv = csvParser({ data: unmatchedLines, fields: fields });
return {
    file: csv,
    message: output.getSuccessResult(
        titles.SALES_ORDER_SUCCESS_RETRIEVAL,
        salesDataForModel.identifier
    )
}
My response to the browser is as follows:
So it seems my message isn't sent, and I do get the CSV data but not as a file to download.
How can I manage that?
As a side note, my front-end is React.
EDIT
Response with octet headers:
Try:
sending Content-Type before Content-Disposition
quoting the filename: filename="testing.csv"
Also, HTTP headers are case-insensitive, so it should not make a difference, but you should write Content-Disposition (capital D).
response.set('Content-Type', 'text/csv');
response.setHeader('Content-Disposition', 'attachment; filename="testing.csv"');
If this does not work, you can change the Content-Type to application/octet-stream, which always forces the browser to download the data sent from the server.
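As a small sketch of that fallback, reusing the result object from the question (the header values are examples, not tested against the asker's setup):

response.set('Content-Type', 'application/octet-stream'); // forces a download
response.setHeader('Content-Disposition', 'attachment; filename="testing.csv"');
response.send(result[0].file); // the CSV string built by json2csv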
Try this code:
router.post('/getSalesOrders', function (request, response) {
    var data = request.body;
    var fs = require('fs');
    salesOrderActions.retrieveSalesOrders(data)
        .then(function (result) {
            // stream the CSV file from disk instead of calling response.json()
            var file = "testing.csv";
            response.setHeader('Content-disposition', 'attachment; filename=testing.csv');
            response.set('Content-Type', 'text/csv');
            var filestream = fs.createReadStream(file);
            filestream.pipe(response); // note: pipe into `response`, not `res`
        })
        .catch(function (err) {
            console.log(err);
            if (err.statusCode) {
                response.json(err);
            }
            else {
                var error = output.getCriticalErrorResult(c.titles.SERVICE_CRITICAL_ERROR, c.messages.UNKNOWN_ERROR, err.message);
                response.json(error);
            }
        });
});
So it turns out this is because I'm doing an Ajax request, which by default doesn't prompt the browser to download any files.
What I did in the end:
router.post('/getSalesOrders', function (request, response) {
    var data = request.body;
    salesOrderActions.retrieveSalesOrders(data)
        .then(function (result) {
            response.json(result);
        })
        .catch(function (err) {
            //...
        });
});
And then in my front-end, when receiving the result:
salesOrderService.retrieveSalesOrderData()
    .then(function (result) {
        self.convertAndDownloadCsv(result.unmatchedLines);
    });

convertAndDownloadCsv: function (data) {
    if (data && data.length > 0) {
        var csvData = csvProcessor({ // using the csv2json Node.js package
            data: data,
            quotes: '',
            del: ';'
        });
        var filename = "unmatchedLinesFromSalesOrders.csv";
        var blob = new Blob([csvData], { type: 'text/csv;charset=utf-8;' });
        if (navigator.msSaveBlob) { // IE 10+
            navigator.msSaveBlob(blob, filename);
        } else {
            var link = document.createElement("a");
            if (link.download !== undefined) { // feature detection
                // Browsers that support the HTML5 download attribute
                var url = URL.createObjectURL(blob);
                link.setAttribute("href", url);
                link.setAttribute("download", filename);
                link.style.visibility = 'hidden';
                document.body.appendChild(link);
                link.click();
                document.body.removeChild(link);
            }
        }
    }
}
More info can be found here

How to handle several parallel requests in Node.js

I'm using PhantomJS to take screenshots inside Node.js, but it can't handle multiple requests from users. The problem is that when several users send requests concurrently, they get the same result.
This is the code I'm using:
var http = require('http');
var phantom = require('phantom');

var url, img;

http.createServer(function (req, res) {
    res.setHeader('Content-Type', 'text/html; charset=utf-8');
    res.writeHeader(200, { "Content-Type": "text/html" });
    url = req.url;
    url = url.replace('/', '');
    url = url.trim();
    if (!(url == 'favicon.ico')) {
        console.log(url);
        phantom.create().then(function (ph) {
            ph.createPage().then(function (page) {
                page.property('viewportSize', { width: 1024, height: 768 }).then(function () {
                    page.open('http://' + url + '/').then(function (status) {
                        console.log(status);
                        page.property('onLoadFinished').then(function () {
                            if (!(status == 'success')) {
                                res.write('<html><body><h2>' + status + ' : ' + url + ' is not correct url!</h2></body></html>');
                                res.end();
                                page.close();
                            } else {
                                setTimeout(function () {
                                    page.renderBase64('jpeg').then(function (img) {
                                        res.write('<html><body><img src="data:image/jpeg;base64,' + img + '"/></body></html>');
                                        res.end();
                                        page.close();
                                    });
                                }, 4000);
                            }
                        });
                    });
                });
            });
        });
    }
}).listen(80, '127.0.0.1');

console.log('Server running at http://127.0.0.1:80/');
console.log('Server running at http://127.0.0.1:80/');
You've defined var url, img; outside the scope of the HTTP request handler, meaning they're shared by different requests (one request might change them while a previous one is still processing), which is probably what's causing the issue. Move those declarations inside the request handler:
// var url, img; // << move this
http.createServer(function (req, res) {
    var url, img; // << here
    res.setHeader('Content-Type', 'text/html; charset=utf-8');
    res.writeHeader(200, { "Content-Type": "text/html" });
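A rough sketch of the corrected handler, keeping the rest of the PhantomJS chain unchanged (only the declarations move; everything else is as in the question):

http.createServer(function (req, res) {
    // Per-request state: a concurrent request can no longer overwrite these
    var url = req.url.replace('/', '').trim();
    var img;

    res.setHeader('Content-Type', 'text/html; charset=utf-8');
    res.writeHeader(200, { "Content-Type": "text/html" });

    if (url !== 'favicon.ico') {
        console.log(url);
        // ... same phantom.create() / createPage() / renderBase64 logic as above,
        // reading only the local url and img variables ...
    }
}).listen(80, '127.0.0.1');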

Uploading video to Vimeo with nodejs

I'm trying to upload a video to Vimeo with Node.js (https://developer.vimeo.com/apis/advanced/upload, at step 3). This is what I currently do:
First I build the request options and call the function that reads the file:
var options = {
    hostname: dataObject.ticket.host,
    path: '/upload?ticket_id=' + dataObject.ticket.id,
    port: 8080,
    method: 'POST'
};

postMovie(options);
I get these parameters from my object:
{
    "generated_in": "0.0308",
    "stat": "ok",
    "ticket": {
        "endpoint": "http://126535.cloud.vimeo.com:8080/upload?ticket_id=9d818e8bd066dfd54e53f1be2fa3f958",
        "endpoint_secure": "https://126535.cloud.vimeo.com/upload?ticket_id=9d818e8bd066dfd54e53f1be2fa3f958",
        "host": "126535.cloud.vimeo.com",
        "id": "9d818e8bd066dfd54e53f1be2fa3f958",
        "max_file_size": "26843545600"
    }
}
This function is called:
function postMovie(options) {
    // This is an async file read
    fs.readFile('public/uploads/4363066343.mp4', function (err, data) {
        if (err) {
            console.log("FATAL An error occurred trying to read in the file: " + err);
            process.exit(-2);
        }
        // Make sure there's data before we post it
        if (data) {
            PostData(data, options);
        }
        else {
            console.log("No data to post");
            process.exit(-1);
        }
    });
};
When the file is read:
function PostData(data, options) {
    var headers = {
        'Content-Type': 'video/mp4',
        'Content-Length': data.length
    };
    options.headers = headers;
    console.log(options);
    // Set up the request
    var post_req = http.request(options, function (res) {
        res.on('data', function (chunk) {
            console.log('Response: ' + chunk);
        });
    });
    // post the data
    post_req.write(data);
    post_req.end();
    post_req.on('error', function (e) {
        console.log('problem with request: ' + e.message);
    });
}
My post_req.on('error') handler logs this:
problem with request: write EPIPE
problem with request: write EPIPE
I understand this is because of a time-out on the server side.
I assume my request is not well formed.
Can someone point out what I did wrong?
The upload operation will be much simpler with the request module.
var inspect = require('eyespect').inspector();
var request = require('request');
var path = require('path');
var fs = require('fs');

var filePath = path.join(__dirname, '../public/uploads/foo.mp4');

fs.stat(filePath, function (err, stats) {
    if (err) {
        inspect(err, 'error stating file');
        return;
    }
    var fileSize = stats.size;
    var url = 'https://126535.cloud.vimeo.com/upload?ticket_id=9d818e8bd066dfd54e53f1be2fa3f958';
    var opts = {
        url: url,
        method: 'post',
        headers: {
            'Content-Length': fileSize,
            'Content-Type': 'foo'
        }
    };
    var r = request(opts);
    // pipe the file on disk to vimeo
    var readStream = fs.createReadStream(filePath);
    readStream.pipe(r);
    readStream.on('error', function (err) {
        inspect(err, 'error uploading file');
    });
    readStream.on('end', function () {
        inspect('file uploaded correctly');
    });
});
Request also allows you to set the timeout option if the file is big and thus takes a long time to upload.
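For example, a sketch of the same opts object with a timeout added (the request module takes the value in milliseconds; the number below is only an example):

var opts = {
    url: url,
    method: 'post',
    timeout: 10 * 60 * 1000, // give up if no response arrives within 10 minutes
    headers: {
        'Content-Length': fileSize,
        'Content-Type': 'foo'
    }
};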

NodeJS - Stream video that is being downloaded

I am writing a program that will stream a video file that is currently being downloaded onto the drive. The problem I am having seems to be getting the browser to actually play the video. The script listens for file changes and then streams the rest, but the browser doesn't do anything besides display a blank video page.
var fs = require('fs'),
    http = require('http'),
    filename = '/home/qrpike/Videos/topgears.mp4';

http.createServer(function (req, res) {
    console.log(req.url);
    if (req.url == '/video.mp4') {
        res.writeHead(200, {
            'Content-Type': 'video/mp4',
            'Cache-Control': 'public',
            'Connection': 'keep-alive',
            'Content-Disposition': 'inline; filename=topgears.mp4;',
            'Content-Transfer-Encoding': 'binary',
            'Transfer-Encoding': 'chunked'
        });
        fs.open(filename, 'r', function (err, fd) {
            if (err) throw new Error('Could not open file');
            var position = 0;
            fs.stat(filename, read);
            fs.watchFile(filename, read.bind(null, null));
            function read(err, stat) {
                var delta = stat.size - position;
                if (delta <= 0) return;
                fs.read(fd, new Buffer(delta), 0, delta, position, function (err, bytes, buffer) {
                    console.log("err", err, "bytes", bytes, "position", position, "delta", delta);
                    res.write(buffer.toString('binary'));
                });
                position = stat.size;
            }
        });
    }
}).listen(1337);

console.log('Server running at http://127.0.0.1:1337/');
So this answer depends on growing-file, which in theory does what you want. My concern is that the project hasn't had a commit in two years, so I have no idea if it still works. That being said, this worked for me locally (though I didn't test piping to the video file):
var fs = require('fs');
var http = require('http');
var GrowingFile = require('growing-file');

var FILENAME = '/home/dave/Desktop/video/video.ogg';

var server = http.createServer(function (req, res) {
    var file;
    if (req.url === '/video.ogg') {
        res.writeHead(200, {
            'Content-Type': 'video/ogg'
        });
        file = GrowingFile.open(FILENAME);
        file.pipe(res);
    } else {
        res.statusCode = 404;
        res.end('Not Found');
    }
});

server.listen(1337);

Node.js: run external process tasks with async functions or with a queue-based worker?

I have the following function to resize an image and upload to S3 (using 'gm' and 'knox'):
var http = require('http');
var https = require('https');
var s3 = require('./s3');
var gm = require('gm');
var fs = require('fs');

module.exports.processImageUrl = function (imageUrl, filename, callback) {
    var client = http;
    if (imageUrl.substr(0, 8) == 'https://') { client = https; }

    client.get(imageUrl, function (res) {
        if (res.statusCode != 200) {
            return callback(new Error('HTTP Response code ' + res.statusCode));
        }
        gm(res)
            .geometry(1024, 768, '>')
            .stream('jpg', function (err, stdout, stderr) {
                if (!err) {
                    var file = fs.createWriteStream(__dirname + '/../tmp/' + filename + '.jpg');
                    stdout.pipe(file);
                    stdout.on('end', function () {
                        fs.stat(__dirname + '/../tmp/' + filename + '.jpg', function (err, stats) {
                            if (!err) {
                                var headers = {
                                    'Content-Length': stats.size,
                                    'Content-Type': 'Image/jpeg',
                                    'x-amz-acl': 'public-read'
                                };
                                var file = fs.createReadStream(__dirname + '/../tmp/' + filename + '.jpg');
                                s3.putStream(file, '/img/d/' + filename + '.jpg', headers, function (err, res) {
                                    if (err) {
                                        return callback(err);
                                    } else {
                                        fs.unlink(__dirname + '/../tmp/' + filename + '.jpg');
                                        return callback(null, res.client._httpMessage.url);
                                    }
                                });
                            }
                        });
                    });
                } else {
                    callback(err);
                }
            });
    }).on('error', function (err) {
        callback(err);
    });
};
Potentially, if I handle many concurrent requests, many GraphicsMagick processes will be spawned. Is it better practice to implement a queue-based worker to process the images and spawn a controlled number of processes?
It's absolutely better to manage this in a queue. You don't want end users to be able to grind your service to a halt due to the number of requests, and DoS attacks become trivial when you're not queueing high-CPU operations. I would even consider putting the GraphicsMagick code onto a different server or servers so you can scale them independently of the HTTP code.
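As a rough sketch of the queueing idea (not the answerer's code), a small in-process limiter around the processImageUrl function from the question could look like this; the concurrency of 2 is just an example value:

// Hypothetical in-process queue: at most `concurrency` resize jobs run at once.
var jobs = [];
var running = 0;
var concurrency = 2; // example value; tune to the CPU cores you can spare

function enqueueImageJob(imageUrl, filename, callback) {
    jobs.push({ imageUrl: imageUrl, filename: filename, callback: callback });
    runNext();
}

function runNext() {
    if (running >= concurrency || jobs.length === 0) return;
    running++;
    var job = jobs.shift();
    // processImageUrl is the function from the question above
    processImageUrl(job.imageUrl, job.filename, function (err, url) {
        running--;
        job.callback(err, url);
        runNext(); // start the next queued job, if any
    });
}

For anything beyond a single process, the same pattern maps onto a proper job queue (for example a Redis-backed worker) running on the separate image servers mentioned above.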
