I'm using node.js, and through the socket.io library I receive chunks of data that are actually jpeg images. These images are frames of a realtime video captured from a remote webcam. I'm forced to stream the video as jpeg frames. I'm looking for a way to convert these jpeg images on the fly into a video file (mpeg-4 or mjpeg). Does node have a library that can do this? I already took a look at the node-fluent-ffmpeg library, but the only examples given were about converting jpeg files to a video, not converting a stream of jpeg images on the fly. Alternatively, does ffmpeg for Windows support a stream of jpeg images as input?
fluent-ffmpeg supports streams as inputs, as stated in its docs:
You can add any number of inputs to an Ffmpeg command. An input can
be [...] a readable stream
So for instance it supports using
ffmpeg().input(fs.createReadStream('/path/to/input3.avi'));
which creates a Readable stream from the file at '/path/to/input3.avi'.
I don't know much about FFMPEG itself, but you could take the messages coming from socket.io (they may already be Buffers) and wrap them in your own Readable stream implementation.
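For instance, a minimal sketch (assuming fluent-ffmpeg and an ffmpeg binary are installed; socket is the connected socket.io socket, and the 'frame' event name is a placeholder for however you receive the JPEG buffers):
const { PassThrough } = require('stream');
const ffmpeg = require('fluent-ffmpeg');

// A PassThrough acts as the Readable that fluent-ffmpeg consumes.
const frames = new PassThrough();

socket.on('frame', (jpegBuffer) => {
  // Each incoming message is assumed to be one complete JPEG frame as a Buffer.
  frames.write(jpegBuffer);
});
socket.on('disconnect', () => frames.end());

ffmpeg()
  .input(frames)
  .inputFormat('mjpeg')   // treat the byte stream as concatenated JPEGs
  .inputFPS(10)           // assumed frame rate of the incoming frames
  .videoCodec('libx264')
  .save('output.mp4');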
I think you should look at videofy
var exec = require("child_process").exec;
var escape = require("shell-escape");
var debug = require("debug")("videofy");
var mkdirp = require("mkdirp");
var uid = require("uid2");

/*
 * Expose videofy
 */

module.exports = videofy;

/**
 * Convert `input` file to `output` video with the given `opts`:
 *
 *  - `rate` frame rate [10]
 *  - `codec` the video codec, default is libx264
 *
 * @param {String} input
 * @param {String} output
 * @return
 * @api public
 */

function videofy(input, output, opts, fn) {
  if (!input) throw new Error('input filename required');
  if (!output) throw new Error('output filename required');
  var FORMAT = '-%05d';

  // options
  if ('function' == typeof opts) {
    fn = opts;
    opts = {};
  } else {
    opts = opts || {};
  }
  opts.rate = opts.rate || 10;
  opts.codec = opts.codec || 'libx264';

  // tmpfile(s)
  var id = uid(10);
  var dir = 'tmp/' + id;
  var tmp = dir + '/tmp' + FORMAT + '.jpg';

  // remove the temporary directory, then report the result
  function gc(err) {
    debug('remove %s', dir);
    exec('rm -fr ' + dir);
    fn(err);
  }

  debug('mkdirp -p %s', dir);
  mkdirp(dir, function(error) {
    if (error) return fn(error);

    // convert the input (e.g. a gif) to a numbered tmp jpg sequence with ImageMagick
    var cmd = ['convert', input, tmp];
    cmd = escape(cmd);
    debug('exec %s', cmd);

    exec(cmd, function(err) {
      if (err) return gc(err);

      // convert the jpg collection to a video with ffmpeg
      var cmd = ['ffmpeg'];
      cmd.push('-f', 'image2');
      cmd.push('-r', String(opts.rate));
      cmd.push('-i', tmp);
      cmd.push('-c:v', String(opts.codec));
      cmd.push(output);
      cmd = escape(cmd);
      debug("exec %s", cmd);
      exec(cmd, gc);
    });
  });
}
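Hypothetical usage of the videofy() helper above (assuming ImageMagick's convert and ffmpeg are on the PATH; the file names are placeholders):
videofy('input.gif', 'output.mp4', { rate: 10 }, function (err) {
  if (err) return console.error(err);
  console.log('wrote output.mp4');
});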
Using require("child_process") you can call ffmpeg directly, or there are npm modules that help with this. ffmpeg lets you take a list of jpegs and convert them to a video, and it can also append one or more jpegs to the beginning or end of a video.
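As a rough sketch of the child_process route (assuming ffmpeg is on the PATH; pushFrame() and finish() are hypothetical hooks for wherever your JPEG buffers come from), you could pipe the frames straight into ffmpeg's stdin instead of going through temp files:
const { spawn } = require('child_process');

const ffmpeg = spawn('ffmpeg', [
  '-f', 'image2pipe',     // read a sequence of images from stdin
  '-vcodec', 'mjpeg',     // tell ffmpeg the piped images are JPEGs
  '-r', '10',             // assumed input frame rate
  '-i', 'pipe:0',
  '-c:v', 'libx264',
  'out.mp4'
]);

ffmpeg.stderr.on('data', (d) => process.stderr.write(d));
ffmpeg.on('close', (code) => console.log('ffmpeg exited with code', code));

// Write each JPEG buffer as it arrives, then close stdin when the stream ends.
function pushFrame(jpegBuffer) {
  ffmpeg.stdin.write(jpegBuffer);
}
function finish() {
  ffmpeg.stdin.end();
}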
I'm trying to merge an audio file with a video file from the same source (YouTube).
In the following code I first read the console parameters with commander, then I define the videoOutput path and download the highest-resolution video from YouTube with node-ytdl-core. After that I download the audio for the video, and in the callback of video.on("end", ...)
I call the function merge().
const path = require('path');
const fs = require('fs');
const readline = require("readline");
const ytdl = require('ytdl-core');
const { program } = require('commander');
const ffmpeg = require('ffmpeg');

program
  .option("--url, --url <url>", "Youtube video url")
  .option("--name, --name <name>", "Name of the video in hard drive");
program.parse(process.argv);
const options = program.opts();

let url = options.url;
let name = options.name;

let videoOutput = path.resolve(`./video${name}.mp4`);
let video = ytdl(url, {
  quality: "highestvideo"
});

let starttime = 0;
video.pipe(fs.createWriteStream(videoOutput));
video.once('response', () => {
  starttime = Date.now();
});
video.on('progress', (chunkLength, downloaded, total) => {
  const percent = downloaded / total;
  const downloadedMinutes = (Date.now() - starttime) / 1000 / 60;
  const estimatedDownloadTime = (downloadedMinutes / percent) - downloadedMinutes;
  readline.cursorTo(process.stdout, 0);
  process.stdout.write(`${(percent * 100).toFixed(2)}% downloaded `);
  process.stdout.write(`(${(downloaded / 1024 / 1024).toFixed(2)}MB of ${(total / 1024 / 1024).toFixed(2)}MB)\n`);
  process.stdout.write(`running for: ${downloadedMinutes.toFixed(2)}minutes`);
  process.stdout.write(`, estimated time left: ${estimatedDownloadTime.toFixed(2)}minutes `);
  readline.moveCursor(process.stdout, 0, -1);
});
video.on('end', () => {
  process.stdout.write('\n\n');
});

// repeat for audio
video = ytdl(url, {
  quality: "highestaudio"
});
starttime = 0;
let audioOutput = path.resolve(`./audio${name}.mp3`);
video.pipe(fs.createWriteStream(audioOutput));
video.once('response', () => {
  starttime = Date.now();
});
video.on('progress', (chunkLength, downloaded, total) => {
  const percent = downloaded / total;
  const downloadedMinutes = (Date.now() - starttime) / 1000 / 60;
  const estimatedDownloadTime = (downloadedMinutes / percent) - downloadedMinutes;
  readline.cursorTo(process.stdout, 0);
  process.stdout.write(`${(percent * 100).toFixed(2)}% downloaded `);
  process.stdout.write(`(${(downloaded / 1024 / 1024).toFixed(2)}MB of ${(total / 1024 / 1024).toFixed(2)}MB)\n`);
  process.stdout.write(`running for: ${downloadedMinutes.toFixed(2)}minutes`);
  process.stdout.write(`, estimated time left: ${estimatedDownloadTime.toFixed(2)}minutes `);
  readline.moveCursor(process.stdout, 0, -1);
});

function merge() {
  ffmpeg()
    .input("./videotest.mp4")  // your video file input path
    .input("./audiotest.mp3")  // your audio file input path
    .output("./finished.mp4")  // your output path
    .outputOptions(['-map 0:v', '-map 1:a', '-c:v copy', '-shortest'])
    .on('start', (command) => {
      console.log('TCL: command -> command', command);
    })
    .on('error', (error) => console.log("errrrr", error))
    .on('end', () => console.log("Completed"))
    .run();
}

video.on('end', () => {
  process.stdout.write('\n\n');
  merge();
});
But even though the files are there ffmpeg throws me this error:
I also tried this in the video's end callback, because maybe the audio finishes downloading before the video, but it still doesn't work. I've also tried renaming the output paths for the files, keeping the old files, and rerunning the script so the files are definitely there. Still doesn't work.
I have also tried absolute paths ("C:/..." also with backslash "C:\...") but I still get the error message that the input file path can't be empty.
I appreciate any piece of advice or help!
OK, I have found a solution.
First I moved my code for downloading the audio into the video.on("end", ...) callback of the video download.
Then I changed from require("ffmpeg") to these 5 lines:
const ffmpeg = require('fluent-ffmpeg');
const ffmpegPath = require('@ffmpeg-installer/ffmpeg').path;
const ffprobePath = require('@ffprobe-installer/ffprobe').path;
ffmpeg.setFfmpegPath(ffmpegPath);
ffmpeg.setFfprobePath(ffprobePath);
Make sure to run:
npm i fluent-ffmpeg
and
npm i @ffprobe-installer/ffprobe
and
npm i @ffmpeg-installer/ffmpeg
That's my solution!
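A minimal sketch of that ordering (reusing the question's variable names; waiting on the write streams' 'finish' events makes sure both files are fully flushed to disk before merge() runs):
const videoWrite = fs.createWriteStream(videoOutput);
ytdl(url, { quality: "highestvideo" }).pipe(videoWrite);

videoWrite.on('finish', () => {
  // only start the audio download once the video file is complete
  const audioWrite = fs.createWriteStream(audioOutput);
  ytdl(url, { quality: "highestaudio" }).pipe(audioWrite);
  audioWrite.on('finish', () => merge());
});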
I'm trying to create a media server using Node.js, and I have no problem streaming any format.
But since web browsers can't play H.265-encoded videos, I need to convert them to H.264 while createReadStream produces each chunk, so that I don't have to convert the whole file beforehand, just the chunk the server sends to the browser.
const path = 'assets/yourfavmov.mkv';
const stat = fs.statSync(path);
const fileSize = stat.size;
const range = req.headers.range;

if (range) {
  const rangeArray = range.replace(/bytes=/, "").split("-");
  console.log(rangeArray);
  const start = parseInt(rangeArray[0], 10);
  const end = rangeArray[1] ? parseInt(rangeArray[1], 10) : fileSize - 1;
  const chunksize = (end - start) + 1;
  const fileChunk = fs.createReadStream(path, { start, end });
  const head = {
    'Content-Range': `bytes ${start}-${end}/${fileSize}`,
    'Accept-Ranges': 'bytes',
    'Content-Length': chunksize,
    'Content-Type': 'video/x-matroska'
  };
  res.writeHead(206, head);
  fileChunk.pipe(res);
} else {
  res.end("wont let you stream");
}
I tried to convert the stream using ffmpeg-stream, like so
const converter = new Converter();
const input = converter.createInputStream({
  f: "matroska,webm",
  vcodec: "hevc"
});

const fileChunk = fs.createReadStream(path, { start, end });
fileChunk.pipe(input);

converter
  .createOutputStream({ f: "matroska,webm", vcodec: "h264" })
  .pipe(res);
But I have no idea whether what I did is correct or wrong, so no luck.
So is there a way to do it right?
Thanks in advance.
"is there an way to do it right":
Not out of the box, no. What you are trying to do is called "Just in time" transcoding, and it is extremely complex. You must take into account container timestamps, audio priming samples, video sequence headers, and a few other things. ffmpeg does not do this for you. For all intents and purposes, what you want to do is not possible with open source tools today.
What you should do is encode the video BEFORE it is requested (just-in-time encoding means you re-encode EVERY time someone watches the movie, which is really inefficient).
Netflix is not encoding your videos when you request them - Netflix has a library of videos encoded and ready to go when the request is made.
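If you go the pre-encoding route, a minimal sketch with fluent-ffmpeg (assuming that package and an ffmpeg binary are installed; the file names are placeholders) could look like this:
const ffmpeg = require('fluent-ffmpeg');

ffmpeg('assets/yourfavmov.mkv')
  .videoCodec('libx264')   // re-encode the HEVC video track to H.264
  .audioCodec('copy')      // keep the audio track as-is
  .output('assets/yourfavmov.h264.mp4')
  .on('end', () => console.log('transcode finished'))
  .on('error', (err) => console.error(err))
  .run();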
I want to be able to extract jpegs from a Uint8Array containing the data of an mpeg or avi video.
The ffmpeg module has the function fnExtractFrameToJPG, but it only accepts a filename pointing to the video file. I want to extract the frames from the Uint8Array directly.
One way to do it is to write the Uint8Array to a tmp file and then use that tmp file with ffmpeg to extract the frames:
const fs = require("fs");
const tmp = require("tmp");
const ffmpeg_ = require("ffmpeg");

function convert_images(video_bytes_array) {
  var tmpobj = tmp.fileSync({ postfix: '.avi' });
  fs.writeFileSync(tmpobj.name, video_bytes_array);
  try {
    var process = new ffmpeg_(tmpobj.name);
    console.log(tmpobj.name);
    process.then(function (video) {
      // Callback mode
      video.fnExtractFrameToJPG('./', { // make sure you defined the directory where you want to save the images
        frame_rate: 1,
        number: 10,
        file_name: 'my_frame_%t_%s'
      }, function (error, files) {
        if (!error)
          tmpobj.removeCallback(); // clean up the tmp file once the frames are written
      });
    });
  } catch (e) {
    console.log(e.code);
    console.log(e.msg);
  }
}
Another possibility is to use opencv after you save the Uint8Array to a tmp file. Yet another solution is to use streams with fluent-ffmpeg, which would not require tmp files at all.
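A rough sketch of that stream-based alternative (assuming fluent-ffmpeg, an ffmpeg binary, and Node 12+ for Readable.from; note that piped input is not seekable, so this works best with stream-friendly containers):
const { Readable } = require('stream');
const ffmpeg = require('fluent-ffmpeg');

function extractFrames(videoBytes, outPattern) {
  // wrap the Uint8Array in a Readable so no temp file is needed
  const input = Readable.from([Buffer.from(videoBytes)]);
  return new Promise((resolve, reject) => {
    ffmpeg(input)
      .videoFilters('fps=1')   // grab one frame per second
      .output(outPattern)      // e.g. './my_frame_%04d.jpg'
      .on('end', resolve)
      .on('error', reject)
      .run();
  });
}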
(new information below)
I am trying to set up a lambda function that reacts to uploaded tgz files by uncompressing them and writing the results back to S3. The unzip and untar work fine, but uploading to S3 fails:
/Users/russell/lambda/gzip/node_modules/aws-sdk/lib/s3/managed_upload.js:350
var buf = self.body.read(self.partSize - self.partBuffer.length) ||
^
TypeError: undefined is not a function
at ManagedUpload.fillStream (/Users/russell/lambda/gzip/node_modules/aws-sdk/lib/s3/managed_upload.js:350:25)
at Entry.<anonymous> (/Users/russell/lambda/gzip/node_modules/aws-sdk/lib/s3/managed_upload.js:167:28)
at Entry.emit (events.js:104:17)
at Entry._read (/Users/russell/lambda/gzip/node_modules/tar/lib/entry.js:123:12)
at Entry.end (/Users/russell/lambda/gzip/node_modules/tar/lib/entry.js:82:8)
at Parse._process (/Users/russell/lambda/gzip/node_modules/tar/lib/parse.js:107:13)
at BlockStream.<anonymous> (/Users/russell/lambda/gzip/node_modules/tar/lib/parse.js:47:8)
at BlockStream.emit (events.js:107:17)
at BlockStream._emitChunk (/Users/russell/lambda/gzip/node_modules/tar/node_modules/block-stream/block-stream.js:145:10)
at BlockStream.write (/Users/russell/lambda/gzip/node_modules/tar/node_modules/block-stream/block-stream.js:45:10)
This error occurs when I write to S3, but if instead I write the files locally to disk it works, so the pipeline is correct.
Here is code that demonstrates the problem:
var aws = require('aws-sdk');
var s3 = new aws.S3({apiVersion: '2006-03-01'});
var zlib = require('zlib');
var tar = require('tar');
var fstream = require('fstream');

fstream.Reader({'path': 'testdata.tar.gz'})
  .pipe(zlib.Unzip())
  .pipe(tar.Parse())
  .on('entry', function(entry) {
    var filename = entry.path;
    console.log('got ' + entry.type + ' ' + filename);
    if (entry.type == 'File') {
      if (1) { // switch between working and nonworking cases
        s3.upload({Bucket: 'my_bucket', Key: 'gunzip-test/' + filename, Body: entry}, {},
          function(err, data) {
            if (err)
              console.log('ERROR!');
            else
              console.log('OK');
          });
      }
      else {
        entry.pipe(fstream.Writer({ 'path': '/tmp/mytest/' + filename }));
      }
    }
  });
If the code is set to write to S3 it fails with the above error; if it writes the extracted files locally it succeeds. entry is a stream, and according to the docs it should be accepted as the upload Body parameter. I put a print statement in ManagedUpload, where the failure occurs, and confirmed that self.body is a stream:
var stream = require('stream');
console.log('is it a stream? ' + ((self.body instanceof stream) ? 'yes' : 'no'));
console.log('self.body.read is ' + self.body.read);
returns
$ got File gunzip.js
is it a stream? yes
self.body.read is undefined
I'm pretty new to AWS and node.js, so there could be a basic problem with this, but I've spent a day and haven't found it. I did the upload call with unzip instead of gzip and it worked (using lambda functions to unzip archives in S3 is really slow). Can anyone point me at something I am doing wrong in this code?
Thanks
I think I understand this a little better now. I broke the pipeline up into pieces and looked at each one. The problem is that tar.Parse uses fstream, not stream. If I look at the return value of the .pipe(tar.Parse()) statement, it is a stream, but it is not a stream.Readable or a stream.Writable. fstream does not define a read() method (its reader is based on Stream, not stream.Readable), so tar.Parse, which is also based on Stream, does not have one either.
So a refinement of the question is, is this a bug in fstream, or is fstream not intended to be a stream? I think it is a bug - from the README:
"Like FS streams, but with stat on them, and supporting directories and
symbolic links, as well as normal files. Also, you can use this to set
the stats on a file, even if you don't change its contents, or to create
a symlink, etc."
In my case running the stream through stream.PassThrough helped.
var PassThrough = require('stream').PassThrough;

var stream = getStreamSomeHow();
var passthrough = new PassThrough();

stream.pipe(passthrough);
s3.upload({ ..., Body: passthrough });
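Applied to the question's pipeline, a sketch of the 'entry' handler could look like this (it assumes the same s3 client, bucket, and key prefix as the question):
var PassThrough = require('stream').PassThrough;

function uploadEntry(entry) {
  // give s3.upload() a standard Readable that actually implements read()
  var passthrough = new PassThrough();
  entry.pipe(passthrough);
  s3.upload(
    { Bucket: 'my_bucket', Key: 'gunzip-test/' + entry.path, Body: passthrough },
    function (err) {
      if (err) console.log('ERROR!');
      else console.log('OK');
    }
  );
}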
Your Body value is a Stream object, in which case you will need to use .toString():
var aws = require('aws-sdk');
var s3 = new aws.S3({apiVersion: '2006-03-01'});
var zlib = require('zlib');
var tar = require('tar');
var fstream = require('fstream');

fstream.Reader({'path': 'testdata.tar.gz'})
  .pipe(zlib.Unzip())
  .pipe(tar.Parse())
  .on('entry', function(entry) {
    var filename = entry.path;
    console.log('got ' + entry.type + ' ' + filename);
    if (entry.type == 'File') {
      if (1) { // switch between working and nonworking cases
        s3.upload({Bucket: 'my_bucket', Key: 'gunzip-test/' + filename, Body: entry.toString()}, {},
          function(err, data) {
            if (err)
              console.log('ERROR!');
            else
              console.log('OK');
          });
      }
      else {
        entry.pipe(fstream.Writer({ 'path': '/tmp/mytest/' + filename }));
      }
    }
  });
I'm currently working on a project that requires the content to be gzipped before it's sent back to the browser.
I'm currently using a simple read stream and piping the data to the response of a request, but I'm not sure of the best way to gzip the content without blocking requests.
The line that sends the data is:
require('fs').createReadStream(self.staticPath + Request.url).pipe(Response);
See the following class, which is the static handler object:
(function() {
  var StaticFeeder = function() {
    this.staticPath = process.cwd() + '/application/static';
    this.contentTypes = require('./contenttypes');
  }

  StaticFeeder.prototype.handle = function(Request, Response, callback) {
    var self = this;
    if (Request.url == '/') {
      return false;
    }
    if (Request.url.indexOf('../') > -1) {
      return false;
    }
    require('path').exists(this.staticPath + Request.url, function(isthere) {
      /*
       * If no file exists, pass back to the main handler and return
       */
      if (isthere === false) {
        callback(false);
        return;
      }
      /*
       * Get the extension if possible
       */
      var ext = require('path').extname(Request.url).replace('.', '');
      /*
       * Get the Content-Type
       */
      var ctype = self.contentTypes[ext] !== undefined ? self.contentTypes[ext] : 'application/octet-stream';
      /*
       * Send the Content-Type
       */
      Response.setHeader('Content-Type', ctype);
      /*
       * Create a readable stream and send the file
       */
      require('fs').createReadStream(self.staticPath + Request.url).pipe(Response);
      /*
       * Tell the main handler we have dealt with the response
       */
      callback(true);
    });
  }

  module.exports = new StaticFeeder();
})();
Can anyone help me get around this problem? I haven't a clue how to tell the pipe to compress with gzip.
Thanks
Actually, I have a blog post on just this thing. http://dhruvbird.blogspot.com/2011/03/node-and-proxydecorator-pattern.html
You will need to run:
npm install compress -g
before using it, though.
The basic idea revolves around using pipes to add functionality.
However, for your use case, you would be better off putting node.js behind nginx and letting nginx do all the gzipping, since node.js is (mostly) a single process and the gzip routines would take up your process's CPU.
You can just pipe it through a compression stream:
var fs = require('fs');
var zlib = require('zlib');

fs.createReadStream(file)
  .pipe(zlib.createGzip())
  .pipe(Response);
This assumes the file is not already compressed and that you have already set all the headers for the response.
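Applied to the question's handler, that would look roughly like this (a sketch; the Content-Encoding header tells the browser the body is gzipped, and a real handler should also check the request's Accept-Encoding first):
Response.setHeader('Content-Type', ctype);
Response.setHeader('Content-Encoding', 'gzip');
require('fs').createReadStream(self.staticPath + Request.url)
  .pipe(require('zlib').createGzip())
  .pipe(Response);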