fluent-ffmpeg get codec data without specifying output - node.js

I am using the fluent-ffmpeg node module for getting codec data from a file.
It works if I give an output, but I was wondering if there is any option to run fluent-ffmpeg without giving it an output.
This is what I am doing:
// readStream is a readable stream wrapping the uploaded file's buffer
readStream.end(new Buffer(file.buffer));

var process = new ffmpeg(readStream);
process.on('start', function() {
  console.log('Spawned ffmpeg');
}).on('codecData', function(data) {
  // get recording duration
  const duration = data.duration;
  console.log(duration);
}).save('temp.flac');
As you can see, I am saving the file to temp.flac so I can get the duration of that file in seconds.

If you don't want to save the result of the ffmpeg process to a file, one thing that comes to mind is to redirect the command output to /dev/null.
In fact, as the owner of the fluent-ffmpeg repository said in a comment, there is no need to specify a real file name for the destination when using the null format.
So, for example, something like this will work:
let process = new ffmpeg(readStream);
process
  .addOption('-f', 'null') // set format to null
  .on('start', function() {
    console.log('Spawned ffmpeg');
  })
  .on('codecData', function(data) {
    // get recording duration
    let duration = data.duration;
    console.log(duration);
  })
  .output('nowhere') // or '/dev/null' or something else
  .run();
It remains a bit hacky, but we must set an output to avoid the "No output specified" error.
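A small refinement (my own suggestion, not part of the original answer): pick the platform's null device instead of a dummy name, so nothing is ever written to disk and the same code works on Windows and Unix-like systems:
// 'NUL' is the null device on Windows, '/dev/null' everywhere else
let nullDevice = process.platform === 'win32' ? 'NUL' : '/dev/null';

new ffmpeg(readStream)
  .addOption('-f', 'null') // discard the output, we only want the codecData event
  .on('codecData', function(data) {
    console.log(data.duration); // duration of the input, e.g. '00:03:25.43'
  })
  .output(nullDevice)
  .run();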

When no stream argument is present, the pipe() method returns a PassThrough stream, which you can pipe to somewhere else (or just listen to events on).
var command = ffmpeg('/path/to/file.avi')
  .videoCodec('libx264')
  .audioCodec('libmp3lame')
  .size('320x240')
  .on('error', function(err) {
    console.log('An error occurred: ' + err.message);
  })
  .on('end', function() {
    console.log('Processing finished !');
  });

var ffstream = command.pipe();
ffstream.on('data', function(chunk) {
  console.log('ffmpeg just wrote ' + chunk.length + ' bytes');
});

Related

How do I queue file for ffmpeg on Node.js?

I need to encode/convert many videos on my (Linux) server, and FFMPEG is resource intensive.
I am running Node.js and using fluent-ffmpeg:
const Ffmpeg = require("fluent-ffmpeg");

videos.forEach(video => {
  // assuming each entry in videos is a file name
  covertVideo(video, (compressionResult) => {
    // do something here
  });
});

function covertVideo(filename, callback) {
  var ffmpeg = new Ffmpeg(filename);
  ffmpeg.setFfmpegPath(pathToFfmpeg); // pathToFfmpeg: path to the ffmpeg binary
  ffmpeg.addOptions("-threads 1")
    .fps(23)
    .complexFilter([
      "scale=w='if(gt(a,0.75),240,trunc(320*a/2)*2)':h='if(lt(a,0.75),320,trunc(240/a/2)*2)',pad=w=240:h=320:x='if(gt(a,0.75),0,(240-iw)/2)':y='if(lt(a,0.75),0,(320-ih)/2)':color=black[scaled]"
    ])
    .on('error', function(err) {
      console.log('An error occurred: ' + err.message);
      return callback(false);
    })
    .on('end', function() {
      console.log('Compression finished !');
      return callback(true);
    })
    .save("./tmp/" + filename);
}
So, I am using a forEach loop to iterate through the files to be encoded, and at each iteration the function above is called.
The problem is that the forEach enters the second iteration and starts encoding while the first file is still being encoded by FFMPEG, which produces a "Resource temporarily unavailable" error.
Is there a way to queue files sent to FFMPEG so that the next file is encoded only when the previous one is done, not concurrently?
OR
Is there a way for the Node.js forEach to wait for ffmpeg to finish the first video before proceeding to the next iteration?
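One way to get that behaviour (a sketch, assuming covertVideo keeps its callback signature from the snippet above) is to wrap each conversion in a Promise and await it inside a for...of loop, so the next encode only starts once the previous one has finished:
async function convertAll(videos) {
  for (const video of videos) {
    // wait for the current encode to finish before starting the next one
    const ok = await new Promise(resolve => covertVideo(video, resolve));
    console.log(video, ok ? 'encoded' : 'failed');
  }
}

convertAll(videos);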

electron fluent-ffmpeg mergeToFile() command promise not triggering

I am trying to use fluent-ffmpeg with my electron app to concatenate multiple audio files together with an image into a video. So if I have three files:
song1.mp3 1:00
song2.mp3 0:30
song3.mp3 2:00
front.jpg
I could create output.mp4, which would be 3:30 long and play each file one after the other in order, with front.jpg set as the background image.
I am trying to create the concatenated audio file for this video first; then I can just render a video with two inputs: the image and the 3:30-long concatenated audio file. But I'm having difficulty getting my electron app to wait for the ffmpeg job to run and complete.
I know how to do all of these ffmpeg jobs on the command line, but I've been following this guide for how to package ffmpeg into an electron app that can run on mac/win10/linux environments. I'm developing it on win10 right now.
I have a button:
<button onClick='fullAlbum("upload-${uploadNumber}")'>FULLALBUM</button>
that, when clicked, runs the fullAlbum() function, which calls combineMp3FilesOrig to run the actual ffmpeg job:
async function fullAlbum(uploadName) {
  //document.getElementById("buttonId").disabled = true;
  // get table
  var table = $(`#upload_${uploadNumber}_table`).DataTable();
  // get all selected rows
  var selectedRows = table.rows('.selected').data();
  // get outputFile location
  var path = require('path');
  var outputDir = path.dirname(selectedRows[0].audioFilepath);
  // create output file
  var timestamp = new Date().getUTCMilliseconds();
  let outputFilepath = `${outputDir}/output-${timestamp}.mp3`;
  console.log('fullAlbum() button pressed: ', timestamp);
  await combineMp3FilesOrig(selectedRows, outputFilepath, '320k', timestamp);
  //document.getElementById("buttonId").disabled = false;
  console.log(`fullAlbum() /output-${timestamp}.mp3 should be created now`);
}
function combineMp3FilesOrig(selectedRows, outputFilepath, bitrate, timestamp) {
  console.log(`combineMp3FilesOrig(): ${outputFilepath}`);
  // begin get ffmpeg info
  const ffmpeg = require('fluent-ffmpeg');
  // Get the paths to the packaged versions of the binaries we want to use
  const ffmpegPath = require('ffmpeg-static').replace('app.asar', 'app.asar.unpacked');
  const ffprobePath = require('ffprobe-static').path.replace('app.asar', 'app.asar.unpacked');
  // tell the ffmpeg package where it can find the needed binaries.
  ffmpeg.setFfmpegPath(ffmpegPath);
  ffmpeg.setFfprobePath(ffprobePath);
  // end set ffmpeg info
  // create ffmpeg command
  console.log(`combineMp3FilesOrig(): create command`);
  const command = ffmpeg();
  // set command inputs
  command.input('C:\\Users\\marti\\Documents\\martinradio\\uploads\\CharlyBoyUTurn\\5. Akula (Club Mix).flac');
  command.input('C:\\Users\\marti\\Documents\\martinradio\\uploads\\CharlyBoyUTurn\\4. Civilian Barracks.flac');
  return new Promise((resolve, reject) => {
    console.log(`combineMp3FilesOrig(): command status logging`);
    command.on('progress', function(progress) {
      console.info(`Processing : ${progress.percent} % done`);
    })
    .on('codecData', function(data) {
      console.log('codecData=', data);
    })
    .on('end', function() {
      console.log('file has been converted succesfully; resolve() promise');
      resolve();
    })
    .on('error', function(err) {
      console.log('an error happened: ' + err.message, ', reject()');
      reject(err);
    });
    console.log(`combineMp3FilesOrig(): add audio bitrate to command`);
    command.audioBitrate(bitrate);
    console.log(`combineMp3FilesOrig(): tell command to merge inputs to single file`);
    command.mergeToFile(outputFilepath);
    console.log(`combineMp3FilesOrig(): end of promise`);
  });
  console.log(`combineMp3FilesOrig(): end of function`); // note: unreachable, this is after the return statement
}
When I click my button once, my console.logs show the promise is entered and the command is created, but the function just ends without waiting for a resolve().
Waiting a couple of minutes doesn't change anything.
If I press the button again:
A new command gets created and reaches the end of the promise, but this time it actually starts, and it triggers the previous command to start as well. Both jobs then run and their files are rendered at the correct length (12:08) and the correct quality (320k).
Is there something with my promise I need to fix involving async functions and promises in an electron app? I tried editing my ffmpeg command to include
command.run()
at the end of my promise to ensure it gets triggered, but that leads to an error in the console saying Uncaught (in promise) Error: No output specified, because apparently in fluent-ffmpeg command.mergeToFile(outputFilepath); isn't good enough and I need to include .output(outputFilepath) as well. If I change command.run() to command.output(outputFilepath).run(), then when I click my button the ffmpeg job gets triggered and rendered perfectly fine, EXCEPT THAT THE FILE IS ALWAYS 128kbps.
So I'm trying to figure out why, in my included code block, my ffmpeg command doesn't run the first time it's created.
I've played about with this and I'm seeing the same issue with your original code: the file is being output with a 128k bitrate.
This code seems to work, though the max bitrate I'm getting is 320k (I presume this is a limitation of the codec).
After testing again I think I'm getting the same behaviour as you are, in that the file takes some time to generate. I return a Promise from the combineMp3FilesOrig function, then await it in the click handler and disable the button until the call is complete; obviously your button id will be different.
function combineMp3FilesOrig(selectedRows, outputFile, bitrate) {
  const command = ffmpeg();
  var count = selectedRows.length;
  for (var i = 0; i < count; i++) {
    command.input(selectedRows[i].audioFilepath);
  }
  return new Promise((resolve, reject) => {
    command.on('progress', function(progress) {
      console.info(`Processing : ${progress.percent} % done`);
    })
    .on('codecData', function(data) {
      console.log('codecData=', data);
    })
    .on('end', function() {
      console.log('file has been converted succesfully');
      resolve();
    })
    .on('error', function(err) {
      console.log('an error happened: ' + err.message);
      reject(err);
    }).audioBitrate(bitrate).mergeToFile(outputFile);
  });
}

async function convertFiles() {
  document.getElementById("buttonId").disabled = true;
  await combineMp3FilesOrig(selectedRows, 'output.mp3', '320k');
  document.getElementById("buttonId").disabled = false;
}
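If the merged file still comes out at 128 kbps, one thing worth trying (an assumption on my part, not verified against fluent-ffmpeg internals) is passing the bitrate as a raw ffmpeg output option so that it is definitely applied to the merge command:
command
  .outputOptions(['-b:a ' + bitrate]) // raw ffmpeg audio-bitrate flag, e.g. '-b:a 320k'
  .mergeToFile(outputFile);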

Unknown method process.openStdin()

I'm trying to pipe grep results into a Node.js script. I've found that I should receive the data from process.stdin.
I've also found several ways to work with stdin, but they are different and I can't find complete information about them. I know four ways (the first 3 start with var data = ""):
1) Most popular in search results
process.stdin.resume();
process.stdin.setEncoding( 'utf8' );
process.stdin.on('data', function(chunk) { data += chunk; });
process.stdin.on('end', function() { console.log('data: ' + data); });
2) Looks like the first one, but uses the unknown function process.openStdin()
var stdin = process.openStdin();
stdin.on('data', function(chunk) { data += chunk; });
stdin.on('end', function() { console.log('data: ' + data); });
3) In the documentation I've read that calling stdin.resume() switches stdin to the 'old' mode. So if we don't call 'resume', we can use the 'readable' event:
process.stdin.setEncoding('utf8');
process.stdin.on('readable', function() { data += process.stdin.read(); });
process.stdin.on('end', function() { console.log('data: ' + data); });
4) Use the readline module. It is very useful, since the grep results span multiple lines and I don't need to split the received data myself. But for a long time I couldn't understand why all the information was piped directly to stdout. Then I found that we can pass an empty object instead of process.stdout when creating the interface, and the data won't be piped to the output.
var readline = require('readline'),
//rl = readline.createInterface(process.stdin, process.stdout);
rl = readline.createInterface(process.stdin, {});
rl.on('line', function(data) { console.log('line: ' + data); });
5) My own variant. Use another module, 'split': it allows you to read from a stream and divide the data into chunks by a specified separator (\r?\n by default). I used it to work with a socket, and since stdin is also a readable stream, we can use it here.
var split = require('split');
process.stdin.setEncoding('utf8');
process.stdin.pipe(split()).on('data', function(data) { console.log('line: ' + data); });
My question is "What is process.openStdin();????"
I've searched every page in google, but didn't found any documentation on this function!
Also while searching I've discovered, that official documentation for nodejs is ugly - not mentioned since which version methods are available, no detailed description on many objects/methods, no user comments. And this method (openStdin) - exists and works, but nowhere discribed! WTF???
While writing the question I've found the answer :)
It is in the source code of Node.js:
process.openStdin = function() {
  process.stdin.resume();
  return process.stdin;
};
But I wonder why it is not described in the documentation. If it is a function for private use only, why is it used by so many people who write about working with stdin?

How do you use Node.js to stream an MP4 file with ffmpeg?

I've been trying to solve this problem for several days now and would really appreciate any help on the subject.
I'm able to successfully stream an mp4 audio file stored on a Node.js server using fluent-ffmpeg by passing the location of the file as a string and transcoding it to mp3. If I create a file stream from the same file and pass that to fluent-ffmpeg instead, it works for an mp3 input file, but not an mp4 file. In the case of the mp4 file no error is thrown and it claims the stream completed successfully, but nothing plays in the browser. I'm guessing this has to do with the metadata being stored at the end of an mp4 file, but I don't know how to code around this. This is the exact same file that works correctly when its location is passed to ffmpeg rather than the stream.
When I try to pass a stream to the mp4 file on s3, again no error is thrown, but nothing streams to the browser. This isn't surprising, as ffmpeg won't work with the file locally as a stream, so expecting it to handle the stream from s3 is wishful thinking.
How can I stream the mp4 file from s3 without storing it locally as a file first? How do I get ffmpeg to do this without transcoding the file too? The following is the code I have at the moment, which isn't working. Note that it attempts to pass the s3 file as a stream to ffmpeg, and it's also transcoding it into an mp3, which I'd prefer not to do.
.get(function(req, res) {
  aws.s3(s3Bucket).getFile(s3Path, function(err, result) {
    if (err) {
      return next(err);
    }
    var proc = new ffmpeg(result)
      .withAudioCodec('libmp3lame')
      .format('mp3')
      .on('error', function(err, stdout, stderr) {
        console.log('an error happened: ' + err.message);
        console.log('ffmpeg stdout: ' + stdout);
        console.log('ffmpeg stderr: ' + stderr);
      })
      .on('end', function() {
        console.log('Processing finished !');
      })
      .on('progress', function(progress) {
        console.log('Processing: ' + progress.percent + '% done');
      })
      .pipe(res, {end: true});
  });
});
This is using the knox library when it calls aws.s3... I've also tried writing it using the standard aws sdk for Node.js, as shown below, but I get the same outcome as above.
var AWS = require('aws-sdk');

var s3 = new AWS.S3({
  accessKeyId: process.env.AWS_ACCESS_KEY_ID,
  secretAccessKey: process.env.AWS_SECRET_KEY,
  region: process.env.AWS_REGION_ID
});

var fileStream = s3.getObject({
  Bucket: s3Bucket,
  Key: s3Key
}).createReadStream();

var proc = new ffmpeg(fileStream)
  .withAudioCodec('libmp3lame')
  .format('mp3')
  .on('error', function(err, stdout, stderr) {
    console.log('an error happened: ' + err.message);
    console.log('ffmpeg stdout: ' + stdout);
    console.log('ffmpeg stderr: ' + stderr);
  })
  .on('end', function() {
    console.log('Processing finished !');
  })
  .on('progress', function(progress) {
    console.log('Processing: ' + progress.percent + '% done');
  })
  .pipe(res, {end: true});
=====================================
Updated
I placed an mp3 file in the same s3 bucket and the code I have here worked and was able to stream the file through to the browser without storing a local copy. So the streaming issues I face have something to do with the mp4/aac container/encoder format.
I'm still interested in a way to bring the m4a file down from s3 to the Node.js server in its entirety, then pass it to ffmpeg for streaming without actually storing the file in the local file system.
=====================================
Updated Again
I've managed to get the server streaming the file, as mp4, straight to the browser. This half answers my original question. My only issue now is that I have to download the file to a local store first, before I can stream it. I'd still like to find a way to stream from s3 without needing the temporary file.
aws.s3(s3Bucket).getFile(s3Path, function(err, result) {
  result.pipe(fs.createWriteStream(file_location));
  result.on('end', function() {
    console.log('File Downloaded!');
    var proc = new ffmpeg(file_location)
      .outputOptions(['-movflags isml+frag_keyframe'])
      .toFormat('mp4')
      .withAudioCodec('copy')
      .seekInput(offset)
      .on('error', function(err, stdout, stderr) {
        console.log('an error happened: ' + err.message);
        console.log('ffmpeg stdout: ' + stdout);
        console.log('ffmpeg stderr: ' + stderr);
      })
      .on('end', function() {
        console.log('Processing finished !');
      })
      .on('progress', function(progress) {
        console.log('Processing: ' + progress.percent + '% done');
      })
      .pipe(res, {end: true});
  });
});
On the receiving side I just have the following javascript in an empty html page:
window.AudioContext = window.AudioContext || window.webkitAudioContext;
context = new AudioContext();

function process(Data) {
  source = context.createBufferSource(); // Create Sound Source
  context.decodeAudioData(Data, function(buffer) {
    source.buffer = buffer;
    source.connect(context.destination);
    source.start(context.currentTime);
  });
};

function loadSound() {
  var request = new XMLHttpRequest();
  request.open("GET", "/stream/<audio_identifier>", true);
  request.responseType = "arraybuffer";
  request.onload = function() {
    var Data = request.response;
    process(Data);
  };
  request.send();
};

loadSound()
=====================================
The Answer
The code above under the title 'Updated again' will stream an mp4 file, from s3, via a Node.js server to a browser without using Flash. It does require that the file be stored temporarily on the Node.js server so that the metadata in the file is moved from the end of the file to the front.
In order to stream without storing the temporary file, you need to actually modify the file on S3 first and make this metadata change. If you have changed the file in this way on S3, then you can modify the code under the title 'Updated again' so that the result from S3 is piped straight into the ffmpeg constructor, rather than into a file stream on the Node.js server whose location is then handed to ffmpeg, as the code does now.
You can change the final 'pipe' command to 'save(location)' to get a version of the mp4 file locally with the metadata moved to the front. You can then upload that new version of the file to S3 and try out the end-to-end streaming.
Personally I'm now going to create a task that modifies the files in this way as they are uploaded to s3 in the first place. This allows me to record and stream in mp4 without transcoding or storing a temp file on the Node.js server.
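For the "modify the file so the metadata sits at the front" step, here is a minimal sketch (assuming local input/output paths; the real flow would download from and re-upload to S3) that remuxes with ffmpeg's faststart flag, which relocates the moov atom without re-encoding:
var ffmpeg = require('fluent-ffmpeg');

ffmpeg('input.mp4')
  .outputOptions(['-movflags faststart']) // move the moov atom to the start of the file
  .audioCodec('copy') // remux only, no transcoding
  .videoCodec('copy')
  .on('end', function() { console.log('moov atom moved to the front'); })
  .on('error', function(err) { console.log('an error happened: ' + err.message); })
  .save('input-faststart.mp4');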
One of the main issues here is that you cannot seek on a piped stream, so you would have to store the file first. However, if you just want to stream from the beginning, you can use a slightly different construction and pipe. Here is an example of the most straightforward way to do it.
// can just create an in-memory read stream without saving
var stream = aws.s3(s3Bucket).getObject(s3Path).createReadStream();

// fluent-ffmpeg supports a readstream as an arg in constructor
var proc = new ffmpeg(stream)
  .outputOptions(['-movflags isml+frag_keyframe'])
  .toFormat('mp4')
  .withAudioCodec('copy')
  //.seekInput(offset) this is a problem with piping
  .on('error', function(err, stdout, stderr) {
    console.log('an error happened: ' + err.message);
    console.log('ffmpeg stdout: ' + stdout);
    console.log('ffmpeg stderr: ' + stderr);
  })
  .on('end', function() {
    console.log('Processing finished !');
  })
  .on('progress', function(progress) {
    console.log('Processing: ' + progress.percent + '% done');
  })
  .pipe(res, {end: true});
Quoting the question: "How can I stream the mp4 file from s3, without storing it locally as a file first? How do I get ffmpeg to do this without transcoding the file too?"
AFAIK, if the moov atom is in the right place in the media file, then for an S3-hosted mp4 nothing special is required for streaming, because you can rely on HTTP for that. If the client requests "chunked" encoding it will get just that: a chunked stream terminated by the "END-OF" marker shown below.
0\r\n
\r\n
By including the chunked header, the client is saying "I want a stream". Under the covers, S3 is just nginx or apache, isn't it? They both honor the headers.
Test it with the curl CLI as your client...
> User-Agent: curl/7.28.1-DEV
> Host: S3.domain
> Accept: */*
> Transfer-Encoding: chunked
> Content-Type: video/mp4
> Expect: 100-continue
You may want to try adding the codecs to the "Content-Type:" header. I don't know, but I don't think it would be required for this type of streaming (the moov atom resolves that stuff).
I had an issue buffering file streams from the S3 file object. The s3 filestream does not have the correct headers set, and it seems it does not implement piping correctly.
I think a better solution is to use the Node.js module called s3-streams. It sets the correct headers and buffers the output so that the stream can be correctly piped to the output response socket. It saves you from saving the filestream locally before restreaming it.
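For what it's worth, if you'd rather not add a dependency, a rough sketch of the same idea with the plain AWS SDK (reusing the s3, s3Bucket and s3Key names from earlier; treat this as an assumption, not a drop-in replacement for s3-streams) is to look up the object's metadata first, set the headers yourself, and only then pipe:
s3.headObject({ Bucket: s3Bucket, Key: s3Key }, function(err, meta) {
  if (err) { res.statusCode = 404; return res.end(); }
  // set the headers the raw S3 read stream doesn't provide
  res.setHeader('Content-Type', meta.ContentType || 'video/mp4');
  res.setHeader('Content-Length', meta.ContentLength);
  s3.getObject({ Bucket: s3Bucket, Key: s3Key }).createReadStream().pipe(res);
});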

Parsing the STDERR output of node.js child_process line by line

I'm writing a simple online conversion tool using FFMPEG and Node.js. I'm trying to figure out how to parse each line of the conversion output received from FFMPEG and only display pertinent results client side in the browser. In my case I want the encoding time counter that FFMPEG spits out on the command line.
My function thus far is:
function metric(ffmpeg, res) {
  ffmpeg.stdout.on('data', function(data) {
    res.writeHead(200, {'content-type': 'text/html'});
    res.write('received upload:\n\n');
    console.log(data);
  });

  var temp = ''; // accumulates everything received from stderr so far
  ffmpeg.stderr.on('data', function(data) {
    temp += data.toString();
    var lines = temp.split('\n');
    // for debugging purposes
    for (var i = 0; i < lines.length; i++) {
      console.log('this is line: ' + i + '----' + lines[i]);
    }
    res.write(lines.join('\n'));
  });

  ffmpeg.on('exit', function(code) {
    console.log('child process exited with code ' + code);
    res.end();
  });
}
What this ends up returning is multiple arrays, each of which includes the data from the previous array as well as the next data chunk. For example, the function returns array 1:{0=>A, 1=>B}, array 2:{0=>A, 1=>B, 2=>C}, array 3:{0=>A, 1=>B, 2=>C, 3=>D}, and so on.
I'm quite new to Node so I'm probably missing something simple. Any guidance would be much appreciated!
This should do the job:
var buff = new Buffer(data);
console.log(buff.toString('utf8'));
For more information on buffers, here is a link to the doc: http://nodejs.org/docs/v0.4.2/api/buffers.html
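To get true line-by-line output from stderr, the usual trick (a sketch, independent of any particular ffmpeg invocation) is to keep only the unterminated remainder between 'data' events instead of accumulating everything, and to split on carriage returns as well, since ffmpeg updates its progress counter with \r:
var remainder = '';
ffmpeg.stderr.on('data', function(data) {
  var text = remainder + data.toString('utf8');
  var lines = text.split(/\r\n|\r|\n/);
  remainder = lines.pop(); // the last piece may be a partial line; keep it for the next chunk
  lines.forEach(function(line) {
    console.log('stderr line: ' + line);
  });
});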
