I am creating a server-side video renderer in Node.js. I need to overlay images onto an existing video file, frame by frame, and to set a specific position for each image in the rendered video. I am holding all the image frames in a Readable stream. This is my code; it works, but it does not take the positions of the images into account. How can I modify it? Of course I have a list with the image coordinates - but I don't know how to turn it into a filter.
this.stream is a list of images.
const filter = [
  "[1:v]format=argb,setpts=PTS+" + 0 + "/TB[out]",
  {
    filter: "overlay",
    options: {
      enable: "between(t," + 0 + "," + 9 + ")",
      x: "0",
      y: "0",
    },
    inputs: "[0:v][out]",
    outputs: "tmp",
  },
];
const Options = [
"-crf 16",
"-f mp4",
"-vcodec libx264",
"-movflags frag_keyframe+empty_moov",
"-pix_fmt yuv420p",
];
var ffmpeg = require('fluent-ffmpeg');
ffmpeg.setFfmpegPath(ffm.path);
var command = ffmpeg();
const outputStream = fs.createWriteStream(this.Path + "test.mp4");
command.input(this.Path + "input.mp4");
command.input(this.stream).inputFPS(23);
command.outputOptions(Options);
command.fps(23);
command.complexFilter(filter, "tmp");
command.output(outputStream);
command.run();
Instead of using const filter, I suggest using the built-in fluent-ffmpeg functions. This code will add an image positioned at (50, 50):
video_path and image_path are the file paths to the video and the image.
overlay=50:50 will place your image at position x=50, y=50 from the top-left corner.
Then you save the file and handle errors:
const command = ffmpeg(video_path)
.input(image_path)
.complexFilter([
`overlay=50:50`,
])
.saveToFile("./public/vid.mp4")
.on("error", (err) => {
console.log(err);
})
.on("end", () => {
console.log("File saved.");
});
For more info, see this GitHub issue: https://github.com/fluent-ffmpeg/node-fluent-ffmpeg/issues/770
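To also answer the positioning part of the question, you can build the complexFilter array from your coordinate list and chain one overlay per image. This is a minimal sketch, not tested against your exact setup; imageList, with its path, x, y, start and end fields, is a hypothetical structure standing in for your list of coordinates:
const ffmpeg = require("fluent-ffmpeg");

// Hypothetical list of images with positions and display intervals (in seconds).
const imageList = [
  { path: "img0.png", x: 50, y: 50, start: 0, end: 3 },
  { path: "img1.png", x: 200, y: 120, start: 3, end: 6 },
];

const command = ffmpeg("input.mp4");
imageList.forEach((img) => command.input(img.path));

// Chain one overlay filter per image; each overlay reads the previous result.
const filters = imageList.map((img, i) => ({
  filter: "overlay",
  options: {
    x: img.x,
    y: img.y,
    enable: "between(t," + img.start + "," + img.end + ")",
  },
  // Input 0 is the video; inputs 1..n are the images.
  inputs: [i === 0 ? "0:v" : "tmp" + (i - 1), (i + 1) + ":v"],
  outputs: "tmp" + i,
}));

command
  .complexFilter(filters, "tmp" + (imageList.length - 1))
  .saveToFile("output.mp4");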
I am using sharp to resize a bulk of images, resizing them to 500px while preserving their aspect ratio. I want to resize the height to 500px and auto-resize the width if the height is greater than the width, and vice versa. To do that I need to get the image width and height from the image buffer. I know there are quite a number of packages available to do so, but I was hoping I could do that using the sharp buffer itself.
Yes, you can get the width and height of an image with sharp by using the metadata() function:
const image = await sharp(file.buffer)
const metadata = await image.metadata()
console.log(metadata.width, metadata.height)
You can get a lot more information from metadata; the documentation is here: https://sharp.pixelplumbing.com/api-input#metadata
To get the dimensions that are recorded in the header of the input image:
const image = await sharp(file.buffer);
const metadata = await image.metadata();
console.log(metadata.width, metadata.height);
However, operations like image.resize(...) will not affect the .metadata(). To get the dimensions after performing operations on the image, use .toBuffer({ resolveWithObject: true }):
const image = await sharp(file.buffer);
const resizedImage = image.resize(640);
const { info } = await resizedImage.png().toBuffer({ resolveWithObject: true });
console.log(info.width, info.height);
Sharp is very flexible and has a number of options for resizing images. Using the option fit: "contain" should accomplish what you wish.
Other fit options are available, of course, documented here: https://sharp.pixelplumbing.com/api-resize#resize
You can also specify the background color used to fill the remaining space in the resized image; I'm using white here.
The code will look something like this:
const fs = require("fs");
const path = require("path");
const sharp = require("sharp");
const inputDir = "./input-images";
const outputDir = "./output-images";
const requiredDimension = 500;
const inputImages = fs.readdirSync(inputDir).map(file => path.join(inputDir, file));
function resizeImage(imagePath) {
  const ext = path.extname(imagePath);
  const outputPath = path.join(outputDir, path.basename(imagePath, ext) + "-resized" + ext);
  sharp(imagePath)
    .resize({ width: requiredDimension, height: requiredDimension, fit: "contain", background: { r: 255, g: 255, b: 255, alpha: 1 } })
    .toFile(outputPath, (err, info) => {
      if (err) {
        console.error("An error occurred resizing image:", err);
      }
    });
}
// Ensure output dir exists...
if (!fs.existsSync(outputDir)) {
fs.mkdirSync(outputDir)
}
inputImages.forEach(resizeImage);
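If you want the exact behaviour described in the question (constrain whichever dimension is larger and let the other follow automatically), you can combine metadata() with resize(). A minimal sketch, assuming the input is available as a buffer as in the answers above:
const sharp = require("sharp");

async function resizeTo500(buffer) {
  const { width, height } = await sharp(buffer).metadata();

  // Constrain the larger dimension to 500px; sharp preserves the aspect
  // ratio when only one of width/height is given.
  const options = height > width ? { height: 500 } : { width: 500 };

  return sharp(buffer).resize(options).toBuffer();
}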
I tried to capture and save an image of the screen with robotjs (http://robotjs.io/), but when I open the bitmap file, the image is not in a valid format. This is my code:
var robot = require("robotjs");
var fs = require("fs");
var size = 10;
var img = robot.screen.capture(0, 0, size, size);
fs.writeFileSync('img.bmp',img.image);
Jimp supports converting a raw pixel buffer into a PNG out of the box and works a lot faster:
let robot = require("robotjs");
let Jimp = require('jimp');

// Capture the full screen; robot.getScreenSize() supplies width and height.
const { width, height } = robot.getScreenSize();
const img = robot.screen.capture(0, 0, width, height).image;
new Jimp({ data: img, width, height }, (err, image) => {
  if (err) throw err;
  image.write(fileName); // fileName is the output path, e.g. "screenshot.png"
});
The image will be saved with the wrong colors. To fix it, you can use the following code:
function screenCaptureToFile2(robotScreenPic, path) {
  return new Promise((resolve, reject) => {
    try {
      const image = new Jimp(robotScreenPic.width, robotScreenPic.height);
      let pos = 0;
      // robotjs returns pixels in BGRA order, while Jimp stores RGBA,
      // so copy each channel into its proper slot (swapping red and blue).
      image.scan(0, 0, image.bitmap.width, image.bitmap.height, (x, y, idx) => {
        image.bitmap.data[idx + 2] = robotScreenPic.image.readUInt8(pos++); // blue
        image.bitmap.data[idx + 1] = robotScreenPic.image.readUInt8(pos++); // green
        image.bitmap.data[idx + 0] = robotScreenPic.image.readUInt8(pos++); // red
        image.bitmap.data[idx + 3] = robotScreenPic.image.readUInt8(pos++); // alpha
      });
      image.write(path, resolve);
    } catch (e) {
      console.error(e);
      reject(e);
    }
  });
}
var pic = robot.screen.capture();
screenCaptureToFile2(pic, "screenshot.png");
Note that your img.image buffer from robotjs is a raw buffer of pixels, not a BMP, PNG, or any other image format.
You need to do some data conversion and save the result using a library that supports writing image files (I do not see that capability in robotjs).
Please look at the answer above, which also uses robot.screen.capture and saves the result to a PNG file using the Jimp library. That code answers your question too.
I am working on a Meteor app where I am using CollectionFS to upload files.
I am able to upload files and generate thumbnails for images.
But my issue is: how should I create thumbnails for videos?
I can see that it is possible via the command line: https://superuser.com/questions/599348/can-imagemagick-make-thumbnails-from-video
But how can I apply this in my Meteor code?
Here is what I am doing:
VideoFileCollection = new FS.Collection("VideoFileCollection", {
stores: [
new FS.Store.FileSystem("videos", {path: "/uploads/videos"}),
new FS.Store.FileSystem("videosthumbs", {path: "/uploads/videosthumbs",
beforeWrite: function(fileObj) {
// We return an object, which will change the
// filename extension and type for this store only.
return {
extension: 'png',
type: 'image/png'
};
},
transformWrite: function(fileObj, readStream, writeStream) {
gm(readStream, fileObj.name()).stream('PNG').pipe(writeStream);
}
})
]
});
What is happening here is that the video gets uploaded to the "videos" folder and a PNG is created under "videosthumbs" with 0 bytes, so the thumbnail is not actually generated.
I have also read at https://github.com/aheckmann/gm#custom-arguments
that we can use gm().command() - a custom command such as identify or convert.
Can anybody advise me on what can be done to handle this situation?
Thanks and regards.
I checked the link you added, and here is a rough solution that might help you:
ffmpeg -ss 600 -i input.mp4 -vframes 1 -s 420x270 -filter:v 'yadif' output.png
Here is a snippet I have made. Note that it spawns ffmpeg via child_process; the imagemagick module's convert() only runs ImageMagick, so it cannot execute ffmpeg arguments.
var execFile = require('child_process').execFile;
var args = [
  "-ss", "600", "-i", "input.mp4", "-vframes", "1", "-s", "420x270", "-filter:v", "yadif", "output.png"
];
// Run ffmpeg with the arguments above to grab one deinterlaced frame as a PNG.
execFile("ffmpeg", args, function(err) {
  if (err) throw err;
});
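Since the question uses a CollectionFS transformWrite hook with streams, another option is to let fluent-ffmpeg read the incoming video stream and pipe a single PNG frame into the thumbnail store. This is a rough sketch, not tested with CollectionFS; videoThumbTransformWrite is just an illustrative name for a drop-in replacement of the transformWrite function in the question:
var ffmpeg = require('fluent-ffmpeg');

function videoThumbTransformWrite(fileObj, readStream, writeStream) {
  ffmpeg(readStream)
    .frames(1)              // keep only a single frame
    .videoCodec('png')      // encode that frame as PNG
    .format('image2pipe')   // stream the image data instead of writing a file
    .on('error', function (err) {
      console.error('Thumbnail generation failed:', err);
    })
    .pipe(writeStream);
}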
I am trying to create a video thumbnail with fluent-ffmpeg. Here is my code:
var ffmpeg = require('fluent-ffmpeg');
exports.thumbnail = function(){
var proc = new ffmpeg({ source: 'Video/express2.mp4',nolog: true })
.withSize('150x100')
.takeScreenshots({ count: 1, timemarks: [ '00:00:02.000' ] }, 'Video/', function(err, filenames) {
console.log(filenames);
console.log('screenshots were saved');
});
}
but I keep getting this error:
"meta data contains no duration, aborting screenshot creation"
Any idea why?
By the way, I am on Windows. I put the ffmpeg folder in C:/ffmpeg and added ffmpeg/bin to my environment variables. I don't know whether fluent-ffmpeg needs to know the path to ffmpeg, but I can successfully create a thumbnail with the code below:
exec("C:/ffmpeg/bin/ffmpeg -i Video/" + Name + " -ss 00:01:00.00 -r 1 -an -vframes 1 -s 300x200 -f mjpeg Video/" + Name + ".jpg")
Please help me!
I think the issue can be caused by the .withSize('...') method call.
The doc says:
It doesn't interract well with filters. In particular, don't use the size() method to resize thumbnails, use the size option instead.
And the size() method is an alias of withSize().
Also - though this is not the problem in your case - you don't need to set both count and timemarks at the same time. The doc says:
count is ignored when timemarks or timestamps is specified.
Then you could probably solve it with:
const ffmpeg = require('fluent-ffmpeg');
exports.thumbnail = function(){
const proc = new ffmpeg({ source: 'Video/express2.mp4',nolog: true })
.takeScreenshots({ timemarks: [ '00:00:02.000' ], size: '150x100' }, 'Video/', function(err, filenames) {
console.log(filenames);
console.log('screenshots were saved');
});
}
Have a look at the doc:
https://github.com/fluent-ffmpeg/node-fluent-ffmpeg#screenshotsoptions-dirname-generate-thumbnails
FFmpeg needs to know the duration of a video file. While most videos have this information in the file header, some files don't, mostly raw videos like a raw H.264 stream.
A simple solution could be to remux the video prior to taking the snapshot; the FFmpeg command for this task is quite simple:
ffmpeg -i input.m4v -acodec copy -vcodec copy output.m4v
This command tells FFmpeg to read the "input.m4v" file, to copy the audio and video streams as they are (no encoding at all), and to write the data into the file output.m4v.
FFmpeg automatically adds all extra metadata/header information needed to take the snapshot later.
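If you would rather stay inside fluent-ffmpeg, a rough sketch of the same remux-then-screenshot idea could look like the following; the file names are just placeholders:
const ffmpeg = require('fluent-ffmpeg');

// Step 1: remux without re-encoding so the output gets proper duration headers.
ffmpeg('Video/express2.mp4')
  .audioCodec('copy')
  .videoCodec('copy')
  .on('error', (err) => console.error(err))
  .on('end', () => {
    // Step 2: take the screenshot from the remuxed file.
    ffmpeg('Video/express2-remuxed.mp4')
      .screenshots({ timemarks: ['00:00:02.000'], size: '150x100', folder: 'Video/' });
  })
  .save('Video/express2-remuxed.mp4');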
Try this code to create thumbnails from a video:
// You have to install the packages below first
var ffmpegPath = require('@ffmpeg-installer/ffmpeg').path;
var ffprobePath = require('@ffprobe-installer/ffprobe').path;
var ffmpeg = require('fluent-ffmpeg');
ffmpeg.setFfmpegPath(ffmpegPath);
ffmpeg.setFfprobePath(ffprobePath);
var proc = ffmpeg(sourceFilePath)
.on('filenames', function(filenames) {
console.log('screenshots are ' + filenames.join(', '));
})
.on('end', function() {
console.log('screenshots were saved');
})
.on('error', function(err) {
console.log('an error happened: ' + err.message);
})
// take 1 screenshot at the predefined timemark and size
.takeScreenshots({ count: 1, timemarks: [ '00:00:01.000' ], size: '200x200' }, "Video/");
I am building an app with node.js. I have successfully uploaded the video, but I need to generate a video thumbnail for it. Currently I use node's exec to run an ffmpeg system command to make the thumbnail:
exec("C:/ffmpeg/bin/ffmpeg -i Video/" + Name + " -ss 00:01:00.00 -r 1 -an -vframes 1 -f mjpeg Video/" + Name + ".jpg")
This code comes from a tutorial at http://net.tutsplus.com/tutorials/javascript-ajax/how-to-create-a-resumable-video-uploade-in-node-js/
The code above did generate a jpg file, but it is a full-size video screenshot rather than a thumbnail. Is there another method to generate a video thumbnail, or a way to run the ffmpeg command so it produces a real (resized) thumbnail? I would also prefer a png file.
See the fluent-ffmpeg project on GitHub.
Repeating the example from the original Stack Overflow answer:
var proc = new ffmpeg('/path/to/your_movie.avi')
.takeScreenshots({
count: 1,
timemarks: [ '600' ] // number of seconds
}, '/path/to/thumbnail/folder', function(err) {
console.log('screenshots were saved')
});
Resize by adding a -s widthxheight option to your command.
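For example, applied to the exec call from the question (Name comes from the question's snippet; the timestamp and size are just illustrative values), it could look like this:
const { exec } = require("child_process");

// -ss picks the frame, -vframes 1 keeps a single frame, -s resizes it,
// and the .png extension makes ffmpeg write a PNG instead of a JPEG.
exec("ffmpeg -i Video/" + Name + " -ss 00:00:01.00 -vframes 1 -s 150x100 Video/" + Name + ".png");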
There is a node module for this:
video-thumb
It basically just wraps a call to exec ffmpeg.
I recommend using https://www.npmjs.com/package/fluent-ffmpeg to call ffmpeg from Node.js.
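A short sketch of how that could look for this thumbnail use case (Name, the timemark and the size are placeholders taken from the question):
const ffmpeg = require("fluent-ffmpeg");

ffmpeg("Video/" + Name)
  .on("end", () => console.log("Thumbnail created"))
  .on("error", (err) => console.error(err))
  .screenshots({
    timemarks: ["00:00:01.000"], // where to grab the frame
    size: "150x100",             // thumbnail size
    filename: Name + ".png",     // write a PNG
    folder: "Video/",
  });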
Using media-thumbnail, you can easily generate thumbnails from your videos. The module basically wraps the ffmpeg thumbnail functionality.
const mt = require('media-thumbnail')
mt.forVideo(
'./path/to/video.mp4',
'./path/to/thumbnail.png', {
width: 200
})
.then(() => console.log('Success'), err => console.error(err))
You can also create thumbnails from your images using this package.
Instead, I would recommend using thumbsupply. In addition to providing you with thumbnails, it caches them to improve performance significantly.
npm install --save thumbsupply
After installing the module, you can use it in the following way:
const thumbsupply = require('thumbsupply')("com.example.application");
thumbsupply.generateThumbnail('some-video.mp4')
.then(thumb => {
// serve thumbnail
})
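For instance, a sketch of wiring that into an Express route; the route path, the videos directory and the app setup are assumptions, not part of the thumbsupply API:
const express = require("express");
const path = require("path");
const thumbsupply = require("thumbsupply")("com.example.application");

const app = express();

// Generate (or fetch the cached) thumbnail and send it back to the client.
app.get("/thumbnail/:video", (req, res) => {
  thumbsupply.generateThumbnail("videos/" + req.params.video)
    .then(thumb => res.sendFile(path.resolve(thumb)))
    .catch(err => res.status(500).send(err.message));
});

app.listen(3000);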
This Express route generates a PNG thumbnail from an uploaded PDF using gm:
app.post('/convert', upload.any(), (req, res) => {
  console.log("calling", req.files);
  let thumbNailName = req.files[0].filename.split('.');
  var gm = require('gm');
  gm('./src/Upload/' + req.files[0].filename) // get the uploaded PDF from the storage folder
    .thumb(
      50, // Width
      50, // Height
      './src/thumbnail/' + thumbNailName[0] + '.png', // Output file name
      80, // Quality from 0 to 100
      function (error, stdout, stderr, command) {
        if (!error) {
          console.log("processing");
          res.send("Thumbnail created");
        } else {
          console.log("error");
          res.status(500).send("Thumbnail generation failed");
        }
      }
    );
});