HTML5 video source with base64

I am trying to play an HTML5 video encoded with base64, but it's not working. It works without encoding. What is the problem?
var s = 'http://clips.vorwaerts-gmbh.de/VfE_html5.mp4';
var video = document.createElement('video');
document.body.appendChild(video);
video.src = "data:video/mp4;base64," + btoa(s); // not working: btoa encodes the URL text, not the video bytes
//video.src = s; // works
video.autoplay = true;
video.controls = true;

The problem is that btoa(s) base64-encodes the URL string itself, not the video data, so the browser tries to decode the URL text as MP4 bytes. Download the file first, then encode its contents:
var s = 'http://clips.vorwaerts-gmbh.de/VfE_html5.mp4';
fetch(s).then((res) => res.blob()).then((blob) => {
    const fileReader = new FileReader();
    fileReader.readAsDataURL(blob);
    fileReader.onload = () => {
        const videoEl = document.createElement('video');
        videoEl.src = fileReader.result;
        videoEl.controls = true;
        document.body.appendChild(videoEl);
    };
});
You can do this, but it's not recommended: a data URL keeps the whole file in memory as a base64 string, roughly a third larger than the raw bytes. You should use an object URL instead of a data URL.

function displayVideo(blob) {
    var video = document.getElementById("video");
    video.src = window.URL.createObjectURL(blob);
}
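For completeness, a minimal end-to-end sketch of the object-URL approach, reusing the test URL from the question:

var s = 'http://clips.vorwaerts-gmbh.de/VfE_html5.mp4';
fetch(s)
    .then((res) => res.blob())
    .then((blob) => {
        var video = document.createElement('video');
        video.src = URL.createObjectURL(blob); // no base64 detour, no size overhead
        video.controls = true;
        video.autoplay = true;
        document.body.appendChild(video);
        // release the object URL once playback is done
        video.onended = () => URL.revokeObjectURL(video.src);
    });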

If you do need a data URL, the FileReader can be wrapped in a promise:
function readAsDataURL(file) {
    const reader = new FileReader();
    return new Promise((resolve) => {
        reader.onload = (data) => {
            resolve(data.target.result);
        };
        reader.readAsDataURL(file);
    });
}
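A hypothetical usage, assuming a video element already on the page and a blob fetched as above:

fetch(s)
    .then((res) => res.blob())
    .then(readAsDataURL)
    .then((dataUrl) => { document.querySelector('video').src = dataUrl; });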

Related

How to get a lossless DICOM image from Google Cloud?

This is my code to get a DICOM image from Google Cloud. It works, but the returned image is lossy.
router.get("/dicomWebRetrieveInstance/dcmfile", async (req, res, next) => {
    const writeFile = util.promisify(fs.writeFile);
    const fileName = 'rendered_image.jpeg';
    const cloudRegion = "us";
    const projectId = "neurocaredicom";
    const datasetId = "System_1";
    const dicomStoreId = "Site_1A";
    const studyUid = "1.2.276.0.7230010.3.1.2.296485376.1.1521713579.1849134";
    const seriesUid = "1.2.276.0.7230010.3.1.3.296485376.1.1521713580.1849651";
    const instanceUid = "1.2.276.0.7230010.3.1.4.296485376.1.1521713580.1849652";
    const parent = `projects/${projectId}/locations/${cloudRegion}/datasets/${datasetId}/dicomStores/${dicomStoreId}`;
    const dicomWebPath = `studies/${studyUid}/series/${seriesUid}/instances/${instanceUid}/rendered`;
    const request = {parent, dicomWebPath};
    const rendered =
        await healthcare.projects.locations.datasets.dicomStores.studies.series.instances.retrieveRendered(
            request,
            {
                headers: {Accept: "application/octet-stream; transfer-syntax=*"},
                responseType: 'arraybuffer',
            }
        );
    const fileBytes = Buffer.from(rendered.data);
    await writeFile(fileName, fileBytes);
    var options = {
        root: path.join(__dirname),
    };
    // read binary data (bitmap is unused below; sendFile streams the file itself)
    var bitmap = fs.readFileSync(fileName);
    res.status(200).sendFile(fileName, options, function (err) {
        if (err) {
            next(err);
        } else {
            console.log(
                `Retrieved rendered image and saved to ${fileName} in current directory`
            );
        }
    });
});
Any solution to this problem would be appreciated.
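For what it's worth, the /rendered DICOMweb endpoint returns a consumer-format image (JPEG by default), which is lossy by design. To keep the original pixel data, the instance itself can be retrieved instead; a minimal sketch, assuming the same googleapis healthcare client and constants as above:

const instancePath = `studies/${studyUid}/series/${seriesUid}/instances/${instanceUid}`;
const instance =
    await healthcare.projects.locations.datasets.dicomStores.studies.series.instances.retrieveInstance(
        {parent, dicomWebPath: instancePath},
        {
            // ask for the original DICOM object in whatever transfer syntax it is stored in
            headers: {Accept: 'application/dicom; transfer-syntax=*'},
            responseType: 'arraybuffer',
        }
    );
await writeFile('instance.dcm', Buffer.from(instance.data));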

How can I make the chatbot (Bot Framework) send an attached file from any folder to the user (Node.js)?

How can I make the chatbot send an attached file from any folder to the user?
I have the code below, but it doesn't work; it shows nothing.
Can you help me, please?
const { TextPrompt, AttachmentPrompt } = require('botbuilder-dialogs');

constructor(luisRecognizer, bookingDialog) {
    super('MainDialog');
    this.addDialog(new TextPrompt('TextPrompt'))
        .addDialog(new AttachmentPrompt('AttachmentPrompt'))
        .addDialog(bookingDialog)
        .addDialog(new WaterfallDialog(MAIN_WATERFALL_DIALOG, [
            this.introStep.bind(this),
            this.sendAttachmentStep.bind(this),
            this.finalStep.bind(this)
        ]));
}

async sendAttachmentStep(stepContext) {
    var base64Name = "Book1.xlsx";
    var base64Type = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet";
    var base64Url = "http://localhost:49811/v3/attachments/.../views/original";
    var att = await stepContext.prompt('AttachmentPrompt', {
        name: base64Name,
        contentType: base64Type,
        contentUrl: base64Url,
    });
    var nex = await stepContext.next();
    return {
        att, nex
    };
}
You just need to load the file as base64 into your code:
var fs = require('fs');

function base64_encode(file) {
    // read binary data
    var bitmap = fs.readFileSync(file);
    // convert binary data to a base64 encoded string
    return Buffer.from(bitmap).toString('base64');
}
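For instance (the path here is only an illustration):

var base64File = base64_encode('./Book1.xlsx'); // returns the file's bytes as a base64 string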
async sendAttachmentStep(stepContext) {
    var base64Name = "Book1.xlsx";
    var base64Type = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet";
    var file = './<yourFile>'; // path to the file on disk
    var base64File = base64_encode(file);
    var att = await stepContext.prompt('AttachmentPrompt', {
        name: base64Name,
        contentType: base64Type,
        contentUrl: `data:${ base64Type };base64,${ base64File }`,
    });
    var nex = await stepContext.next();
    return {
        att, nex
    };
}
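One caveat: AttachmentPrompt asks the user to upload an attachment rather than sending one. To push a file to the user, it would typically be attached to an outgoing activity instead; a minimal sketch under that assumption, reusing base64_encode from above (the path is hypothetical):

async sendAttachmentStep(stepContext) {
    const base64Type = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet";
    const base64File = base64_encode('./Book1.xlsx'); // hypothetical path
    await stepContext.context.sendActivity({
        text: 'Here is the file:',
        attachments: [{
            name: 'Book1.xlsx',
            contentType: base64Type,
            contentUrl: `data:${base64Type};base64,${base64File}`
        }]
    });
    return await stepContext.next();
}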
I found out how to do it: I used the axios package to fetch the data and then transformed it to base64.
async attachmentsStep(stepContext, next) {
    var activity = stepContext.context.activity;
    if (activity.attachments && activity.attachments.length > 0) {
        var attachment = activity.attachments[0];
        var base64Url = attachment.contentUrl;
        console.log(process.env.PATH);
        var axios = require('axios');
        var excel = await axios.get(base64Url, { responseType: 'arraybuffer' });
        var base64str = Buffer.from(excel.data).toString('base64');
        // base64str = 'data:' + base64Type + ';base64,' + base64str;
        this.base64str = base64str;
        var nex = await stepContext.next();
        return {
            base64str,
            nex
        };
    }
}
Thanks to all of you for your responses.

Using PDFKit to store a PDF in S3 on the fly

I'm trying to create a PDF of some images (in data-URI format) in Node.js and have the PDF stored in my S3 bucket. The function is expected to return the S3 URL of the file.
I'm using parse-server here for the server, node-canvas to create a canvas of the images, and then PDFKit to create the PDF from the canvas elements (jsPdf didn't work out). Now I want this PDF to be sent to my S3 using the AWS SDK, and finally to return the URL of the file. Below is my code, which works up to the canvas generation. I don't know whether the PDF is even created in the first place, before being sent to S3. And oh! The entire thing is running on Heroku.
Parse.Cloud.define('getBulkMeta', async (req) => {
    const PDFDocument = require('pdfkit'),
        {Canvas, loadImage} = require('canvas');
    try {
        let baseImg = await loadImage('data:image/png;base64,' + req.params.labels[0]);
        let labels = req.params.labels,
            allCanvas = [],
            rowH = baseImg.naturalHeight,
            rowW = baseImg.naturalWidth,
            perpage = req.params.size[1],
            pages = Math.ceil(labels.length / perpage),
            imgInd = 0,
            g = 10;
        size = req.params.size[0];
        for (var p = 0; p < pages; p++) {
            let canvas = new Canvas(rowW * ((size == 'A4') ? 2 : 1), rowH * ((size == 'A4') ? 2 : 1)),
                ctx = canvas.getContext("2d");
            ctx.beginPath();
            ctx.rect(0, 0, canvas.width, canvas.height);
            ctx.fillStyle = "#fff";
            ctx.fill();
            if (perpage == 1) {
                let img = await loadImage('data:image/png;base64,' + labels[imgInd++]);
                ctx.drawImage(img, g, g, rowW - (2 * g), rowH - (2 * g));
            } else {
                var thisImgInd = 0;
                for (var r = 0; r < 2; r++) {
                    for (var c = 0; c < 2; c++) {
                        let img = await loadImage('data:image/png;base64,' + labels[imgInd++]);
                        ctx.drawImage(img, g + (c * (rowW - g / 2)), g + (r * (rowH - g / 2)), rowW - (1.5 * g), rowH - (1.5 * g));
                        thisImgInd++;
                        if (thisImgInd >= perpage || imgInd >= labels.length) { break; }
                    }
                    if (thisImgInd >= perpage || imgInd >= labels.length) { break; }
                }
            }
            allCanvas.push(canvas);
        }
        var thisPDF = new PDFDocument({layout: 'landscape', size: size});
        var bcoded;
        thisPDF.pipe(() => { bcoded = new Buffer.from(thisPDF).toString('base64'); });
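        // Note: pipe() expects a writable stream, not a callback, so bcoded is never populated; see the update below.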
        allCanvas.forEach((c, i) => {
            if (i) { thisPDF.addPage(); }
            thisPDF.image(c.toDataURL(), 0, 0, thisPDF.page.width, thisPDF.page.width);
        });
        thisPDF.end();
        const S3_BUCKET = process.env.S3_BUCKET;
        aws.config.region = process.env.AWS_REGION;
        aws.config.signatureVersion = 'v4';
        let s3 = new aws.S3();
        let fileName = req.params.name;
        let s3Params = {
            Bucket: S3_BUCKET,
            Body: bcoded,
            Key: fileName,
            ContentType: 'application/pdf',
            ACL: 'public-read'
        };
        s3.putObject(s3Params, (err, data) => {
            if (err) {
                console.log('\n\n\n\n\n\n\n' + err + '\n\n\n\n\n\n\n');
                throw 'Error: ' + (err);
            }
            let returnData = {
                signedRequest: data,
                url: `https://${S3_BUCKET}.s3.amazonaws.com/${fileName}`
            };
            return (returnData);
        });
    } catch (e) { throw e; }
});
Update: I have got it to save the PDF file in S3 with the code below:
Parse.Cloud.define('getBulkMeta', async (req) => {
    const PDFDocument = require('pdfkit'),
        {Canvas, loadImage} = require('canvas');
    try {
        let baseImg = await loadImage('data:image/png;base64,' + req.params.labels[0]);
        let labels = req.params.labels,
            allCanvas = [],
            rowH = baseImg.naturalHeight,
            rowW = baseImg.naturalWidth,
            perpage = req.params.size[1],
            pages = Math.ceil(labels.length / perpage),
            imgInd = 0,
            g = 10;
        size = req.params.size[0];
        for (var p = 0; p < pages; p++) {
            let canvas = new Canvas(),
                ctx = canvas.getContext("2d");
            canvas.height = rowH * ((size == 'A4') ? 2 : 1);
            canvas.width = rowW * ((size == 'A4') ? 2 : 1);
            ctx.beginPath();
            ctx.rect(0, 0, canvas.width, canvas.height);
            ctx.fillStyle = "#fff";
            ctx.fill();
            if (perpage == 1) {
                let img = await loadImage('data:image/png;base64,' + labels[imgInd++]);
                ctx.drawImage(img, g, g, rowW - (2 * g), rowH - (2 * g));
            } else {
                var thisImgInd = 0;
                for (var r = 0; r < 2; r++) {
                    for (var c = 0; c < 2; c++) {
                        let img = await loadImage('data:image/png;base64,' + labels[imgInd++]);
                        ctx.drawImage(img, g + (c * (rowW - g / 2)), g + (r * (rowH - g / 2)), rowW - (1.5 * g), rowH - (1.5 * g));
                        thisImgInd++;
                        if (thisImgInd >= perpage || imgInd >= labels.length) { break; }
                    }
                    if (thisImgInd >= perpage || imgInd >= labels.length) { break; }
                }
            }
            allCanvas.push(canvas);
        }
        var thisPDF = new PDFDocument({layout: 'landscape', size: size});
        let buffers = [], pdfData, returnData = 'Hi';
        thisPDF.on('data', buffers.push.bind(buffers));
        thisPDF.on('end', () => {
            pdfData = Buffer.concat(buffers);
            const S3_BUCKET = process.env.S3_BUCKET;
            aws.config.region = process.env.AWS_REGION;
            aws.config.signatureVersion = 'v4';
            let s3 = new aws.S3();
            let fileName = req.params.name;
            let s3Params = {
                Bucket: S3_BUCKET,
                Body: pdfData,
                Key: (+new Date()) + '-' + fileName,
                ContentType: 'application/pdf',
                ACL: 'public-read'
            };
            s3.putObject(s3Params, (err, data) => {
                delete pdfData, thisPDF;
                pdfData = null; thisPDF = null;
                if (err) { throw 'Error: ' + (err); }
                returnData = { signedRequest: data, url: `https://${S3_BUCKET}.s3.amazonaws.com/${fileName}` };
            });
        });
        allCanvas.forEach((c, i) => {
            if (i) { thisPDF.addPage(); }
            thisPDF.image(c.toDataURL(), 0, 0, {fit: [thisPDF.page.width, thisPDF.page.height]});
        });
        thisPDF.end();
        return returnData;
    } catch (e) { throw e; }
});
However, returnData always gives "Hi" as the output, and it also appears the function isn't closing: Heroku throws a memory-exceeded error every time.
Since you are using async operations to create the PDF and to send it to S3, your cloud function returns before these operations actually complete. That's why you always have Hi in your returnData var. You need to create a promise and await it, resolving it once these two operations have finished. It should be something like this:
await (new Promise((resolve, reject) => {
    var thisPDF = new PDFDocument({layout: 'landscape', size: size});
    let buffers = [];
    thisPDF.on('data', buffers.push.bind(buffers));
    thisPDF.on('end', () => {
        pdfData = Buffer.concat(buffers);
        const S3_BUCKET = process.env.S3_BUCKET;
        aws.config.region = process.env.AWS_REGION;
        aws.config.signatureVersion = 'v4';
        let s3 = new aws.S3();
        let fileName = req.params.name;
        let s3Params = {
            Bucket: S3_BUCKET,
            Body: pdfData,
            Key: (+new Date()) + '-' + fileName,
            ContentType: 'application/pdf',
            ACL: 'public-read'
        };
        s3.putObject(s3Params, (err, data) => {
            delete pdfData, thisPDF;
            pdfData = null; thisPDF = null;
            if (err) { return reject(err); }
            returnData = { signedRequest: data, url: `https://${S3_BUCKET}.s3.amazonaws.com/${fileName}` };
            resolve();
        });
    });
    // add the pages (allCanvas.forEach) and call thisPDF.end() here, as in your code, so that 'end' fires
}));
BTW, instead of using the AWS SDK, you could use the Parse S3 Adapter and save the PDF as a regular Parse file.
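A minimal sketch of that alternative, assuming pdfData is the buffer assembled in the 'end' handler above (Parse.File accepts base64 input):

const pdfFile = new Parse.File(req.params.name + '.pdf', { base64: pdfData.toString('base64') }, 'application/pdf');
await pdfFile.save();
return pdfFile.url();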

Invalid character in base64 with Firebase Storage

I have been trying to post an image to Firebase Storage for over a week now with no success. First with a blob/file, which didn't work because Node.js doesn't have a Blob type. Now I'm posting as base64. I have tried every fix I could find on here with no success. The error I am getting is:
Firebase Storage: String does not match format 'base64: Invalid character found
client side
handleImage = (event) => {
    const target = event.target;
    const files = target.files;
    if (files.length > 0) {
        const fileToLoad = files[0];
        const fileReader = new FileReader();
        fileReader.onload = event => {
            const srcData = event.target.result; // data: base64
            console.log(srcData);
            let encodedImage = srcData.split(/,(.+)/)[1];
            const options = {
                file: encodedImage,
                fileName: fileToLoad.name
            };
            axios.post(`${process.env.API_URL}/api/dashboard/post-image`, options);
        };
        fileReader.readAsDataURL(fileToLoad);
    }
}
server side
router.post('/post-image', (req, res, next) => {
    console.log(req.body.file);
    const fileName = req.body.fileName;
    const message = req.body.file;
    const storageRef = firebase.app().storage().ref();
    storageRef.child(`${fileName}`).putString(message, 'base64')
        .then(snapshot => {
            console.log(snapshot, 'uploaded image');
        });
});
Any help would be greatly appreciated.
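A common cause of this particular error is stray whitespace or newlines in the base64 string, or a leftover data: prefix. A minimal defensive version of the same route, under that assumption:

router.post('/post-image', (req, res, next) => {
    const fileName = req.body.fileName;
    // Strip any data-URL prefix and whitespace before handing the string to putString.
    const message = req.body.file.replace(/^data:.*?;base64,/, '').replace(/\s/g, '');
    const storageRef = firebase.app().storage().ref();
    storageRef.child(fileName).putString(message, 'base64')
        // Alternatively, send the full data URL from the client and use putString(req.body.file, 'data_url').
        .then(snapshot => res.status(200).json({ path: snapshot.ref.fullPath }))
        .catch(next);
});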

Create an animated GIF from an array of image URLs in Node.js

I have an array of image URLs, which could be of different types (PNG, JPG), and I would like to build a GIF from them, all in Node.js.
Since I'm very new to this language I'm struggling a bit with it; any leads or ideas?
I looked at gifencoder, with this example:
var GIFEncoder = require('gifencoder');
var encoder = new GIFEncoder(854, 480);
var pngFileStream = require('png-file-stream');
var fs = require('fs');

pngFileStream('test/**/frame?.png')
    .pipe(encoder.createWriteStream({ repeat: -1, delay: 500, quality: 10 }))
    .pipe(fs.createWriteStream('myanimated.gif'));
From what I can tell, it looks for PNG files matching the glob expression.
Since I have an array of URLs of different types of images, should I use fs.createReadStream?
Thanks
const GIFEncoder = require("gif-encoder-2");
const { createCanvas, Image } = require("canvas");
const { createWriteStream } = require("fs");
const path = require("path");

const createGif = async (fileName, frameURLs) => {
    return new Promise(async resolve1 => {
        try {
            // find the width and height of the first image
            const [width, height] = await new Promise(resolve2 => {
                const image = new Image();
                image.onload = () => resolve2([image.width, image.height]);
                image.src = frameURLs[0];
            });
            // build the GIF filepath
            const dstPath = path.join(__dirname, "/output", `${fileName}.gif`);
            // create a write stream for GIF data
            const writeStream = createWriteStream(dstPath);
            // when the stream closes the GIF is created, so resolve the promise
            writeStream.on("close", () => {
                resolve1(dstPath);
            });
            // encode with the neuquant algorithm
            const encoder = new GIFEncoder(width, height, "neuquant");
            // pipe the encoder's read stream to our write stream
            encoder.createReadStream().pipe(writeStream);
            encoder.start();
            encoder.setDelay(200);
            const canvas = createCanvas(width, height);
            const ctx = canvas.getContext("2d");
            // draw an image for each frame URL and add it to the encoder
            const promises = frameURLs.map(frame => {
                return new Promise(resolve3 => {
                    const image = new Image();
                    image.onload = () => {
                        ctx.drawImage(image, 0, 0);
                        encoder.addFrame(ctx);
                        resolve3();
                    };
                    image.src = frame; // set the frame url
                });
            });
            // run all the frame promises
            await Promise.all(promises);
            // close the write stream
            writeStream.close();
        } catch (error) {
            console.error("error");
            console.error(error);
            throw error;
        }
    });
};
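A hypothetical call, with placeholder URLs:

createGif("myanimated", [
    "https://example.com/frame1.png",
    "https://example.com/frame2.jpg"
]).then(gifPath => console.log(`GIF written to ${gifPath}`));

Note that the ./output directory must already exist before the write stream is created.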
