I want to save an image to a folder in my Node.js project.
Save function:
function saveBrochure(brochure, image, file) {
  return uploadBrochure(file).then(
    data => {
      uploadImage(image);
      return brochureModel.addBrochure(brochure);
    },
    error => {
      return error;
    }
  );
}
Upload function:
function uploadImage(image) {
  var path = 'my-path/' + image.filename;
  fs.writeFile(path, image, function(err) {
    if (err) {
      console.log(err);
    }
  });
}
My image object looks like this:
destination: "path"
encoding: "7bit"
fieldname: "image"
filename: "filename"
mimetype: "image/png"
originalname: "filename"
path: "path/filename.png"
size: 155217
After the upload, I can see that a corrupted image file has been added to my project.
Any idea what I'm doing wrong?
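The likely culprit: fs.writeFile(path, image, ...) is handed the whole metadata object rather than the file's bytes, so Node writes the object's string form and the result is a corrupted "image". Since the object already has a path, the upload middleware (this looks like a multer file object) has presumably stored the file on disk already; a minimal sketch of a fix is to copy it from there (the destination folder name is assumed from the question):

function uploadImage(image) {
  // image.path is where the upload middleware already wrote the file;
  // copy those bytes instead of writing the metadata object itself.
  var dest = 'my-path/' + image.filename;
  fs.copyFile(image.path, dest, function (err) {
    if (err) {
      console.log(err);
    }
  });
}

As a side note, the error => { return error; } handler in saveBrochure turns a rejection into a resolved value, so callers cannot tell failure from success; throw error would preserve the rejection.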
I have been trying to download an image file using JSforce in Node.js. I am able to create a file locally after retrieving the data and converting it to base64, but the image shows a "file not supported" message, whereas I am able to download JavaScript-type files with the correct data.
I am querying the attachment field of a knowledge article in Salesforce.
Following is my query:
SELECT Body__c, Attachment__Name__s, Attachment__ContentType__s, Attachment__Length__s, Attachment__Body__s, Id, KnowledgeArticleId, Title, UrlName FROM Knowledge__kav
I am sending a GET request to the Attachment__Body__s field of the article.
Following is my Node.js code:
function createFile(attachmentBody, attachmntContentType, attachmntName) {
  var req = {
    url: attachmentBody,
    method: 'GET',
    headers: {
      "Content-Type": attachmntContentType
    }
  };
  var test = conn.request(req, function (err, resp) {
    if (err) {
      console.log(err);
    } else {
      var fileBuffer = Buffer.from(resp, 'binary').toString('base64');
      console.log('fileBuffer--- ' + fileBuffer);
      fs.writeFile('./downloadedAttachments/' + attachmntName, fileBuffer, 'base64', function (err) {
        if (err) throw err;
        console.log('File saved.');
      });
    }
  });
}
Please help me with this.
I am now successfully able to download the file in the correct format. Following is my updated code:
function createFile(knbid, attachmntName, callback) {
  var file_here = conn.sobject('Knowledge__kav').record(knbid);
  file_here.retrieve(function (err, response) {
    if (err) {
      console.error(err);
      callback(0);
    } else {
      var obj = fs.createWriteStream('./downloadedAttachments/' + attachmntName, { defaultEncoding: 'binary' });
      //console.log('blob--' + JSON.stringify(file_here.blob('Attachment__Body__s')));
      var stream = file_here.blob('Attachment__Body__s').pipe(obj);
      // 'finish' only fires on success; failures arrive via the 'error' event,
      // not as arguments to the 'finish' listener.
      stream.on('finish', function () {
        console.log('downloaded-' + knbid);
      });
      stream.on('error', function () {
        console.log('not downloaded-' + knbid);
      });
    }
  });
}
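The key change is streaming the blob straight to disk instead of round-tripping it through a string: in the first version, Buffer.from(resp, 'binary') most likely ran on a response body that had already been decoded as text, which mangles binary data and explains the "file not supported" message. A hypothetical usage sketch of the updated function (the record Id and file name are placeholders, not real values):

createFile('kA0xx0000000001AAA', 'diagram.png', function (status) {
  // The callback currently only fires when the retrieve itself fails.
  if (status === 0) console.log('retrieve failed');
});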
I set up a website that basically uses Node.js to fetch an image and then sends it to EJS to display on the page. What happens is that sometimes the image appears, and sometimes it looks like the website loads before the image can be loaded by Node.
I left in the two ways I tried: one commented out, and the other the last one I tried.
This is app.js
function retornaImagens(id) {
  let imagens = {
    assPacUrl: ''
  }
  /*
  if (fs.existsSync(`${__dirname}\\arquivos\\consultas\\${id}\\assPac.png`)) {
    fs.readFile(
      `${__dirname}\\arquivos\\consultas\\${id}\\assPac.png`, 'base64',
      (err, base64Image) => {
        if (err) {
          console.log(err)
        } else {
          imagens.assPacUrl = `data:image/png;base64, ${base64Image}`
        }
      }
    )
  }
  */
  fs.access(`${__dirname}\\arquivos\\consultas\\${id}\\assPac.png`, (err) => {
    if (!err) {
      fs.readFile(
        `${__dirname}\\arquivos\\consultas\\${id}\\assPac.png`, 'base64',
        (err, base64Image) => {
          if (err) {
            console.log(err)
          } else {
            imagens.assPacUrl = `data:image/png;base64, ${base64Image}`
          }
        }
      )
    }
  })
  return imagens;
}
app.get('/consultas/:id/termo', (req, res) => {
  var imagens = retornaImagens(req.params.id);
  Consulta.findOne({ link: `/consultas/${req.params.id}/login` }).populate('medico paciente').exec((err, consulta) => {
    if (err) {
      console.log(err)
    } else {
      console.log(imagens)
      res.render('consulta/termo', { consulta: consulta, id: req.params.id, imagens })
    }
  })
})
This is the EJS file:
<img src="<%= imagens.assPacUrl %>">
If you have tips to make the code cleaner and consume less memory, please share them.
The problem was the loading time: the image took longer to load, so the program continued and rendered the website empty. I fixed it by adding a setTimeout.
function enviaTermo(data) {
  Consulta.findOne({ link: data.link }).populate('medico paciente').exec((err, consulta) => {
    if (err) {
      console.log(err)
    } else {
      console.log(imagens)
      io.to(consulta._id).emit('termo', { consulta: consulta, imagens: imagens })
    }
  })
}

setTimeout(() => {
  enviaTermo(data)
}, 450);
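A setTimeout only works when 450 ms happens to be long enough. A sturdier alternative is to make retornaImagens asynchronous and wait for the read before rendering; here is a minimal sketch using fs.promises, with the file layout taken from the question:

const fs = require('fs');
const path = require('path');

async function retornaImagens(id) {
  const imagens = { assPacUrl: '' };
  const file = path.join(__dirname, 'arquivos', 'consultas', id, 'assPac.png');
  try {
    const base64Image = await fs.promises.readFile(file, 'base64');
    imagens.assPacUrl = `data:image/png;base64, ${base64Image}`;
  } catch (err) {
    // File missing or unreadable: leave assPacUrl empty, as the original does.
  }
  return imagens;
}

// In the route handler, await the result before rendering:
// const imagens = await retornaImagens(req.params.id);
// res.render('consulta/termo', { consulta, id: req.params.id, imagens });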
I am trying to read a PDF file from fs and send it through email using SendGrid.
My folder structure is like this:
/
-src
--controllers
---travelplan.js
-pdf
In travelplan.js, if I do it like this:
fs.readFile('pdf/204.pdf', function (err, data) {
  if (err) {
    console.log("THIS ERROR IS AWESOME", err)
  }
})
everything works fine. No problem.
But if I read it like this:
let pdf_number = 204;
fs.readFile(`pdf/${pdf_number}.pdf`, function (err, data) {
  if (err) {
    console.log("THIS ERROR IS AWESOME", err)
  }
})
This doesn't work; the PDF doesn't send correctly.
Then I tried this:
let pdf_number = 204;
var pdf_path = path.join(__dirname, '..', 'pdf', pdf_number);
fs.readFile(pdf_path, function (err, data) {
  if (err) {
    console.log("THIS ERROR IS AWESOME", err)
  }
})
This also doesn't work.
How do I read a pdf file by passing the pdf file name as an argument?
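For reference, given the folder layout above (travelplan.js in src/controllers, pdf at the project root), a sketch of a likely fix: path.join needs string segments, the .pdf extension has to be appended, and the path must climb two levels out of src/controllers:

const fs = require('fs');
const path = require('path');

let pdf_number = 204;
// The template literal turns the number into a string and adds the extension;
// '..', '..' climbs from src/controllers up to the project root.
const pdf_path = path.join(__dirname, '..', '..', 'pdf', `${pdf_number}.pdf`);
fs.readFile(pdf_path, function (err, data) {
  if (err) {
    console.log("THIS ERROR IS AWESOME", err);
  }
});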
Can anyone help me figure out why I get this issue? When I run this code locally it runs perfectly, but on AWS Lambda I get this error even after increasing the Lambda timeout as well as the memory.
In this code I do two basic tasks: on a GET call I convert an XLSX file to JSON, and on a POST I convert a test directory to a zip file. I have been trying for the last few hours to upload it to AWS Lambda; now I am stuck, seeing this error continuously. Can anyone help me out of this situation? Thanks in advance.
Here is my code:
index.js
"use strict"
const fs = require("fs");
const path = require("path");
const ctrlFuns = require("./functionality");
const output = fs.createWriteStream(path.join(__dirname,
"./assets/docs.zip"));
const archiver = require("archiver");
const zipArchive = archiver("zip", {
gzip: true,
zlib: {
level: 9
} // Sets the compression level.
});
exports.handleHttpRequest = function (event, context, callback) {
if (event.http_method == "GET") {
ctrlFuns.xlsxToJson().then((jsonObjs) => {
callback(null, {
users: jsonObjs,
});
}).catch((err) => {
callback(err);
});
}
else if (event.http_method == "POST") {
fs.readFile(path.join(__dirname + "/test/test.xlsx"), (err, file) => {
if (err) {
callback(err);
} else {
//pipe archive data to the file
zipArchive.pipe(output);
zipArchive.append(file, {
name: "test.xlsx",
prefix: "test-data" //used for folder name in zip file
});
// to catch this error explicitly
zipArchive.on("error", (err) => {
callback(err);
});
//to perform end tasks while zip converted
zipArchive.on("end", () => {
fs.readFile(path.join(__dirname + "/assets/docs.zip"), (err,
success) => {
if (err) {
callback(err);
} else {
callback(null, success.toString("base64"));
}
});
});
//filnalizing the zip file for user use
zipArchive.finalize();
}
});
}
else {
callback(null, "run default case");
}
} //handler-closes
Here is my functionality.js:
/**
 * OBJECTIVE: TO CREATE THE FUNCTIONALITY
 */
"use strict"
const XLSX = require("xlsx");
const fs = require("fs");
const path = require("path");

var ctrlFuns = {};

ctrlFuns.xlsxToJson = function () {
  return new Promise((resolve, reject) => {
    fs.readFile(path.join(__dirname + "/test/test.xlsx"), (err, file) => {
      if (err) {
        reject(err);
      } else {
        let workbook = XLSX.read(file.buffer, {
          type: "buffer"
        });
        // if workbook is null, bail out
        if (!workbook) {
          return reject("Workbook not found.");
        }
        /* Getting first workbook sheetName */
        let first_sheet_name = workbook.SheetNames[0];
        /* Get worksheet */
        let worksheet = workbook.Sheets[first_sheet_name];
        /** Convert into JSON */
        resolve(XLSX.utils.sheet_to_json(worksheet, {
          raw: true
        }));
      }
    });
  });
}; // fun closes

module.exports = ctrlFuns;
When I checked the logs in CloudWatch, I saw:
START RequestId: 720cf48f-01c4-11e9-b715-9d54f664a1e8 Version: $LATEST
2018-12-17T06:24:45.756Z 720cf48f-01c4-11e9-b715-9d54f664a1e8 Error: EROFS: read-only file system, open '/var/task/assets/docs.zip'
END RequestId: 720cf48f-01c4-11e9-b715-9d54f664a1e8
with the following error message:
{
"errorMessage": "RequestId: 98b9e509-01c7-11e9-94dc-03cfdf0dae93 Process exited before completing request"
}
The error seems self-explanatory:
Error: EROFS: read-only file system, open '/var/task/assets/docs.zip'
/var/task is where your Lambda function code is located, and in the actual Lambda environment, that filesystem is read-only. If you need to write to a file, you need to write to /tmp.
Q: What if I need scratch space on disk for my AWS Lambda function?
Each Lambda function receives 500MB of non-persistent disk space in its own /tmp directory.
https://aws.amazon.com/lambda/faqs/
Note that you also need to clean up after yourself and remove any temporary files you created, because once a function finishes executing, its container is available for reuse by a later invocation of the same function... which means this same temp space may persist for a short time and be seen again (but only by this same function).
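Applied to the code in the question, a minimal sketch of the change: build the zip under /tmp instead of under the function's code directory, and create the stream and archive inside the handler rather than at module scope, so a reused container doesn't try to append to an already-finalized archive:

const fs = require("fs");
const os = require("os");
const path = require("path");
const archiver = require("archiver");

// Inside the POST branch of the handler:
const zipPath = path.join(os.tmpdir(), "docs.zip"); // resolves to /tmp on Lambda
const output = fs.createWriteStream(zipPath);
const zipArchive = archiver("zip", { zlib: { level: 9 } });
zipArchive.pipe(output);
// ...append files and finalize as before, then read zipPath back...
// Clean up afterwards so a reused container starts fresh:
// fs.unlinkSync(zipPath);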
I am trying to write an import script in Node.js that pulls data from the web, formats it, and then sends it to my API.
Part of that includes pulling artist data from LastFM, fetching the images for each artist and sending them off to my API to resize and save.
The import script is just run in a terminal.
The part of the import script that is responsible for pulling the images down and sending them off to my API looks like:
_.forEach(artist.images, function (image) {
  console.log('uploading image to server ' + image.url);
  request.get(image.url)
    .pipe(request.post('http://MyAPI/files/upload', function (err, files) {
      if (err) {
        console.log(err);
      }
      console.log('back from upload');
      console.log(files);
    }));
});
And the files.upload action looks like:
upload: function (req, res) {
  console.log('saving image upload');
  console.log(req.file('image'));
  res.setTimeout(0);
  var sizes = [
    ['avatar', '280x210'],
    ['medium', '640x640'],
    ['large', '1024x768'],
    ['square', '100x100'],
    ['smallsquare', '50x50'],
    ['avatarsquare', '32x32']
  ];
  // resize to the set dimensions
  // for each dimension - save the output to gridfs
  _.forEach(sizes, function (bucket) {
    // bucket[1] holds a 'WIDTHxHEIGHT' string, so split it into its parts
    // (the original `bucket[1, 0]` used the comma operator and read the wrong values)
    var dimensions = bucket[1].split('x');
    var width = dimensions[0], height = dimensions[1];
    // Let's create a custom receiver
    var receiver = new Writable({ objectMode: true });
    receiver._write = function (file, enc, cb) {
      gm(file).resize(width, height).upload({
        adapter: require('skipper-gridfs'),
        uri: 'mongodb://localhost:27017/sonatribe.' + bucket[0]
      }, function (err, uploadedFiles) {
        if (err) {
          return res.serverError(err);
        } else {
          return res.json({
            files: uploadedFiles,
            textParams: req.params.all()
          });
        }
      });
      cb();
    };
    /* req.file('image').upload(receiver, function (err, files) {
      if (err) console.log(err);
      console.log('returning files');
      return files;
    }); */
  });
}
However, console.log(req.file('image')) is not what I'd hoped, probably because this code expects the image to be uploaded as part of a multipart form upload with a field named image, which it is not...
I'm trying to figure out how the file will end up inside my action, but my Google-fu is completely out of action today and I'm fairly (very) new to Node.
Anyone able to offer some pointers?
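One way to make the file show up in req.file('image') is to send it as real multipart form data with a field named image. The request library accepts a readable stream as a formData value, so the download can be piped straight through; a minimal sketch (the URL and field name are taken from the question):

var request = require('request');

_.forEach(artist.images, function (image) {
  request.post({
    url: 'http://MyAPI/files/upload',
    formData: {
      // Stream the remote image through as a multipart field named "image",
      // which is what req.file('image') looks for on the server.
      image: request.get(image.url)
    }
  }, function (err, httpResponse, body) {
    if (err) {
      return console.log(err);
    }
    console.log('back from upload');
    console.log(body);
  });
});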