How to gzip a directory/folder using the pako module in Node.js?

I am trying to gzip my folder with the help of the pako library. I couldn't find any related content about it. Can someone explain how to use pako to gzip a directory? I am using it in my Lambda function along with EFS.
let bundle = fs.readdirSync(tempDir);
let zippedFile = pako.gzip(bundle);
My folder location looks like this: data/temp/
Error
TypeError: strm.input.subarray is not a function

The error occurs because fs.readdirSync() returns an array of file names, not file contents, and pako.gzip() expects a string or Uint8Array. You can use fs.readdirSync() to list the files in the directory, then check each entry with fs.lstatSync() to make sure it is a regular file before reading it. I have tested this locally on my Mac.
Then you can write the compressed output to your file system.
const fs = require('fs');
const pako = require('pako');

// Read every regular file in the directory and concatenate its contents
let files = fs.readdirSync('/tmp/');
let fileContent = '';

files.forEach(file => {
  let path = '/tmp/' + file;
  let stats = fs.lstatSync(path);
  if (stats.isFile()) {
    let data = fs.readFileSync(path);
    fileContent += data;
  }
});

// Gzip the concatenated content and write it to disk
let zippedFile = pako.gzip(fileContent);
const stream = fs.createWriteStream('/tmp/dir.gz');
stream.write(zippedFile);
stream.end();
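If you later need to read the archive back, pako can also decompress it. A minimal sketch of that direction, assuming the same /tmp/dir.gz path written above:
const fs = require('fs');
const pako = require('pako');

// Read the gzipped file and inflate it back to the concatenated string content
const compressed = fs.readFileSync('/tmp/dir.gz');
const restored = pako.ungzip(compressed, { to: 'string' });
console.log(restored.length);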

Related

readFileSync not returning a buffer when given a dynamic filename, but returning a result when the filename is static

readFileSync does not return anything if I pass the filename dynamically, but if I pass it statically it returns a response.
function base64_encode(file) {
  const fs = require('fs');
  let bitmap = fs.readFileSync(file);
  return Buffer.from(bitmap).toString('base64');
}

let data_base64 = await base64_encode(process.cwd() + '/public/receipt/' + data.file_name);
That code works well for me. I'd suggest adding some logging to try to understand what is going wrong; I'd add the lines below.
const fileName = process.cwd() + '/public/receipt/' + data.file_name;
console.log("File name:", fileName);
console.log("File exists:", require('fs').existsSync(fileName));
console.log("File stat:", require('fs').statSync(fileName));
let data_base64 = await base64_encode(fileName);
console.log("Base64:", data_base64);

Generating a JSON for an icon cheatsheet

I'm trying to generate a JSON file containing the filenames of all the files in a certain directory. I need this to create a cheatsheet for icons.
Currently I'm trying to run a script locally via the terminal to generate the JSON. That JSON will be the input for a React component that displays the icons. The component works; the create-JSON script doesn't.
Code for generating the JSON
const fs = require('fs');
const path = require('path');

/**
 * Create JSON file
 */
const CreateJson = () => {
  const files = [];
  const dir = '../icons';

  fs.readdirSync(dir).forEach(filename => {
    const name = path.parse(filename);
    const filepath = path.resolve(dir, filename);
    const stat = fs.statSync(filepath);
    const isFile = stat.isFile();

    if (isFile) files.push({ name });
  });

  const data = JSON.stringify(files, null, 2);
  fs.writeFileSync('../Icons.json', data);
};

module.exports = CreateJson;
I run it in the terminal using
"create:json": "NODE_ENV=build node ./scripts/CreateJson.js"
I expect a JSON file to be created or overwritten, but the terminal returns:
$ NODE_ENV=build node ./scripts/CreateJson.js
✨ Done in 0.16s.
Any pointers?
You are creating a function CreateJson and exporting it, but you are actually never calling it.
You can get rid of the module.exports and replace it with CreateJson().
When you execute the file with node, it will see the function declaration and a call to it, whereas with your current code there is no call.
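For example, the bottom of scripts/CreateJson.js could look like this (a minimal sketch; keeping the export is only needed if another module also imports the function):
// Call the function so the script actually does something when run with `node`
CreateJson();

// Optional: keep the export if other modules still need it
module.exports = CreateJson;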

Some files missing in zip archive

I am adding PDF files created by wkhtmltopdf to archiver. My purpose is to create a zip file of all the PDFs. I am posting the zip to an S3 bucket, but in the bucket some files are missing from the zip file. Randomly, some of the files in the zip are empty.
for (var i = 1; i <= dataCount; i++) {
  var output_filename = 'testing' + '_' + i + '.pdf';
  const html = "sdfsdfsdf";
  var obg = wkhtmltopdf(html, wkhtmltopdfOptions);
  var results = [];
  archive.append(obg, { name: output_filename });
} // closing the for loop

archive.finalize();

Compress an uncompressed xlsx file using node.js (Electron)

I have an unzipped xlsx file; in it I edit some files so I can generate a new xlsx file containing new data.
On Linux, to recompress the files into an xlsx I just need to go into the terminal and type
find . -type f | xargs zip ../newfile.xlsx
in the folder where the xlsx files are.
The question now is: how can I do this using node.js?
The solution is to compress an explicit list of the files contained in the xlsx; for some reason, if we try to compress the folder itself, the resulting file is corrupted.
The code looks like this if you use JSZip:
var fs = require('fs');
var JSZip = require("jszip");
var zip = new JSZip();
var file = [];
file.push("_rels/.rels");
file.push("docProps/core.xml");
file.push("docProps/app.xml");
file.push("docProps/custom.xml");
file.push("[Content_Types].xml");
file.push("xl/_rels/workbook.xml.rels");
file.push("xl/styles.xml");
file.push("xl/pivotTables/_rels/pivotTable3.xml.rels");
file.push("xl/pivotTables/_rels/pivotTable1.xml.rels");
file.push("xl/pivotTables/_rels/pivotTable2.xml.rels");
file.push("xl/pivotTables/pivotTable3.xml");
file.push("xl/pivotTables/pivotTable1.xml");
file.push("xl/pivotTables/pivotTable2.xml");
file.push("xl/workbook.xml");
file.push("xl/worksheets/_rels/sheet2.xml.rels");
file.push("xl/worksheets/_rels/sheet1.xml.rels");
file.push("xl/worksheets/_rels/sheet3.xml.rels");
file.push("xl/worksheets/sheet4.xml");
file.push("xl/worksheets/sheet1.xml");
file.push("xl/worksheets/sheet3.xml");
file.push("xl/worksheets/sheet2.xml");
file.push("xl/sharedStrings.xml");
file.push("xl/pivotCache/_rels/pivotCacheDefinition1.xml.rels");
file.push("xl/pivotCache/pivotCacheDefinition1.xml");
file.push("xl/pivotCache/pivotCacheRecords1.xml");
for (var i = 0; i < file.length; i++) {
  zip.file(file[i], fs.readFileSync("/home/user/xlsx_FILES/" + file[i]));
}

zip.generateAsync({ type: "blob" }).then(function (content) {
  // see FileSaver.js
  saveAs(content, "yourfile.xlsx");
});
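Note that type: "blob" and saveAs() only work in a browser-like context; in a plain Node.js script you would generate a Buffer and write it with fs instead. A minimal sketch of that variant, reusing the same zip object (the output path is just an example):
// In Node.js (no FileSaver.js): generate a Buffer and write it to disk
zip.generateAsync({ type: "nodebuffer" }).then(function (content) {
  fs.writeFileSync("/home/user/newfile.xlsx", content);
});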
Take a look at archiver, a compression library for Node.js. The docs for the library look comprehensive. The library also allows you to append archives and take advantage of streaming APIs for appending and creating new archives.
Here is an example snippet from their docs which shows how to use the library.
// require modules
var fs = require('fs');
var archiver = require('archiver');

// create a file to stream archive data to.
var output = fs.createWriteStream(__dirname + '/example.zip');
var archive = archiver('zip', {
  store: true // Sets the compression method to STORE.
});

// listen for all archive data to be written
output.on('close', function () {
  console.log(archive.pointer() + ' total bytes');
  console.log('archiver has been finalized and the output file descriptor has closed.');
});

// good practice to catch this error explicitly
archive.on('error', function (err) {
  throw err;
});

// pipe archive data to the file
archive.pipe(output);
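The snippet above only sets up the output stream; to actually build the archive you still have to append each member file and call finalize(). A minimal sketch, assuming the unzipped xlsx contents live under /home/user/xlsx_FILES/ and reusing the file list from the JSZip example above:
// Append each member file under its path inside the archive, then finalize
var baseDir = '/home/user/xlsx_FILES/'; // assumed location of the unzipped xlsx contents
file.forEach(function (name) {
  archive.file(baseDir + name, { name: name });
});

// No more appending is allowed after this call
archive.finalize();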

How do you upload, stream, and hash a file's contents in Node.js?

I'd like to upload files to my server and name them according to their contents. This should be simple (it is in Python), but I am having a hard time figuring out how to do it in Node.js.
I am using express and connect-form, which really just uses formidable. I also see that Node has a library called crypto that is very similar to Python's hashlib. Now I just need to understand how to stream the temp file connect-form gives me and hash it.
This is a Python/Flask(ish) implementation of what I'd like to do.
import hashlib
import os
from flask import request

def upload():
    file = request.files['file']
    hash = hashlib.sha256()
    name, ext = file.filename.rsplit('.', 1)
    try:
        for chunk in file.chunks():
            hash.update(chunk)
    finally:
        file.seek(0)
    new_name = "%s.%s" % (hash.hexdigest(), ext)
    file.save(os.path.join(UPLOAD_DIR, new_name))
I have seen a lot of these toy answers that just print out the file's name, but none that actually read and write the data.
Here it is in coffee-script in all its glory.
app.post '/upload', (request, response, next) ->
  request.form.complete (error, fields, files) ->
    if error
      next error
    else
      file = files.file
      [kind, extension] = file.type.split '/'
      hash = crypto.createHash 'sha256'
      stream = fs.createReadStream file.path,
        encoding: 'binary'
      stream.addListener 'data', (chunk) ->
        hash.update chunk
      stream.addListener 'close', ->
        digest = hash.digest 'hex'
        new_filename = "#{digest}.#{extension}"
        new_path = "#{UPLOAD_DIR}/#{new_filename}"
        fs.rename file.path, new_path
        response.end new_filename
Rather than hacking the hash calculation into formidable, which would likely be more efficient but far more complicated, I opted to just re-read the file from its temporary location and hash that. Then instead of pumping it like in other examples, I just renamed the temp file into its new location.
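For reference, the hash-then-rename part looks roughly like this in plain JavaScript (a minimal sketch, assuming the upload middleware already gives you the temporary file path and the extension, and that the upload directory below is illustrative):
const fs = require('fs');
const path = require('path');
const crypto = require('crypto');

const UPLOAD_DIR = '/tmp/uploads'; // assumed upload directory

// Hash the uploaded temp file, then rename it to <sha256>.<ext> inside UPLOAD_DIR
function storeByHash(tempPath, extension, callback) {
  const hash = crypto.createHash('sha256');
  const stream = fs.createReadStream(tempPath);

  stream.on('data', chunk => hash.update(chunk));
  stream.on('error', callback);
  stream.on('end', () => {
    const newName = hash.digest('hex') + '.' + extension;
    const newPath = path.join(UPLOAD_DIR, newName);
    fs.rename(tempPath, newPath, err => callback(err, newName));
  });
}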
You can save a file like this:
var fs = require('fs'),
    util = require('util'),
    crypto = require('crypto');

// ...

req.form.complete(function (err, fields, files) {
  // ...
  var ext = files['content[media]']['filename'].split('.');
  ext = ext[ext.length - 1];
  ext = ext.toLowerCase();

  var newFileName = req['connection']['remoteAddress'] + req['connection']['remotePort'] + Date.now();
  newFileName = crypto.createHash('md5').update(newFileName).digest("hex");
  newFileName += '.' + ext;

  var is = fs.createReadStream(files['content[media]']['path']);
  var os = fs.createWriteStream(app.set('dataDir') + '/' + newFileName);

  // copy file to public folder
  util.pump(is, os, function (error) {
    if (error) {
      console.log("Error copying file to public ... " + error);
      res.redirect("back");
      return;
    }
    else {
      // delete temp file
      fs.unlinkSync(files['content[media]']['path']);
      res.redirect('....');
    }
  });
});
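Note that util.pump was deprecated long ago; in current Node.js the copy step would use stream.pipeline instead. A minimal sketch of just that part, with is and os being the same streams as above:
const { pipeline } = require('stream');

// Modern replacement for util.pump: copy the upload to its destination
pipeline(is, os, function (error) {
  if (error) {
    console.log("Error copying file to public ... " + error);
    res.redirect("back");
    return;
  }
  // delete temp file
  fs.unlinkSync(files['content[media]']['path']);
  res.redirect('....');
});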
