I want to zip images using JSZip and Node.js, but it doesn't work. It works with a simple file like .txt, but with images it doesn't, and I don't know why.
My code:
var newFileName = pathDir + '/' + id + '.jpg';
fs.readFile(newFileName, function(err, data) {
zip.file(id+'.jpg', data, {base64: true});
});
Try:
var newFileName = pathDir + '/' + id + '.jpg';
var data = fs.readFileSync(newFileName); // returns a Buffer
zip.file(id + '.jpg', data); // a Buffer needs no base64 flag
In your case, you keep overwriting the id.jpg entry of your zip instance with chunk data again and again; calling zip.file with a name that already exists replaces the previous content:
// create a file
zip.file("hello.txt", "Hello[p my)6cxsw2q");
// oops, cat on keyboard. Fixing !
zip.file("hello.txt", "Hello World\n");
The content of hello.txt is "Hello World\n" rather than "Hello[p my)6cxsw2qHello World\n". Hope it helps.
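For completeness, here is a minimal end-to-end sketch, assuming the JSZip v2-era API where zip.generate({type: 'nodebuffer'}) returns a Buffer synchronously (with JSZip v3 you would use generateAsync instead); pathDir and id are reused from your snippet:
var fs = require('fs');
var JSZip = require('jszip');

var zip = new JSZip();
var newFileName = pathDir + '/' + id + '.jpg';

// readFileSync returns a Buffer, so no base64 flag is needed.
var data = fs.readFileSync(newFileName);
zip.file(id + '.jpg', data);

// JSZip v2: build the archive as a Node Buffer and write it out.
var content = zip.generate({ type: 'nodebuffer' });
fs.writeFileSync(pathDir + '/' + id + '.zip', content);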
I am attempting to convert PDF files into preview icon JPGs using the imagemagick library for NodeJS. I am trying to generate a preview of only the first page (for multi-page PDFs). With the normal command-line ImageMagick program this can be done easily by saying "convert file.pdf[0] file.jpg", where the [0] tells it to convert only the first page. However, I am not sure how to do that with this library. I tried concatenating [0] to the filename, but it just reads it as part of the real file name. Does anyone know of a way around this using this library?
I had a look around for a while and found this, but they are not using this library:
Convert PDF to PNG Node.JS
The specific library I am using is located here: https://www.npmjs.com/package/imagemagick
The code I am working with is below:
let path = '/tmp/';
let pageNumber = '[0]';
let filePath = path + fileId + fileName + pageNumber;
let imgFilePath = path + fileId + '.jpg';
let writeStream = fs.createWriteStream(filePath);
writeStream.on('error', err => {
  reject(err);
});
stream.pipe(writeStream);
im.convert([
  filePath,
  '-background', 'white',
  '-alpha', 'remove',
  '-resize', '192x192',
  '-quality', '100',
  imgFilePath
], (err, stdout) => {
  // im.convert requires a completion callback
  if (err) reject(err);
});
The problem is that you are concatenating the [0] part onto the filename before you do the conversion. You should concatenate the [0] within the scope of the convert function like this:
let path = '/tmp/';
let filePath = path + fileId + fileName;
let imgFilePath = path + fileId + '.jpg';
let writeStream = fs.createWriteStream(filePath);
writeStream.on('error', err => {
  reject(err);
});
stream.pipe(writeStream);
im.convert([
  filePath + '[0]', // select only the first page of the PDF
  '-background', 'white',
  '-alpha', 'remove',
  '-resize', '192x192',
  '-quality', '100',
  imgFilePath
], (err, stdout) => {
  if (err) reject(err);
});
This solution is tested working.
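One caveat: in both snippets, im.convert can run before stream.pipe(writeStream) has finished flushing the PDF to disk. A hedged sketch that defers the conversion to the write stream's 'finish' event (resolve and reject are assumed to come from an enclosing Promise, as in the question):
stream.pipe(writeStream);

writeStream.on('finish', () => {
  // The PDF is fully on disk now, so it is safe to convert its first page.
  im.convert([
    filePath + '[0]',
    '-background', 'white',
    '-alpha', 'remove',
    '-resize', '192x192',
    '-quality', '100',
    imgFilePath
  ], (err, stdout) => {
    if (err) return reject(err);
    resolve(imgFilePath);
  });
});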
(new information below)
I am trying to set up a lambda function that reacts to uploaded tgz files by uncompressing them and writing the results back to S3. The unzip and untar work fine, but uploading to S3 fails:
/Users/russell/lambda/gzip/node_modules/aws-sdk/lib/s3/managed_upload.js:350
var buf = self.body.read(self.partSize - self.partBuffer.length) ||
^
TypeError: undefined is not a function
at ManagedUpload.fillStream (/Users/russell/lambda/gzip/node_modules/aws-sdk/lib/s3/managed_upload.js:350:25)
at Entry.<anonymous> (/Users/russell/lambda/gzip/node_modules/aws-sdk/lib/s3/managed_upload.js:167:28)
at Entry.emit (events.js:104:17)
at Entry._read (/Users/russell/lambda/gzip/node_modules/tar/lib/entry.js:123:12)
at Entry.end (/Users/russell/lambda/gzip/node_modules/tar/lib/entry.js:82:8)
at Parse._process (/Users/russell/lambda/gzip/node_modules/tar/lib/parse.js:107:13)
at BlockStream.<anonymous> (/Users/russell/lambda/gzip/node_modules/tar/lib/parse.js:47:8)
at BlockStream.emit (events.js:107:17)
at BlockStream._emitChunk (/Users/russell/lambda/gzip/node_modules/tar/node_modules/block-stream/block-stream.js:145:10)
at BlockStream.write (/Users/russell/lambda/gzip/node_modules/tar/node_modules/block-stream/block-stream.js:45:10)
This error occurs when I write to S3, but if instead I write the files locally to disk it works, so the pipeline is correct.
Here is code that demonstrates the problem:
var aws = require('aws-sdk');
var s3 = new aws.S3({apiVersion: '2006-03-01'});
var zlib = require('zlib');
var tar = require('tar');
var fstream = require('fstream');

fstream.Reader({'path': 'testdata.tar.gz'})
  .pipe(zlib.Unzip())
  .pipe(tar.Parse())
  .on('entry', function(entry) {
    var filename = entry.path;
    console.log('got ' + entry.type + ' ' + filename);
    if (entry.type == 'File') {
      if (1) { // switch between working and nonworking cases
        s3.upload({Bucket: 'my_bucket', Key: 'gunzip-test/' + filename, Body: entry}, {},
          function(err, data) {
            if (err)
              console.log('ERROR!');
            else
              console.log('OK');
          });
      } else {
        entry.pipe(fstream.Writer({ 'path': '/tmp/mytest/' + filename }));
      }
    }
  });
If the code is set to write to S3, it fails with the above error; if it writes the extracted files locally, it succeeds. entry is a stream, and according to the docs it should be accepted as the upload Body parameter. I put a print statement in ManagedUpload, where the failure occurs, and confirmed that self.body is a stream:
var stream = require('stream');
console.log('is it a stream? ' + ((self.body instanceof stream) ? 'yes' : 'no'));
console.log('self.body.read is ' + self.body.read);
returns
$ got File gunzip.js
is it a stream? yes
self.body.read is undefined
I'm pretty new to AWS and Node.js, so there could be a basic problem with this, but I've spent a day and haven't found it. I did the upload call with an unzipped file instead of gzip and it worked (using Lambda functions to unzip archives in S3 is really sloooooow). Can anyone point me at something I am doing wrong in this code?
Thanks
I think I understand this a little better. I broke the pipeline up into pieces and looked at each one. The problem is that tar.Parse uses fstream and not stream. If I look at the return of the .pipe(tar.Parse()) statement it is a stream, but it is not a stream.Readable or a stream.Writable. fstream does not define a read() method (its reader is based on Stream, it is not a stream.Readable), so tar.Parse, which is based on Stream, does not have one either.
So a refinement of the question is, is this a bug in fstream, or is fstream not intended to be a stream? I think it is a bug - from the README:
"Like FS streams, but with stat on them, and supporting directories and
symbolic links, as well as normal files. Also, you can use this to set
the stats on a file, even if you don't change its contents, or to create
a symlink, etc."
In my case, running the stream through stream.PassThrough helped:
var PassThrough = require('stream').PassThrough;

var stream = getStreamSomeHow(); // your source stream
var passthrough = new PassThrough();
stream.pipe(passthrough);
s3.upload({ ..., Body: passthrough });
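Tied back to the tar pipeline from the question, a minimal sketch (the bucket and key reuse the names from the original code):
var PassThrough = require('stream').PassThrough;

// Inside the 'entry' handler: entry is fstream-based and lacks read(),
// so pipe it through a real stream.Readable before handing it to the SDK.
var passthrough = new PassThrough();
entry.pipe(passthrough);

s3.upload({ Bucket: 'my_bucket', Key: 'gunzip-test/' + filename, Body: passthrough },
  function(err, data) {
    if (err) console.log('ERROR!');
    else console.log('OK');
  });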
Your Body variable is a Stream object, in which case you will need to use .toString():
var aws = require('aws-sdk');
var s3 = new aws.S3({apiVersion: '2006-03-01'});
var zlib = require('zlib');
var tar = require('tar');
var fstream = require('fstream');

fstream.Reader({'path': 'testdata.tar.gz'})
  .pipe(zlib.Unzip())
  .pipe(tar.Parse())
  .on('entry', function(entry) {
    var filename = entry.path;
    console.log('got ' + entry.type + ' ' + filename);
    if (entry.type == 'File') {
      if (1) { // switch between working and nonworking cases
        s3.upload({Bucket: 'my_bucket', Key: 'gunzip-test/' + filename, Body: entry.toString()}, {},
          function(err, data) {
            if (err)
              console.log('ERROR!');
            else
              console.log('OK');
          });
      } else {
        entry.pipe(fstream.Writer({ 'path': '/tmp/mytest/' + filename }));
      }
    }
  });
I have a small problem: when I try to copy a file from my tmp dir to my ftp dir, the written file is empty. I get no error, and I don't understand what I'm doing wrong.
var ftpPath = "/var/www/ftp/",
zipPath = "/var/www/tmp/",
file = "test";
fs.createReadStream(zipPath + file).pipe(fs.createWriteStream(ftpPath + file));
My test file contains a lorem ipsum sample.
If you have any solution, I'll take it; this is the only line that's buggy in my app :(
First, make sure that the file /var/www/tmp/test exists, is a file, and has the right permissions for the user you start the script with.
Second, make sure that /var/www/ftp/ is writable by that user.
Then the following code should work:
var readerStream = fs.createReadStream('/var/www/tmp/test');
var writerStream = fs.createWriteStream('/var/www/ftp/test');
readerStream.pipe(writerStream);
Edit:
Try debugging with this snippet:
var output = '';
var readerStream = fs.createReadStream('/var/www/tmp/test');

// Accumulate chunks into a separate variable; naming the callback
// parameter `data` as well would shadow the accumulator.
readerStream.on('data', function(chunk) {
  output += chunk;
});
readerStream.on('end', function() {
  console.log(output);
});
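If the file is still empty after that, attach error handlers to both streams; pipe does not forward stream errors, so they go unnoticed unless you listen for them. A sketch reusing the paths from the question:
var fs = require('fs');

var readerStream = fs.createReadStream('/var/www/tmp/test');
var writerStream = fs.createWriteStream('/var/www/ftp/test');

// A silent 'error' on either side would leave a zero-byte destination file.
readerStream.on('error', function(err) {
  console.error('read error:', err);
});
writerStream.on('error', function(err) {
  console.error('write error:', err);
});
writerStream.on('finish', function() {
  console.log('copy finished');
});

readerStream.pipe(writerStream);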
On my Express server I want to take the file uploaded by the user and rename it to match their username. If the user uploads a new file, the previous one is replaced.
Here's the code:
var newPath = 'uploads/' + user.username + '.' + (file.extension).toLowerCase();
var basePath = path.resolve(__dirname + '../../../') + '/';

// Copy, rename and delete temp file
var is = fs.createReadStream(basePath + file.path);
var os = fs.createWriteStream(basePath + newPath);

is.pipe(os);
is.on('end', function (error) {
  if (error) return res.send(500);
  fs.unlink(basePath + file.path);
});
Problem is that fs.unlink(basePath + file.path); doesn't actually delete the old file on my machine (OSX 10.9.2). How can I make sure the temp file is deleted?
The file at basePath + file.path is still referenced by the read stream is. Removal of the file's contents is postponed until all references to the file are closed. You might want to call fs.unlink in the 'close' event handler instead.
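A minimal sketch of that suggestion, reusing the variables from the question:
var is = fs.createReadStream(basePath + file.path);
var os = fs.createWriteStream(basePath + newPath);

is.pipe(os);

// 'close' fires once the read stream has released its file descriptor,
// so the unlink no longer races against an open handle.
is.on('close', function () {
  fs.unlink(basePath + file.path, function (error) {
    if (error) return res.send(500);
  });
});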
I just use fs.writeFile, which overwrites a file. I have code in my app that does this synchronously, and it looks like this:
if( !fs.existsSync( filename ) ) fs.closeSync( fs.openSync( filename, "wx" ) ); // create if missing, then release the fd
fs.writeFileSync( filename, data );
Basically I check whether the file exists, and if it doesn't, I create it. Then I just write to the file; whether it was new or already existed, only my data is present in it afterwards, overwriting what was there.
I'd like to upload files on my server and name them according to their contents. This should be simple (it is in Python), but I am having a hard time figuring out how to do it in Node.js.
I am using express and connect-form, which really just uses formidable. I also see that Node has a built-in crypto module that is very similar to Python's hashlib. Now I just need to understand how to stream the temp file connect-form gives me and hash it.
This is a Python/Flask(ish) implementation of what I'd like to do.
import os
import hashlib

from flask import request

def upload():
    file = request.files['file']
    hash = hashlib.sha256()
    name, ext = file.filename.rsplit('.', 1)
    try:
        for chunk in file.chunks():
            hash.update(chunk)
    finally:
        file.seek(0)
    new_name = "%s.%s" % (hash.hexdigest(), ext)
    file.save(os.path.join(UPLOAD_DIR, new_name))
I have seen a lot of these toy answers that just print out the file's name, but none that actually read and write the data.
Here it is in CoffeeScript, in all its glory.
# fs and crypto were implied above; required here for completeness
fs = require 'fs'
crypto = require 'crypto'

app.post '/upload', (request, response, next) ->
  request.form.complete (error, fields, files) ->
    if error
      next error
    else
      file = files.file
      [kind, extension] = file.type.split '/'
      hash = crypto.createHash 'sha256'
      stream = fs.createReadStream file.path,
        encoding: 'binary'
      stream.addListener 'data', (chunk) ->
        hash.update chunk
      stream.addListener 'close', ->
        digest = hash.digest 'hex'
        new_filename = "#{digest}.#{extension}"
        new_path = "#{UPLOAD_DIR}/#{new_filename}"
        fs.rename file.path, new_path
        response.end new_filename
Rather than hacking the hash calculation into formidable, which would likely be more efficient but far more complicated, I opted to just re-read the file from its temporary location and hash that. Then instead of pumping it like in other examples, I just renamed the temp file into its new location.
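For readers who prefer plain JavaScript, here is roughly the same flow as the CoffeeScript above; UPLOAD_DIR and the formidable-style file object (with .path and .type) are assumed from the surrounding code:
var fs = require('fs');
var crypto = require('crypto');

function renameToDigest(file, callback) {
  var extension = file.type.split('/')[1]; // e.g. 'image/png' -> 'png'
  var hash = crypto.createHash('sha256');
  var stream = fs.createReadStream(file.path);

  stream.on('data', function(chunk) {
    hash.update(chunk); // feed each chunk into the running digest
  });
  stream.on('error', callback);
  stream.on('end', function() {
    var newFilename = hash.digest('hex') + '.' + extension;
    // The temp file is fully read; move it into place under its digest name.
    fs.rename(file.path, UPLOAD_DIR + '/' + newFilename, function(err) {
      callback(err, newFilename);
    });
  });
}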
You can save a file like this:
var fs = require('fs'),
    util = require('util'),
    crypto = require('crypto');

// ...

req.form.complete(function (err, fields, files) {
  // ...
  var ext = files['content[media]']['filename'].split('.');
  ext = ext[ext.length - 1];
  ext = ext.toLowerCase();

  var newFileName = req['connection']['remoteAddress'] + req['connection']['remotePort'] + Date.now();
  newFileName = crypto.createHash('md5').update(newFileName).digest("hex");
  newFileName += '.' + ext;

  var is = fs.createReadStream(files['content[media]']['path']);
  var os = fs.createWriteStream(app.set('dataDir') + '/' + newFileName);

  // copy file to public folder
  util.pump(is, os, function(error) {
    if (error) {
      console.log("Error copying file to public ... " + error);
      res.redirect("back");
      return;
    } else {
      // delete temp file
      fs.unlinkSync(files['content[media]']['path']);
      res.redirect('....');
    }
  });
});