Wasn't able to write the buffer stream into a newly created directory in Node.js

I had to create a directory named "stuff" and write the stream into a text file inside that directory. But because code in Node.js doesn't execute serially, I'm getting an error saying the directory doesn't exist. Any help would be appreciated; the code is pasted below.
var http = require("http");
var fs = require("fs");

var readStream = fs.createReadStream(__dirname + "/readMe.txt", "UTF-8");
var writeStream;

fs.mkdir("stuff", function () {
    writeStream = fs.createWriteStream(__dirname + "/stuff/writeMe.txt");
});

readStream.on("data", function (chunk) {
    writeStream.write(chunk);
});

When you call fs.mkdir, it returns immediately. The callback function is executed only once the folder has been created (or creation has failed).
fs.mkdir("stuff", function () {
    // executed after folder creation
    writeStream = fs.createWriteStream(__dirname + "/stuff/writeMe.txt");
});
// executed instantly
In your current code, when you try to write to the folder, it has not been created yet, and writeStream is undefined.
So you need to write to the folder after it's been created:
fs.mkdir("stuff", function () {
    writeStream = fs.createWriteStream(__dirname + "/stuff/writeMe.txt");
    readStream.on("data", function (chunk) {
        writeStream.write(chunk);
    });
});
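As a side note, a pipe can replace the manual "data" handler, and the mkdir callback receives an error argument worth checking. A minimal sketch of that variant, assuming the same file layout (an EEXIST error just means the directory already existed):
var fs = require("fs");

fs.mkdir("stuff", function (err) {
    // ignore "already exists", fail on anything else
    if (err && err.code !== "EEXIST") throw err;

    var readStream = fs.createReadStream(__dirname + "/readMe.txt", "utf8");
    var writeStream = fs.createWriteStream(__dirname + "/stuff/writeMe.txt");

    // pipe wires up the "data" events and backpressure for you
    readStream.pipe(writeStream);
});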

Related

Create a duplicated file with fs streams and be able to read it

I'm currently trying to copy the content of a file into another one using Node.js through the fs.createWriteStream and fs.createReadStream functions.
To be more specific, the file is a music sample that I would like to duplicate, and I expect the resulting file to be readable by a player like any other music or video file. It's this last point that I can't get to work. The files are indeed duplicated, but the resulting file is not accepted by my player, as if it were somehow corrupted.
I checked its content, and it doesn't seem to be a matter of programming logic, as the data of the original file has been correctly copied into the duplicate. Here is my script, if you want to take a look.
const express = require('express')
const app = express()
const fs = require("fs")
var Promise = require("bluebird")
Promise.promisifyAll(fs)
const path = require('path')

const file1 = path.join(__dirname, 'sample1.wav') // The file to copy
const file2 = path.join(__dirname, 'sample2.wav') // The destination of the new file

app.use(async (req, res, next) => {
    let file1_stream = await fs.createReadStream(file1)
    let file2_stream = await fs.createWriteStream(file2)
    file2_stream.pipe(file2_stream)
    next()
})
.listen(8080)
I guess the operation is not as simple as just copying a stream and injecting it with a pipe as shown above. If someone has any idea what I am missing here, I am all ears. Thanks in advance.
That code is triggering an error, which you're probably not handling correctly, since you're using an async middleware on express.
Error [ERR_STREAM_CANNOT_PIPE]: Cannot pipe, not readable
You have to use .pipe on the readable stream, not on the writable stream.
So the code should be:
file1_stream.pipe(file2_stream);
Also, you don't need to await fs.createWriteStream; it does nothing here. Promisification works on callback APIs, but createWriteStream & createReadStream don't take a callback as an argument.
app.use((req, res, next) => {
    let readStream = fs.createReadStream(file1);
    let writeStream = fs.createWriteStream(file2);
    readStream.pipe(writeStream);
    // Call next once the file was actually copied
    writeStream.on('finish', next);
    writeStream.on('error', next); // go to express error handler
    readStream.on('error', next);
});
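On Node 10 and later, stream.pipeline condenses the same idea: it pipes the streams together and calls its final callback with either the first error or nothing, which happens to match what Express expects from next. A minimal sketch under that assumption:
const { pipeline } = require('stream');

app.use((req, res, next) => {
    // copies file1 into file2, then invokes next(err) on failure or next() on success
    pipeline(
        fs.createReadStream(file1),
        fs.createWriteStream(file2),
        next
    );
});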

Returning a result from another Node.js file

I am working on a project in which a Node.js program calls a function defined in a separate file.
This is how I've wired the two together:
var ocr = require('./index.js'); // this imports the file
var arr = ocr.ocr_pan(); // this calls the function in that file
I'm not sure, but I guess the problem is that the process resumes before ocr.ocr_pan() returns the result, so var arr becomes undefined.
Or there is some problem in returning the result from ocr.ocr_pan(). I simply use return.
I have also tried this: How to return array from module in NodeJS
That didn't work. What more can be done?
Assuming this file is in the same directory as index.js, the code in index.js should be something like this:
// Write your function
var ocr_pan = function () {
    // Do whatever you like
    return result;
};

// Export it, making it publicly visible to other files
module.exports = {
    ocr_pan: ocr_pan
};
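That covers the synchronous case. If ocr_pan does its work asynchronously (an OCR pass usually does), a plain return cannot deliver the result; it has to be handed to a callback instead. A hypothetical sketch, with setTimeout standing in for the real OCR work and a made-up result array:
// index.js
var ocr_pan = function (callback) {
    // pretend this is the real, asynchronous OCR call
    setTimeout(function () {
        callback(null, ['hypothetical', 'result']);
    }, 100);
};

module.exports = { ocr_pan: ocr_pan };

// in the calling file
var ocr = require('./index.js');
ocr.ocr_pan(function (err, arr) {
    if (err) throw err;
    console.log(arr); // defined here, once the work has finished
});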

Node.js: written file is empty

I have a small problem: when I try to copy one file from my tmp dir to my ftp dir, the written file is empty. I get no error, and I don't understand what I'm doing wrong.
var fs = require("fs");

var ftpPath = "/var/www/ftp/",
    zipPath = "/var/www/tmp/",
    file = "test";

fs.createReadStream(zipPath + file).pipe(fs.createWriteStream(ftpPath + file));
My test file contains a lorem ipsum sample.
If you have any solution, I'll take it; this is the only line that's buggy in my app :(
First, make sure that the file /var/www/tmp/test exists, is a regular file, and is readable by the user the script runs as.
Second, make sure that /var/www/ftp/ is writable by that user.
Then the following code should work:
var readerStream = fs.createReadStream('/var/www/tmp/test');
var writerStream = fs.createWriteStream('/var/www/ftp/test');
readerStream.pipe(writerStream);
Edit:
Try debugging with this snippet:
var data = '';
var readerStream = fs.createReadStream('/var/www/tmp/test');

readerStream.on('data', function (chunk) {
    // accumulate into the outer variable; naming the argument "data"
    // as well would shadow it and leave it empty
    data += chunk;
});
readerStream.on('end', function () {
    console.log(data);
});
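It can also help to listen for the stream's 'error' event, which is where a missing file or a permission problem would actually surface:
readerStream.on('error', function (err) {
    console.error('read failed:', err);
});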

Unable to read a saved file on Heroku

I am using Node.js on Heroku.
I read a file from another server and save it into my application's temp directory.
Next, I read the same file to pass it on to my client.
The code that saves the file and then subsequently reads it is:
http.request(options, function (pdfResponse) {
    var filename = Math.random().toString(36).slice(2) + '.pdf',
        filepath = nodePath.join(process.cwd(), 'temp/' + filename);

    pdfResponse.on('end', function () {
        fs.readFile(filepath, function (err, contents) {
            // Stuff to do after reading
        });
    });

    // Read the response and save it directly into a file
    pdfResponse.pipe(fs.createWriteStream(filepath));
});
This works well on my localhost.
However, when deployed to Heroku, I get the following error:
events.js:72
throw er; // Unhandled 'error' event
Error: ENOENT, open '/app/temp/nvks0626yjf0qkt9.pdf'
Process exited with status 8
State changed from up to crashed
I am using process.cwd() to make sure the path is correct, but even that did not help. As per the Heroku documentation, I am free to create files in the application's directory, which I am doing. Yet I can't figure out why reading the file fails...
The error you describe there is consistent with /app/temp/ not existing. You need to create it before you start writing in it. The idea is:
var fs = require("fs");
var path = require("path");

var temp_dir = path.join(process.cwd(), 'temp/');

if (!fs.existsSync(temp_dir)) {
    fs.mkdirSync(temp_dir);
}
I've used the sync versions of the calls for illustration purposes only. This code should be part of your app's startup code (rather than being called for each request); how you structure it depends on your specific application.
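For what it's worth, here is a variant using the asynchronous API, assuming Node 10+ where mkdir accepts { recursive: true } (which also makes it a no-op when the directory already exists); startApp is a hypothetical placeholder for whatever boots your server:
var fs = require("fs");
var path = require("path");

var temp_dir = path.join(process.cwd(), 'temp/');

fs.mkdir(temp_dir, { recursive: true }, function (err) {
    if (err) throw err;
    startApp(); // hypothetical: only start serving once temp/ exists
});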

Why append rather than write when using knox / Node.js to grab a file from Amazon S3

I'm experimenting with the knox module for Node.js as a way of managing some small files in an Amazon S3 bucket. Everything works fine stand-alone: I can upload a file, download a file, etc. However, I want to download a file on a recurring schedule. When I modify the code to run on an interval, the downloaded file is appended to the previous instance instead of overwriting it.
I'm not sure whether I've made a mistake in the file-write code or in the knox handling code. I've tried several different write approaches (writeFile, writeStream, etc.) and I've looked at the knox source code. Nothing stands out to me as a problem. Here's the code I'm using:
knox = require('knox');
fs = require('fs');

var downFile = DOWNFILE;
var downTxt = '';
var timer = INTERVAL;
var path = S3PATH + downFile;

setInterval(function () {
    var s3client = knox.createClient({
        key: '********************',
        secret: '**********************************',
        bucket: '********'
    });
    s3client.get(path).on('response', function (response) {
        response.setEncoding('ascii');
        response.on('data', function (chunk) {
            downTxt += chunk;
        });
        response.on('end', function () {
            fs.writeFileSync(downFile, downTxt, 'ascii');
        });
    }).end();
}, timer);
The problem is the placement of var downTxt = '';. That is the only place you set downTxt to blank, so on every request you append the new data to the data from the previous request, because you never clear it. The simplest fix is to move that line to just before the setEncoding line.
However, the way you are processing the data is unnecessarily complicated. Try something like this instead. You don't need to recreate the client every time, and setting the encoding will break things if you download non-text files (with text files it makes no difference). Next, you shouldn't collect the data manually; you can start writing it to the file as soon as you receive it. Lastly, since the request is a standard readable stream, you don't need to monitor the 'data' event, because you can just use pipe. Chaining setTimeout from the write stream's 'close' event, rather than using setInterval, also guarantees that a slow download never overlaps the next one.
var knox = require('knox'),
    fs = require('fs'),
    downFile = DOWNFILE,
    timer = INTERVAL,
    path = S3PATH + downFile,
    s3client = knox.createClient({
        key: '********************',
        secret: '**********************************',
        bucket: '********'
    });

(function downloadFile() {
    var str = fs.createWriteStream(downFile);
    s3client.get(path).pipe(str);
    str.on('close', function () {
        setTimeout(downloadFile, timer);
    });
})();
