Can I configure console.log so that the logs are written to a file instead of being printed in the console?
You could also just override the default console.log function:
var fs = require('fs');
var util = require('util');
var log_file = fs.createWriteStream(__dirname + '/debug.log', {flags : 'w'});
var log_stdout = process.stdout;
console.log = function(d) {
log_file.write(util.format(d) + '\n');
log_stdout.write(util.format(d) + '\n');
};
The above example will log to debug.log and to stdout.
Edit: see the multi-parameter version by Clément elsewhere on this page.
Update 2013 - This was written around Node v0.2 and v0.4; there are much better logging utilities now. I highly recommend Winston.
Update Late 2013 - We still use winston, but now with a logger library to wrap the functionality around logging of custom objects and formatting. Here is a sample of our logger.js https://gist.github.com/rtgibbons/7354879
Should be as simple as this.
var fs = require('fs');
// 'dir' is your log directory; 'proc' is the (child) process whose output you want to capture
var access = fs.createWriteStream(dir + '/node.access.log', { flags: 'a' })
  , error = fs.createWriteStream(dir + '/node.error.log', { flags: 'a' });
// redirect stdout / stderr
proc.stdout.pipe(access);
proc.stderr.pipe(error);
If you are looking for something for production, winston is probably the best choice.
If you just want to do dev stuff quickly, output directly to a file (I think this works only for *nix systems):
nohup node simple-server.js > output.log &
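If you also want console.error output (stderr) in the same file, the usual shell redirection applies; a variant of the same command:
nohup node simple-server.js > output.log 2>&1 &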
I often use many arguments to console.log() and console.error(), so my solution would be:
var fs = require('fs');
var util = require('util');
var logFile = fs.createWriteStream('log.txt', { flags: 'a' });
// Or 'w' to truncate the file every time the process starts.
var logStdout = process.stdout;
console.log = function () {
logFile.write(util.format.apply(null, arguments) + '\n');
logStdout.write(util.format.apply(null, arguments) + '\n');
}
console.error = console.log;
Winston is a very popular npm module used for logging.
Here is a how-to.
Install winston in your project as:
npm install winston --save
Here's a configuration, ready to use out of the box, that I use frequently in my projects as logger.js under utils.
/**
* Configurations of logger.
*/
const winston = require('winston');
const winstonRotator = require('winston-daily-rotate-file');
const consoleTransports = () => [
  new winston.transports.Console({
    colorize: true
  })
];
// Use two separate Logger instances; aliasing one instance to both names
// would add both file transports to the same logger.
const successLogger = new winston.Logger({
  transports: consoleTransports()
});
successLogger.add(winstonRotator, {
  name: 'access-file',
  level: 'info',
  filename: './logs/access.log',
  json: false,
  datePattern: 'yyyy-MM-dd-',
  prepend: true
});
const errorLogger = new winston.Logger({
  transports: consoleTransports()
});
errorLogger.add(winstonRotator, {
  name: 'error-file',
  level: 'error',
  filename: './logs/error.log',
  json: false,
  datePattern: 'yyyy-MM-dd-',
  prepend: true
});
module.exports = {
'successlog': successLogger,
'errorlog': errorLogger
};
Then simply import it wherever required, like this:
const errorLog = require('../util/logger').errorlog;
const successlog = require('../util/logger').successlog;
Then you can log the success as:
successlog.info(`Success Message and variables: ${variable}`);
and Errors as:
errorlog.error(`Error Message : ${error}`);
It also logs all the success and error logs in files under the logs directory, date-wise.
const fs = require("fs");
const {keys} = Object;
const {Console} = console;
/**
* Redirect console to a file. Call without path or with false-y
* value to restore original behavior.
* @param {string} [path]
*/
function file(path) {
const con = path ? new Console(fs.createWriteStream(path)) : null;
keys(Console.prototype).forEach(key => {
if (path) {
this[key] = (...args) => con[key](...args);
} else {
delete this[key];
}
});
}
// patch global console object and export
module.exports = console.file = file;
To use it, do something like:
require("./console-file");
console.file("/path/to.log");
console.log("write to file!");
console.error("also write to file!");
console.file(); // go back to writing to stdout
For simple cases, we could redirect the standard output (STDOUT) and standard error (STDERR) streams directly to a file (say, test.log) using '>' and '2>&1'.
Example:
// test.js
(function() {
// Below outputs are sent to Standard Out (STDOUT) stream
console.log("Hello Log");
console.info("Hello Info");
// Below outputs are sent to Standard Error (STDERR) stream
console.error("Hello Error");
console.warn("Hello Warning");
})();
node test.js > test.log 2>&1
As per the POSIX standard, the 'input', 'output' and 'error' streams are identified by the integer file descriptors 0, 1 and 2. i.e., stdin is 0, stdout is 1, and stderr is 2.
Step 1: '>' redirects 1 (stdout) to the file (test.log); the shell processes redirections left to right
Step 2: '2>&1' then redirects 2 (stderr) to a duplicate of 1 (stdout), which now points at the file
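The same file-descriptor logic lets you split the two streams into separate files, which keeps errors easy to find; for example:
# stdout and stderr to separate files
node test.js 1> test.out.log 2> test.err.log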
If this is for an application, you're probably better off using a logging module. It'll give you more flexibility. Some suggestions.
winston https://github.com/winstonjs/winston
log4js https://github.com/nomiddlename/log4js-node
Straight from nodejs's API docs on Console
const fs = require('fs');
const { Console } = require('console'); // also available as the global console.Console
const output = fs.createWriteStream('./stdout.log');
const errorOutput = fs.createWriteStream('./stderr.log');
// custom simple logger
const logger = new Console(output, errorOutput);
// use it like console
const count = 5;
logger.log('count: %d', count);
// in stdout.log: count: 5
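Newer Node versions (v10+) also accept an options-object form of the constructor; check the Console docs for your version before relying on it:
// options-object form, equivalent to the positional form above
const logger2 = new Console({ stdout: output, stderr: errorOutput });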
Another solution not mentioned yet is by hooking the Writable streams in process.stdout and process.stderr. This way you don't need to override all the console functions that output to stdout and stderr. This implementation redirects both stdout and stderr to a log file:
var log_file = require('fs').createWriteStream(__dirname + '/log.txt', {flags : 'w'})
function hook_stream(stream, callback) {
var old_write = stream.write
stream.write = (function(write) {
return function(string, encoding, fd) {
write.apply(stream, arguments) // comment this line out if you don't want output in the console
callback(string, encoding, fd)
}
})(stream.write)
return function() {
stream.write = old_write
}
}
console.log('a')
console.error('b')
var unhook_stdout = hook_stream(process.stdout, function(string, encoding, fd) {
log_file.write(string, encoding)
})
var unhook_stderr = hook_stream(process.stderr, function(string, encoding, fd) {
log_file.write(string, encoding)
})
console.log('c')
console.error('d')
unhook_stdout()
unhook_stderr()
console.log('e')
console.error('f')
It should print in the console
a
b
c
d
e
f
and in the log file:
c
d
For more info, check this gist.
Overwriting console.log is the way to go. But for it to work in required modules, you also need to export it.
module.exports = console;
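For instance, a minimal sketch of that pattern; the filenames here (console.js, app.js) are just placeholders:
// console.js -- override once, export the patched console
var fs = require('fs');
var util = require('util');
var logFile = fs.createWriteStream(__dirname + '/app.log', { flags: 'a' });
console.log = function (...args) {
  logFile.write(util.format(...args) + '\n');
};
module.exports = console;

// app.js (or any other module)
const console = require('./console');
console.log('this line goes to app.log');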
To save yourself the trouble of writing log files, rotating and stuff, you might consider using a simple logger module like winston:
// Include the logger module
var winston = require('winston');
// Set up log file. (you can also define size, rotation etc.)
winston.add(winston.transports.File, { filename: 'somefile.log' });
// Overwrite some of the built-in console functions
console.error = winston.error;
console.log = winston.info;
console.info = winston.info;
console.debug = winston.debug;
console.warn = winston.warn;
module.exports = console;
If you're using Linux, you can also use output redirection. Not sure about Windows.
node server.js >> file.log 2>> file.log
>> file.log to redirect stdout to the file
2>> file.log to redirect stderr to the file
others use the shorthand &>> for both stdout and stderr, but it's not accepted by either my mac or my ubuntu :(
extra: > overwrites, while >> appends.
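Note that >> file.log 2>> file.log opens the same file twice; a safer equivalent is to redirect stdout once and duplicate stderr onto it:
node server.js >> file.log 2>&1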
By the way, regarding NodeJS loggers, I use pino + pino-pretty logger
METHOD STDOUT AND STDERR
This approach can help you (I use something similar in my projects) and works for all methods, including console.log, console.warn, console.error and console.info.
It writes the bytes that go to stdout and stderr to a file. This is better than replacing the console.log, console.warn, console.error and console.info methods, because the output will be exactly the same as those methods' output.
var fs= require("fs")
var os= require("os")
var HOME= os.homedir()
var stdout_r = fs.createWriteStream(HOME + '/node.stdout.log', { flags: 'a' })
var stderr_r = fs.createWriteStream(HOME + '/node.stderr.log', { flags: 'a' })
var attachToLog= function(std, std_new){
var originalwrite= std.write
std.write= function(data,enc){
try{
var d= data
if(!Buffer.isBuffer(d))
d= Buffer.from(data, (typeof enc === 'string') ? enc : "utf8")
std_new.write(d) // write the Buffer directly (apply() expects an array of arguments, not a Buffer)
}catch(e){}
return originalwrite.apply(std, arguments)
}
}
attachToLog(process.stdout, stdout_r)
attachToLog(process.stderr, stderr_r)
// recommended catch error on stdout_r and stderr_r
// stdout_r.on("error", yourfunction)
// stderr_r.on("error", yourfunction)
Adding to the answer above, a little bit of an expansion to the short and efficient code overriding console.log. Minor additions: set the filename with the date, a wrapper function, and also do the original console.logging to keep the console active with the info.
Usage: in the beginning of your code, run setConsoleLogToFile([FILENAME]).
const fs = require("fs"),
util = require('util');
const getPrettyDate = ()=> new Date().toString().replace(":","-").replace(/00\s\(.*\)/, "").replace(` ${new Date().getFullYear()}`, ",").replace(/:\d\d\s/, " ");
module.exports.getPrettyDate = getPrettyDate;
module.exports.setConsoleLogToFile = (filename) => {
const log_file = fs.createWriteStream(`${__dirname}/${filename} - ${getPrettyDate()}.log`, { flags: 'w' });
const origConsole = console.log;
console.log = (d) => {
origConsole(d); // the original console.log already prints to stdout, so no separate stdout write is needed
log_file.write(util.format(d) + '\n');
};
}
Most loggers are overkill and don't support the built-in console.log correctly, hence I created console-log-to-file:
import { consoleLogToFile } from "console-log-to-file";
// or `const { consoleLogToFile } = require("console-log-to-file/dist/index.cjs.js")`
consoleLogToFile({
logFilePath: "/log/default.log",
});
// all of your console.log/warn/error/info will work as it does and save to file now.
If you are looking for a solution without modifying any code, here is a simple solution.
It requires pm2; just add it to your node modules and start your app with
pm2 start server.js
And you are done: console.logs are now automatically registered under ~/.pm2/logs/server-out.log.
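pm2 can also tail and manage those files for you:
pm2 logs server   # stream the app's stdout/stderr logs live
pm2 flush         # empty all pm2 log files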
Improving on Andres Riofrio's answer, to handle any number of arguments:
var fs = require('fs');
var util = require('util');
var log_file = fs.createWriteStream(__dirname + '/debug.log', {flags : 'w'});
var log_stdout = process.stdout;
console.log = function(...args) {
var output = util.format(...args); // util.format handles objects and printf-style placeholders, unlike join(' ')
log_file.write(output + '\r\n');
log_stdout.write(output + '\r\n');
};
You can now use Caterpillar which is a streams based logging system, allowing you to log to it, then pipe the output off to different transforms and locations.
Outputting to a file is as easy as:
var logger = new (require('caterpillar').Logger)(); // require('caterpillar') rather than './' when installed from npm
logger.pipe(require('fs').createWriteStream('./debug.log'));
logger.log('your log message');
Complete example on the Caterpillar Website
You can also have a look at this npm module:
https://www.npmjs.com/package/noogger
noogger
simple and straight forward...
For future users: @keshavDulal's answer doesn't work for the latest version, and I couldn't find a proper fix for the issues reported against the latest version, 3.3.3.
Anyway, I finally fixed it after researching a bit. Here is the solution for winston version 3.3.3:
Install winston and winston-daily-rotate-file
npm install winston
npm install winston-daily-rotate-file
Create a new file utils/logger.js
const winston = require('winston');
const winstonRotator = require('winston-daily-rotate-file');
const logger = winston.createLogger({ // createLogger is a factory function; 'new' is unnecessary
transports: [
new (winston.transports.DailyRotateFile)({
name: 'access-file',
level: 'info',
filename: './logs/access.log',
json: false,
datePattern: 'yyyy-MM-DD',
prepend: true,
maxFiles: 10
}),
new (winston.transports.DailyRotateFile)({
name: 'error-file',
level: 'error',
filename: './logs/error.log',
json: false,
datePattern: 'yyyy-MM-DD',
prepend: true,
maxFiles: 10
})
]
});
module.exports = {
logger
};
Then in any file where you want to use logging import the module like
const logger = require('./utils/logger').logger;
Use logger like the following:
logger.info('Info service started');
logger.error('Service crashed');
If you are using forever to keep your node app running, then typing forever list will show you the path to the log file that console.log is writing to.
You can use the nodejs Console constructor
const mylog = new console.Console(
fs.createWriteStream("log/logger.log"),
fs.createWriteStream("log/error.log")
);
And then you can use it just like the normal console class, eg:
mylog.log("Ok!"); // Will be written into 'log/logger.log'
mylog.error("Bad!"); // Will be written into 'log/error.log'
I took on the idea of swapping the output stream for my own stream.
const LogLater = require ('./loglater.js');
var logfile=new LogLater( 'log'+( new Date().toISOString().replace(/[^a-zA-Z0-9]/g,'-') )+'.txt' );
var PassThrough = require('stream').PassThrough;
var myout= new PassThrough();
var wasout=console._stdout;
myout.on('data',(data)=>{logfile.dateline("\r\n"+data);wasout.write(data);});
console._stdout=myout;
var myerr= new PassThrough();
var waserr=console._stderr;
myerr.on('data',(data)=>{logfile.dateline("\r\n"+data);waserr.write(data);});
console._stderr=myerr;
loglater.js:
const fs = require('fs');
function LogLater(filename, noduplicates, interval) {
this.filename = filename || "loglater.txt";
this.arr = [];
this.timeout = false;
this.interval = interval || 1000;
this.noduplicates = (noduplicates === undefined) ? true : noduplicates; // '|| true' would make it impossible to disable
this.onsavetimeout_bind = this.onsavetimeout.bind(this);
this.lasttext = "";
process.on('exit',()=>{ if(this.timeout)clearTimeout(this.timeout);this.timeout=false; this.save(); })
}
LogLater.prototype = {
_log: function _log(text) {
this.arr.push(text);
if (!this.timeout) this.timeout = setTimeout(this.onsavetimeout_bind, this.interval);
},
text: function log(text, loglastline) {
if (this.noduplicates) {
if (this.lasttext === text) return;
this.lasttext = text;
}
this._log(text);
},
line: function log(text, loglastline) {
if (this.noduplicates) {
if (this.lasttext === text) return;
this.lasttext = text;
}
this._log(text + '\r\n');
},
dateline: function dateline(text) {
if (this.noduplicates) {
if (this.lasttext === text) return;
this.lasttext = text;
}
this._log(((new Date()).toISOString()) + '\t' + text + '\r\n');
},
onsavetimeout: function onsavetimeout() {
this.timeout = false;
this.save();
},
save: function save() { fs.appendFile(this.filename, this.arr.splice(0, this.arr.length).join(''), function(err) { if (err) console.log(err.stack) }); }
}
module.exports = LogLater;
I just built a package to do this, hope you like it ;)
https://www.npmjs.com/package/writelog
I for myself simply took the example from winston and added the log(...) method (because winston names it info(...)):
Console.js:
"use strict"
// Include the logger module
const winston = require('winston');
const logger = winston.createLogger({
level: 'info',
format: winston.format.json(),
transports: [
//
// - Write to all logs with level `info` and below to `combined.log`
// - Write all logs error (and below) to `error.log`.
//
new winston.transports.File({ filename: 'error.log', level: 'error' }),
new winston.transports.File({ filename: 'combined.log' })
]
});
//
// If we're not in production then log to the `console` with the format:
// `${info.level}: ${info.message} JSON.stringify({ ...rest }) `
//
if (process.env.NODE_ENV !== 'production') {
logger.add(new winston.transports.Console({
format: winston.format.simple()
}));
}
// Add log command
logger.log=logger.info;
module.exports = logger;
Then simply use in your code:
const console = require('./Console')
Now you can simply use the normal log functions in your files and it will create a file AND log to your console (while debugging/developing). Because of if (process.env.NODE_ENV !== 'production') {, the console transport is only added outside production (remove the check in case you want console output in production too).
Create a utils/logger.js file with:
var fs = require('fs');
var util = require('util');
var log_file = fs.createWriteStream(__dirname + '/../logs/server.log', { flags: 'w' });
var log_stdout = process.stdout;
console.log = function () {
[...arguments].forEach(element => {
log_file.write(util.format(element) + '\n');
log_stdout.write(util.format(element) + '\n');
});
};
module.exports = {
console
}
Include the logger.js file from any file where you want to console.log like:
const console = require('./utils/logger').console;
Create a logs folder and create an empty server.log file in it and run your app :)
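If you'd rather not create the folder by hand, the module can create it at startup; fs.mkdirSync with recursive: true (Node 10.12+) is a no-op when the directory already exists, and createWriteStream creates the log file itself:
var path = require('path');
fs.mkdirSync(path.join(__dirname, '../logs'), { recursive: true });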
Rudy Huynh's solution worked really well for me. I added a little bit to have it spit out files with today's date and time.
var dateNow = new Date();
var timeNow = dateNow.getHours() + '-' + dateNow.getMinutes();
var logPath = "log/" + dateNow.toDateString() + ' -' + ' Start Time - ' + timeNow + ".log"
consoleLogToFile({
logFilePath: logPath
});
It's not very elegant but this way it'll save different, easy to read, log files instead of just updating the same "default.log" file.
Based on the multi-parameter version by Clément, just without the color codes for the text file:
var fs = require('fs');
var util = require('util');
var logFile = fs.createWriteStream('log.txt', { flags: 'a' });
// Or 'w' to truncate the file every time the process starts.
var logStdout = process.stdout;
console.log = function () {
// Storing without color codes
logFile.write(util.format.apply(null, arguments).replace(/\x1b\[[0-9;]*m/g, "") + '\n'); // \x1b instead of the legacy octal \033 escape
// Display normally, with colors to Stdout
logStdout.write(util.format.apply(null, arguments) + '\n');
}
Note: Answering since I couldn't comment
Related
Is it possible to get the line number and file for each log output?
For example:
var winston = require('winston');
winston.log('info', 'some message!'); // this is at line 4 of myfile.js
should specify in log file that 'some message' came from myFile.js line 4.
You can pass the file name as label and you can get the file name from callingModule.
Create logger.js file and code like
var winston = require('winston');
var getLabel = function (callingModule) {
var parts = callingModule.filename.split('/');
return parts[parts.length - 2] + '/' + parts.pop();
};
module.exports = function (callingModule) {
return new winston.Logger({
transports: [
new winston.transports.Console({
label: getLabel(callingModule),
json: false,
timestamp: true,
depth:true,
colorize:true
})
]
});
};
Now here is your test file:
var logger = require('./logger')(module);
function test() {
logger.info('test logger');
}
test();
and if you run the test file, the output looks like:
2017-07-08T07:15:20.671Z - info: [utils/test.js] test logger
winston didn't plan to support that due to performance concerns. Please check here for detailed information.
I tried https://github.com/baryon/tracer but it isn't good, e.g. the line number is incorrect from time to time.
I found this code somewhere, and it works. Use it in a new winston.js and then require that in any file.
var winston = require('winston')
var path = require('path')
var PROJECT_ROOT = path.join(__dirname, '..')
var appRoot = require('app-root-path');
const options = {
file: {
level: 'info',
filename: `${appRoot}/logs/app.log`,
handleExceptions: true,
json: true,
maxsize: 5242880, // 5MB
maxFiles: 5,
colorize: false,
timestamp: true
},
console: {
level: 'debug',
handleExceptions: true,
json: true,
colorize: true,
timestamp: true
}
};
var logger = new winston.Logger({
transports: [
new winston.transports.File(options.file),
new winston.transports.Console(options.console)
],
exitOnError: false // do not exit on handled exceptions
});
logger.stream = {
write: function (message) {
logger.info(message)
}
}
// A custom logger interface that wraps winston, making it easy to instrument
// code and still possible to replace winston in the future.
module.exports.debug = module.exports.log = function () {
logger.debug.apply(logger, formatLogArguments(arguments))
}
module.exports.info = function () {
logger.info.apply(logger, formatLogArguments(arguments))
}
module.exports.warn = function () {
logger.warn.apply(logger, formatLogArguments(arguments))
}
module.exports.error = function () {
logger.error.apply(logger, formatLogArguments(arguments))
}
module.exports.stream = logger.stream
/**
* Attempts to add file and line number info to the given log arguments.
*/
function formatLogArguments (args) {
args = Array.prototype.slice.call(args)
var stackInfo = getStackInfo(1)
if (stackInfo) {
// get file path relative to project root
var calleeStr = '(' + stackInfo.relativePath + ':' + stackInfo.line + ')'
if (typeof (args[0]) === 'string') {
args[0] = calleeStr + ' ' + args[0]
} else {
args.unshift(calleeStr)
}
}
return args
}
/**
* Parses and returns info about the call stack at the given index.
*/
function getStackInfo (stackIndex) {
// get call stack, and analyze it
// get all file, method, and line numbers
var stacklist = (new Error()).stack.split('\n').slice(3)
// stack trace format:
// http://code.google.com/p/v8/wiki/JavaScriptStackTraceApi
// do not move the regexes outside of this method (due to a bug in Node.js)
var stackReg = /at\s+(.*)\s+\((.*):(\d*):(\d*)\)/gi
var stackReg2 = /at\s+()(.*):(\d*):(\d*)/gi
var s = stacklist[stackIndex] || stacklist[0]
var sp = stackReg.exec(s) || stackReg2.exec(s)
if (sp && sp.length === 5) {
return {
method: sp[1],
relativePath: path.relative(PROJECT_ROOT, sp[2]),
line: sp[3],
pos: sp[4],
file: path.basename(sp[2]),
stack: stacklist.join('\n')
}
}
}
Source: https://gist.github.com/transitive-bullshit/39a7edc77c422cbf8a18
Update (for Winston 3.x)
I also created a gist for the following code:
const winston = require('winston'); // needed below for createLogger and transports
const { format } = winston;
const { combine, colorize, timestamp, printf } = format;
/**
 * Use CallSite to extract filename and line number; for more info read: https://v8.dev/docs/stack-trace-api#customizing-stack-traces
 * @param numberOfLinesToFetch - optional, when we want more than one line back from the stacktrace
 * @returns {string|null} filename and line number separated by a colon; if numberOfLinesToFetch > 1 we'll return a string
 * that represents multiple CallSites (representing the latest calls in the stacktrace)
 */
const getFileNameAndLineNumber = function getFileNameAndLineNumber (numberOfLinesToFetch = 1) {
const oldStackTrace = Error.prepareStackTrace;
const boilerplateLines = line => line &&
line.getFileName() &&
(line.getFileName().indexOf('<My Module Name>') &&
(line.getFileName().indexOf('/node_modules/') < 0));
try {
// eslint-disable-next-line handle-callback-err
Error.prepareStackTrace = (err, structuredStackTrace) => structuredStackTrace;
Error.captureStackTrace(this);
// we need to "peel" the first CallSites (frames) in order to get to the caller we're looking for
// in our case we're removing frames that come from logger module or from winston
const callSites = this.stack.filter(boilerplateLines);
if (callSites.length === 0) {
// bail gracefully: even though we shouldn't get here, we don't want to crash for a log print!
return null;
}
const results = [];
for (let i = 0; i < numberOfLinesToFetch; i++) {
const callSite = callSites[i];
let fileName = callSite.getFileName();
fileName = fileName.includes(BASE_DIR_NAME) ? fileName.substring(BASE_DIR_NAME.length + 1) : fileName;
results.push(fileName + ':' + callSite.getLineNumber());
}
return results.join('\n');
} finally {
Error.prepareStackTrace = oldStackTrace;
}
};
function humanReadableFormatter ({ level, message, ...metadata }) {
const filename = getFileNameAndLineNumber();
return `[${level}] [${filename}] ${message} ${JSON.stringify(metadata)}`;
}
const logger = winston.createLogger({
transports: [
new winston.transports.Console({
level: 'info',
handleExceptions: true,
humanReadableUnhandledException: true,
json: false,
colorize: { all: true },
stderrLevels: ['error', 'alert', 'critical', 'bizAlert'],
format: combine(
colorize(),
timestamp(),
printf(humanReadableFormatter), // winston 3's combine() expects Format objects; printf() wraps the plain formatter function
),
})
]
});
Original Answer (for Winston 2.x)
I'm using winston 2.x (but the same solution will work for winston 3.x) and that's the way I'm logging the filename and linenumber of the caller:
IMPORTANT: pay attention to the embedded code comments!
/**
* Use CallSite to extract filename and number
* #returns {string} filename and line number separated by a colon
*/
const getFileNameAndLineNumber = () => {
const oldStackTrace = Error.prepareStackTrace;
try {
// eslint-disable-next-line handle-callback-err
Error.prepareStackTrace = (err, structuredStackTrace) => structuredStackTrace;
Error.captureStackTrace(this);
// in this example I needed to "peel" the first 10 CallSites in order to get to the caller we're looking for, hence the magic number 11
// in your code, the number of stacks depends on the levels of abstractions you're using, which mainly depends on winston version!
// so I advise you to put a breakpoint here and see if you need to adjust the number!
return this.stack[11].getFileName() + ':' + this.stack[11].getLineNumber();
} finally {
Error.prepareStackTrace = oldStackTrace;
}
};
And (a simplified version of) the formatter function:
function humanReadableFormatter ({level, message}) {
const filename = getFileNameAndLineNumber();
return `[${level}] ${filename} ${message}`;
}
Then declare the transport to use the formatter:
new winston.transports.Console({
level: 'info',
handleExceptions: true,
humanReadableUnhandledException: true,
json: false,
colorize: 'level',
stderrLevels: ['warn', 'error', 'alert'],
formatter: humanReadableFormatter,
})
To read more about prepareStackTrace read: https://v8.dev/docs/stack-trace-api#customizing-stack-traces
You can do string operations on
console.log(new Error().stack.split('\n')[1].slice(7));
to get the line number and file path too, along with the function name.
The output would look like
AutomationFramework._this.parsingLogic (/Users/user_name/Desktop/something/some-file.js:49:25)
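Wrapped in a small helper this becomes reusable; a rough sketch only — the stack index (2 here) and the slice offset depend on your Node version and call depth, so inspect new Error().stack yourself and adjust:
function logWithLocation(...args) {
  // stack[0] is 'Error', stack[1] is this helper, stack[2] is the caller
  const caller = new Error().stack.split('\n')[2].trim().slice(3);
  console.log(`[${caller}]`, ...args);
}

logWithLocation('something happened');
// e.g. [Object.<anonymous> (/Users/user_name/Desktop/something/some-file.js:3:1)] something happened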
Here is how I go about it:
import winston from 'winston'
const {format } = winston
const { combine, timestamp, printf } = format;
const myFormat = printf(({ level, message,timestamp , at }) => {
let on = at.stack.split('\n')[1].slice(7).split('/').pop() ;
let file = on.split(':')[0]
let line = on.split(':')[1]
let data = new Date(timestamp).toString().split(' GMT')[0] // 'new' is required; Date(ts) without it ignores the argument
return `${level}: [${data}] Hello <yourName>, there is a ${level} message on file:${file} line ${line}
${message}`;
});
const logger = winston.createLogger({
level: 'info',
format: combine(
timestamp(),
myFormat ,
),
transports: [new winston.transports.Console()],
});
On logging, the output will look like:
logger.info('wrong file type' , {at : new Error})
//logs
warn: [Thu Aug 18 2022 15:06:28] Hello <yourName>, there is a warn message on file:winston.js line 7
>>wrong file type
Newbie here.
In express 3.2.6 I noticed that when you set the logging to app.use(express.logger('dev')), the format of the log in stdout is greyed out and easy on the eyes (dark mode?), like so: [screenshot omitted]
However, when using express 4.0 and using morgan for logging with the following in app.js
var morgan = require('morgan');
app.use(morgan('dev'));
The logging in the terminal turns out like this: [screenshot omitted]
Any way to get the "dark mode" theme for logging using express 4.0 and morgan? Or is this just available in express 3.0?
Looks like this can be modified to accomplish this? https://github.com/expressjs/morgan/blob/master/index.js#L183
Well, you can customize your logger colors by making a custom logger. Try this:
//logger.js
require('colors');
var _ = require('lodash');
var config = require('../config/config');
// create a noop (no operation) function for when logging is disabled
var noop = function () {
};
// check if logging is enabled in the config
// if it is, then use console.log
// if not then noop
var consoleLog = config.logging ? console.log.bind(console) : noop;
var logger = {
log: function () {
var tag = '[ ✨ LOG ✨ ]'.green;
// arguments is an array like object with all the passed
// in arguments to this function
var args = _.toArray(arguments)
.map(function (arg) {
if (typeof arg === 'object') {
// turn the object to a string so we
// can log all the properties and color it
var string = JSON.stringify(arg, null, 2);
return tag + ' ' + string.cyan;
} else {
return tag + ' ' + arg.cyan;
}
});
// call either console.log or noop here
// with the console object as the context
// and the new colored args :)
consoleLog.apply(console, args);
},
error: function () {
var args = _.toArray(arguments)
.map(function (arg) {
arg = arg.stack || arg;
var name = arg.name || '[ ❌ ERROR ❌ ]';
var log = name.yellow + ' ' + arg.red;
return log;
});
consoleLog.apply(console, args);
}
};
module.exports = logger;
usage
//controller
var logger = require('./relative/path/to/logger');
logger.log('some thing');
logger.error('some thing');
// you may use custom colors and create custom functions like warning() etc
hope it helps ;)
I'm using winston with node & express & can't find a suitable way to simply augment all log entries with express's session.id (via an extensive google & SO search) - any suggestions?
For Express request logs you could do:
var morgan = require('morgan'),
winston = require('winston');
// Define a new format token
morgan.token('id', function(req, res) {
return req.session ? req.session.id : 'N/A';
});
// Create a stream using Winston for logging
var winstonStream = {
write: function(msg, encoding) {
winston.info(msg);
}
};
// Create a logger middleware that uses the new token in format
// and writes to Winston logging stream
// the 'combined' format string with the new :id token appended (morgan has no morgan.combined property)
var expressLogger = morgan(':remote-addr - :remote-user [:date[clf]] ":method :url HTTP/:http-version" :status :res[content-length] ":referrer" ":user-agent" - :id', { stream: winstonStream });
app.use(expressLogger);
Don't know how you could achieve the same for every winston[level]() call as you do need to have the request object at hand (apart from passing the object around to every bit of code).
Thanks for your answer vesse. I need it slightly more general than that though - i.e. wherever I call winston, not just morgan request logging. I've overridden winston.log so I can call log.info(req, message), or log.warn etc., and the session id is passed to the meta info from the req. I'm also using cookie-session, which doesn't create the session id for me, so my solution is:
var util = require('util');
var winston = require('winston');
var uuid = require('uuid');   // any uuid v4 source works here
var _ = require('lodash');
var CustomLogger = function(options){
CustomLogger.super_.apply(this, arguments);
}
util.inherits(CustomLogger, winston.Logger);
CustomLogger.prototype.log = function () {
var args = Array.prototype.slice.call(arguments);
var req = args[1];
if (req.session) {
req.session.id = req.session.id || uuid.v4();
var meta = _.merge((args[3] || {}), {sessionId: req.session.id});
CustomLogger.super_.prototype.log.call(this, args[0], args[2] + ' | ', meta);
}
else {
// no req.session, assume req hasnt been passed in as 1st param
CustomLogger.super_.prototype.log.apply(this, arguments);
}
}
var logger = new CustomLogger({
transports: [ ... ],
exitOnError: false
});
In my node.js application I successfully redirect log messages produced by the socket.io library to the winston library:
var express = require('express')
, winston = require('winston')
, http = require('http');
var logger = new (winston.Logger)({
transports: [
// ... configuring transports ...
]
});
var app = express();
var server = http.createServer(app);
var io = require('socket.io').listen(server, {'logger': logger});
Now I would like to add a prefix (something like "socket.io: ") to all these redirected messages to distinguish them from log messages produced by other parts of the application. Is there a way to achieve this?
Add a label in the logger transports.
var logger = new (winston.Logger)({
transports: [
new (winston.transports.Console)({
json : false,
timestamp : true,
label: "socket.io:"
})
]
});
Log messages will look like this -
2013-08-30T08:26:52.703Z - info: [socket.io:] socket.io started
2013-08-30T08:26:52.705Z - info: [socket.io:] hello
Check here more logging options with winston - https://github.com/flatiron/winston
I solved it by adding the following function:
var winston = require('winston');
var path = require('path'); // needed for path.parse below
// Add prefix function
winston.prefix = function (filename)
{
var label = '[' + path.parse(filename).name + ']';
var override = function (lvl) {
return function () {
var args = [].slice.call(arguments);
if (args[0]) {
if (typeof(args[0]) === 'string')
args[0] = label + ' ' + args[0];
else
args.unshift(label);
}
winston[lvl].apply(null, args);
};
};
var log = { };
for (var lvl in winston.levels) {
log[lvl] = override(lvl);
}
return log;
}
And then in each module
var log = require('winston').prefix(__filename);
log.debug('hello');
or
var log = require('winston').prefix('socket_io');
log.debug('hello');
This isn't Winston specific but a generalized JS solution to prefixing and middleware...
var log = console.log;
function x(){ // optionally pass in post/pre fix here
// caution: do not put 'var log = console.log' inside this function, or the arguments will build up: [ arg1 ] -> [ [arg1], arg2 ] -> [ [ [arg1], arg2 ], arg3 ]
console.log = function () {
var args = Array.from(arguments);
args.push('post fix');
log.apply(console, args);
}
}
new x()
console.log(1)
new x()
console.log(2)
OUTPUTS:
1 post fix
2 post fix
OR better yet...
const log = console.log;
export default function middleWare(event) { // optionally pass in post/pre fix here
console.log = (...args) => {
log(...args, event.id);
}
}
I need to do some parsing of large (5-10 GB) logfiles in Javascript/Node.js (I'm using Cube).
The log lines look something like:
10:00:43.343423 I'm a friendly log message. There are 5 cats, and 7 dogs. We are in state "SUCCESS".
We need to read each line, do some parsing (e.g. strip out 5, 7 and SUCCESS), then pump this data into Cube (https://github.com/square/cube) using their JS client.
Firstly, what is the canonical way in Node to read in a file, line by line?
It seems to be fairly common question online:
http://www.quora.com/What-is-the-best-way-to-read-a-file-line-by-line-in-node-js
Read a file one line at a time in node.js?
A lot of the answers seem to point to a bunch of third-party modules:
https://github.com/nickewing/line-reader
https://github.com/jahewson/node-byline
https://github.com/pkrumins/node-lazy
https://github.com/Gagle/Node-BufferedReader
However, this seems like a fairly basic task - surely there's a simple way within the stdlib to read a text file line by line?
Secondly, I then need to process each line (e.g. convert the timestamp into a Date object, and extract useful fields).
What's the best way to do this, maximising throughput? Is there some way that won't block on either reading in each line, or on sending it to Cube?
Thirdly - I'm guessing that using string splits and the JS equivalent of contains (indexOf != -1?) will be a lot faster than regexes? Has anybody had much experience parsing massive amounts of text data in Node.js?
I searched for a solution to parse very large files (GBs) line by line using a stream. All the third-party libraries and examples did not suit my needs, since they either processed the files not line by line (like 1, 2, 3, 4 ...) or read the entire file into memory.
The following solution can parse very large files, line by line, using stream & pipe. For testing I used a 2.1 GB file with 17,000,000 records. RAM usage did not exceed 60 MB.
First, install the event-stream package:
npm install event-stream
Then:
var fs = require('fs')
, es = require('event-stream');
var lineNr = 0;
var s = fs.createReadStream('very-large-file.csv')
.pipe(es.split())
.pipe(es.mapSync(function(line){
// pause the readstream
s.pause();
lineNr += 1;
// process line here and call s.resume() when rdy
// function below was for logging memory usage
logMemoryUsage(lineNr);
// resume the readstream, possibly from a callback
s.resume();
})
.on('error', function(err){
console.log('Error while reading file.', err);
})
.on('end', function(){
console.log('Read entire file.')
})
);
Please let me know how it goes!
You can use the built-in readline package, see docs here. I use stream to create a new output stream.
var fs = require('fs'),
readline = require('readline'),
stream = require('stream');
var instream = fs.createReadStream('/path/to/file');
var outstream = new stream;
outstream.readable = true;
outstream.writable = true;
var rl = readline.createInterface({
input: instream,
output: outstream,
terminal: false
});
rl.on('line', function(line) {
console.log(line);
//Do your stuff ...
//Then write to output stream
rl.write(line);
});
Large files will take some time to process. Do tell if it works.
I really liked @gerard's answer, which actually deserves to be the correct answer here. I made some improvements:
Code is in a class (modular)
Parsing is included
The ability to resume is exposed to the outside, in case an asynchronous job is chained to reading the CSV, like inserting into a DB or making an HTTP request
Reading in chunk/batch sizes that the user can declare. I took care of encoding in the stream too, in case you have files in a different encoding.
Here's the code:
'use strict'
const fs = require('fs'),
util = require('util'),
stream = require('stream'),
es = require('event-stream'),
parse = require("csv-parse"),
iconv = require('iconv-lite');
class CSVReader {
constructor(filename, batchSize, columns) {
this.reader = fs.createReadStream(filename).pipe(iconv.decodeStream('utf8'))
this.batchSize = batchSize || 1000
this.lineNumber = 0
this.data = []
this.parseOptions = {delimiter: '\t', columns: true, escape: '/', relax: true}
}
read(callback) {
this.reader
.pipe(es.split())
.pipe(es.mapSync(line => {
++this.lineNumber
parse(line, this.parseOptions, (err, d) => {
this.data.push(d[0])
})
if (this.lineNumber % this.batchSize === 0) {
callback(this.data)
}
})
.on('error', function(){
console.log('Error while reading file.')
})
.on('end', function(){
console.log('Read entirefile.')
}))
}
continue () {
this.data = []
this.reader.resume()
}
}
module.exports = CSVReader
So basically, here is how you will use it:
let reader = new CSVReader('path_to_file.csv')
reader.read(() => reader.continue())
I tested this with a 35GB CSV file and it worked for me, which is why I chose to build on @gerard's answer. Feedback is welcome.
I used https://www.npmjs.com/package/line-by-line for reading more than 1,000,000 lines from a text file. In this case, RAM usage was about 50-60 megabytes.
const LineByLineReader = require('line-by-line'),
lr = new LineByLineReader('big_file.txt');
lr.on('error', function (err) {
// 'err' contains error object
});
lr.on('line', function (line) {
// pause emitting of lines...
lr.pause();
// ...do your asynchronous line processing..
setTimeout(function () {
// ...and continue emitting lines.
lr.resume();
}, 100);
});
lr.on('end', function () {
// All lines are read, file is closed now.
});
The Node.js Documentation offers a very elegant example using the Readline module.
Example: Read File Stream Line-by-Line
const { once } = require('node:events');
const fs = require('fs');
const readline = require('readline');
const rl = readline.createInterface({
input: fs.createReadStream('sample.txt'),
crlfDelay: Infinity
});
rl.on('line', (line) => {
console.log(`Line from file: ${line}`);
});
await once(rl, 'close'); // note: 'await' here requires an async function or ES-module top-level
Note: we use the crlfDelay option to recognize all instances of CR LF ('\r\n') as a single line break.
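The same docs also show an async-iterator variant (Node 11.14+), which reads even more cleanly and sidesteps the top-level await question entirely:
const fs = require('fs');
const readline = require('readline');

async function processLineByLine() {
  const rl = readline.createInterface({
    input: fs.createReadStream('sample.txt'),
    crlfDelay: Infinity
  });

  for await (const line of rl) {
    console.log(`Line from file: ${line}`);
  }
}

processLineByLine();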
Apart from reading the big file line by line, you can also read it chunk by chunk. For more, refer to this article.
var fs = require('fs');

var offset = 0;
var chunkSize = 2048;
var chunkBuffer = Buffer.alloc(chunkSize); // new Buffer(size) is deprecated
var fp = fs.openSync('filepath', 'r');
var bytesRead = 0;
var lines = [];
while ((bytesRead = fs.readSync(fp, chunkBuffer, 0, chunkSize, offset)) !== 0) {
offset += bytesRead;
var str = chunkBuffer.slice(0, bytesRead).toString();
var arr = str.split('\n');
if (bytesRead === chunkSize) { // the original had '=' (assignment) here
// the last item of the arr may not be a full line, leave it to the next chunk
offset -= arr.pop().length;
}
lines.push(arr);
}
console.log(lines);
I had the same problem. After comparing several modules that seemed to have this feature, I decided to do it myself; it's simpler than I thought.
gist: https://gist.github.com/deemstone/8279565
var fetchBlock = lineByline(filepath, onEnd);
fetchBlock(function(lines, start){ ... }); // lines {array}, start {int}: lines[0] is line number 'start'
It keeps the opened file in a closure; each call to the returned fetchBlock() fetches a block from the file and splits it into an array of lines (dealing with the segment left over from the last fetch).
I've set the block size to 1024 for each read operation. This may have bugs, but the code logic is obvious, try it yourself.
Reading / Writing files using stream with the native nodejs modules (fs, readline):
const fs = require('fs');
const readline = require('readline');
const rl = readline.createInterface({
input: fs.createReadStream('input.json'),
output: fs.createWriteStream('output.json')
});
rl.on('line', function(line) {
console.log(line);
// Do any 'line' processing if you want and then write to the output file
this.output.write(`${line}\n`);
});
rl.on('close', function() {
console.log(`Created "${this.output.path}"`);
});
Based on this question's answer, I implemented a class you can use to read a file synchronously line by line with fs.readSync(). You can make it "pause" and "resume" by using a Q promise (jQuery seems to require a DOM, so you can't run it with nodejs):
var fs = require('fs');
var Q = require('q');
var lr = new LineReader(filenameToLoad);
lr.open();
var promise;
workOnLine = function () {
var line = lr.readNextLine();
promise = complexLineTransformation(line).then(
function() {console.log('ok');workOnLine();},
function() {console.log('error');}
);
}
workOnLine();
complexLineTransformation = function (line) {
var deferred = Q.defer();
// ... async call goes here, in callback: deferred.resolve('done ok'); or deferred.reject(new Error(error));
return deferred.promise;
}
function LineReader (filename) {
this.moreLinesAvailable = true;
this.fd = undefined;
this.bufferSize = 1024*1024;
this.buffer = Buffer.alloc(this.bufferSize); // new Buffer(size) is deprecated
this.leftOver = '';
this.read = undefined;
this.idxStart = undefined;
this.idx = undefined;
this.lineNumber = 0;
this._bundleOfLines = [];
this.open = function() {
this.fd = fs.openSync(filename, 'r');
};
this.readNextLine = function () {
if (this._bundleOfLines.length === 0) {
this._readNextBundleOfLines();
}
this.lineNumber++;
var lineToReturn = this._bundleOfLines[0];
this._bundleOfLines.splice(0, 1); // remove first element (pos, howmany)
return lineToReturn;
};
this.getLineNumber = function() {
return this.lineNumber;
};
this._readNextBundleOfLines = function() {
var line = "";
while ((this.read = fs.readSync(this.fd, this.buffer, 0, this.bufferSize, null)) !== 0) { // read next bytes until end of file
this.leftOver += this.buffer.toString('utf8', 0, this.read); // append to leftOver
this.idxStart = 0
while ((this.idx = this.leftOver.indexOf("\n", this.idxStart)) !== -1) { // as long as there is a newline-char in leftOver
line = this.leftOver.substring(this.idxStart, this.idx);
this._bundleOfLines.push(line);
this.idxStart = this.idx + 1;
}
this.leftOver = this.leftOver.substring(this.idxStart);
if (line !== "") {
break;
}
}
};
}
node-byline uses streams, so I would prefer that one for your huge files.
For your date conversions I would use moment.js.
For maximising your throughput you could think about using a software cluster. There are some nice modules which wrap the node-native cluster module quite well. I like cluster-master from isaacs. E.g. you could create a cluster of x workers which all compute a file.
For benchmarking splits vs regexes, use benchmark.js. I haven't tested it until now. benchmark.js is available as a node module.
import * as csv from 'fast-csv';
import * as fs from 'fs';
interface Row {
[s: string]: string;
}
type RowCallBack = (data: Row, index: number) => object;
export class CSVReader {
protected file: string;
protected csvOptions = {
delimiter: ',',
headers: true,
ignoreEmpty: true,
trim: true
};
constructor(file: string, csvOptions = {}) {
if (!fs.existsSync(file)) {
throw new Error(`File ${file} not found.`);
}
this.file = file;
this.csvOptions = Object.assign({}, this.csvOptions, csvOptions);
}
public read(callback: RowCallBack): Promise < Array < object >> {
return new Promise < Array < object >> (resolve => {
const readStream = fs.createReadStream(this.file);
const results: Array < any > = [];
let index = 0;
const csvStream = csv.parse(this.csvOptions).on('data', async (data: Row) => {
index++;
results.push(await callback(data, index));
}).on('error', (err: Error) => {
console.error(err.message);
throw err;
}).on('end', () => {
resolve(results);
});
readStream.pipe(csvStream);
});
}
}
import { CSVReader } from '../src/helpers/CSVReader';
(async () => {
const reader = new CSVReader('./database/migrations/csv/users.csv');
const users = await reader.read(async data => {
return {
username: data.username,
name: data.name,
email: data.email,
cellPhone: data.cell_phone,
homePhone: data.home_phone,
roleId: data.role_id,
description: data.description,
state: data.state,
};
});
console.log(users);
})();
I have made a node module to read large files asynchronously, as text or JSON.
Tested on large files.
var fs = require('fs')
, util = require('util')
, stream = require('stream')
, es = require('event-stream');
module.exports = FileReader;
function FileReader(){
}
FileReader.prototype.read = function(pathToFile, callback){
var returnTxt = '';
var s = fs.createReadStream(pathToFile)
.pipe(es.split())
.pipe(es.mapSync(function(line){
// pause the readstream
s.pause();
//console.log('reading line: '+line);
returnTxt += line;
// resume the readstream, possibly from a callback
s.resume();
})
.on('error', function(){
console.log('Error while reading file.');
})
.on('end', function(){
console.log('Read entire file.');
callback(returnTxt);
})
);
};
FileReader.prototype.readJSON = function(pathToFile, callback){
try{
this.read(pathToFile, function(txt){callback(JSON.parse(txt));});
}
catch(err){
throw new Error('json file is not valid! '+err.stack);
}
};
Just save the file as file-reader.js, and use it like this:
var FileReader = require('./file-reader');
var fileReader = new FileReader();
fileReader.readJSON(__dirname + '/largeFile.json', function(jsonObj){/*callback logic here*/});