I have a Node.js server application with Express. I would like to log its activity into ElasticSearch and visualize the logs using Kibana.
What would be the right way to do that?
Should I write a log file of json lines and read it with Logstash?
I'd recommend log4js. It has a range of useful appenders, and one of them (logstashUDP) sends logs to Logstash over UDP.
Here is an example taken from the log4js site:
var log4js = require('log4js');

/*
Sample logstash config:
  udp {
    codec => json
    port => 10001
    queue_size => 2
    workers => 2
    type => myAppType
  }
*/

log4js.configure({
  "appenders": [
    {
      type: "console",
      category: "myLogger"
    },
    {
      "host": "127.0.0.1",
      "port": 10001,
      "type": "logstashUDP",
      "logType": "myAppType", // Optional, defaults to 'category'
      "fields": {             // Optional, will be added to the 'fields' object in logstash
        "field1": "value1",
        "field2": "value2"
      },
      "layout": {
        "type": "pattern",
        "pattern": "%m"
      },
      "category": "myLogger"
    }
  ]
});

var logger = log4js.getLogger("myLogger");
logger.info("Test log message %s", "arg1", "arg2");
How can I monitor and detect errors when connecting Kafka to Logstash?
Say, for example, my Kafka broker is down and no connection is established between Kafka and Logstash.
Is there a way to monitor the connection status between Logstash and Kafka?
I can query the Logstash logs (but I don't think that is the appropriate way), and I tried the Logstash monitoring API (for example localhost:9600/_node/stats/pipelines?pretty), but no API tells me that the connection is down.
Thank you in advance.
If you have an Elastic Agent or a Metricbeat agent installed on the Kafka node, you can configure the agent to monitor Kafka using its Kafka-specific module:
elastic-agent kafka module
metricbeat kafka module
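As a rough sketch (an assumption on my part, not taken from the question), enabling the Metricbeat kafka module in modules.d/kafka.yml could look like this; adjust the hosts and period to your cluster:

- module: kafka
  metricsets: ["consumergroup", "partition"]
  period: 10s
  hosts: ["localhost:9092"]   # assumed broker address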
For getting the connection status from Logstash, as you mentioned, you can also configure your Logstash pipeline to extract the status from the log message.
Sample document in Elasticsearch:
{
  "_index": "topicname",
  "_type": "_doc",
  "_id": "ulF8uH0BK9MbBSR7DPEw",
  "_version": 1,
  "_score": null,
  "fields": {
    "@timestamp": [
      "2022-05-09T10:27:56.956Z"
    ],
    "@version": [
      "1"
    ],
    "@version.keyword": [
      "1"
    ],
    "message": [
      "{\"requestMethod\":\"GET\",\"headers\":{\"content-type\":\"application/json\",\"user-agent\":\"PostmanRuntime/7.XX.XX\",\"accept\":\"*/*\",\"postman-token\":\"11224442345223\",\"host\":\"localhost:2300\",\"accept-encoding\":\"gzip, deflate, br\",\"connection\":\"keep-alive\",\"content-length\":\"44\"},\"body\":{\"category\":\"CAT\",\"noise\":\"purr\"},\"query\":{},\"requestUrl\":\"http://localhost:2300/kafka\",\"protocol\":\"HTTP/1.1\",\"remoteIp\":\"1\",\"requestSize\":302,\"userAgent\":\"PostmanRuntime/7.XX.X\",\"statusCode\":200,\"response\":{\"success\":true,\"message\":\"Kafka Details are added\",\"data\":{\"kafkaData\":{\"_id\":\"12gvsddwqbwrfteacr313rcet5\",\"category\":\"DOG\",\"noise\":\"bark\",\"__v\":0},\"postData\":{\"category\":\"DOG\",\"noise\":\"bark\"}}},\"latency\":{\"seconds\":0,\"nanos\":61000000},\"responseSize\":193}"
    ]
  }
}
The configuration below can be added to fetch the status:
input {
  kafka {
    topics => ["topicname"]
    bootstrap_servers => "11.11.11.11:1111"
  }
}

filter {
  # The Kafka message is itself a JSON string, so parse it first
  # and then copy the statusCode into a dedicated field.
  json {
    source => "message"
  }
  mutate {
    add_field => { "StatusCode" => "%{[statusCode]}" }
  }
}

output {
  elasticsearch {
    hosts => ["11.11.11.12:9200"]
    index => "topic-name-index"
  }
}
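If the goal is specifically to notice that the Kafka input has stalled, one workaround (an assumption on my part; as far as I know the monitoring API has no explicit "connected" flag) is to poll the node stats endpoint you mentioned and check whether the kafka input's event counter keeps growing. A rough Node.js sketch, assuming Logstash's API on localhost:9600, Node 18+ (built-in fetch), and that the stats document exposes the plugin name:

// Sketch: warn if the kafka input produced no new events since the last check.
let lastCount = null;

async function checkKafkaInput() {
  const res = await fetch('http://localhost:9600/_node/stats/pipelines');
  const stats = await res.json();

  // Sum the "out" event counters of every kafka input across all pipelines.
  let eventsOut = 0;
  for (const pipeline of Object.values(stats.pipelines || {})) {
    for (const input of (pipeline.plugins && pipeline.plugins.inputs) || []) {
      if (input.name === 'kafka') {
        eventsOut += (input.events && input.events.out) || 0;
      }
    }
  }

  if (lastCount !== null && eventsOut === lastCount) {
    console.warn('kafka input produced no new events since the last check');
  }
  lastCount = eventsOut;
}

setInterval(checkKafkaInput, 60 * 1000); // check once a minute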
I'm trying to create a mapping in Elasticsearch using Fluentd, which my Node.js app connects to.
Elasticsearch mapping example:
PUT http://host:9200/test_mapping
{
  "mappings": {
    "properties": {
      "response_code": {
        "type": "text",
        "fielddata": true
      },
      "response_text": {
        "type": "text",
        "fielddata": true
      },
      "status": {
        "type": "boolean"
      },
      "ip": {
        "type": "ip"
      },
      "login": {
        "type": "text",
        "fielddata": true
      }
    }
  }
}
Fluentd configuration example:
<source>
  @type forward
  port 24225
</source>
<match mapping.doc>
  @type elasticsearch
  logstash_format "#{ENV['LOGSTASH_FORMAT']}"
  scheme "#{ENV['SCHEME']}"
  host "#{ENV['HOST']}"
  port "#{ENV['PORT']}"
  write_operation index
  index_name "#{ENV['INDEX_NAME']}"
  flush_interval "#{ENV['FLUSH_INTERVAL']}"
</match>
Sample code on Node.js:
// ...
require('dotenv').config();
const env = process.env;
const loggerfluentd = require('fluent-logger');
loggerfluentd.configure('mapping', {
  host: env.FLUENTD_HOST,
  port: Number.parseInt(env.FLUENTD_PORT),
  timeout: 3.0,
  reconnectInterval: 10000 // 10 sec
});

function EmitMapping(data) {
  loggerfluentd.emit(env.INDEX_NAME, data);
}

exports.EmitMapping = EmitMapping;
This configuration does not create the mapping; it simply adds new documents to Elasticsearch.
Is it possible to change the configuration so that calling the EmitMapping() function does not just add new documents (with automatically inferred data types in the mapping), but instead creates my own mapping with my own data types?
It seems that the elasticsearch plugin does not create or change the index mapping but simply writes to the index, so I used the http plugin instead:
<match mapping.doc>
  @type http
  endpoint "#{ENV['SCHEME']}://#{ENV['HOST']}:#{ENV['PORT']}/#{ENV['INDEX_NAME']}"
  http_method put
  headers {"Content-Type":"application/json"}
  open_timeout 2
  <buffer>
    flush_interval "#{ENV['FLUSH_INTERVAL']}"
  </buffer>
</match>
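Another option (just a sketch of an alternative, not necessarily the intended fluent-plugin-elasticsearch workflow) is to create the index with an explicit mapping from Node.js once at startup, so Fluentd only ever writes into an index whose mapping already exists. This assumes Node 18+ (built-in fetch) and hypothetical ES_URL / INDEX_NAME environment variables; the mapping is the one from the question. As far as I know, fluent-plugin-elasticsearch also supports index templates (template_name / template_file), which may be the more idiomatic route.

// Sketch: create the index and mapping once, before Fluentd starts indexing.
async function ensureMapping() {
  const url = `${process.env.ES_URL}/${process.env.INDEX_NAME}`; // e.g. http://host:9200/test_mapping
  const res = await fetch(url, {
    method: 'PUT',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      mappings: {
        properties: {
          response_code: { type: 'text', fielddata: true },
          response_text: { type: 'text', fielddata: true },
          status: { type: 'boolean' },
          ip: { type: 'ip' },
          login: { type: 'text', fielddata: true }
        }
      }
    })
  });
  // Elasticsearch answers 400 (resource_already_exists_exception) if the index is already there.
  if (!res.ok && res.status !== 400) {
    throw new Error(`Failed to create mapping: ${res.status}`);
  }
}

ensureMapping().catch(console.error);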
I want to add a dynamic attribute to a pattern I am using with log4js.
I am using some custom pattern, something like this:
"%d{} %-6p[%thread] %c [%x{context}]: %m%n%r"
Context is the dynamic value that I want to set with some unique id generated for each user on the server side.
There is a way to add a dynamic value when creating the log4js configuration, by using the "tokens" and "context" attributes.
But in that case the values are set when the configuration is created.
Is there a way to add a dynamic attribute that is set when the actual message is written to the log, rather than during the config phase?
Right now I am doing something like this:
log4js.configure({
  appenders: {
    "file": {
      "type": "file", "filename": "service.log", "maxLogSize": 102400, "backups": 5,
      "category": "com.nextinsurance",
      "layout": {
        "type": "pattern",
        "pattern": "%d{} %-6p[%thread] %c [%x{context}]: %m%n%r",
        "tokens": { context: function(logEvent) { return getContextFromData(logEvent); } }
      }
    },
    "console": { "type": "console" }
  },
  categories: { "default": { "appenders": ["file", "console"], "level": "info" } }
});
But I want to inject this value when writing to the log, something like:
logger.info(message, { context: context_value })
You can use the logEvent data property to fetch the context. logEvent.data contains the array of arguments passed to the log call.
Here is the sample code:
var log4js = require("log4js");

log4js.configure({
  appenders: {
    out: {
      type: 'stdout',
      layout: {
        type: 'pattern',
        pattern: '[%d] [%p] [%c] [%x{context}] - %m%n',
        tokens: {
          context: function(logEvent) {
            // The last argument of the log call carries the context, if present.
            let returnVal = logEvent.data[1] ? logEvent.data[1].context : undefined;
            if (returnVal) {
              logEvent.data.pop(); // remove the context object so it is not printed as part of %m
            } else {
              returnVal = 'null'; // default value if no context is passed
            }
            return returnVal;
          }
        }
      }
    }
  },
  categories: {
    default: {
      appenders: ['out'],
      level: 'info'
    }
  }
});

let logger = log4js.getLogger();
logger.info('Hello', { context: 'context_value' }); // prints [2019-09-13T16:50:48.818] [INFO] [default] [context_value] - Hello
logger.info('Hello');                               // prints [2019-09-13T16:50:48.820] [INFO] [default] [null] - Hello
My code is like this (copied from another project), but I don't know why the log level field is not found in the JSON output. Here is the code in detail:
LoggerContext context = (LoggerContext) LoggerFactory.getILoggerFactory();
Logger logger = context.getLogger("ROOT");
if (enabled == null || enabled) {
    if (logger.getAppender(APPENDER_NAME) == null) {
        String destination = host + ":" + port;
        try {
            DestinationParser.parse(destination, AbstractLogstashTcpSocketAppender.DEFAULT_PORT);
        } catch (RuntimeException e) {
            throw new IllegalArgumentException("Invalid host and port : " + destination);
        }
        LogstashTcpSocketAppender appender = new LogstashTcpSocketAppender();
        appender.setContext(context);
        appender.addDestination(destination);
        appender.setName(APPENDER_NAME);

        LogstashEncoder encoder = new LogstashEncoder();
        encoder.setCustomFields("{\"application_name\":\"" + applicationName + "\"}");
        encoder.setContext(context);
        appender.setEncoder(encoder);

        appender.start();
        encoder.start();

        logger.addAppender(appender);
        logger.setLevel(Level.ALL);
    }
}
I use https://github.com/Dreampie/docker-elk with docker-compose up -d --build to start the ELK stack, but the Logstash result does not contain the log level. Logstash uses codec => json:
{
  "_index": "logstash-2016.10.10",
  "_type": "logs",
  "_id": "AVetGEgU-dbBmx39fbyl",
  "_score": null,
  "_source": {
    "message": "{\"@timestamp\":\"2016-10-10T13:33:24.998+08:00\",\"@version\":1,\"message\":\"Retrieving delivery for Consumer: tags=[{amq.ctag-qURSKFA3CagYtd7y9EDAFQ=springCloudBus.anonymous.QTc6psI8RbOJm5oNFurqHA}], channel=Cached Rabbit Channel: AMQChannel(amqp://acfun@192.168.60.201:5672/,1), conn: Proxy@8440641 Shared Rabbit Connection: SimpleConnection@62e8f5bf [delegate=amqp://acfun@192.168.60.201:5672/], acknowledgeMode=AUTO local queue size=0\",\"logger_name\":\"org.springframework.amqp.rabbit.listener.BlockingQueueConsumer\",\"thread_name\":\"springCloudBus.anonymous.QTc6psI8RbOJm5oNFurqHA-1\",\"level\":\"DEBUG\",\"level_value\":10000,\"LOG_LEVEL_PATTERN\":\"%5p\",\"level\":\"DEBUG\",\"application_name\":\"user-api-provider\"}",
    "@version": "1",
    "@timestamp": "2016-10-10T05:37:11.819Z",
    "host": "192.168.60.228",
    "port": 52196
  },
  "fields": {
    "@timestamp": [
      1476077831819
    ]
  },
  "sort": [
    1476077831819
  ]
}
Yeah, I got it: with the json codec I don't get the loglevel field (and some other fields), but json_lines works fine:
codec => json_lines
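For reference, a minimal sketch of the matching Logstash input (the port is an assumption, not taken from the question):

input {
  tcp {
    port => 5000            # assumed port
    codec => json_lines     # LogstashTcpSocketAppender sends newline-delimited JSON over TCP
  }
}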
I am not able to get specific lines from the log file /var/log/messages. I am using logstash-forwarder on the client server, and Logstash, Elasticsearch and Kibana on the log server. I tried to install the grep filter but it gives me an error, so I am trying to implement this with grok as shown below.
The following is the logstash-forwarder configuration (file name: logstash-forwarder) on the client server:
{
  "network": {
    "servers": [ "logstashserver-ip:5000" ],
    "timeout": 15,
    "ssl ca": "xxx.crt"
  },
  "files": [
    {
      "paths": [
        "/var/log/messages"
      ],
      "fields": { "type": "syslog" }
    }
  ]
}
And the following is the Logstash configuration on the log server:
file-name: input.conf
input {
  lumberjack {
    port => 5000
    type => "logs"
    ssl_certificate => "xxx.crt"
    ssl_key => "xxx.key"
  }
}
file-name: filter.conf
filter {
  grok {
    match => ["message", "\[%{WORD:messagetype}\]: %{GREEDYDATA}"]
  }
}
file-name: output.conf
output {
  elasticsearch { host => "logstashserver-ip" }
  if [messagetype] == "ERROR" {
    stdout {
      codec => "rubydebug"
    }
  }
}
Is there anything wrong?
Not sure if you're still having this problem, but I'd look at dropping the messages you don't want. On my server, I get syslog severity levels which include syslog_severity_code as defined at http://en.wikipedia.org/wiki/Syslog#Severity_levels.
If you're getting them in your indices, try something like
filter {
  if [type] == 'syslog' and [syslog_severity_code] > 5 {
    drop { }
  }
}
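Applied to the configuration from the question, the same drop idea works with the messagetype field that your grok filter already extracts; a sketch that keeps only ERROR lines:

filter {
  grok {
    match => ["message", "\[%{WORD:messagetype}\]: %{GREEDYDATA}"]
  }
  # Drop everything that is not an ERROR line (this also drops lines grok could not parse).
  if [messagetype] != "ERROR" {
    drop { }
  }
}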