I am trying to:
1. Pull Logstash monitoring metrics from an index.
2. Filter/process them with a Logstash script.
3. Write them to InfluxDB.
The Logstash script looks like this:
input {
#logstash Metrics
http_poller {
urls => {
logstash_metrics => {
method => post
url => "http://elasticco-###############.com:80//%3C.monitoring-logstash-6-%7Bnow%2Fd%7D%3E//_search?filter_path=aggregations.source_node_ip.buckets"
headers => { Accept => "application/json" }
body => '{"query":{"bool":{"must":[{"range":{"timestamp":{"gte":"now-10m","format":"epoch_millis"}}}]}},"size":0,"aggregations":{"source_node_ip":{"terms":{"field":"source_node.ip","size":20},"aggregations":{"data":{"top_hits":{"size":1,"sort":[{"timestamp":{"order":"desc"}}]}}}}}}'
auth => { user => "elastic" password => "changeme" }
}
}
codec => json
schedule => { every => "10m"}
type => "logstash_metrics"
add_field => {
"region" => "west"
}
}
}
filter {
if [type] == "logstash_metrics"{
mutate {
rename => {
"[aggregations][source_node_ip][buckets]" => "root"
}
}
split { field => "[root]" }
mutate {
rename => {
"[root][data][hits][hits]" => "main_event"
}
}
split { field => "[main_event]" }
mutate {
rename => {
"[main_event][_source][cluster_uuid]" => "cluster_uuid"
"[main_event][_source][source_node][ip]" => "source_node_ip"
"[main_event][_source][source_node][host]" => "source_node_host"
"[main_event][_source][source_node][uuid]" => "source_node_uuid"
"[main_event][_source][logstash_stats][jvm][mem][heap_used_percent]" => "logstash_stats_jvm_mem_heap_used_percent"
"[main_event][_source][logstash_stats][jvm][mem][heap_used_in_bytes]" => "logstash_stats_jvm_mem_heap_used_in_bytes"
"[main_event][_source][logstash_stats][jvm][mem][heap_max_in_bytes]" => "logstash_stats_jvm_mem_heap_max_in_bytes"
"[main_event][_source][logstash_stats][jvm][uptime_in_millis]" => "logstash_stats_jvm_uptime_in_millis"
"[main_event][_source][logstash_stats][jvm][gc][collectors][young][collection_time_in_millis]" => "logstash_stats_jvm_gc_collectors_young_collection_time_in_millis"
"[main_event][_source][logstash_stats][jvm][gc][collectors][young][collection_count]" => "logstash_stats_jvm_gc_collectors_young_collection_count"
"[main_event][_source][logstash_stats][jvm][gc][collectors][old][collection_time_in_millis]" => "logstash_stats_jvm_gc_collectors_old_collection_time_in_millis"
"[main_event][_source][logstash_stats][jvm][gc][collectors][old][collection_count]" => "logstash_stats_jvm_gc_collectors_old_collection_count"
"[main_event][_source][logstash_stats][logstash][pipeline][batch_size]" => "logstash_stats_logstash_pipeline_batch_size"
"[main_event][_source][logstash_stats][logstash][pipeline][workers]" => "logstash_stats_logstash_pipeline_workers"
"[main_event][_source][logstash_stats][logstash][status]" => "logstash_stats_logstash_status"
"[main_event][_source][logstash_stats][logstash][host]" => "logstash_stats_logstash_host"
"[main_event][_source][logstash_stats][process][max_file_descriptors]" => "logstash_stats_process_max_file_descriptors"
"[main_event][_source][logstash_stats][process][cpu][percent]" => "logstash_stats_process_cpu_percent"
"[main_event][_source][logstash_stats][process][open_file_descriptors]" => "logstash_stats_process_open_file_descriptors"
"[main_event][_source][logstash_stats][os][cpu][load_average][5m]" => "logstash_stats_os_cpu_load_average_5m"
"[main_event][_source][logstash_stats][os][cpu][load_average][15m]" => "logstash_stats_os_cpu_load_average_15m"
"[main_event][_source][logstash_stats][os][cpu][load_average][1m]" => "logstash_stats_os_cpu_load_average_1m"
"[main_event][_source][logstash_stats][events][filtered]" => "logstash_stats_events_filtered"
"[main_event][_source][logstash_stats][events][in]" => "logstash_stats_events_in"
"[main_event][_source][logstash_stats][events][duration_in_millis]" => "logstash_stats_events_duration_in_millis"
"[main_event][_source][logstash_stats][events][out]" => "logstash_stats_events_out"
"[main_event][_source][logstash_stats][queue][type]" => "logstash_stats_queue_type"
"[main_event][_source][logstash_stats][queue][events_count]" => "logstash_stats_queue_events_count"
"[main_event][_source][logstash_stats][reloads][failures]" => "logstash_stats_reloads_failures"
"[main_event][_source][logstash_stats][reloads][successes]" => "logstash_stats_reloads_successes"
"[main_event][_source][logstash_stats][timestamp]" => "timestamp"
}
}
mutate {
remove_field => [ "root", "aggregations", "#timestamp", "#version" , "main_event"]
}
}
}
output {
if [type] == "logstash_metrics" {
stdout { codec => rubydebug }
influxdb {
host => "influx-qa-write.##########.com"
port => "8086"
user => "gt######00"
password => "hg3########1"
db => "logstash_statistics"
measurement => "logstash_health_test1"
data_points => {
"logstash_stats_events_in" => "%{logstash_stats_events_in}"
"logstash_stats_logstash_status" => "%{logstash_stats_logstash_status}"
"logstash_stats_logstash_pipeline_workers" => "%{logstash_stats_logstash_pipeline_workers}"
"logstash_stats_events_out" => "%{logstash_stats_events_out}"
"logstash_stats_events_duration_in_millis" => "%{logstash_stats_events_duration_in_millis}"
"logstash_stats_process_cpu_percent" => "%{logstash_stats_process_cpu_percent}"
"logstash_stats_jvm_mem_heap_used_in_bytes" => "%{logstash_stats_jvm_mem_heap_used_in_bytes}"
"logstash_stats_process_open_file_descriptors" => "%{logstash_stats_process_open_file_descriptors}"
"logstash_stats_jvm_uptime_in_millis" => "%{logstash_stats_jvm_uptime_in_millis}"
"logstash_stats_events_filtered" => "%{logstash_stats_events_filtered}"
"logstash_stats_jvm_mem_heap_used_percent" => "%{logstash_stats_jvm_mem_heap_used_percent}"
"logstash_stats_jvm_gc_collectors_young_collection_time_in_millis" => "%{logstash_stats_jvm_gc_collectors_young_collection_time_in_millis}"
"source_node_ip" => "%{source_node_ip}"
"logstash_stats_queue_events_count" => "%{logstash_stats_queue_events_count}"
"logstash_stats_reloads_failures" => "%{logstash_stats_reloads_failures}"
"logstash_stats_logstash_host" => "%{logstash_stats_logstash_host}"
"logstash_stats_jvm_gc_collectors_young_collection_count" => "%{logstash_stats_jvm_gc_collectors_young_collection_count}"
"logstash_stats_os_cpu_load_average_5m" => "%{logstash_stats_os_cpu_load_average_5m}"
"logstash_stats_jvm_gc_collectors_old_collection_time_in_millis" => "%{logstash_stats_jvm_gc_collectors_old_collection_time_in_millis}"
"source_node_uuid" => "%{source_node_uuid}"
"logstash_stats_os_cpu_load_average_15m" => "%{logstash_stats_os_cpu_load_average_15m}"
"logstash_stats_reloads_successes" => "%{logstash_stats_reloads_successes}"
"logstash_stats_logstash_pipeline_batch_size" => "%{logstash_stats_logstash_pipeline_batch_size}"
"source_node_host" => "%{source_node_host}"
"logstash_stats_jvm_gc_collectors_old_collection_count" => "%{logstash_stats_jvm_gc_collectors_old_collection_count}"
"logstash_stats_process_max_file_descriptors" => "%{logstash_stats_process_max_file_descriptors}"
"logstash_stats_jvm_mem_heap_max_in_bytes" => "%{logstash_stats_jvm_mem_heap_max_in_bytes}"
"cluster_uuid" => "%{cluster_uuid}"
"logstash_stats_queue_type" => "%{logstash_stats_queue_type}"
"logstash_stats_os_cpu_load_average_1m" => "%{logstash_stats_os_cpu_load_average_1m}"
"region" => "%{region}"
}
coerce_values => {
"logstash_stats_logstash_pipeline_workers" => "integer"
"logstash_stats_events_in" => "integer"
"logstash_stats_logstash_status" => "string"
"logstash_stats_events_out" => "integer"
"logstash_stats_events_duration_in_millis" => "integer"
"logstash_stats_process_cpu_percent" => "float"
"logstash_stats_jvm_mem_heap_used_in_bytes" => "integer"
"logstash_stats_process_open_file_descriptors" => "integer"
"logstash_stats_jvm_uptime_in_millis" => "integer"
"logstash_stats_events_filtered" => "integer"
"logstash_stats_jvm_mem_heap_used_percent" => "float"
"logstash_stats_jvm_gc_collectors_young_collection_time_in_millis" => "integer"
"source_node_ip" => "string"
"logstash_stats_queue_events_count" => "integer"
"logstash_stats_reloads_failures" => "integer"
"logstash_stats_logstash_host" => "string"
"logstash_stats_jvm_gc_collectors_young_collection_count" => "integer"
"logstash_stats_os_cpu_load_average_5m" => "float"
"logstash_stats_jvm_gc_collectors_old_collection_time_in_millis" => "integer"
"source_node_uuid" => "string"
"logstash_stats_os_cpu_load_average_15m" => "float"
"logstash_stats_reloads_successes" => "integer"
"logstash_stats_logstash_pipeline_batch_size" => "integer"
"source_node_host" => "string"
"logstash_stats_jvm_gc_collectors_old_collection_count" => "integer"
"logstash_stats_process_max_file_descriptors" => "integer"
"logstash_stats_jvm_mem_heap_max_in_bytes" => "integer"
"cluster_uuid" => "string"
"logstash_stats_queue_type" => "string"
"region" => "string"
}
send_as_tags => ["region","source_node_uuid"]
flush_size => 3000
idle_flush_time => 1
retention_policy => "rp_400d"
}
stdout {codec => rubydebug }
}
}
The sample output to the console (stdout) looks good, as expected:
{
"logstash_stats_events_in" => 621,
"logstash_stats_logstash_status" => "green",
"logstash_stats_logstash_pipeline_workers" => 16,
"logstash_stats_events_out" => 621,
"logstash_stats_events_duration_in_millis" => 4539,
"logstash_stats_process_cpu_percent" => 0,
"logstash_stats_jvm_mem_heap_used_in_bytes" => 170390792,
"logstash_stats_process_open_file_descriptors" => 259,
"type" => "logstash_metrics",
"logstash_stats_jvm_uptime_in_millis" => 310770160,
"logstash_stats_events_filtered" => 621,
"logstash_stats_jvm_mem_heap_used_percent" => 0,
"logstash_stats_jvm_gc_collectors_young_collection_time_in_millis" => 21586,
"source_node_ip" => "10.187.8.207",
"logstash_stats_queue_events_count" => 0,
"logstash_stats_reloads_failures" => 0,
"timestamp" => "2018-01-30T15:56:18.270Z",
"logstash_stats_logstash_host" => "ip-187-7-147.dqa.capitalone.com",
"logstash_stats_jvm_gc_collectors_young_collection_count" => 487,
"logstash_stats_os_cpu_load_average_5m" => 0.19,
"logstash_stats_jvm_gc_collectors_old_collection_time_in_millis" => 124,
"source_node_uuid" => "VmarsH2-RMO0HY2u2-A9EQ",
"logstash_stats_os_cpu_load_average_15m" => 0.13,
"logstash_stats_reloads_successes" => 0,
"logstash_stats_logstash_pipeline_batch_size" => 125,
"source_node_host" => "10.187.8.207",
"logstash_stats_jvm_gc_collectors_old_collection_count" => 1,
"logstash_stats_process_max_file_descriptors" => 16384,
"logstash_stats_jvm_mem_heap_max_in_bytes" => 32098877440,
"cluster_uuid" => "LkLw_ASTR7CVQAaX1IzDgg",
"logstash_stats_queue_type" => "memory",
"region" => "west",
"logstash_stats_os_cpu_load_average_1m" => 0.06
(Successfully the formatted output was generated)
But the script above is unable to write this to InfluxDB, and the error log shows the following:
09:56:25.658 [[main]>worker0] DEBUG logstash.outputs.influxdb - Influxdb output: Received event: %{host} %{message}
Exception in thread "[main]>worker0" java.io.IOException: fails
at org.logstash.Event.getTimestamp(Event.java:140)
at org.logstash.ext.JrubyEventExtLibrary$RubyEvent.ruby_timestamp(JrubyEventExtLibrary.java:289)
at org.logstash.ext.JrubyEventExtLibrary$RubyEvent$INVOKER$i$0$0$ruby_timestamp.call(JrubyEventExtLibrary$RubyEvent$INVOKER$i$0$0$ruby_timestamp.gen)
at org.jruby.runtime.callsite.CachingCallSite.cacheAndCall(CachingCallSite.java:306)
at org.jruby.runtime.callsite.CachingCallSite.call(CachingCallSite.java:136)
at org.jruby.ast.CallNoArgNode.interpret(CallNoArgNode.java:60)
at org.jruby.ast.FCallTwoArgNode.interpret(FCallTwoArgNode.java:38)
at org.jruby.ast.LocalAsgnNode.interpret(LocalAsgnNode.java:123)
at org.jruby.ast.NewlineNode.interpret(NewlineNode.java:105)
at org.jruby.ast.BlockNode.interpret(BlockNode.java:71)
at org.jruby.evaluator.ASTInterpreter.INTERPRET_METHOD(ASTInterpreter.java:74)
at org.jruby.internal.runtime.methods.InterpretedMethod.call(InterpretedMethod.java:182)
at org.jruby.internal.runtime.methods.DefaultMethod.call(DefaultMethod.java:203)
at org.jruby.runtime.callsite.CachingCallSite.cacheAndCall(CachingCallSite.java:326)
at org.jruby.runtime.callsite.CachingCallSite.call(CachingCallSite.java:170)
at org.jruby.ast.FCallOneArgNode.interpret(FCallOneArgNode.java:36)
at org.jruby.ast.NewlineNode.interpret(NewlineNode.java:105)
at org.jruby.evaluator.ASTInterpreter.INTERPRET_BLOCK(ASTInterpreter.java:112)
at org.jruby.runtime.Interpreted19Block.evalBlockBody(Interpreted19Block.java:206)
at org.jruby.runtime.Interpreted19Block.yield(Interpreted19Block.java:157)
at org.jruby.runtime.Block.yield(Block.java:142)
at org.jruby.RubyArray.eachCommon(RubyArray.java:1606)
at org.jruby.RubyArray.each(RubyArray.java:1613)
Update: My Logstash is able to communicate with InfluxDB (other scripts are working fine). The environment versions I am using are: Logstash 5.4, InfluxDB 1.4.2, Java 8 (64-bit), logstash-output-influxdb 5.0.3 (output plugin), and Windows 7 Enterprise (64-bit).
Can someone suggest what is going wrong here? Let me know if you require any further information.
Thanks!
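For reference, a possible lead (an assumption based on the stack trace, not confirmed): the influxdb output reads the event's @timestamp (org.logstash.Event.getTimestamp in the trace above), and the filter removes @timestamp without putting a valid timestamp back. A minimal sketch that keeps a usable @timestamp by re-parsing the renamed field with a date filter (field names taken from the config above):
filter {
  # Hypothetical tweak: rebuild @timestamp from the renamed "timestamp" field
  # so the influxdb output can resolve the event time.
  date {
    match  => [ "timestamp", "ISO8601" ]
    target => "@timestamp"
  }
}
With this in place, @timestamp would no longer need to be listed in remove_field.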
Related
Below is my Logstash configuration. Grafana is unable to understand the namespace, pod, and container_name sent by Logstash.
input {
file{
path => "/host/var/log/pods/**/*.log"
type => "kubernetes"
start_position => "beginning"
}
}
filter {
if [kubernetes] {
mutate {
add_field => {
"container_name" => "%{[kubernetes][container][name]}"
"namespace" => "%{[kubernetes][namespace]}"
"pod" => "%{[kubernetes][pod][name]}"
}
replace => { "host" => "%{[kubernetes][node][name]}"}
}
}
mutate {
remove_field => ["tags"]
}
}
output {
stdout { codec => rubydebug}
loki {
url => "http://loki-loki-distributed-distributor.loki-benchmark.svc.cluster.local:3100/loki/api/v1/push"
}
}
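For reference (an assumption, since a plain file input does not populate a [kubernetes] field by itself), the namespace, pod, and container can be derived from the pod log path, whose layout is /var/log/pods/<namespace>_<pod>_<pod-uid>/<container>/<n>.log. A minimal sketch using a dissect filter (the source field is "path" for the classic file input; it may be [log][file][path] with ECS compatibility enabled):
filter {
  # Hypothetical sketch: extract Kubernetes metadata from the log file path
  dissect {
    mapping => {
      "path" => "/host/var/log/pods/%{namespace}_%{pod}_%{pod_uid}/%{container_name}/%{}"
    }
  }
}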
I am doing a SELECT * FROM xxx against a database, and I need to know how to take two or three fields from that select (field1, field2, field3) and create an array with these values.
Any suggestion on how to do it?
input {
jdbc {
jdbc_driver_library => "/usr/share/logstash/logstash-core/lib/jars/ifxjdbc-4.50.3.jar"
jdbc_driver_class => "com.informix.jdbc.IfxDriver"
jdbc_connection_string => "jdbc:informix-sqli://xxxxxxxx"
jdbc_user => "***"
jdbc_password => "****"
schedule => "* * * * *"
statement => "SELECT * FROM informix.test WHERE id_nvd_com > :sql_last_value"
use_column_value => true
tracking_column => id_nvd_com
last_run_metadata_path => "/var/log/logstash/value/test_last_value.yml"
tags => "test"
}
}
filter {
xxxxxxxx
}
output {
# For ELK
elasticsearch {
hosts => "localhost:9200"
index => "index_001"
document_type => "hca_001_lgs"
document_id => "%{clave1}%{clave2}"
#user => "user"
#password => "pass"
#upsert => ""
#action => "%{accion}"
#action => "delete"
doc_as_upsert => true
manage_template => false
}
}
I could create it like this, but it doesn't generate it as a nested object:
filter{
mutate {
rename => {
"[clave1_tmp]" => "[objectClaves][clave1]"
"[clave2_tmp]" => "[objectClaves][clave2]"
}
}
}
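For what it's worth, a ruby filter can collect the selected columns into an array under a single field. A minimal sketch, assuming the column names field1, field2 and field3 from the question and an arbitrary target field name (adjust both to the real columns returned by the jdbc input):
filter {
  # Hypothetical sketch: gather selected columns into one nested array field
  ruby {
    code => '
      values = ["field1", "field2", "field3"].map { |f| event.get(f) }.compact
      event.set("[objectClaves][values]", values)
    '
  }
}
The resulting document then carries objectClaves.values as a proper array instead of separate top-level fields.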
I need to create module-wise dashboards, e.g. for user management and campaign management. How do I configure Logstash to pull all the logs from the different log files?
Logstash configuration:
input {
beats {
port => 5044
ssl => false
}
file {
path => "C:\data\logs\OCDE.log"
type => "ocde"
}
file {
path => "C:\data\logs\CLM.log"
type => "clm"
}
}
filter {
if [type] == "ocde"{
grok {
match => [ "message" , "%{COMBINEDAPACHELOG}"]
}
}
else if [type] == "clm" {
grok {
match => [ "message" , "%{COMBINEDAPACHELOG}"]
}
}
}
output {
if [document_type] == "backendlog" {
elasticsearch {
hosts => ["localhost:9200"]
manage_template => false
index => "enliven_be_log_yyyymmdd"
document_type => "%{[#metadata][type]}"
}
}
}
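For reference, one way to get module-wise dashboards is to route each log type to its own index, so every dashboard can be built on a dedicated index pattern. A minimal sketch, assuming the ocde/clm types from the config above (the index names are illustrative):
output {
  # Hypothetical sketch: one index per module/log type
  if [type] == "ocde" {
    elasticsearch { hosts => ["localhost:9200"] index => "ocde-%{+YYYY.MM.dd}" }
  } else if [type] == "clm" {
    elasticsearch { hosts => ["localhost:9200"] index => "clm-%{+YYYY.MM.dd}" }
  }
}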
I'm trying to use the logstash http_poller plug-in to pull data from a Zabbix "rest" endpoint. I'm getting the following error:
Error: undefined method `encoding' for #<Hash:0x370283f5>
Here's my config:
input {
http_poller {
interval => 15
codec => "json"
urls => {
zab => {
url => "http://my_zabbbix_server/zabbix/api_jsonrpc.php"
method => post
headers => {
"content-type" => "application/json"
}
body => {
jsonrpc => "2.0"
method => "user.login"
params => {
user => "myusername"
password => "ermagerd"
}
id => 1
}
#auth => null
}
}
}
}
output {
stdout { codec => rubydebug }
}
If anyone could shed some light on what is incorrect or missing from my configuration, that would be great.
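For reference, the "undefined method `encoding' for #&lt;Hash&gt;" error usually means the HTTP client was handed a Ruby hash where it expected a string. A minimal sketch that passes the JSON-RPC payload to http_poller as a JSON string instead of a nested hash (assuming that is the cause here):
input {
  http_poller {
    interval => 15
    codec => "json"
    urls => {
      zab => {
        url => "http://my_zabbbix_server/zabbix/api_jsonrpc.php"
        method => post
        headers => { "content-type" => "application/json" }
        # Hypothetical fix: body as a string, not a hash
        body => '{"jsonrpc":"2.0","method":"user.login","params":{"user":"myusername","password":"ermagerd"},"id":1}'
      }
    }
  }
}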
I'm very new to Logstash and Elasticsearch. I am trying to store log files both in Elasticsearch and in a flat file. I know that Logstash supports both outputs, but are they processed simultaneously, or is it done periodically through a job?
Yes, you can do this by tagging and cloning your inputs with the add_tag option in your shipper config.
input
{
tcp { type => "linux" port => "50000" codec => plain { charset => "US-ASCII" } }
tcp { type => "apache_access" port => "50001" codec => plain { charset => "US-ASCII" } }
tcp { type => "apache_error" port => "50002" codec => plain { charset => "US-ASCII" } }
tcp { type => "windows_security" port => "50003" codec => plain { charset => "US-ASCII" } }
tcp { type => "windows_application" port => "50004" codec => plain { charset => "US-ASCII" } }
tcp { type => "windows_system" port => "50005" codec => plain { charset => "US-ASCII" } }
udp { type => "network_equipment" port => "514" codec => plain { charset => "US-ASCII" } }
udp { type => "firewalls" port => "50006" codec => plain }
}
filter
{
grok { match => [ "host", "%{IPORHOST:ipaddr}(:%{NUMBER})?" ] }
mutate { replace => [ "fqdn", "%{ipaddr}" ] }
dns { reverse => [ "fqdn", "fqdn" ] action => "replace" }
if [type] == "linux" { clone { clones => "linux.log" add_tag => "savetofile" } }
if [type] == "apache_access" { clone { clones => "apache_access.log" add_tag => "savetofile" } }
if [type] == "apache_error" { clone { clones => "apache_error.log" add_tag => "savetofile" } }
if [type] == "windows_security" { clone { clones => "windows_security.log" add_tag => "savetofile" } }
if [type] == "windows_application" { clone { clones => "windows_application.log" add_tag => "savetofile" } }
if [type] == "windows_system" { clone { clones => "windows_system.log" add_tag => "savetofile" } }
if [type] == "network_equipment" { clone { clones => "network_%{fqdn}.log" add_tag => "savetofile" } }
if [type] == "firewalls" { clone { clones => "firewalls.log" add_tag => "savetofile" } }
}
output
{
#stdout { debug => true }
#stdout { codec => rubydebug }
redis { host => "1.1.1.1" data_type => "list" key => "logstash" }
}
And on your main logstash instance you would do this:
input {
redis {
host => "1.1.1.1"
data_type => "list"
key => "logstash"
type=> "redis-input"
# We use the 'json' codec here because we expect to read json events from redis.
codec => json
}
}
output
{
if "savetofile" in [tags] {
file {
path => [ "/logs/%{fqdn}/%{type}" ] message_format => "%{message}"
}
}
else { elasticsearch { host => "2.2.2.2" }
}
}
FYI, you can study "the life of a Logstash event" to learn more about how Logstash handles events.
The output worker model is currently a single thread. Outputs receive events in the order they are defined in the config file.
But outputs may buffer events temporarily before publishing them; for example, an output may buffer two or three events before writing them to the file.
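If that buffering matters for your use case, the standard file output exposes a flush_interval option (a sketch, assuming the logstash-output-file plugin; 0 flushes on every message):
output {
  # Hypothetical sketch: write each event to disk immediately instead of
  # buffering writes for a couple of seconds.
  file {
    path => [ "/logs/%{fqdn}/%{type}" ]
    flush_interval => 0
  }
}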
First you need to install output plugins:
/usr/share/logstash/bin/logstash-plugin install logstash-output-elasticsearch
/usr/share/logstash/bin/logstash-plugin install logstash-output-file
Then create conf files for output:
cat /etc/logstash/conf.d/nfs-output.conf
output {
file {
path => "/your/path/filebeat-%{+YYYY-MM-dd}.log"
}
}
cat /etc/logstash/conf.d/30-elasticsearch-output.conf
output {
elasticsearch {
hosts => ["elasitc_ip:9200"]
manage_template => true
user => "elastic"
password => "your_password"
}
}
Then:
service logstash restart
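Note that Logstash concatenates every file under /etc/logstash/conf.d into a single pipeline, so with the two output files above each event is written to both Elasticsearch and the flat file in the same pass. If only some events should reach the flat file, wrap the file output in a conditional, roughly like this (reusing the "savetofile" tag from the clone approach above):
output {
  # Hypothetical sketch: only tagged events go to the flat file
  if "savetofile" in [tags] {
    file {
      path => "/your/path/filebeat-%{+YYYY-MM-dd}.log"
    }
  }
}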