I would like to shorten/prune my Logstash configuration.
As you can see, it is very long and keeps repeating the same block.
I was not able to minimize/eliminate the code/config duplication.
See below:
For every Tomcat instance there are application, access, standard-output and standard-error logs, which are shipped by Filebeat to elkserver:9200.
There are roughly 15 instances, which ends up being about 60 "if" and "else if" branches.
Does anyone have a hint on how to shorten/prune the output section?
input {
beats {
port => 5044
ssl => true
ssl_certificate_authorities => ["/etc/logstash/root-ca.pem"]
ssl_certificate => "/etc/logstash/elkserver.pem"
ssl_key => "/etc/logstash/elkserver.key"
ssl_verify_mode => "force_peer"
}
}
filter {
grok { match => ["message",'^%{TIMESTAMP_ISO8601:TIMESTAMP} %{LOGLEVEL:LEVEL} *\[(?<CLASS>[A-Za-z0-9$]+).%{NOTSPACE:METHOD}:%{NONNEGINT:LINE}:%{NOTSPACE:THREAD}\] %{GREEDYDATA:MESSAGE}$'] }
if "beats_input_codec_plain_applied" in [tags] { mutate { remove_tag => ["beats_input_codec_plain_applied"] } }
}
output {
if "jt09_02_access" in [tags] {
elasticsearch {
hosts => ["elkserver:9200"]
user => user
password => password
ssl => true
ssl_certificate_verification => true
cacert => "/etc/logstash/master.pem"
index => "jt09_02_access"
}
} else if "jt07_02_access" in [tags] {
elasticsearch {
hosts => ["elkserver:9200"]
user => user
password => password
ssl => true
ssl_certificate_verification => true
cacert => "/etc/logstash/master.pem"
index => "jt07_02_access"
}
} else if "jt07_04_access" in [tags] {
elasticsearch {
hosts => ["elkserver:9200"]
user => user
password => password
ssl => true
ssl_certificate_verification => true
cacert => "/etc/logstash/master.pem"
index => "jt07_04_access"
}
} else if "jt07_01_access" in [tags] {
elasticsearch {
hosts => ["elkserver:9200"]
user => user
password => password
ssl => true
ssl_certificate_verification => true
cacert => "/etc/logstash/master.pem"
index => "jt07_01_access"
}
} else if "jt07_09_sdterr" in [tags] {
elasticsearch {
hosts => ["elkserver:9200"]
user => user
password => password
ssl => true
ssl_certificate_verification => true
cacert => "/etc/logstash/master.pem"
index => "jt07_09_sdterr"
}
} else if "jt07_09_sdtout" in [tags] {
elasticsearch {
hosts => ["elkserver:9200"]
user => user
password => password
ssl => true
ssl_certificate_verification => true
cacert => "/etc/logstash/master.pem"
index => "jt07_09_sdtout"
}
} else if "jt07_09_custom_pattern" in [tags] {
elasticsearch {
hosts => ["elkserver:9200"]
user => user
password => password
ssl => true
ssl_certificate_verification => true
cacert => "/etc/logstash/master.pem"
index => "jt07_09_custom_pattern"
}
} else if "jt09_01_access" in [tags] {
elasticsearch {
hosts => ["elkserver:9200"]
user => user
password => password
ssl => true
cacert => "/etc/logstash/master.pem"
index => "jt09_01_access"
}
} else if "jt09_03_access" in [tags] {
elasticsearch {
hosts => ["elkserver:9200"]
user => user
password => password
ssl => true
cacert => "/etc/logstash/master.pem"
index => "jt09_03_access"
}
} else if "jt09_01_sdterr" in [tags] {
elasticsearch {
hosts => ["elkserver:9200"]
user => user
password => password
ssl => true
cacert => "/etc/logstash/master.pem"
index => "jt09_01_sdterr"
}
} else if "jt09_01_sdtout" in [tags] {
elasticsearch {
hosts => ["elkserver:9200"]
user => user
password => password
ssl => true
cacert => "/etc/logstash/master.pem"
index => "jt09_01_sdterr"
}
} else if "jt09_02_sdterr" in [tags] {
elasticsearch {
hosts => ["elkserver:9200"]
user => user
password => password
ssl => true
cacert => "/etc/logstash/master.pem"
index => "jt09_02_sdterr"
}
} else if "jt09_02_sdtout" in [tags] {
elasticsearch {
hosts => ["elkserver:9200"]
user => user
password => password
ssl => true
cacert => "/etc/logstash/master.pem"
index => "jt09_02_sdtout"
}
} else if "jt09_03_sdterr" in [tags] {
elasticsearch {
hosts => ["elkserver:9200"]
user => user
password => password
ssl => true
cacert => "/etc/logstash/master.pem"
index => "jt09_03_sdterr"
}
} else if "jt09_03_sdtout" in [tags] {
elasticsearch {
hosts => ["elkserver:9200"]
user => user
password => password
ssl => true
cacert => "/etc/logstash/master.pem"
index => "jt09_03_sdtout"
}
} else if "jt08_03_access" in [tags] {
elasticsearch {
hosts => ["elkserver:9200"]
user => user
password => password
ssl => true
cacert => "/etc/logstash/master.pem"
index => "jt08_03_access"
}
} else if "jt08_03_sdterr" in [tags] {
elasticsearch {
hosts => ["elkserver:9200"]
user => user
password => password
ssl => true
cacert => "/etc/logstash/master.pem"
index => "jt08_03_sdterr"
}
} else if "jt08_03_sdtout" in [tags] {
elasticsearch {
hosts => ["elkserver:9200"]
user => user
password => password
ssl => true
cacert => "/etc/logstash/master.pem"
index => "jt08_03_sdtout"
}
}
}
Thanks Michael Dz, your answer helped me solve my issue in the following way:
Filebeat already adds the required tag while shipping the data:
- type: log
paths: pathto\log_custompattern.log
tags: ["jt07_09_custom_pattern"]
close_older: 24h
- type: log
paths: pathto\tomcat-stdout.??????????.log
tags: ["jt07_09_sdtout"]
scan_frequency: 30s
close_inactive: 12h
- type: log
paths: pathto\tomcat-stderr.??????????.log
tags: ["jt07_09_sdterr"]
scan_frequency: 30s
close_inactive: 12h
...
and Logstash now handles the output like this:
output {
elasticsearch {
hosts => ["ma-javainfra02.konzern.mvvcorp.de:9200"]
user => admin
password => logfileselkadmin
ssl => true
ssl_certificate_verification => true
cacert => "/etc/logstash/master.pem"
index => "%{tags[0]}"
}
}
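One caveat, as a hedged side note: depending on the Logstash version, referencing an array element in sprintf may require the bracketed field-reference form, i.e. "%{[tags][0]}" instead of "%{tags[0]}". If the index ever comes out as the literal text rather than the tag value, this variant of the same output (connection settings unchanged, only the index line differs) is worth trying:
output {
  elasticsearch {
    # ... same hosts/user/password/ssl settings as above ...
    index => "%{[tags][0]}"
  }
}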
You can add a tag as a new field and then create the index based on that new field. I don't know how you create your tags or how many of them are stored in the array, but I will assume that you are interested in the first one.
filter {
mutate {
add_field => { "[#metadata][indexname]", "tags[0]" }
}
}
output {
elasticsearch {
hosts => ["elkserver:9200"]
user => user
password => password
ssl => true
ssl_certificate_verification => true
cacert => "/etc/logstash/master.pem"
index => "%{[#metadata][indexname]}"
}
}
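If some events can arrive without any tag, it may be worth guarding the metadata field with a fallback so they still land somewhere; a minimal sketch along the same lines (the "catchall" index name is just a placeholder, not something from the original setup):
filter {
  if [tags] {
    mutate { add_field => { "[@metadata][indexname]" => "%{[tags][0]}" } }
  } else {
    mutate { add_field => { "[@metadata][indexname]" => "catchall" } }
  }
}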
Related
input {
http {
port => 8080
codec => json
}
}
filter part:
map['username'] ||= event.get('username');
map['error'] ||= event.get('message');
map['filename'] ||= event.get('filename');
map['line'] ||= event.get('line');
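Those map[...] lines look like the code body of an aggregate filter; purely as an assumption about the missing context, they would sit inside something like the sketch below (the task_id choice and the timeout are guesses, only the field names come from the snippet):
filter {
  aggregate {
    # group related events; the task_id field is an assumption
    task_id => "%{username}"
    code => "
      map['username'] ||= event.get('username')
      map['error']    ||= event.get('message')
      map['filename'] ||= event.get('filename')
      map['line']     ||= event.get('line')
    "
    timeout => 120
  }
}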
output {
stdout {
codec => rubydebug
}
if [type] == "client" {
elasticsearch {
hosts => ["${LOGSTASH_OUTPUT_HOST}"]
index => "%{[#metadata][target_index_client]}"
user => "${LOGSTASH_OUTPUT_USER:}"
password => "${LOGSTASH_OUTPUT_PASS:}"
manage_template => false
}
} else if [type] == "server" {
elasticsearch {
hosts => ["${LOGSTASH_OUTPUT_HOST}"]
index => "%{[#metadata][target_index_server]}"
user => "${LOGSTASH_OUTPUT_USER:}"
password => "${LOGSTASH_OUTPUT_PASS:}"
manage_template => false
}
}
}
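For context, [@metadata] fields are not created automatically, so somewhere in the filter section something has to populate [@metadata][target_index_client] and [@metadata][target_index_server]; a hedged sketch of what that could look like (the index name patterns here are invented for illustration):
filter {
  if [type] == "client" {
    mutate { add_field => { "[@metadata][target_index_client]" => "client-logs-%{+YYYY.MM.dd}" } }
  } else if [type] == "server" {
    mutate { add_field => { "[@metadata][target_index_server]" => "server-logs-%{+YYYY.MM.dd}" } }
  }
}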
We are getting most of the attributes, but some important attributes are missing in our Logstash output. Please suggest how to fix this issue.
Below is my Logstash configuration. Grafana is unable to understand the namespace, pod and container_name sent by Logstash.
input {
file{
path => "/host/var/log/pods/**/*.log"
type => "kubernetes"
start_position => "beginning"
}
}
filter {
if [kubernetes] {
mutate {
add_field => {
"container_name" => "%{[kubernetes][container][name]}"
"namespace" => "%{[kubernetes][namespace]}"
"pod" => "%{[kubernetes][pod][name]}"
}
replace => { "host" => "%{[kubernetes][node][name]}"}
}
}
mutate {
remove_field => ["tags"]
}
}
output {
stdout { codec => rubydebug}
loki {
url => "http://loki-loki-distributed-distributor.loki-benchmark.svc.cluster.local:3100/loki/api/v1/push"
}
}
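A plain file input does not create a [kubernetes] field on its own, so the if [kubernetes] branch above never matches. As a hedged sketch, the namespace, pod and container can usually be recovered from the log path itself, which on most clusters follows /var/log/pods/<namespace>_<pod>_<uid>/<container>/<n>.log (adjust the source field to [log][file][path] if your file input runs in ECS-compatible mode):
filter {
  # assumes the standard kubelet pod-log path layout mounted under /host
  dissect {
    mapping => {
      "path" => "/host/var/log/pods/%{namespace}_%{pod}_%{pod_uid}/%{container_name}/%{logfile}"
    }
  }
}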
I am doing a SELECT * FROM xxx against a database. I need to know how to take two or three fields from that select (field1, field2, field3) and create an array with those values.
Any suggestions on how to do it?
input {
jdbc {
jdbc_driver_library => "/usr/share/logstash/logstash-core/lib/jars/ifxjdbc-4.50.3.jar"
jdbc_driver_class => "com.informix.jdbc.IfxDriver"
jdbc_connection_string => "jdbc:informix-sqli://xxxxxxxx"
jdbc_user => "***"
jdbc_password => "****"
schedule => "* * * * *"
statement => "SELECT * FROM informix.test WHERE id_nvd_com > :sql_last_value"
use_column_value => true
tracking_column => id_nvd_com
last_run_metadata_path => "/var/log/logstash/value/test_last_value.yml"
tags => "test"
}
}
filter {
xxxxxxxx
}
output {
# Para ELK
elasticsearch {
hosts => "localhost:9200"
index => "index_001"
document_type => "hca_001_lgs"
document_id => "%{clave1}%{clave2}"
#user => "user"
#password => "pass"
#upsert => ""
#action => "%{accion}"
#action => "delete"
doc_as_upsert => true
manage_template => false
}
}
I could create it like this, but it doesn't come out as nested:
filter{
mutate {
rename => {
"[clave1_tmp]" => "[objectClaves][clave1]"
"[clave2_tmp]" => "[objectClaves][clave2]"
}
}
}
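If the goal is a real array field rather than a nested object, a ruby filter can assemble it explicitly; a minimal sketch, assuming the selected columns are clave1, clave2 and clave3 and using "claves" as an example target field name:
filter {
  ruby {
    # build an array from the individual columns, dropping any that are missing
    code => "event.set('claves', [event.get('clave1'), event.get('clave2'), event.get('clave3')].compact)"
  }
}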
I need to create module-wise dashboards, e.g. user management and campaign management. How do I configure Logstash to pull all the logs from the different log files?
Logstash configuration:
input {
beats {
port => 5044
ssl => false
}
file {
path => "C:\data\logs\OCDE.log"
type => "ocde"
}
file {
path => "C:\data\logs\CLM.log"
type => "clm"
}
}
filter {
if [type] == "ocde"{
grok {
match => [ "message" , "%{COMBINEDAPACHELOG}"]
}
}
else if [type] == "clm" {
grok {
match => [ "message" , "%{COMBINEDAPACHELOG}"]
}
}
}
output {
if (document_type= backendlog) {
elasticsearch {
hosts => ["localhost:9200"]
manage_template => false
index => "enliven_be_log_yyyymmdd"
document_type => "%{[#metadata][type]}"
}
}
}
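As written, if (document_type= backendlog) is not valid Logstash conditional syntax; conditionals compare field references against quoted values. A hedged rework of that output section keyed off the [type] values the inputs above actually set (the daily index pattern is an assumption, and document_type is omitted because it is deprecated in recent Elasticsearch versions):
output {
  if [type] in ["ocde", "clm"] {
    elasticsearch {
      hosts => ["localhost:9200"]
      manage_template => false
      index => "enliven_be_log_%{+yyyyMMdd}"
    }
  }
}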
I'm very new to Logstash and Elasticsearch. I am trying to store log files both in Elasticsearch and in a flat file. I know that Logstash supports both outputs, but are they processed simultaneously, or is it done periodically through a job?
Yes, you can do this by tagging and cloning your inputs with the "add_tag" option in your shipper config.
input
{
tcp { type => "linux" port => "50000" codec => plain { charset => "US-ASCII" } }
tcp { type => "apache_access" port => "50001" codec => plain { charset => "US-ASCII" } }
tcp { type => "apache_error" port => "50002" codec => plain { charset => "US-ASCII" } }
tcp { type => "windows_security" port => "50003" codec => plain { charset => "US-ASCII" } }
tcp { type => "windows_application" port => "50004" codec => plain { charset => "US-ASCII" } }
tcp { type => "windows_system" port => "50005" codec => plain { charset => "US-ASCII" } }
udp { type => "network_equipment" port => "514" codec => plain { charset => "US-ASCII" } }
udp { type => "firewalls" port => "50006" codec => plain }
}
filter
{
grok { match => [ "host", "%{IPORHOST:ipaddr}(:%{NUMBER})?" ] }
mutate { replace => [ "fqdn", "%{ipaddr}" ] }
dns { reverse => [ "fqdn", "fqdn" ] action => "replace" }
if [type] == "linux" { clone { clones => "linux.log" add_tag => "savetofile" } }
if [type] == "apache_access" { clone { clones => "apache_access.log" add_tag => "savetofile" } }
if [type] == "apache_error" { clone { clones => "apache_error.log" add_tag => "savetofile" } }
if [type] == "windows_security" { clone { clones => "windows_security.log" add_tag => "savetofile" } }
if [type] == "windows_application" { clone { clones => "windows_application.log" add_tag => "savetofile" } }
if [type] == "windows_system" { clone { clones => "windows_system.log" add_tag => "savetofile" } }
if [type] == "network_equipment" { clone { clones => "network_%{fqdn}.log" add_tag => "savetofile" } }
if [type] == "firewalls" { clone { clones => "firewalls.log" add_tag => "savetofile" } }
}
output
{
#stdout { debug => true }
#stdout { codec => rubydebug }
redis { host => "1.1.1.1" data_type => "list" key => "logstash" }
}
And on your main logstash instance you would do this:
input {
redis {
host => "1.1.1.1"
data_type => "list"
key => "logstash"
type=> "redis-input"
# We use the 'json' codec here because we expect to read json events from redis.
codec => json
}
}
output
{
if "savetofile" in [tags] {
file {
path => [ "/logs/%{fqdn}/%{type}" ] message_format => "%{message}"
}
}
else { elasticsearch { host => "2.2.2.2" }
}
}
FYI, you can read "The life of a Logstash event" to learn more about how Logstash processes events.
The output worker model is currently single-threaded. Outputs receive events in the order they are defined in the config file.
However, outputs may buffer events temporarily before publishing them; for example, an output may buffer two or three events before writing them to the file.
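As an illustration of that buffering, the file output has a flush_interval setting (in seconds) that controls how long events may sit in the buffer before being written to disk; a small sketch with a made-up path:
output {
  file {
    path => "/var/log/logstash/out-%{+YYYY-MM-dd}.log"
    # flush buffered events every 2 seconds; 0 would flush after every event
    flush_interval => 2
  }
}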
First you need to install the output plugins:
/usr/share/logstash/bin/logstash-plugin install logstash-output-elasticsearch
/usr/share/logstash/bin/logstash-plugin install logstash-output-file
Then create conf files for output:
cat /etc/logstash/conf.d/nfs-output.conf
output {
file {
path => "/your/path/filebeat-%{+YYYY-MM-dd}.log"
}
}
cat /etc/logstash/conf.d/30-elasticsearch-output.conf
output {
elasticsearch {
hosts => ["elasitc_ip:9200"]
manage_template => true
user => "elastic"
password => "your_password"
}
}
Then:
service logstash restart
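For completeness: with the default single-pipeline setup, all .conf files under /etc/logstash/conf.d are merged into one pipeline, so every event passes through both outputs as it is processed (per batch, not via a periodic job). The two files above behave the same as a single combined config like this:
output {
  elasticsearch {
    hosts => ["elasitc_ip:9200"]
    user => "elastic"
    password => "your_password"
  }
  file {
    path => "/your/path/filebeat-%{+YYYY-MM-dd}.log"
  }
}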