1. Install Filebeat and Logstash on the production server to be monitored
# Install Java (required for Logstash)
sudo apt-get update
sudo apt-get install default-jre
# install filebeat
curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.15.0-amd64.deb
sudo dpkg -i filebeat-7.15.0-amd64.deb
# Install Logstash
curl -L -O https://artifacts.elastic.co/downloads/logstash/logstash-7.15.0-amd64.deb
sudo dpkg -i logstash-7.15.0-amd64.deb
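To confirm both packages installed correctly before moving on, a quick version check works; the Logstash path below is the standard location used by the deb package, adjust it if your install differs:
# print installed versions
filebeat version
/usr/share/logstash/bin/logstash --version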
2. Install Elasticsearch and Kibana on the log manager server
# Install Java (required for Elasticsearch)
sudo apt-get update
sudo apt-get install default-jre
# Install Elasticsearch
curl -L -O https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.15.0-amd64.deb
sudo dpkg -i elasticsearch-7.15.0-amd64.deb
# Install Kibana
curl -L -O https://artifacts.elastic.co/downloads/kibana/kibana-7.15.0-amd64.deb
sudo dpkg -i kibana-7.15.0-amd64.deb
If dpkg fails with a lock error, clear the locks as shown below, then re-run the commands above.
# check which process is holding the dpkg lock
sudo lsof /var/lib/dpkg/lock-frontend
# kill that process (replace xxx with the PID reported by lsof) and remove the stale lock files
sudo kill xxx
sudo rm /var/lib/apt/lists/lock
sudo rm /var/cache/apt/archives/lock
sudo rm /var/lib/dpkg/lock*
# finish configuring any partially installed packages
sudo dpkg --configure -a
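After the locks are cleared, it is worth confirming the packages ended up fully installed (dpkg status "ii") before configuring them; a minimal check:
# list the Elastic packages and their dpkg status
dpkg -l | grep -E 'elasticsearch|kibana|logstash|filebeat'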
3. Configuration
Elasticsearch: stores the logs and serves as the backend database for Kibana. Edit /etc/elasticsearch/elasticsearch.yml:
network.host: 0.0.0.0
discovery.type: single-node
xpack.security.enabled: false
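Once Elasticsearch is started in step 4, you can confirm it is reachable on the bound address with a plain HTTP request (no authentication is needed because xpack.security.enabled is false); a minimal check from the log manager server:
# should return the cluster name and version info as JSON
curl "http://127.0.0.1:9200"
curl "http://127.0.0.1:9200/_cluster/health?pretty"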
Filebeat: collects logs from files and ships them to Logstash. Edit /etc/filebeat/filebeat.yml:
max_procs: 1
filebeat.inputs:
  - type: log
    enabled: true
    paths:
      - /var/log/haproxy.log
    fields:
      log_type: haproxy
output.logstash:
  hosts: ["<logstash-server-ip>:5044"]
Kibana: the web UI for searching and visualizing the logs. Edit /etc/kibana/kibana.yml:
server.port: 5601
server.host: "0.0.0.0"
elasticsearch.hosts: ["http://127.0.0.1:9200"]
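After Kibana is started in step 4, its status API is a quick way to confirm the UI is up and connected to Elasticsearch; a minimal check:
# reports overall state "green" when Kibana is healthy
curl "http://127.0.0.1:5601/api/status"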
Logstash: receives logs from Filebeat, parses and re-formats them, and pushes them to Elasticsearch for indexing and storage. Pipeline for HAProxy logs (e.g. /etc/logstash/conf.d/haproxy.conf):
input {
  beats {
    port => 5044
  }
}
filter {
  if [fields][log_type] == "haproxy" {
    grok {
      match => { "message" => "%{HAPROXYHTTP}" }
    }
    date {
      match => [ "timestamp", "dd/MMM/yyyy:HH:mm:ss.SSS" ]
    }
  }
}
output {
  elasticsearch {
    hosts => ["<elasticsearch-server-ip>:<elasticsearch-port>"]
    index => "<index-name>-%{+YYYY.MM.dd}"
  }
}
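Before starting Logstash, the pipeline syntax can be checked with the bundled binary; the file name haproxy.conf is just the example name used here, adjust it to wherever you saved the pipeline:
# parse the pipeline and exit without processing any events
sudo /usr/share/logstash/bin/logstash --config.test_and_exit -f /etc/logstash/conf.d/haproxy.conf
The second pipeline below collects CPU, memory and disk usage from the production server with exec inputs; it can live in its own file, e.g. /etc/logstash/conf.d/system-metrics.conf: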
input {
  exec {
    command => "top -bn1 | grep 'Cpu(s)' | awk '{print 100-$8}'"
    interval => 60
    type => "cpu"
  }
  exec {
    command => "free | grep Mem | awk '{print $3/$2 * 100.0}'"
    interval => 60
    type => "memory"
  }
  exec {
    command => "df -h / | tail -n 1 | awk '{print $5}' | sed 's/%//'"
    interval => 60
    type => "disk"
  }
}
filter {
  mutate {
    add_field => { "[@metadata][index]" => "system-metrics-%{+YYYY.MM.dd}" }
  }
if [type] == "cpu" {
mutate {
convert => { "message" => "float" }
rename => { "message" => "cpu" }
}
}
if [type] == "memory" {
mutate {
convert => { "message" => "float" }
rename => { "message" => "memory" }
}
}
if [type] == "disk" {
mutate {
convert => { "message" => "float" }
rename => { "message" => "disk" }
}
}
}
output {
  elasticsearch {
    hosts => ["<elasticsearch-ip>:<elasticsearch-port>"]
    # the metadata field already holds the full daily index name set in the filter above
    index => "%{[@metadata][index]}"
  }
}
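Note that by default Logstash concatenates every file in /etc/logstash/conf.d into a single pipeline, so both configs would share one event flow. To keep the HAProxy and system-metrics pipelines separate, they can be declared in /etc/logstash/pipelines.yml; the file names below match the example names assumed above:
- pipeline.id: haproxy
  path.config: "/etc/logstash/conf.d/haproxy.conf"
- pipeline.id: system-metrics
  path.config: "/etc/logstash/conf.d/system-metrics.conf"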
HAProxy: configure HAProxy to send its logs to the local syslog socket (facility local0), which the syslog daemon writes to /var/log/haproxy.log, the file Filebeat tails. In /etc/haproxy/haproxy.cfg:
...
global
    log /dev/log local0
    ...
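HAProxy itself only emits to the syslog socket, so something has to turn facility local0 into /var/log/haproxy.log. On Debian/Ubuntu the haproxy package usually ships an rsyslog rule for this; if the file is missing, a minimal rule (assumed file name) looks like:
# /etc/rsyslog.d/49-haproxy.conf
local0.*    /var/log/haproxy.log
Restart rsyslog after adding it: sudo systemctl restart rsyslog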
4. Start the services
On the log manager server
# start the services
sudo systemctl start elasticsearch
sudo systemctl start kibana
# enable auto-start on reboot
sudo systemctl enable elasticsearch
sudo systemctl enable kibana
On the HAProxy (production) server
# start the services
sudo systemctl start haproxy
sudo systemctl start logstash
sudo systemctl start filebeat
# enable auto-start on reboot
sudo systemctl enable haproxy
sudo systemctl enable logstash
sudo systemctl enable filebeat
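A few quick checks confirm the whole chain is working once everything is running; placeholders match the configs above:
# confirm the services are active
systemctl status elasticsearch kibana        # on the log manager server
systemctl status haproxy logstash filebeat   # on the production server
# the daily indices should start appearing in Elasticsearch
curl "http://<elasticsearch-server-ip>:9200/_cat/indices/<index-name>-*,system-metrics-*?v"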
5. Visualize the log data
Access http://<logs-manager-server-ip>:5601
Go to Discover and create an index pattern matching the indices created by the Logstash pipelines (e.g. <index-name>-* and system-metrics-*)
Go to Dashboard to create new visualizations
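Index patterns can also be created without the UI through Kibana's saved objects API on 7.x; the pattern below reuses the placeholder index name from earlier, and @timestamp is assumed to be the time field produced by the pipelines:
# create an index pattern via the Kibana saved objects API
curl -X POST "http://<logs-manager-server-ip>:5601/api/saved_objects/index-pattern" \
  -H "kbn-xsrf: true" -H "Content-Type: application/json" \
  -d '{"attributes": {"title": "<index-name>-*", "timeFieldName": "@timestamp"}}'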