Author: dream_653 · 2019-10-30

ELK 7.3.2 Installation and Deployment


[ELK architecture diagram]

I. Environment Preparation

1. Environment planning (CentOS 7.6)

    192.168.110.128 jdk elasticsearch-master logstash kibana
    192.168.110.129 jdk elasticsearch-node1
    192.168.110.130 jdk elasticsearch-node2
    192.168.110.131 linux filebeat

2. Install the JDK

Both elasticsearch and logstash need a JDK to run, so install jdk-13.0.1 on each of the first three machines.

Download (note that Oracle's AuthParam-signed links expire, so a fresh link may be needed from the page below): https://download.oracle.com/otn-pub/java/jdk/13.0.1+9/cec27d702aa74d5a8630c65ae61e4305/jdk-13.0.1_linux-x64_bin.tar.gz?AuthParam=1572331930_e0c8e3d8c2a2be8579bbce2ca936262a
https://www.oracle.com/technetwork/java/javase/downloads/index.html

     # Download
     wget https://download.oracle.com/otn-pub/java/jdk/13.0.1+9/cec27d702aa74d5a8630c65ae61e4305/jdk-13.0.1_linux-x64_bin.tar.gz?AuthParam=1572331930_e0c8e3d8c2a2be8579bbce2ca936262a
     # Extract
     [root@localhost ~]# tar -zxvf jdk-13.0.1_linux-x64_bin.tar.gz -C /usr/
     # Install helper tools and sync the system clock
     [root@localhost ~]# yum install epel-release
     [root@localhost ~]# yum install vim
     [root@localhost ~]# yum -y install ntpdate
     [root@localhost ~]# ntpdate 182.92.12.11
     # Set environment variables (JDK 9+ no longer ships a separate jre/ directory, so JAVA_HOME alone is enough)
     [root@localhost ~]# vim /etc/profile
     export JAVA_HOME=/usr/jdk-13.0.1
     export CLASSPATH=.:$JAVA_HOME/lib:$CLASSPATH
     export PATH=$JAVA_HOME/bin:$PATH
     # Apply the changes
     [root@localhost ~]# source /etc/profile
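
To confirm the JDK is active, check the version; for jdk-13.0.1 the output should look roughly like this:

     [root@localhost ~]# java -version
     java version "13.0.1" 2019-10-15
     Java(TM) SE Runtime Environment (build 13.0.1+9)
     Java HotSpot(TM) 64-Bit Server VM (build 13.0.1+9, mixed mode, sharing)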

3. Operating system tuning

# Edit the limits file
[root@localhost ~]# vim /etc/security/limits.conf

# Append the following
* soft nofile 65536
* hard nofile 65536
* soft nproc 2048
* hard nproc 4096

# Edit the per-user process limit
[root@localhost ~]# vim /etc/security/limits.d/20-nproc.conf

# Adjust to the following
*          soft    nproc     4096
root       soft    nproc     unlimited

[root@localhost ~]# vim /etc/sysctl.conf
# Append at the end
vm.max_map_count=262144
fs.file-max=655360

# Apply and verify the changes with sysctl -p
[root@localhost ~]# sysctl -p
vm.max_map_count = 262144
fs.file-max = 655360
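
The limits.conf settings only take effect for new login sessions. After logging in again (as the elk user created below), a quick sanity check against the values above:

[root@localhost ~]# ulimit -n     # max open files, expect 65536
[root@localhost ~]# ulimit -u     # max user processes, expect 4096 (unlimited for root)
[root@localhost ~]# sysctl vm.max_map_count
vm.max_map_count = 262144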

4. Configure hosts

     [root@localhost ~]# vim /etc/hosts
     192.168.110.128 elk-master-node
     192.168.110.129 elk-data-node1
     192.168.110.130 elk-data-node2

5. Disable the firewall and SELinux

[root@localhost ~]# sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config  
[root@localhost ~]# setenforce 0  
[root@localhost ~]# systemctl stop firewalld  
[root@localhost ~]# systemctl disable firewalld  
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.  
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service. 

6. Create the elk user

[root@localhost ~]# groupadd elk  
[root@localhost ~]# useradd -g elk elk  
[root@localhost ~]# cat /etc/shadow|grep elk  
elk:!!:18198:0:99999:7:::  
[root@localhost ~]# cat /etc/group|grep elk  
elk:x:1000:

7. Installation directory layout

[root@localhost ~]# mkdir -p /home/app/elk  
[root@localhost ~]# chown -R elk:elk /home/app/elk   
[root@localhost ~]# ls -lrt /home/app/  
total 0  
drwxr-xr-x. 2 elk elk 6 Oct 29 03:55 elk

8. Download the packages (the elasticsearch data nodes only need elasticsearch)

Extract all of the tarballs into /home/app/elk:

[root@localhost ~]# wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.3.2-linux-x86_64.tar.gz
[root@localhost ~]# wget https://artifacts.elastic.co/downloads/logstash/logstash-7.3.2.tar.gz
[root@localhost ~]# wget https://artifacts.elastic.co/downloads/kibana/kibana-7.3.2-linux-x86_64.tar.gz
[root@localhost ~]# tar -zxvf elasticsearch-7.3.2-linux-x86_64.tar.gz -C /home/app/elk
[root@localhost ~]# tar -zxvf logstash-7.3.2.tar.gz -C /home/app/elk
[root@localhost ~]# tar -zxvf kibana-7.3.2-linux-x86_64.tar.gz -C /home/app/elk
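
Elastic publishes a .sha512 file alongside each tarball, so the downloads can also be verified (shown for elasticsearch; the same pattern applies to logstash and kibana):

[root@localhost ~]# wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.3.2-linux-x86_64.tar.gz.sha512
[root@localhost ~]# sha512sum -c elasticsearch-7.3.2-linux-x86_64.tar.gz.sha512
elasticsearch-7.3.2-linux-x86_64.tar.gz: OK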

II. Install Elasticsearch

1. Configure elasticsearch (switch to the elk user)

    [root@localhost ~]# chown -R elk:elk /home/app/elk
    [root@localhost ~]# su - elk

Create the Elasticsearch data directory:

mkdir /home/app/elk/elasticsearch-7.3.2/data -p

Create the Elasticsearch log directory:

mkdir /home/app/elk/elasticsearch-7.3.2/logs -p

Master node configuration on 192.168.110.128:

[elk@elk-master-node ~]$ vim /home/app/elk/elasticsearch-7.3.2/config/elasticsearch.yml
# Cluster name
cluster.name: es
# Node name
node.name: es-master
# Data directory (created above)
path.data: /home/app/elk/elasticsearch-7.3.2/data
# Log directory (created above)
path.logs: /home/app/elk/elasticsearch-7.3.2/logs
# Node IP
network.host: 192.168.110.128
# TCP transport port
transport.tcp.port: 9300
# HTTP port
http.port: 9200
# Seed host list; the master's address must appear in seed_hosts
discovery.seed_hosts: ["192.168.110.128:9300","192.168.110.129:9300","192.168.110.130:9300"]
# Initial master-eligible nodes; with several masters, list each of them here
cluster.initial_master_nodes: ["192.168.110.128:9300"]
# Master-related settings
# Whether this node may act as master
node.master: true
# Whether this node stores data
node.data: true
node.ingest: false
node.ml: false
cluster.remote.connect: false
# CORS
http.cors.enabled: true
http.cors.allow-origin: "*"

Data node configuration on 192.168.110.129:

[elk@elk-data-node1 ~]$ vim /home/app/elk/elasticsearch-7.3.2/config/elasticsearch.yml
# Cluster name
cluster.name: es
# Node name
node.name: es-data1
# Data directory (created above)
path.data: /home/app/elk/elasticsearch-7.3.2/data
# Log directory (created above)
path.logs: /home/app/elk/elasticsearch-7.3.2/logs
# Node IP
network.host: 192.168.110.129
# TCP transport port
transport.tcp.port: 9300
# HTTP port
http.port: 9200
# Seed host list; the master's address must appear in seed_hosts
discovery.seed_hosts: ["192.168.110.128:9300","192.168.110.129:9300","192.168.110.130:9300"]
# Initial master-eligible nodes; with several masters, list each of them here
cluster.initial_master_nodes: ["192.168.110.128:9300"]
# Master-related settings
# Whether this node may act as master
node.master: false
# Whether this node stores data
node.data: true
node.ingest: false
node.ml: false
cluster.remote.connect: false
# CORS
http.cors.enabled: true
http.cors.allow-origin: "*"

Data node configuration on 192.168.110.130:

[elk@elk-data-node2 ~]$ vim /home/app/elk/elasticsearch-7.3.2/config/elasticsearch.yml
# Cluster name
cluster.name: es
# Node name
node.name: es-data2
# Data directory (created above)
path.data: /home/app/elk/elasticsearch-7.3.2/data
# Log directory (created above)
path.logs: /home/app/elk/elasticsearch-7.3.2/logs
# Node IP
network.host: 192.168.110.130
# TCP transport port
transport.tcp.port: 9300
# HTTP port
http.port: 9200
# Seed host list; the master's address must appear in seed_hosts
discovery.seed_hosts: ["192.168.110.128:9300","192.168.110.129:9300","192.168.110.130:9300"]
# Initial master-eligible nodes; with several masters, list each of them here
cluster.initial_master_nodes: ["192.168.110.128:9300"]
# Master-related settings
# Whether this node may act as master
node.master: false
# Whether this node stores data
node.data: true
node.ingest: false
node.ml: false
cluster.remote.connect: false
# CORS
http.cors.enabled: true
http.cors.allow-origin: "*"

2. Start elasticsearch

Start it as the elk user on all three nodes (Elasticsearch refuses to start as root):

 [elk@elk-master-node ~]$ /home/app/elk/elasticsearch-7.3.2/bin/elasticsearch -d
 [elk@elk-master-node ~]$ ps -ef|grep -v grep |grep elasticsearch
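
Once all three nodes are up, cluster membership can be checked from any node. The output should list es-master, es-data1, and es-data2, with a `*` in the master column next to es-master:

 [elk@elk-master-node ~]$ curl 'http://192.168.110.128:9200/_cat/nodes?v'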

3. Health check

[elk@elk-master-node ~]$ curl -X GET 'http://192.168.110.128:9200/_cluster/health?pretty'
{
  "cluster_name" : "es",
  "status" : "green",
  "timed_out" : false,
  "number_of_nodes" : 1,
  "number_of_data_nodes" : 1,
  "active_primary_shards" : 0,
  "active_shards" : 0,
  "relocating_shards" : 0,
  "initializing_shards" : 0,
  "unassigned_shards" : 0,
  "delayed_unassigned_shards" : 0,
  "number_of_pending_tasks" : 0,
  "number_of_in_flight_fetch" : 0,
  "task_max_waiting_in_queue_millis" : 0,
  "active_shards_percent_as_number" : 100.0
}
    # status=green means the service is healthy (this output was captured before the
    # data nodes joined; with the full cluster up, number_of_nodes should be 3)

III. Install Kibana

1. Edit the configuration file

[elk@elk-master-node ~]$ cd /home/app/elk/kibana-7.3.2-linux-x86_64/config
[elk@elk-master-node config]$ vim kibana.yml
# Kibana port
server.port: 5601
# Listen address
server.host: "192.168.110.128"
# Elasticsearch address; for a cluster, point at the master node
elasticsearch.hosts: "http://192.168.110.128:9200/"
# Kibana log file path (create the logs directory first); otherwise logs go to messages by default
logging.dest: /home/app/elk/kibana-7.3.2-linux-x86_64/logs/kibana.log

2. Start Kibana

  [elk@elk-master-node config]$ nohup /home/app/elk/kibana-7.3.2-linux-x86_64/bin/kibana & 
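
Kibana takes a short while to come up. Once it has, its status API should answer; a quick check (the response is JSON, and "state":"green" indicates Kibana is healthy):

  [elk@elk-master-node config]$ curl -s 'http://192.168.110.128:5601/api/status' | grep -o '"state":"green"' | head -n 1
  "state":"green"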

IV. Install Filebeat

Install filebeat on 192.168.110.131 to collect the nginx access log and error log separately. Many guides online have nginx write its logs as JSON; here the logs stay in their original format and are parsed with grok in Logstash.

1. Download filebeat

    wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.3.2-linux-x86_64.tar.gz
    mkdir -p /usr/filebeat
    tar -zxvf filebeat-7.3.2-linux-x86_64.tar.gz -C /usr/filebeat

2. Configure filebeat.yml

vim /usr/filebeat/filebeat-7.3.2-linux-x86_64/filebeat.yml
#===== Filebeat inputs =======
filebeat.inputs:
- type: log
  paths:
   - /var/log/nginx/access.log
  fields:
   log_source: nginx-access
- type: log
  paths:
   - /var/log/nginx/error.log
  fields:
   log_source: nginx-error
#=============== Dashboards ==========
setup.dashboards.enabled: false
#======== Kibana =====================
# Kibana endpoint (used when loading dashboards)
setup.kibana:
  host: "192.168.110.128:5601"
#===== Logstash output=================
output.logstash:
  # The Logstash hosts
  hosts: ["192.168.110.128:5044"]
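
Before starting filebeat, both the configuration file and the connection to Logstash can be validated with filebeat's built-in test commands (the output test only succeeds once Logstash is actually listening on 5044, so it can also be run after section V):

    cd /usr/filebeat/filebeat-7.3.2-linux-x86_64
    ./filebeat test config -c filebeat.yml
    Config OK
    ./filebeat test output -c filebeat.yml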

3. Start filebeat

    nohup /usr/filebeat/filebeat-7.3.2-linux-x86_64/filebeat -c filebeat.yml &

Possible error:

Exiting: error loading config file: yaml: invalid trailing UTF-8 octet  

Fix: re-save the configuration file with UTF-8 encoding.
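
The encoding can also be checked and converted directly on Linux (assuming the file was saved as GBK; substitute whatever encoding `file` reports):

[root@localhost filebeat-7.3.2-linux-x86_64]# file filebeat.yml        # reports the detected encoding
[root@localhost filebeat-7.3.2-linux-x86_64]# iconv -f GBK -t UTF-8 filebeat.yml > filebeat.yml.utf8
[root@localhost filebeat-7.3.2-linux-x86_64]# mv filebeat.yml.utf8 filebeat.yml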

[root@localhost filebeat-7.3.2-linux-x86_64]# ps -ef|grep filebeat  
root 18198 8211 0 10:22 pts/1 00:00:00 /usr/filebeat/filebeat-7.3.2-linux-x86_64/filebeat -c filebeat.yml  

V. Install Logstash

1. Create the logstash.conf file

vim /home/app/elk/logstash-7.3.2/config/logstash.conf
input {
  beats {
    port => 5044
  }
}
filter {
  if [fields][log_source]=="nginx-access"{
    grok {
      match => {
        "message" => '%{IP:clientip}\s*%{DATA}\s*%{DATA}\s*\[%{HTTPDATE:requesttime}\]\s*"%{WORD:requesttype}.*?"\s*%{NUMBER:status:int}\s*%{NUMBER:bytes_read:int}\s*"%{DATA:requesturl}"\s*%{QS:ua}'
      }
      overwrite => ["message"]
    }
  }
  if [fields][log_source]=="nginx-error"{
    grok {
      match => {
        "message" => '(?<time>.*?)\s*\[%{LOGLEVEL:loglevel}\]\s*%{DATA}:\s*%{DATA:errorinfo},\s*%{WORD}:\s*%{IP:clientip},\s*%{WORD}:%{DATA:server},\s*%{WORD}:\s*%{QS:request},\s*%{WORD}:\s*%{QS:upstream},\s*%{WORD}:\s*"%{IP:hostip}",\s*%{WORD}:\s*%{QS:referrer}'
      }
      overwrite => ["message"]
    }
  }
}
output {
  if [fields][log_source]=="nginx-access"{
    elasticsearch {
      hosts => ["http://192.168.110.128:9200"]
      action => "index"
      index => "nginx-access-%{+YYYY.MM.dd}"
   }
  }
  if [fields][log_source]=="nginx-error"{
    elasticsearch {
      hosts => ["http://192.168.110.128:9200"]
      action => "index"
      index => "nginx-error-%{+YYYY.MM.dd}"
   }
  }
  stdout { codec => rubydebug }
}
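
For reference, the access-log pattern above targets nginx's default combined log format. A line like the following (a made-up example) yields fields such as clientip, requesttime, requesttype, status, bytes_read, and ua:

    192.168.110.50 - - [30/Oct/2019:10:22:01 +0800] "GET /index.html HTTP/1.1" 200 612 "-" "Mozilla/5.0 (X11; Linux x86_64)"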

2. Start logstash

    /home/app/elk/logstash-7.3.2/bin/logstash -f /home/app/elk/logstash-7.3.2/config/logstash.conf

Note: append -t to the command above to validate the configuration file without starting Logstash.
Possible error:

[FATAL][logstash.runner ] The given configuration is invalid. Reason: Expected one of #, input, filter, output at line 1, column 1 (byte 1)

Fix: the file must be UTF-8 without BOM. Use Notepad++ or another editor to convert the file's encoding to UTF-8 without BOM.
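
On Linux the BOM can be detected and stripped in place (GNU sed; a UTF-8 BOM is the three bytes ef bb bf at the start of the file):

    head -c 3 /home/app/elk/logstash-7.3.2/config/logstash.conf | od -An -tx1   # "ef bb bf" means a BOM is present
    sed -i '1s/^\xEF\xBB\xBF//' /home/app/elk/logstash-7.3.2/config/logstash.conf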

VI. Log in to the Kibana platform


Next, open Kibana at http://192.168.110.128:5601 and create index patterns for the access log and the error log. Once they are created, click Discover to see the log data.
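
Before creating the index patterns, it is worth confirming from the shell that Logstash has actually created today's indices (the names follow the index settings in logstash.conf):

    curl 'http://192.168.110.128:9200/_cat/indices/nginx-*?v'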

[Screenshot: creating the index patterns]

The index patterns can also be created this way:

[Screenshot: alternative way to create an index pattern]