Update elk 6.x playbooks
Most of the changes in this PR are for style and to adapt the playbooks so that the stack can operate on a multi-node cloud. The functional changes include the removal of mainline (Oracle) Java 8 in favor of OpenJDK 8. A site playbook was added to allow an operator to simply run everything. Old tools that no longer function within the stack have been removed. Packetbeat and Auditbeat were added to the install list, and all of the config files have been updated for the recent Elastic Stack 6.x changes.

Change-Id: I01200ad4772ff200b9c5c93f8f121145dfb88170
Signed-off-by: Kevin Carter <kevin.carter@rackspace.com>
Commit 17fb37f075 (parent d29019d448)
@@ -63,4 +63,3 @@ node.data: {{ node_data | default(true) }}
# Require explicit names when deleting indices:
#
# action.destructive_requires_name: true
elk_metrics_6x/common_task_install_elk_repo.yml (new file, 30 lines)
@@ -0,0 +1,30 @@
---
# Copyright 2018, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

- name: add Elastic search public GPG key (same for Metricsbeat)
  apt_key:
    url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
    state: "present"

- name: enable apt-transport-https
  apt:
    name: apt-transport-https
    state: present
    update_cache: yes

- name: add metricsbeat repo to apt sources list
  apt_repository:
    repo: 'deb https://artifacts.elastic.co/packages/6.x/apt stable main'
    state: present
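For reference, the key and repository these shared tasks install can be checked on a target host with standard apt tooling; this is a quick verification sketch, not something the playbooks themselves run:

.. code-block:: bash

   # Confirm the Elastic GPG key and the 6.x apt repository are present
   apt-key list | grep -i elastic
   grep -r "artifacts.elastic.co/packages/6.x/apt" /etc/apt/sources.list /etc/apt/sources.list.d/
   apt-cache policy elasticsearch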
@@ -5,6 +5,7 @@ elastic-logstash_hosts:
    ip: 172.22.8.28
  logging03:
    ip: 172.22.8.29

kibana_hosts:
  logging01:
    ip: 172.22.8.27
@@ -13,15 +13,11 @@ container_skel:
      - elastic-logstash_containers
    contains:
      - elastic-logstash
    properties:
      container_fs_size: 150G
  kibana_container:
    belongs_to:
      - kibana_containers
    contains:
      - kibana
    properties:
      container_fs_size: 10G

physical_skel:
  elastic-logstash_containers:
elk_metrics_6x/installAuditbeat.yml (new file, 53 lines)
@@ -0,0 +1,53 @@
---
- name: Install Auditbeat
  hosts: hosts
  become: true
  vars:
    haproxy_ssl: false

  vars_files:
    - vars/variables.yml

  pre_tasks:
    - include_tasks: common_task_install_elk_repo.yml

    - name: Ensure Auditbeat is installed
      apt:
        name: "{{ item }}"
        state: present
        update_cache: true
      with_items:
        - audispd-plugins
        - auditbeat

  post_tasks:
    - name: Drop auditbeat conf file
      template:
        src: templates/auditbeat.yml.j2
        dest: /etc/auditbeat/auditbeat.yml

    - name: Enable and restart auditbeat
      systemd:
        name: "auditbeat"
        enabled: "{{ not inventory_hostname in groups['kibana'] | default([]) }}"
        state: restarted


- name: Load Auditbeat Dashboards
  hosts: hosts[0]
  become: true
  vars_files:
    - vars/variables.yml
  tasks:
    - name: Load templates
      shell: >-
        {% set IP_ARR=[] %}
        {% for host in groups['elastic-logstash'] %}
        {% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}
        {% endif %}
        {% endfor %}
        {% set elasticsearch_hosts = [IP_ARR | map('regex_replace', '$', ':' ~ elastic_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' )] %}
        auditbeat setup
        -E 'output.logstash.enabled=false'
        -E 'output.elasticsearch.hosts={{ elasticsearch_hosts }}'
        -e -v
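Rendered against an inventory like the example conf.d above and the default elastic_port of 9200, the dashboard-load task resolves to a command along these lines; the addresses shown are illustrative:

.. code-block:: bash

   # Approximate rendered form of the "Load templates" task
   auditbeat setup \
     -E 'output.logstash.enabled=false' \
     -E 'output.elasticsearch.hosts=["172.22.8.27:9200","172.22.8.28:9200","172.22.8.29:9200"]' \
     -e -v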
@@ -1,6 +1,6 @@
---
- name: install ElK stack
  hosts: "{{ elk_hosts }}"
- name: Install Elastic Search
  hosts: "elastic-logstash"
  become: true
  vars_files:
    - vars/variables.yml
@@ -23,38 +23,32 @@
      container_config:
        - "lxc.mount.entry=/openstack/{{ inventory_hostname }} var/lib/elasticsearch none bind 0 0"
      delegate_to: "{{ physical_host }}"
    - name: Add Oracle Java PPA to apt sources list
      apt_repository: repo='ppa:webupd8team/java' state=present
    - name: Accept Java 8 License
      debconf: name='oracle-java8-installer' question='shared/accepted-oracle-license-v1-1' value='true' vtype='select'
    - name: Ensure Java is installed.
      apt: name=oracle-java8-installer state=present install_recommends=yes update_cache=yes
    - name: add Elastic search public GPG key
      apt_key:
        url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
        state: "present"
    - name: enable apt https transport
      apt: name=apt-transport-https state=present update_cache=yes
    - name: add Elastic search repo to apt sources list
      apt_repository: repo='deb https://artifacts.elastic.co/packages/6.x/apt stable main' state=present

    - name: Ensure Java is installed
      apt:
        name: openjdk-8-jre
        state: present
        install_recommends: yes
        update_cache: yes

    - include_tasks: common_task_install_elk_repo.yml

    - name: Ensure Elastic search is installed.
      apt: name=elasticsearch state=present update_cache=yes
      apt:
        name: elasticsearch
        state: present
        update_cache: yes

    - name: Drop elastic search conf file
      template:
        src: templates/elasticsearch.yml.j2
        dest: /etc/elasticsearch/elasticsearch.yml
      tags:
        - config
    - name: Enable and restart elastic
      service:
      systemd:
        name: "elasticsearch"
        enabled: true
        state: restarted
      tags:
        - config
    - name: copy elk-data rotater script
      copy: src=templates/rotate-topbeatdata.sh dest=/root/rotate-topbeatdata.sh mode=0755
      when: node_data | bool
    - name: setup a cron job to use topbeat-data rotater script daily
      cron: name="compress old topbeat data" minute="55" hour="23" job="/root/rotate-topbeatdata.sh"
      when: node_data | bool
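After this play runs, a quick check that the OpenJDK switch took effect and the service came back up might look like the following; run it inside an elastic-logstash container, keeping in mind the bind address depends on elastic_interface and 9200 is the elastic_port default:

.. code-block:: bash

   # Confirm OpenJDK 8 is the active JRE and Elasticsearch responds
   java -version
   systemctl status elasticsearch
   curl -s http://localhost:9200/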
@@ -1,63 +1,55 @@
---
- name: install kibana
- name: Install Kibana
  hosts: kibana
  become: true
  vars_files:
    - vars/variables.yml
  tasks:
    - include_tasks: common_task_install_elk_repo.yml

    - name: Ensure Nginx is installed.
      apt: name={{ item }} state=present update_cache=yes
      apt:
        name: "{{ item }}"
        state: present
        update_cache: yes
      with_items:
        - nginx
        - apache2-utils
        - python-passlib

    - name: create kibana user to access web interface
      htpasswd: path=/etc/nginx/htpasswd.users name={{ kibana_username }} password={{ kibana_password }} owner=root mode=0644
      htpasswd:
        path: "/etc/nginx/htpasswd.users"
        name: "{{ kibana_username }}"
        password: "{{ kibana_password }}"
        owner: root
        mode: 0644

    - name: Drop Nginx default conf file
      template:
        src: templates/nginx_default.j2
        dest: /etc/nginx/sites-available/default

    - name: Enable and restart nginx
      service:
        name: "nginx"
        enabled: true
        state: restarted
    - name: add Elastic search public GPG key
      apt_key:
        url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
        state: "present"
    - name: enable apt https transport
      apt: name=apt-transport-https state=present update_cache=yes
    - name: add kibana repo to apt sources list
      apt_repository: repo='deb https://artifacts.elastic.co/packages/6.x/apt stable main' state=present

    - name: Ensure kibana is installed.
      apt: name=kibana state=present update_cache=yes
      apt:
        name: kibana
        state: present
        update_cache: yes

    - name: Drop kibana conf file
      template:
        src: templates/kibana.yml.j2
        dest: /opt/kibana/config/kibana.yml
        mode: "u=rw,g=rw,o=rw"
        dest: /etc/kibana/kibana.yml
        mode: "0666"

    - name: Enable and restart kibana
      service:
      systemd:
        name: "kibana"
        enabled: true
        state: restarted
    - name: install metricsbeat
      apt: name=metricbeat state=present
    - name: Ensure curl is installed.
      apt: name=curl state=present
    - name: Drop metricbeat conf file
      template:
        src: templates/metricbeat-kibana.yml.j2
        dest: /etc/metricbeat/metricbeat.yml
    - name: import dashboards in elasticsearch
      command: "metricbeat setup --template -E output.logstash.enabled=false -E 'output.elasticsearch.hosts=[\"localhost:9200\"]'"
      args:
        chdir: /root/
    - name: Enable and restart metricbeat
      service:
        name: "metricbeat"
        enabled: true
        state: restarted
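With the defaults from vars/variables.yml (kibana_nginx_port 81 and the admin/admin htpasswd credentials), the nginx reverse proxy in front of Kibana can be smoke-tested roughly like this, substituting the kibana host address:

.. code-block:: bash

   # Expect an HTTP 200 from Kibana via the nginx proxy once it is up
   curl -u admin:admin -I http://<kibana-host>:81/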
@@ -1,46 +1,51 @@
---
- name: install ElK stack
- name: Install Logstash
  hosts: elastic-logstash
  become: true
  vars_files:
    - vars/variables.yml
  tasks:
    - name: add Elastic search public GPG key
      apt_key:
        url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
        state: "present"
    - name: enable apt https transport
      apt: name=apt-transport-https state=present update_cache=yes
    - name: add Logstash to apt sources list
      apt_repository: repo='deb https://artifacts.elastic.co/packages/6.x/apt stable main' state=present
    - include_tasks: common_task_install_elk_repo.yml

    - name: Ensure Logstash is installed.
      apt: name=logstash state=present update_cache=yes
      apt:
        name: logstash
        state: present
        update_cache: yes

    - name: Drop Logstash conf for beats input
      template:
        src: templates/02-beats-input.conf.j2
        dest: /etc/logstash/conf.d/02-beats-input.conf

    - name: Drop Logstash conf for beats input
      template:
        src: templates/10-syslog-filter.conf.j2
        dest: /etc/logstash/conf.d/10-syslog-filter.conf

    - name: Drop Logstash conf for beats output
      template:
        src: templates/30-elasticsearch-output.conf.j2
        dest: /etc/logstash/conf.d/30-elasticsearch-output.conf
    - shell: /usr/share/logstash/bin/logstash -t --path.settings /etc/logstash

    - name: Ensure logstash ownership
      file:
        path: /var/lib/logstash
        owner: logstash
        group: logstash
        recurse: true

    - name: Load logstash config
      command: "/usr/share/logstash/bin/logstash -t --path.settings /etc/logstash"
      register: conf_success
    - debug: var=conf_success
      become: yes
      become_user: logstash

    - name: Print config output
      debug: var=conf_success

    - name: Enable and restart logstash
      service:
      systemd:
        name: "logstash"
        enabled: true
        state: restarted
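The same configuration test the play performs can be re-run by hand when a pipeline change misbehaves; this mirrors the task above and assumes the stock package paths:

.. code-block:: bash

   # Validate the pipelines in /etc/logstash/conf.d as the logstash user
   sudo -u logstash /usr/share/logstash/bin/logstash -t --path.settings /etc/logstash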
@@ -1,30 +1,77 @@
---
- name: metricsbeat
  hosts: hosts
- name: Install Metricsbeat
  hosts: all
  become: true
  vars:
    haproxy_ssl: false

  vars_files:
    - vars/variables.yml

  pre_tasks:
    - include_tasks: common_task_install_elk_repo.yml

    - name: Ensure Metricsbeat is installed
      apt:
        name: metricbeat
        state: present
        update_cache: true

  tasks:
    - name: add metricsbeat repo to apt sources list
      apt_repository: repo='deb https://artifacts.elastic.co/packages/6.x/apt stable main' state=present
    - name: add Elastic search public GPG key (same for Metricsbeat)
      apt_key:
        url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
        state: "present"
    - name: enable apt-transport-https
      apt: name=apt-transport-https state=present update_cache=yes
    - name: Ensure Metricsbeat is installed.
      apt: name=metricbeat state=present update_cache=yes
    - name: Check for apache
      stat:
        path: /etc/apache2
      register: apache2

    - name: Check for httpd
      stat:
        path: /etc/httpd
      register: httpd

    - name: Check for nginx
      stat:
        path: /etc/nginx/nginx.conf
      register: nginx

    - name: Check for uwsgi
      stat:
        path: /etc/uwsgi
      register: uwsgi

    - name: Set discovery facts
      set_fact:
        apache_enabled: "{{ (apache2.stat.exists | bool) or (httpd.stat.exists | bool) }}"
        nginx_enabled: "{{ nginx.stat.exists | bool }}"
        uwsgi_enabled: "{{ uwsgi.stat.exists | bool }}"

  post_tasks:
    - name: Drop metricbeat conf file
      template:
        src: templates/metricbeat.yml.j2
        dest: /etc/metricbeat/metricbeat.yml
      tags:
        - config

    - name: Enable and restart metricbeat
      service:
      systemd:
        name: "metricbeat"
        enabled: true
        state: restarted
      tags:
        - config


- name: Load Metricsbeat Dashboards
  hosts: all[0]
  become: true
  vars_files:
    - vars/variables.yml
  tasks:
    - name: Load templates
      shell: >-
        {% set IP_ARR=[] %}
        {% for host in groups['elastic-logstash'] %}
        {% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}
        {% endif %}
        {% endfor %}
        {% set elasticsearch_hosts = [IP_ARR | map('regex_replace', '$', ':' ~ elastic_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' )] %}
        metricbeat setup
        -E 'output.logstash.enabled=false'
        -E 'output.elasticsearch.hosts={{ elasticsearch_hosts }}'
        -e -v
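Recent 6.x beats also ship a self-test subcommand that can be handy after the template is dropped; something like the following should confirm the rendered config parses and the configured output is reachable, though subcommand availability depends on the installed metricbeat version:

.. code-block:: bash

   # Sanity-check the rendered config and its output connection
   metricbeat test config -c /etc/metricbeat/metricbeat.yml
   metricbeat test output -c /etc/metricbeat/metricbeat.yml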
elk_metrics_6x/installPacketbeat.yml (new file, 50 lines)
@@ -0,0 +1,50 @@
---
- name: Install Packetbeat
  hosts: all
  become: true
  vars:
    haproxy_ssl: false

  vars_files:
    - vars/variables.yml

  pre_tasks:
    - include_tasks: common_task_install_elk_repo.yml

    - name: Ensure packetbeat is installed
      apt:
        name: packetbeat
        state: present
        update_cache: true

  post_tasks:
    - name: Drop packetbeat conf file
      template:
        src: templates/packetbeat.yml.j2
        dest: /etc/packetbeat/packetbeat.yml

    - name: Enable and restart packetbeat
      systemd:
        name: "packetbeat"
        enabled: true
        state: restarted


- name: Load Packetbeat Dashboards
  hosts: all[0]
  become: true
  vars_files:
    - vars/variables.yml
  tasks:
    - name: Load templates
      shell: >-
        {% set IP_ARR=[] %}
        {% for host in groups['elastic-logstash'] %}
        {% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}
        {% endif %}
        {% endfor %}
        {% set elasticsearch_hosts = [IP_ARR | map('regex_replace', '$', ':' ~ elastic_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' )] %}
        packetbeat setup
        -E 'output.logstash.enabled=false'
        -E 'output.elasticsearch.hosts={{ elasticsearch_hosts }}'
        -e -v
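Packetbeat captures traffic from a network interface, so after the play runs it can be worth confirming which devices it can see; the 6.x packetbeat CLI provides a subcommand for that:

.. code-block:: bash

   # List the interfaces packetbeat can capture on
   packetbeat devices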
@@ -27,7 +27,7 @@ Copy the env.d file into place

.. code-block:: bash

   cd openstack-ansible-ops
   cd openstack-ansible-ops/elk_metrics_6x
   cp env.d/elk.yml /etc/openstack_deploy/env.d/

Copy the conf.d file into place

@@ -53,55 +53,70 @@ install master/data elasticsearch nodes on the elastic-logstash containers

.. code-block:: bash

   cd /opt/openstack-ansible-ops
   openstack-ansible installElastic.yml -e elk_hosts=elastic-logstash -e node_master=true -e node_data=true

Install an Elasticsearch client on the kibana container to serve as a load balancer for the Kibana backend server

.. code-block:: bash

   openstack-ansible installElastic.yml -e elk_hosts=kibana -e node_master=false -e node_data=false
   cd /opt/openstack-ansible-ops/elk_metrics_6x
   openstack-ansible installElastic.yml

Install Logstash on all the elastic containers

.. code-block:: bash

   cd /opt/openstack-ansible-ops/elk_metrics_6x
   openstack-ansible installLogstash.yml

Install Kibana, nginx reverse proxy and metricbeat on the kibana container

.. code-block:: bash

   cd /opt/openstack-ansible-ops/elk_metrics_6x
   openstack-ansible installKibana.yml

Configure haproxy endpoints:

Edit the /etc/openstack_deploy/user_variables.yml file and add the following lines:

.. code-block:: bash

   haproxy_extra_services:
     - service:
         haproxy_service_name: kibana
         haproxy_ssl: False
         haproxy_backend_nodes: "{{ groups['kibana'] | default([]) }}"
         haproxy_port: 81
         haproxy_balance_type: tcp

and then run the haproxy-install playbook

.. code-block:: bash

   cd /opt/openstack-ansible/playbooks/
   openstack-ansible haproxy-install.yml --tags=haproxy-service-config


Install Metricbeat everywhere to start shipping metrics to our logstash instances

.. code-block:: bash

   openstack-ansible installMetricbeat.yml
   cd /opt/openstack-ansible-ops/elk_metrics_6x
   openstack-ansible installMetricbeat.yml

Troubleshooting:
Optional | configure haproxy endpoints

If everything goes bad, you can clean up with the following command:
Edit the `/etc/openstack_deploy/user_variables.yml` file and add the following lines

.. code-block:: yaml

   haproxy_extra_services:
     - service:
         haproxy_service_name: kibana
         haproxy_ssl: False
         haproxy_backend_nodes: "{{ groups['kibana'] | default([]) }}"
         haproxy_port: 81  # This is set using the "kibana_nginx_port" variable
         haproxy_balance_type: tcp
     - service:
         haproxy_service_name: elastic-logstash
         haproxy_ssl: False
         haproxy_backend_nodes: "{{ groups['elastic-logstash'] | default([]) }}"
         haproxy_port: 5044  # This is set using the "logstash_beat_input_port" variable
         haproxy_balance_type: tcp
     - service:
         haproxy_service_name: elastic-logstash
         haproxy_ssl: False
         haproxy_backend_nodes: "{{ groups['elastic-logstash'] | default([]) }}"
         haproxy_port: 9201  # This is set using the "elastic_hap_port" variable
         haproxy_check_port: 9200  # This is set using the "elastic_port" variable
         haproxy_backend_port: 9200  # This is set using the "elastic_port" variable
         haproxy_balance_type: tcp

Optional | run the haproxy-install playbook

.. code-block:: bash

   openstack-ansible lxc-containers-destroy.yml --limit=elastic-logstash_all

   cd /opt/openstack-ansible/playbooks/
   openstack-ansible haproxy-install.yml --tags=haproxy-service-config

Troubleshooting
^^^^^^^^^^^^^^^

If everything goes bad, you can clean up with the following command

.. code-block:: bash

   openstack-ansible lxc-containers-destroy.yml --limit=kibana:elastic-logstash_all
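Once the optional haproxy endpoints are in place, the load-balanced Elasticsearch listener can be exercised directly; a rough check, assuming the internal VIP and the default elastic_hap_port of 9201:

.. code-block:: bash

   curl -s http://<internal-lb-vip>:9201/_cluster/health?pretty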
@@ -1,25 +0,0 @@
---
- name: add reverse proxy to kibana dashboard
  hosts: kibana
  become: true
  tags: nginx-setup
  vars_files:
    - vars/variables.yml
  tasks:
    - name: Ensure Nginx is installed.
      apt: name={{ item }} state=present update_cache=yes
      with_items:
        - nginx
        - apache2-utils
        - python-passlib
    - name: create kibana user to access web interface
      htpasswd: path=/etc/nginx/htpasswd.users name={{ kibana_username }} password={{ kibana_password }} owner=root mode=0644
    - name: Drop Nginx default conf file
      template:
        src: templates/nginx_default.j2
        dest: /etc/nginx/sites-available/default
    - name: Enable and restart nginx
      service:
        name: "nginx"
        enabled: true
        state: restarted
elk_metrics_6x/site.yml (new file, 21 lines)
@@ -0,0 +1,21 @@
---
# Copyright 2018, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

- import_playbook: installElastic.yml
- import_playbook: installLogstash.yml
- import_playbook: installKibana.yml
- import_playbook: installMetricbeat.yml
- import_playbook: installPacketbeat.yml
- import_playbook: installAuditbeat.yml
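Since site.yml simply chains the individual playbooks, a full deployment can be driven with a single run; a sketch using the checkout path from the README:

.. code-block:: bash

   cd /opt/openstack-ansible-ops/elk_metrics_6x
   openstack-ansible site.yml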
@@ -1,9 +1,13 @@
{% set IP_ARR=[] %}
{% for host in groups['elastic-logstash'] %}
{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}
{% endif %}
{% endfor -%}
output {
  elasticsearch {
    hosts => {% set IP_ARR=[] %}{% for host in groups['elastic-logstash'] | union(groups['kibana']) %}{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}{% endif %}{% endfor %}[{{ IP_ARR | map('regex_replace', '$', ':' ~ elastic_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' ) }}]
    hosts => [{{ IP_ARR | map('regex_replace', '$', ':' ~ elastic_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' ) }}]
    sniffing => true
    manage_template => false
    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
    document_type => "%{[@metadata][type]}"
    index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
  }
}
elk_metrics_6x/templates/auditbeat.yml.j2 (new file, 1016 lines; diff suppressed because it is too large)
@@ -41,9 +41,14 @@ http.port: {{ elastic_port }}
# Pass an initial list of hosts to perform discovery when new node is started:
# The default list of hosts is ["127.0.0.1", "[::1]"]
#
discovery.zen.ping.unicast.hosts: {% set IP_ARR=[] %}{% for host in groups['elastic-logstash'] | union(groups['kibana']) %}{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}{% endif %}{% endfor %}[{{ IP_ARR | join(', ') }}]
node.master: {{ node_master | default(true) }}
node.data: {{ node_data | default(true) }}
{% set IP_ARR=[] %}
{% for host in groups['elastic-logstash'] %}
{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}
{% endif %}
{% endfor %}
discovery.zen.ping.unicast.hosts: [{{ IP_ARR | join(', ') }}]
node.master: {{ (inventory_hostname == groups['elastic-logstash'][0]) | ternary(true, false) }}
node.data: true
#
# Prevent the "split brain" by configuring the majority of nodes (total number of nodes / 2 + 1):
#
@@ -70,4 +75,3 @@ node.data: {{ node_data | default(true) }}
# Require explicit names when deleting indices:
#
# action.destructive_requires_name: true
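Because node.master is now derived from a host's position in the elastic-logstash group rather than a node_master variable, it is worth glancing at the cluster view after deployment; the _cat API makes that easy (9200 is the elastic_port default):

.. code-block:: bash

   # Show which node was elected master and that every data node joined
   curl -s http://<any-elastic-host>:9200/_cat/nodes?v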
@@ -12,14 +12,14 @@
# server.maxPayloadBytes: 1048576

# The URL of the Elasticsearch instance to use for all your queries.
elasticsearch.url: "http://localhost:{{ elastic_port }}"
elasticsearch.url: "http://{{ internal_lb_vip_address | default(hostvars[groups['elastic-logstash'][0]]['ansible_host']) }}:{{ elastic_hap_port }}"

# When this setting’s value is true Kibana uses the hostname specified in the server.host
# setting. When the value of this setting is false, Kibana uses the hostname of the host
# that connects to this Kibana instance.
# elasticsearch.preserveHost: true

# Kibana uses an index in Elasticsearch to store saved searches, visualizations and
# dashboards. Kibana creates a new index if the index doesn’t already exist.
# kibana.index: ".kibana"

@@ -28,12 +28,12 @@

# If your Elasticsearch is protected with basic authentication, these settings provide
# the username and password that the Kibana server uses to perform maintenance on the Kibana
# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which
# is proxied through the Kibana server.
# elasticsearch.username: "user"
# elasticsearch.password: "pass"

# Paths to the PEM-format SSL certificate and SSL key files, respectively. These
# files enable SSL for outgoing requests from the Kibana server to the browser.
# server.ssl.cert: /path/to/your/server.crt
# server.ssl.key: /path/to/your/server.key

@@ -43,7 +43,7 @@
# elasticsearch.ssl.cert: /path/to/your/client.crt
# elasticsearch.ssl.key: /path/to/your/client.key

# Optional setting that enables you to specify a path to the PEM file for the certificate
# authority for your Elasticsearch instance.
# elasticsearch.ssl.ca: /path/to/your/CA.pem

@@ -54,7 +54,7 @@
# the elasticsearch.requestTimeout setting.
# elasticsearch.pingTimeout: 1500

# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value
# must be a positive integer.
# elasticsearch.requestTimeout: 300000

@@ -76,6 +76,6 @@
# Set the value of this setting to true to suppress all logging output other than error messages.
# logging.quiet: false

# Set the value of this setting to true to log all events, including system usage information
# and all requests.
# logging.verbose: false
@@ -1,135 +0,0 @@
###################### Metricbeat Configuration Example #######################

# This file is an example configuration file highlighting only the most common
# options. The metricbeat.reference.yml file from the same directory contains all the
# supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/metricbeat/index.html

#========================== Modules configuration ============================

metricbeat.config.modules:
  # Glob pattern for configuration loading
  path: ${path.config}/modules.d/*.yml

  # Set to true to enable config reloading
  reload.enabled: false

  # Period on which files under path should be checked for changes
  #reload.period: 10s

#==================== Elasticsearch template setting ==========================

setup.template.settings:
  index.number_of_shards: 1
  index.codec: best_compression
  #_source.enabled: false

#================================ General =====================================

# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
#name:

# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]

# Optional fields that you can specify to add additional information to the
# output.
#fields:
#  env: staging


#============================== Dashboards =====================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here, or by using the `-setup` CLI flag or the `setup` command.
#setup.dashboards.enabled: false

# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#setup.dashboards.url:

#============================== Kibana =====================================

# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:

  # Kibana Host
  # Scheme and port can be left out and will be set to the default (http and 5601)
  # In case you specify and additional path, the scheme is required: http://localhost:5601/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
  host: "localhost:5601"

#============================= Elastic Cloud ==================================

# These settings simplify using metricbeat with the Elastic Cloud (https://cloud.elastic.co/).

# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:

# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:

#================================ Outputs =====================================

# Configure what output to use when sending the data collected by the beat.

#-------------------------- Elasticsearch output ------------------------------
output.elasticsearch:
  # Array of hosts to connect to.
  hosts: ["localhost:9200"]

  # Optional protocol and basic auth credentials.
  #protocol: "https"
  #username: "elastic"
  #password: "changeme"

#----------------------------- Logstash output --------------------------------
#output.logstash:
  # The Logstash hosts
  #hosts: ["localhost:5044"]

  # Optional SSL. By default is off.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

#================================ Logging =====================================

# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
#logging.level: debug

# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publish", "service".
#logging.selectors: ["*"]

#============================== Xpack Monitoring ===============================
# metricbeat can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.

# Set to true to enable the monitoring reporter.
#xpack.monitoring.enabled: false

# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well. Any setting that is not set is
# automatically inherited from the Elasticsearch output configuration, so if you
# have the Elasticsearch output configured, you can simply uncomment the
# following line.
#xpack.monitoring.elasticsearch:
File diff suppressed because it is too large
@@ -1,7 +1,7 @@
server {
    listen {{ nginx_port }};
    listen {{ kibana_nginx_port }};

    server_name {{ server_name }};
    server_name {{ kibana_server_name }};

    auth_basic "Restricted Access";
    auth_basic_user_file /etc/nginx/htpasswd.users;

@@ -12,6 +12,6 @@ server {
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
    }
}
elk_metrics_6x/templates/packetbeat.yml.j2 (new file, 1369 lines; diff suppressed because it is too large)
@@ -1,6 +0,0 @@
#!/bin/bash
for d in $(find /var/lib/elasticsearch/openstack_elk/nodes/0/indices/ -maxdepth 1 -type d -mtime 5 | grep topbeat); do
  echo $d
  tar -zcvf $d.tar.gz $d && rm -r $d
  mv $d.tar.gz /var/lib/elasticsearch-olddata/;
done
@@ -1,6 +1,6 @@
# elastic search vars
elastic_interface: "['eth1']"
elastic_port: 9200
elastic_hap_port: 9201
cluster_name: openstack_elk
node_name: ${HOSTNAME}

@@ -10,11 +10,8 @@ kibana_interface: 0.0.0.0
kibana_port: 5601
kibana_username: admin
kibana_password: admin
kibana_nginx_port: 81
kibana_server_name: "{{ ansible_hostname }}"

nginx_port: 81
server_name: server_name

logstash_ssl_self_signed_subject: "/C=US/ST=Texas/L=San Antonio/O=IT/CN={{ server_name }}/subjectAltName=IP.1={{ elk_server_private_ip }}"
# logstash vars
logstash_beat_input_port: 5044