diff --git a/elk_metrics_6x/installFilebeat.yml b/elk_metrics_6x/installFilebeat.yml
new file mode 100644
index 00000000..4e783a60
--- /dev/null
+++ b/elk_metrics_6x/installFilebeat.yml
@@ -0,0 +1,155 @@
+---
+- name: Install Filebeat
+  hosts: all
+  become: true
+  vars:
+    haproxy_ssl: false
+
+  vars_files:
+    - vars/variables.yml
+
+  pre_tasks:
+    - include_tasks: common_task_install_elk_repo.yml
+
+    - name: Ensure Filebeat is installed
+      apt:
+        name: "{{ item }}"
+        state: present
+        update_cache: true
+      with_items:
+        - filebeat
+
+  tasks:
+    - name: Check for apache
+      stat:
+        path: /etc/apache2
+      register: apache2
+
+    - name: Check for auditd
+      stat:
+        path: /etc/audit
+      register: audit
+
+    - name: Check for ceph
+      stat:
+        path: /var/log/ceph
+      register: ceph
+
+    - name: Check for cinder
+      stat:
+        path: /var/log/cinder
+      register: cinder
+
+    - name: Check for glance
+      stat:
+        path: /var/log/glance
+      register: glance
+
+    - name: Check for heat
+      stat:
+        path: /var/log/heat
+      register: heat
+
+    - name: Check for horizon
+      stat:
+        path: /var/log/horizon
+      register: horizon
+
+    - name: Check for httpd
+      stat:
+        path: /var/log/httpd
+      register: httpd
+
+    - name: Check for keystone
+      stat:
+        path: /var/log/keystone
+      register: keystone
+
+    - name: Check for mysql
+      stat:
+        path: /var/lib/mysql
+      register: mysql
+
+    - name: Check for neutron
+      stat:
+        path: /var/log/neutron
+      register: neutron
+
+    - name: Check for nginx
+      stat:
+        path: /var/log/nginx
+      register: nginx
+
+    - name: Check for nova
+      stat:
+        path: /var/log/nova
+      register: nova
+
+    - name: Check for octavia
+      stat:
+        path: /var/log/octavia
+      register: octavia
+
+    - name: Check for swift
+      stat:
+        path: /var/log/swift
+      register: swift
+
+    - name: Check for rabbitmq
+      stat:
+        path: /var/lib/rabbitmq
+      register: rabbitmq
+
+    - name: Set discovery facts
+      set_fact:
+        apache_enabled: "{{ (apache2.stat.exists | bool) or (httpd.stat.exists | bool) }}"
+        nginx_enabled: "{{ nginx.stat.exists | bool }}"
+        auditd_enabled: "{{ audit.stat.exists | bool }}"
+        mysql_enabled: "{{ mysql.stat.exists | bool }}"
+        cinder_enabled: "{{ cinder.stat.exists | bool }}"
+        glance_enabled: "{{ glance.stat.exists | bool }}"
+        heat_enabled: "{{ heat.stat.exists | bool }}"
+        horizon_enabled: "{{ horizon.stat.exists | bool }}"
+        keystone_enabled: "{{ keystone.stat.exists | bool }}"
+        neutron_enabled: "{{ neutron.stat.exists | bool }}"
+        nova_enabled: "{{ nova.stat.exists | bool }}"
+        octavia_enabled: "{{ octavia.stat.exists | bool }}"
+        swift_enabled: "{{ swift.stat.exists | bool }}"
+        rabbitmq_enabled: "{{ rabbitmq.stat.exists | bool }}"
+        ceph_enabled: "{{ ceph.stat.exists | bool }}"
+
+  post_tasks:
+    - name: Drop Filebeat conf file
+      template:
+        src: templates/filebeat.yml.j2
+        dest: /etc/filebeat/filebeat.yml
+
+    - name: Enable and restart Filebeat
+      systemd:
+        name: "filebeat"
+        enabled: true
+        state: restarted
+
+
+- name: Load Filebeat Dashboards
+  hosts: all[0]
+  become: true
+  vars_files:
+    - vars/variables.yml
+  tasks:
+    - name: Load templates
+      shell: >-
+        {% set IP_ARR=[] %}
+        {% for host in groups['elastic-logstash'] %}
+        {% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}
+        {% endif %}
+        {% endfor %}
+        {% set elasticsearch_hosts = [IP_ARR | map('regex_replace', '$', ':' ~ elastic_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' )] %}
+        filebeat setup
+        {{ item }}
+        -E 'output.logstash.enabled=false'
+        -E 'output.elasticsearch.hosts={{ elasticsearch_hosts }}'
+        -e -v
+      with_items:
+        - "--template"
+        - "--dashboards"
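+
+# NOTE: the IP_ARR loop above builds the Elasticsearch host list by calling
+# list.insert() inside an {% if %} purely for its side effect. A shorter
+# equivalent (an untested sketch, assuming every host in the elastic-logstash
+# group defines ansible_host) would be:
+#
+#   {% set elasticsearch_hosts = groups['elastic-logstash']
+#      | map('extract', hostvars, 'ansible_host')
+#      | map('regex_replace', '^(.*)$', '"\1:' ~ elastic_port ~ '"')
+#      | list | join(',') %}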
"--template" + - "--dashboards" diff --git a/elk_metrics_6x/templates/filebeat.yml.j2 b/elk_metrics_6x/templates/filebeat.yml.j2 new file mode 100644 index 00000000..05c424e1 --- /dev/null +++ b/elk_metrics_6x/templates/filebeat.yml.j2 @@ -0,0 +1,2031 @@ +######################## Filebeat Configuration ############################ + +# This file is a full configuration example documenting all non-deprecated +# options in comments. For a shorter configuration example, that contains only +# the most common options, please see filebeat.yml in the same directory. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/filebeat/index.html + + +#========================== Modules configuration ============================ +filebeat.modules: + +#------------------------------- System Module ------------------------------- +- module: system + # Syslog + syslog: + enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. + var.convert_timezone: false + + # Prospector configuration (advanced). Any prospector configuration option + # can be added under this section. + #prospector: + + # Authorization logs + auth: + enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. + var.convert_timezone: false + + # Prospector configuration (advanced). Any prospector configuration option + # can be added under this section. + #prospector: + +#------------------------------- Apache2 Module ------------------------------ +- module: apache2 + # Access logs + access: + enabled: {{ apache_enabled | bool }} + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Prospector configuration (advanced). Any prospector configuration option + # can be added under this section. + #prospector: + + # Error logs + error: + enabled: {{ apache_enabled | bool }} + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Prospector configuration (advanced). Any prospector configuration option + # can be added under this section. + #prospector: + +#------------------------------- Auditd Module ------------------------------- +- module: auditd + log: + enabled: {{ apache_enabled | bool }} + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Prospector configuration (advanced). Any prospector configuration option + # can be added under this section. + #prospector: + +#------------------------------- Icinga Module ------------------------------- +#- module: icinga + # Main logs + #main: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Prospector configuration (advanced). Any prospector configuration option + # can be added under this section. + #prospector: + + # Debug logs + #debug: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Prospector configuration (advanced). Any prospector configuration option + # can be added under this section. 
+
+#------------------------------- Icinga Module -------------------------------
+#- module: icinga
+  # Main logs
+  #main:
+    #enabled: true
+
+    # Set custom paths for the log files. If left empty,
+    # Filebeat will choose the paths depending on your OS.
+    #var.paths:
+
+    # Prospector configuration (advanced). Any prospector configuration option
+    # can be added under this section.
+    #prospector:
+
+  # Debug logs
+  #debug:
+    #enabled: true
+
+    # Set custom paths for the log files. If left empty,
+    # Filebeat will choose the paths depending on your OS.
+    #var.paths:
+
+    # Prospector configuration (advanced). Any prospector configuration option
+    # can be added under this section.
+    #prospector:
+
+  # Startup logs
+  #startup:
+    #enabled: true
+
+    # Set custom paths for the log files. If left empty,
+    # Filebeat will choose the paths depending on your OS.
+    #var.paths:
+
+    # Prospector configuration (advanced). Any prospector configuration option
+    # can be added under this section.
+    #prospector:
+
+#-------------------------------- Kafka Module -------------------------------
+#- module: kafka
+  # All logs
+  #log:
+    #enabled: true
+
+    # Set custom paths for Kafka. If left empty,
+    # Filebeat will look under /opt.
+    #var.kafka_home:
+
+    # Set custom paths for the log files. If left empty,
+    # Filebeat will choose the paths depending on your OS.
+    #var.paths:
+
+
+#------------------------------ logstash Module ------------------------------
+#- module: logstash
+  # logs
+  #log:
+    #enabled: true
+
+    # Set custom paths for the log files. If left empty,
+    # Filebeat will choose the paths depending on your OS.
+    #var.paths:
+
+  # Slow logs
+  #slowlog:
+    #enabled: true
+
+    # Set custom paths for the log files. If left empty,
+    # Filebeat will choose the paths depending on your OS.
+    #var.paths:
+
+#-------------------------------- MySQL Module -------------------------------
+- module: mysql
+  # Error logs
+  error:
+    enabled: {{ mysql_enabled | bool }}
+
+    # Set custom paths for the log files. If left empty,
+    # Filebeat will choose the paths depending on your OS.
+    var.paths:
+      - /var/log/mysql_logs
+      - /var/log/mysql
+
+    # Prospector configuration (advanced). Any prospector configuration option
+    # can be added under this section.
+    #prospector:
+
+  # Slow logs
+  #slowlog:
+    #enabled: true
+
+    # Set custom paths for the log files. If left empty,
+    # Filebeat will choose the paths depending on your OS.
+    #var.paths:
+
+    # Prospector configuration (advanced). Any prospector configuration option
+    # can be added under this section.
+    #prospector:
+
+#-------------------------------- Nginx Module -------------------------------
+- module: nginx
+  # Access logs
+  access:
+    enabled: {{ nginx_enabled | bool }}
+
+    # Set custom paths for the log files. If left empty,
+    # Filebeat will choose the paths depending on your OS.
+    #var.paths:
+
+    # Prospector configuration (advanced). Any prospector configuration option
+    # can be added under this section.
+    #prospector:
+
+  # Error logs
+  error:
+    enabled: {{ nginx_enabled | bool }}
+
+    # Set custom paths for the log files. If left empty,
+    # Filebeat will choose the paths depending on your OS.
+    #var.paths:
+
+    # Prospector configuration (advanced). Any prospector configuration option
+    # can be added under this section.
+    #prospector:
+
+#------------------------------- Osquery Module ------------------------------
+- module: osquery
+  result:
+    enabled: true
+
+    # Set custom paths for the log files. If left empty,
+    # Filebeat will choose the paths depending on your OS.
+    #var.paths:
+
+    # If true, all fields created by this module are prefixed with
+    # `osquery.result`. Set to false to copy the fields in the root
+    # of the document. The default is true.
+    var.use_namespace: true
+
+#----------------------------- PostgreSQL Module -----------------------------
+#- module: postgresql
+  # Logs
+  #log:
+    #enabled: true
+
+    # Set custom paths for the log files. If left empty,
+    # Filebeat will choose the paths depending on your OS.
+    #var.paths:
+
+    # Prospector configuration (advanced). Any prospector configuration option
+    # can be added under this section.
+    #prospector:
+
+#-------------------------------- Redis Module -------------------------------
+#- module: redis
+  # Main logs
+  #log:
+    #enabled: true
+
+    # Set custom paths for the log files. If left empty,
+    # Filebeat will choose the paths depending on your OS.
+    #var.paths: ["/var/log/redis/redis-server.log*"]
+
+  # Slow logs, retrieved via the Redis API (SLOWLOG)
+  #slowlog:
+    #enabled: true
+
+    # The Redis hosts to connect to.
+    #var.hosts: ["localhost:6379"]
+
+    # Optional, the password to use when connecting to Redis.
+    #var.password:
+
+#------------------------------- Traefik Module ------------------------------
+#- module: traefik
+  # Access logs
+  #access:
+    #enabled: true
+
+    # Set custom paths for the log files. If left empty,
+    # Filebeat will choose the paths depending on your OS.
+    #var.paths:
+
+    # Prospector configuration (advanced). Any prospector configuration option
+    # can be added under this section.
+    #prospector:
+
+
+#=========================== Filebeat prospectors =============================
+
+# List of prospectors to fetch data.
+filebeat.prospectors:
+# Each - is a prospector. Most options can be set at the prospector level, so
+# you can use different prospectors for various configurations.
+# Below are the prospector specific configurations.
+
+# Type of the files. This determines how the file is read.
+# The different types cannot be mixed in one prospector.
+#
+# Possible options are:
+# * log: Reads every line of the log file (default)
+# * stdin: Reads standard input
+
+#------------------------------ Log prospector --------------------------------
+- type: log
+
+  # Change to true to enable this prospector configuration.
+  enabled: {{ cinder_enabled | bool }}
+
+  # Paths that should be crawled and fetched. Glob based paths.
+  # To fetch all ".log" files from a specific level of subdirectories
+  # /var/log/*/*.log can be used.
+  # For each file found under this path, a harvester is started.
+  # Make sure no file is defined twice as this can lead to unexpected behaviour.
+  paths:
+    - /var/log/cinder/*.log
+
+  ### Multiline options
+
+  # The regexp pattern that has to be matched. The pattern below matches
+  # lines starting with an oslo.log timestamp prefix or with "Traceback".
+  multiline.pattern: '^[0-9-]{10} +[0-9:\.]+ +[0-9]+ +[A-Z]+ +[A-Za-z0-9\._]+ \[|Traceback'
+
+  # Defines if the pattern set under pattern should be negated or not. Default is false.
+  multiline.negate: true
+
+  # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
+  # that was (not) matched before or after, or as long as a pattern is not matched based on negate.
+  # Note: "after" is the equivalent to "previous" and "before" is the equivalent to "next" in Logstash.
+  multiline.match: after
+
+  # The maximum number of lines that are combined into one event.
+  # In case there are more than max_lines, the additional lines are discarded.
+  # Default is 500.
+  multiline.max_lines: 500
+
+  # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event.
+  # Default is 5s.
+  multiline.timeout: 5s
+
+  # Optional additional fields. These fields can be freely picked
+  # to add additional information to the crawled log files for filtering
+  tags:
+    - openstack
+    - oslofmt
+    - cinder
+
+- type: log
+
+  # Change to true to enable this prospector configuration.
+  enabled: {{ glance_enabled | bool }}
+
+  # Paths that should be crawled and fetched. Glob based paths.
+  # To fetch all ".log" files from a specific level of subdirectories
+  # /var/log/*/*.log can be used.
+  # For each file found under this path, a harvester is started.
+  # Make sure no file is defined twice as this can lead to unexpected behaviour.
+  paths:
+    - /var/log/glance/*.log
+
+  ### Multiline options
+
+  # The regexp pattern that has to be matched. The pattern below matches
+  # lines starting with an oslo.log timestamp prefix or with "Traceback".
+  multiline.pattern: '^[0-9-]{10} +[0-9:\.]+ +[0-9]+ +[A-Z]+ +[A-Za-z0-9\._]+ \[|Traceback'
+
+  # Defines if the pattern set under pattern should be negated or not. Default is false.
+  multiline.negate: true
+
+  # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
+  # that was (not) matched before or after, or as long as a pattern is not matched based on negate.
+  # Note: "after" is the equivalent to "previous" and "before" is the equivalent to "next" in Logstash.
+  multiline.match: after
+
+  # The maximum number of lines that are combined into one event.
+  # In case there are more than max_lines, the additional lines are discarded.
+  # Default is 500.
+  multiline.max_lines: 500
+
+  # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event.
+  # Default is 5s.
+  multiline.timeout: 5s
+
+  # Optional additional fields. These fields can be freely picked
+  # to add additional information to the crawled log files for filtering
+  tags:
+    - openstack
+    - oslofmt
+    - glance
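+
+# NOTE: the multiline pattern used by the OpenStack prospectors in this file
+# anchors on the oslo.log prefix, e.g. (illustrative line only):
+#   2018-03-01 12:00:00.000 1234 INFO nova.compute.manager [req-...] ...
+# With negate: true and match: after, lines that do not start with such a
+# prefix (or with "Traceback") are appended to the preceding event, so
+# multi-line Python tracebacks are kept in a single event.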
+ # To fetch all ".log" files from a specific level of subdirectories + # /var/log/*/*.log can be used. + # For each file found under this path, a harvester is started. + # Make sure not file is defined twice as this can lead to unexpected behaviour. + paths: + - /var/log/glance/*.log + + ### Multiline options + + # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [ + multiline.pattern: '^[0-9-]{10} +[0-9:\.]+ +[0-9]+ +[A-Z]+ +[A-Za-z0-9\._]+ \[|Traceback' + + # Defines if the pattern set under pattern should be negated or not. Default is false. + multiline.negate: true + + # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern + # that was (not) matched before or after or as long as a pattern is not matched based on negate. + # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash + multiline.match: after + + # The maximum number of lines that are combined to one event. + # In case there are more the max_lines the additional lines are discarded. + # Default is 500 + multiline.max_lines: 500 + + # After the defined timeout, an multiline event is sent even if no new pattern was found to start a new event + # Default is 5s. + multiline.timeout: 5s + + # Optional additional fields. These fields can be freely picked + # to add additional information to the crawled log files for filtering + tags: + - openstack + - oslofmt + - glance + +- type: log + + # Change to true to enable this prospector configuration. + enabled: {{ heat_enabled | bool }} + + # Paths that should be crawled and fetched. Glob based paths. + # To fetch all ".log" files from a specific level of subdirectories + # /var/log/*/*.log can be used. + # For each file found under this path, a harvester is started. + # Make sure not file is defined twice as this can lead to unexpected behaviour. + paths: + - /var/log/heat/*.log + + ### Multiline options + + # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [ + multiline.pattern: '^[0-9-]{10} +[0-9:\.]+ +[0-9]+ +[A-Z]+ +[A-Za-z0-9\._]+ \[|Traceback' + + # Defines if the pattern set under pattern should be negated or not. Default is false. + multiline.negate: true + + # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern + # that was (not) matched before or after or as long as a pattern is not matched based on negate. + # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash + multiline.match: after + + # The maximum number of lines that are combined to one event. + # In case there are more the max_lines the additional lines are discarded. + # Default is 500 + multiline.max_lines: 500 + + # After the defined timeout, an multiline event is sent even if no new pattern was found to start a new event + # Default is 5s. + multiline.timeout: 5s + + # Optional additional fields. These fields can be freely picked + # to add additional information to the crawled log files for filtering + tags: + - openstack + - oslofmt + - heat + +- type: log + + # Change to true to enable this prospector configuration. + enabled: {{ horizon_enabled | bool }} + + # Paths that should be crawled and fetched. Glob based paths. + # To fetch all ".log" files from a specific level of subdirectories + # /var/log/*/*.log can be used. + # For each file found under this path, a harvester is started. 
+
+- type: log
+
+  # Change to true to enable this prospector configuration.
+  enabled: {{ keystone_enabled | bool }}
+
+  # Paths that should be crawled and fetched. Glob based paths.
+  # To fetch all ".log" files from a specific level of subdirectories
+  # /var/log/*/*.log can be used.
+  # For each file found under this path, a harvester is started.
+  # Make sure no file is defined twice as this can lead to unexpected behaviour.
+  paths:
+    - /var/log/keystone/*.log
+
+  ### Multiline options
+
+  # The regexp pattern that has to be matched. The pattern below matches
+  # lines starting with an oslo.log timestamp prefix or with "Traceback".
+  multiline.pattern: '^[0-9-]{10} +[0-9:\.]+ +[0-9]+ +[A-Z]+ +[A-Za-z0-9\._]+ \[|Traceback'
+
+  # Defines if the pattern set under pattern should be negated or not. Default is false.
+  multiline.negate: true
+
+  # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
+  # that was (not) matched before or after, or as long as a pattern is not matched based on negate.
+  # Note: "after" is the equivalent to "previous" and "before" is the equivalent to "next" in Logstash.
+  multiline.match: after
+
+  # The maximum number of lines that are combined into one event.
+  # In case there are more than max_lines, the additional lines are discarded.
+  # Default is 500.
+  multiline.max_lines: 500
+
+  # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event.
+  # Default is 5s.
+  multiline.timeout: 5s
+
+  # Optional additional fields. These fields can be freely picked
+  # to add additional information to the crawled log files for filtering
+  tags:
+    - openstack
+    - oslofmt
+    - keystone
+
+- type: log
+
+  # Change to true to enable this prospector configuration.
+  enabled: {{ neutron_enabled | bool }}
+
+  # Paths that should be crawled and fetched. Glob based paths.
+  # To fetch all ".log" files from a specific level of subdirectories
+  # /var/log/*/*.log can be used.
+  # For each file found under this path, a harvester is started.
+  # Make sure no file is defined twice as this can lead to unexpected behaviour.
+  paths:
+    - /var/log/neutron/*.log
+
+  ### Multiline options
+
+  # The regexp pattern that has to be matched. The pattern below matches
+  # lines starting with an oslo.log timestamp prefix or with "Traceback".
+  multiline.pattern: '^[0-9-]{10} +[0-9:\.]+ +[0-9]+ +[A-Z]+ +[A-Za-z0-9\._]+ \[|Traceback'
+
+  # Defines if the pattern set under pattern should be negated or not. Default is false.
+  multiline.negate: true
+
+  # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
+  # that was (not) matched before or after, or as long as a pattern is not matched based on negate.
+  # Note: "after" is the equivalent to "previous" and "before" is the equivalent to "next" in Logstash.
+  multiline.match: after
+
+  # The maximum number of lines that are combined into one event.
+  # In case there are more than max_lines, the additional lines are discarded.
+  # Default is 500.
+  multiline.max_lines: 500
+
+  # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event.
+  # Default is 5s.
+  multiline.timeout: 5s
+
+  # Optional additional fields. These fields can be freely picked
+  # to add additional information to the crawled log files for filtering
+  tags:
+    - openstack
+    - oslofmt
+    - neutron
+
+- type: log
+
+  # Change to true to enable this prospector configuration.
+  enabled: {{ nova_enabled | bool }}
+
+  # Paths that should be crawled and fetched. Glob based paths.
+  # To fetch all ".log" files from a specific level of subdirectories
+  # /var/log/*/*.log can be used.
+  # For each file found under this path, a harvester is started.
+  # Make sure no file is defined twice as this can lead to unexpected behaviour.
+  paths:
+    - /var/log/nova/*.log
+
+  ### Multiline options
+
+  # The regexp pattern that has to be matched. The pattern below matches
+  # lines starting with an oslo.log timestamp prefix or with "Traceback".
+  multiline.pattern: '^[0-9-]{10} +[0-9:\.]+ +[0-9]+ +[A-Z]+ +[A-Za-z0-9\._]+ \[|Traceback'
+
+  # Defines if the pattern set under pattern should be negated or not. Default is false.
+  multiline.negate: true
+
+  # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
+  # that was (not) matched before or after, or as long as a pattern is not matched based on negate.
+  # Note: "after" is the equivalent to "previous" and "before" is the equivalent to "next" in Logstash.
+  multiline.match: after
+
+  # The maximum number of lines that are combined into one event.
+  # In case there are more than max_lines, the additional lines are discarded.
+  # Default is 500.
+  multiline.max_lines: 500
+
+  # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event.
+  # Default is 5s.
+  multiline.timeout: 5s
+
+  # Optional additional fields. These fields can be freely picked
+  # to add additional information to the crawled log files for filtering
+  tags:
+    - openstack
+    - oslofmt
+    - nova
+
+- type: log
+
+  # Change to true to enable this prospector configuration.
+  enabled: {{ octavia_enabled | bool }}
+
+  # Paths that should be crawled and fetched. Glob based paths.
+  # To fetch all ".log" files from a specific level of subdirectories
+  # /var/log/*/*.log can be used.
+  # For each file found under this path, a harvester is started.
+  # Make sure no file is defined twice as this can lead to unexpected behaviour.
+  paths:
+    - /var/log/octavia/*.log
+
+  ### Multiline options
+
+  # The regexp pattern that has to be matched. The pattern below matches
+  # lines starting with an oslo.log timestamp prefix or with "Traceback".
+  multiline.pattern: '^[0-9-]{10} +[0-9:\.]+ +[0-9]+ +[A-Z]+ +[A-Za-z0-9\._]+ \[|Traceback'
+
+  # Defines if the pattern set under pattern should be negated or not. Default is false.
+  multiline.negate: true
+
+  # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
+  # that was (not) matched before or after, or as long as a pattern is not matched based on negate.
+  # Note: "after" is the equivalent to "previous" and "before" is the equivalent to "next" in Logstash.
+  multiline.match: after
+
+  # The maximum number of lines that are combined into one event.
+  # In case there are more than max_lines, the additional lines are discarded.
+  # Default is 500.
+  multiline.max_lines: 500
+
+  # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event.
+  # Default is 5s.
+  multiline.timeout: 5s
+
+  # Optional additional fields. These fields can be freely picked
+  # to add additional information to the crawled log files for filtering
+  tags:
+    - openstack
+    - oslofmt
+    - octavia
+
+- type: log
+
+  # Change to true to enable this prospector configuration.
+  enabled: {{ swift_enabled | bool }}
+
+  # Paths that should be crawled and fetched. Glob based paths.
+  # To fetch all ".log" files from a specific level of subdirectories
+  # /var/log/*/*.log can be used.
+  # For each file found under this path, a harvester is started.
+  # Make sure no file is defined twice as this can lead to unexpected behaviour.
+  paths:
+    - /var/log/swift/account*.log
+
+  ### Multiline options
+
+  # Month Day Time Host Python Module Status
+  multiline.pattern: '^[A-Za-z]+[[:space:]]* +[0-9]{1,2} +[0-9:\.]+ +[A-Za-z0-9-]+ account-replicator: +[A-Za-z0-9-\ ]+'
+
+  # Defines if the pattern set under pattern should be negated or not. Default is false.
+  multiline.negate: false
+
+  # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
+  # that was (not) matched before or after, or as long as a pattern is not matched based on negate.
+  # Note: "after" is the equivalent to "previous" and "before" is the equivalent to "next" in Logstash.
+  multiline.match: after
+
+  # The maximum number of lines that are combined into one event.
+  # In case there are more than max_lines, the additional lines are discarded.
+  # Default is 500.
+  multiline.max_lines: 500
+
+  # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event.
+  # Default is 5s.
+  multiline.timeout: 5s
+
+  # Optional additional fields. These fields can be freely picked
+  # to add additional information to the crawled log files for filtering
+  tags:
+    - openstack
+    - swift
+    - swift-account
+
+- type: log
+
+  # Change to true to enable this prospector configuration.
+  enabled: {{ swift_enabled | bool }}
+
+  # Paths that should be crawled and fetched. Glob based paths.
+  # To fetch all ".log" files from a specific level of subdirectories
+  # /var/log/*/*.log can be used.
+  # For each file found under this path, a harvester is started.
+  # Make sure no file is defined twice as this can lead to unexpected behaviour.
+  paths:
+    - /var/log/swift/container*.log
+
+  ### Multiline options
+
+  # Month Day Time Host Python Module Status
+  multiline.pattern: '^[A-Za-z]+[[:space:]]* +[0-9]{1,2} +[0-9:\.]+ +[A-Za-z0-9-]+ container-replicator: +[A-Za-z0-9-\ ]+'
+
+  # Defines if the pattern set under pattern should be negated or not. Default is false.
+  multiline.negate: false
+
+  # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
+  # that was (not) matched before or after, or as long as a pattern is not matched based on negate.
+  # Note: "after" is the equivalent to "previous" and "before" is the equivalent to "next" in Logstash.
+  multiline.match: after
+
+  # The maximum number of lines that are combined into one event.
+  # In case there are more than max_lines, the additional lines are discarded.
+  # Default is 500.
+  multiline.max_lines: 500
+
+  # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event.
+  # Default is 5s.
+  multiline.timeout: 5s
+
+  # Optional additional fields. These fields can be freely picked
+  # to add additional information to the crawled log files for filtering
+  tags:
+    - openstack
+    - swift
+    - swift-container
+
+- type: log
+
+  # Change to true to enable this prospector configuration.
+  enabled: {{ swift_enabled | bool }}
+
+  # Paths that should be crawled and fetched. Glob based paths.
+  # To fetch all ".log" files from a specific level of subdirectories
+  # /var/log/*/*.log can be used.
+  # For each file found under this path, a harvester is started.
+  # Make sure no file is defined twice as this can lead to unexpected behaviour.
+  paths:
+    - /var/log/swift/object*.log
+
+  ### Multiline options
+
+  # Month Day Time Host Python Module Status
+  multiline.pattern: '^[A-Za-z]+[[:space:]]* +[0-9]{1,2} +[0-9:\.]+ +[A-Za-z0-9-]+ object-replicator: +[A-Za-z0-9-\ ]+'
+
+  # Defines if the pattern set under pattern should be negated or not. Default is false.
+  multiline.negate: false
+
+  # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
+  # that was (not) matched before or after, or as long as a pattern is not matched based on negate.
+  # Note: "after" is the equivalent to "previous" and "before" is the equivalent to "next" in Logstash.
+  multiline.match: after
+
+  # The maximum number of lines that are combined into one event.
+  # In case there are more than max_lines, the additional lines are discarded.
+  # Default is 500.
+  multiline.max_lines: 500
+
+  # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event.
+  # Default is 5s.
+  multiline.timeout: 5s
+
+  # Optional additional fields. These fields can be freely picked
+  # to add additional information to the crawled log files for filtering
+  tags:
+    - openstack
+    - swift
+    - swift-object
+
+- type: log
+
+  # Change to true to enable this prospector configuration.
+  enabled: {{ swift_enabled | bool }}
+
+  # Paths that should be crawled and fetched. Glob based paths.
+  # To fetch all ".log" files from a specific level of subdirectories
+  # /var/log/*/*.log can be used.
+  # For each file found under this path, a harvester is started.
+  # Make sure no file is defined twice as this can lead to unexpected behaviour.
+  paths:
+    - /var/log/swift/proxy*.log
+
+  # Optional additional fields. These fields can be freely picked
+  # to add additional information to the crawled log files for filtering
+  tags:
+    - openstack
+    - swift
+    - swift-proxy
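+
+# NOTE: the prospector below groups RabbitMQ log records on the leading "="
+# of the classic "=INFO REPORT====" style record headers; everything up to
+# the next "=" header is folded into one event.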
+
+- type: log
+
+  # Change to true to enable this prospector configuration.
+  enabled: {{ rabbitmq_enabled | bool }}
+
+  # Paths that should be crawled and fetched. Glob based paths.
+  # To fetch all ".log" files from a specific level of subdirectories
+  # /var/log/*/*.log can be used.
+  # For each file found under this path, a harvester is started.
+  # Make sure no file is defined twice as this can lead to unexpected behaviour.
+  paths:
+    - /var/log/rabbitmq/*
+
+  ### Multiline options
+
+  multiline.pattern: '^='
+
+  # Defines if the pattern set under pattern should be negated or not. Default is false.
+  multiline.negate: true
+
+  # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
+  # that was (not) matched before or after, or as long as a pattern is not matched based on negate.
+  # Note: "after" is the equivalent to "previous" and "before" is the equivalent to "next" in Logstash.
+  multiline.match: after
+
+  # The maximum number of lines that are combined into one event.
+  # In case there are more than max_lines, the additional lines are discarded.
+  # Default is 500.
+  multiline.max_lines: 500
+
+  # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event.
+  # Default is 5s.
+  multiline.timeout: 5s
+
+  # Optional additional fields. These fields can be freely picked
+  # to add additional information to the crawled log files for filtering
+  tags:
+    - rabbitmq
+    - infrastructure
+
+- type: log
+
+  # Change to true to enable this prospector configuration.
+  enabled: {{ ceph_enabled | bool }}
+
+  # Paths that should be crawled and fetched. Glob based paths.
+  # To fetch all ".log" files from a specific level of subdirectories
+  # /var/log/*/*.log can be used.
+  # For each file found under this path, a harvester is started.
+  # Make sure no file is defined twice as this can lead to unexpected behaviour.
+  paths:
+    - /var/log/ceph/ceph-mon.*.log
+
+  ### Multiline options
+
+  multiline.pattern: '^[a-z_]* '
+
+  # Defines if the pattern set under pattern should be negated or not. Default is false.
+  multiline.negate: false
+
+  # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
+  # that was (not) matched before or after, or as long as a pattern is not matched based on negate.
+  # Note: "after" is the equivalent to "previous" and "before" is the equivalent to "next" in Logstash.
+  multiline.match: after
+
+  # The maximum number of lines that are combined into one event.
+  # In case there are more than max_lines, the additional lines are discarded.
+  # Default is 500.
+  multiline.max_lines: 500
+
+  # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event.
+  # Default is 5s.
+  multiline.timeout: 5s
+
+  # Optional additional fields. These fields can be freely picked
+  # to add additional information to the crawled log files for filtering
+  tags:
+    - ceph-mon
+    - ceph
+    - infrastructure
+
+- type: log
+
+  # Change to true to enable this prospector configuration.
+  enabled: {{ ceph_enabled | bool }}
+
+  # Paths that should be crawled and fetched. Glob based paths.
+  # To fetch all ".log" files from a specific level of subdirectories
+  # /var/log/*/*.log can be used.
+  # For each file found under this path, a harvester is started.
+  # Make sure no file is defined twice as this can lead to unexpected behaviour.
+  paths:
+    - /var/log/ceph/ceph-osd.*.log
+
+  # Optional additional fields. These fields can be freely picked
+  # to add additional information to the crawled log files for filtering
+  tags:
+    - ceph-osd
+    - ceph
+    - infrastructure
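+
+# NOTE: the prospector below is an always-on catch-all for the remaining host
+# logs (syslog-style files, haproxy, libvirt and lxc); no multiline options
+# are set, so every line is shipped as its own event.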
+
+- type: log
+
+  # Change to true to enable this prospector configuration.
+  enabled: true
+
+  # Paths that should be crawled and fetched. Glob based paths.
+  # To fetch all ".log" files from a specific level of subdirectories
+  # /var/log/*/*.log can be used.
+  # For each file found under this path, a harvester is started.
+  # Make sure no file is defined twice as this can lead to unexpected behaviour.
+  paths:
+    - /var/log/*.log
+    - /var/log/haproxy/*.log
+    - /var/log/libvirt/*.log
+    - /var/log/libvirt/*/*.log
+    - /var/log/lxc/*.log
+    #- c:\programdata\elasticsearch\logs\*
+
+  # Configure the file encoding for reading files with international characters
+  # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding).
+  # Some sample encodings:
+  #   plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk,
+  #   hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ...
+  #encoding: plain
+
+  # Exclude lines. A list of regular expressions to match. It drops the lines that are
+  # matching any regular expression from the list. The include_lines is called before
+  # exclude_lines. By default, no lines are dropped.
+  #exclude_lines: ['^DBG']
+
+  # Include lines. A list of regular expressions to match. It exports the lines that are
+  # matching any regular expression from the list. The include_lines is called before
+  # exclude_lines. By default, all the lines are exported.
+  #include_lines: ['^ERR', '^WARN']
+
+  # Exclude files. A list of regular expressions to match. Filebeat drops the files that
+  # are matching any regular expression from the list. By default, no files are dropped.
+  #exclude_files: ['.gz$']
+
+  # Optional additional fields. These fields can be freely picked
+  # to add additional information to the crawled log files for filtering
+  #fields:
+  #  level: debug
+  #  review: 1
+
+  # Set to true to store the additional fields as top level fields instead
+  # of under the "fields" sub-dictionary. In case of name conflicts with the
+  # fields added by Filebeat itself, the custom fields overwrite the default
+  # fields.
+  #fields_under_root: false
+
+  # Ignore files which were modified more than the defined timespan in the past.
+  # ignore_older is disabled by default, so no files are ignored by setting it to 0.
+  # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
+  #ignore_older: 0
+
+  # How often the prospector checks for new files in the paths that are specified
+  # for harvesting. Specify 1s to scan the directory as frequently as possible
+  # without causing Filebeat to scan too frequently. Default: 10s.
+  #scan_frequency: 10s
+
+  # Defines the buffer size every harvester uses when fetching the file
+  #harvester_buffer_size: 16384
+
+  # Maximum number of bytes a single log event can have.
+  # All bytes after max_bytes are discarded and not sent. The default is 10MB.
+  # This is especially useful for multiline log messages which can get large.
+  #max_bytes: 10485760
+
+  ### Recursive glob configuration
+
+  # Expand "**" patterns into regular glob patterns.
+  #recursive_glob.enabled: true
+
+  ### JSON configuration
+
+  # Decode JSON options. Enable this if your logs are structured in JSON.
+  # JSON key on which to apply the line filtering and multiline settings. This key
+  # must be top level and its value must be a string, otherwise it is ignored. If
+  # no text key is defined, the line filtering and multiline features cannot be used.
+  #json.message_key:
+
+  # By default, the decoded JSON is placed under a "json" key in the output document.
+  # If you enable this setting, the keys are copied top level in the output document.
+  #json.keys_under_root: false
+
+  # If keys_under_root and this setting are enabled, then the values from the decoded
+  # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.)
+  # in case of conflicts.
+  #json.overwrite_keys: false
+
+  # If this setting is enabled, Filebeat adds an "error.message" and "error.key: json" key in case of JSON
+  # unmarshaling errors or when a text key is defined in the configuration but cannot
+  # be used.
+  #json.add_error_key: false
+
+  ### Multiline options
+
+  # Multiline can be used for log messages spanning multiple lines. This is common
+  # for Java stack traces or C line continuations.
+
+  # The regexp pattern that has to be matched. The example pattern matches all lines starting with [
+  #multiline.pattern: ^\[
+
+  # Defines if the pattern set under pattern should be negated or not. Default is false.
+  #multiline.negate: false
+
+  # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
+  # that was (not) matched before or after, or as long as a pattern is not matched based on negate.
+  # Note: "after" is the equivalent to "previous" and "before" is the equivalent to "next" in Logstash.
+  #multiline.match: after
+
+  # The maximum number of lines that are combined into one event.
+  # In case there are more than max_lines, the additional lines are discarded.
+  # Default is 500.
+  #multiline.max_lines: 500
+
+  # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event.
+  # Default is 5s.
+  #multiline.timeout: 5s
+
+  # Setting tail_files to true means filebeat starts reading new files at the end
+  # instead of the beginning. If this is used in combination with log rotation
+  # this can mean that the first entries of a new file are skipped.
+  #tail_files: false
+
+  # The Ingest Node pipeline ID associated with this prospector. If this is set, it
+  # overwrites the pipeline option from the Elasticsearch output.
+  #pipeline:
+
+  # If symlinks is enabled, symlinks are opened and harvested. The harvester opens the
+  # original file for harvesting but reports the symlink name as the source.
+  #symlinks: false
+
+  # Backoff values define how aggressively filebeat crawls new files for updates.
+  # The default values can be used in most cases. Backoff defines how long Filebeat
+  # waits before checking a file again after EOF is reached. Default is 1s, which means
+  # the file is checked every second for new lines. This leads to near real time crawling.
+  # Every time a new line appears, backoff is reset to the initial value.
+  #backoff: 1s
+
+  # Max backoff defines what the maximum backoff time is. After having backed off
+  # multiple times from checking the files, the waiting time will never exceed
+  # max_backoff, independent of the backoff factor. Setting it to 10s means that,
+  # in the worst case, it takes a maximum of 10s to read a new line.
+  #max_backoff: 10s
+
+  # The backoff factor defines how fast the algorithm backs off. The bigger the backoff factor,
+  # the faster the max_backoff value is reached. If this value is set to 1, no backoff will happen.
+  # The backoff value will be multiplied each time by the backoff_factor until max_backoff is reached.
+  #backoff_factor: 2
+
+  # Max number of harvesters that are started in parallel.
+  # Default is 0, which means unlimited.
+  #harvester_limit: 0
+
+  ### Harvester closing options
+
+  # Close inactive closes the file handler after the predefined period.
+  # The period starts when the last line of the file was read, not from the file's ModTime.
+  # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
+  #close_inactive: 5m
+
+  # Close renamed closes a file handler when the file is renamed or rotated.
+  # Note: Potential data loss. Make sure to read and understand the docs for this option.
+  #close_renamed: false
+
+  # When enabling this option, a file handler is closed immediately in case a file can't be found
+  # any more. In case the file shows up again later, harvesting will continue at the last known position
+  # after scan_frequency.
+  #close_removed: true
+
+  # Closes the file handler as soon as the harvester reaches the end of the file.
+  # By default this option is disabled.
+  # Note: Potential data loss. Make sure to read and understand the docs for this option.
+  #close_eof: false
+
+  ### State options
+
+  # If a file's modification time is older than clean_inactive, its state is removed
+  # from the registry. By default this is disabled.
+  #clean_inactive: 0
+
+  # Immediately removes the state of files which can no longer be found on disk.
+  #clean_removed: true
+
+  # Close timeout closes the harvester after the predefined time.
+  # This applies whether or not the harvester has finished reading the file.
+  # By default this option is disabled.
+  # Note: Potential data loss. Make sure to read and understand the docs for this option.
+  #close_timeout: 0
+
+  # Defines if the prospector is enabled.
+  #enabled: true
+
+#----------------------------- Stdin prospector -------------------------------
+# Configuration to use stdin input
+#- type: stdin
+
+#------------------------- Redis slowlog prospector ---------------------------
+# Experimental: Config options for the redis slow log prospector
+#- type: redis
+  #hosts: ["localhost:6379"]
+  #username:
+  #password:
+  #enabled: false
+  #scan_frequency: 10s
+
+  # Timeout after which the prospector should return an error
+  #timeout: 1s
+
+  # Network type to be used for redis connection. Default: tcp
+  #network: tcp
+
+  # Max number of concurrent connections. Default: 10
+  #maxconn: 10
+
+  # Redis AUTH password. Empty by default.
+  #password: foobared
+
+#------------------------------ Udp prospector --------------------------------
+# Experimental: Config options for the udp prospector
+#- type: udp
+
+  # Maximum size of the message received over UDP
+  #max_message_size: 10240
+
+#========================== Filebeat autodiscover ==============================
+
+# Autodiscover allows you to detect changes in the system and spawn new modules
+# or prospectors as they happen.
+
+#filebeat.autodiscover:
+  # List of enabled autodiscover providers
+#  providers:
+#    - type: docker
+#      templates:
+#        - condition:
+#            equals.docker.container.image: busybox
+#          config:
+#            - type: log
+#              paths:
+#                - /var/lib/docker/containers/${data.docker.container.id}/*.log
+
+#========================= Filebeat global options ============================
+
+# Name of the registry file. If a relative path is used, it is considered relative to the
+# data path.
+#filebeat.registry_file: ${path.data}/registry
+
+# These config files must have the full filebeat config part inside, but only
+# the prospector part is processed. All global options like spool_size are ignored.
+# The config_dir MUST point to a different directory than the one the main filebeat
+# config file is in.
+#filebeat.config_dir:
+
+# How long filebeat waits on shutdown for the publisher to finish.
+# Default is 0, not waiting.
+#filebeat.shutdown_timeout: 0
+
+# Enable filebeat config reloading
+#filebeat.config:
+  #prospectors:
+    #enabled: false
+    #path: prospectors.d/*.yml
+    #reload.enabled: true
+    #reload.period: 10s
+  #modules:
+    #enabled: false
+    #path: modules.d/*.yml
+    #reload.enabled: true
+    #reload.period: 10s
+
+#================================ General ======================================
+
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+# If this option is not defined, the hostname is used.
+#name:
+
+# The tags of the shipper are included in their own field with each
+# transaction published. Tags make it easy to group servers by different
+# logical properties.
+#tags: ["service-X", "web-tier"]
+
+# Optional fields that you can specify to add additional information to the
+# output. Fields can be scalar values, arrays, dictionaries, or any nested
+# combination of these.
+#fields:
+#  env: staging
+
+# If this option is set to true, the custom fields are stored as top-level
+# fields in the output document instead of being grouped under a fields
+# sub-dictionary. Default is false.
+#fields_under_root: false
+
+# Internal queue configuration for buffering events to be published.
+#queue:
+  # Queue type by name (default 'mem')
+  # The memory queue will present all available events (up to the outputs
+  # bulk_max_size) to the output, the moment the output is ready to serve
+  # another batch of events.
+  #mem:
+    # Max number of events the queue can buffer.
+    #events: 4096
+
+    # Hints the minimum number of events stored in the queue,
+    # before providing a batch of events to the outputs.
+    # A value of 0 (the default) ensures events are immediately available
+    # to be sent to the outputs.
+    #flush.min_events: 2048
+
+    # Maximum duration after which events are available to the outputs,
+    # if the number of events stored in the queue is < flush.min_events.
+    #flush.timeout: 1s
+
+# Sets the maximum number of CPUs that can be executing simultaneously. The
+# default is the number of logical CPUs available in the system.
+#max_procs:
+
+#================================ Processors ===================================
+
+# Processors are used to reduce the number of fields in the exported event or to
+# enhance the event with external metadata. This section defines a list of
+# processors that are applied one by one and the first one receives the initial
+# event:
+#
+#   event -> filter1 -> event1 -> filter2 -> event2 ...
+#
+# The supported processors are drop_fields, drop_event, include_fields, and
+# add_cloud_metadata.
+#
+# For example, you can use the following processors to keep the fields that
+# contain CPU load percentages, but remove the fields that contain CPU ticks
+# values:
+#
+#processors:
+#- include_fields:
+#    fields: ["cpu"]
+#- drop_fields:
+#    fields: ["cpu.user", "cpu.system"]
+#
+# The following example drops the events that have the HTTP response code 200:
+#
+#processors:
+#- drop_event:
+#    when:
+#      equals:
+#        http.code: 200
+#
+# The following example enriches each event with metadata from the cloud
+# provider about the host machine. It works on EC2, GCE, DigitalOcean,
+# Tencent Cloud, and Alibaba Cloud.
+#
+#processors:
+#- add_cloud_metadata: ~
+#
+# The following example enriches each event with the machine's local time zone
+# offset from UTC.
+#
+#processors:
+#- add_locale:
+#    format: offset
+#
+# The following example enriches each event with docker metadata; it matches
+# given fields to an existing container id and adds info from that container:
+#
+#processors:
+#- add_docker_metadata:
+#    host: "unix:///var/run/docker.sock"
+#    match_fields: ["system.process.cgroup.id"]
+#    match_pids: ["process.pid", "process.ppid"]
+#    match_source: true
+#    match_source_index: 4
+#    cleanup_timeout: 60
+#    # To connect to Docker over TLS you must specify a client and CA certificate.
+#    #ssl:
+#    #  certificate_authority: "/etc/pki/root/ca.pem"
+#    #  certificate: "/etc/pki/client/cert.pem"
+#    #  key: "/etc/pki/client/cert.key"
+#
+# The following example enriches each event with docker metadata; it matches the
+# container id from the log path available in the `source` field (by default it
+# expects it to be /var/lib/docker/containers/*/*.log).
+#
+#processors:
+#- add_docker_metadata: ~
+
+#============================= Elastic Cloud ==================================
+
+# These settings simplify using filebeat with the Elastic Cloud (https://cloud.elastic.co/).
+
+# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
+# `setup.kibana.host` options.
+# You can find the `cloud.id` in the Elastic Cloud web UI.
+#cloud.id:
+
+# The cloud.auth setting overwrites the `output.elasticsearch.username` and
+# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
+#cloud.auth:
+
+#================================ Outputs ======================================
+
+# Configure what output to use when sending the data collected by the beat.
+
+#-------------------------- Elasticsearch output -------------------------------
+#output.elasticsearch:
+  # Boolean flag to enable or disable the output module.
+  #enabled: true
+
+  # Array of hosts to connect to.
+  # Scheme and port can be left out and will be set to the default (http and 9200).
+  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
+  # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
+  #hosts: ["localhost:9200"]
+
+  # Set gzip compression level.
+  #compression_level: 0
+
+  # Optional protocol and basic auth credentials.
+  #protocol: "https"
+  #username: "elastic"
+  #password: "changeme"
+
+  # Dictionary of HTTP parameters to pass within the url with index operations.
+  #parameters:
+    #param1: value1
+    #param2: value2
+
+  # Number of workers per Elasticsearch host.
+  #worker: 1
+
+  # Optional index name. The default is "filebeat" plus date
+  # and generates [filebeat-]YYYY.MM.DD keys.
+  # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
+  #index: "filebeat-%{[beat.version]}-%{+yyyy.MM.dd}"
+
+  # Optional ingest node pipeline. By default no pipeline will be used.
+  #pipeline: ""
+
+  # Optional HTTP Path
+  #path: "/elasticsearch"
+
+  # Custom HTTP headers to add to each request
+  #headers:
+  #  X-My-Header: Contents of the header
+
+  # Proxy server url
+  #proxy_url: http://proxy:3128
+
+  # The number of times a particular Elasticsearch index operation is attempted. If
+  # the indexing operation doesn't succeed after this many retries, the events are
+  # dropped. The default is 3.
+  #max_retries: 3
+
+  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
+  # The default is 50.
+  #bulk_max_size: 50
+
+  # Configure http request timeout before failing a request to Elasticsearch.
+  #timeout: 90
+
+  # Use SSL settings for HTTPS.
+  #ssl.enabled: true
+
+  # Configure SSL verification mode. If `none` is configured, all server hosts
+  # and certificates will be accepted. In this mode, SSL based connections are
+  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+  # `full`.
+  #ssl.verification_mode: full
+
+  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+  # 1.2 are enabled.
+  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+  # SSL configuration. It is off by default.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the Certificate Key.
+  #ssl.key_passphrase: ''
+
+  # Configure cipher suites to be used for SSL connections
+  #ssl.cipher_suites: []
+
+  # Configure curve types for ECDHE based cipher suites
+  #ssl.curve_types: []
+
+  # Configure what types of renegotiation are supported. Valid options are
+  # never, once, and freely. Default is never.
+  #ssl.renegotiation: never
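+
+# NOTE: the Elasticsearch output above stays disabled; events ship through the
+# Logstash output below. The dashboard/template load in installFilebeat.yml
+# points filebeat at Elasticsearch temporarily by overriding these settings
+# with "-E output.logstash.enabled=false -E output.elasticsearch.hosts=...".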
+
+#----------------------------- Logstash output ---------------------------------
+output.logstash:
+  # Boolean flag to enable or disable the output module.
+  enabled: true
+
+  # The Logstash hosts
+  hosts: {% set IP_ARR=[] %}{% for host in groups['elastic-logstash'] %}{% if IP_ARR.insert(loop.index,hostvars[host]['ansible_host']) %}{% endif %}{% endfor %}[{{ IP_ARR | map('regex_replace', '$', ':' ~ logstash_beat_input_port|string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',' ) }}]
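+  # With two logstash nodes and logstash_beat_input_port set to, say, 5044 the
+  # line above renders to something like (addresses illustrative only):
+  #   hosts: ["172.29.236.10:5044","172.29.236.11:5044"]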
+
+  # Number of workers per Logstash host.
+  #worker: 1
+
+  # Set gzip compression level.
+  #compression_level: 3
+
+  # Optional maximum time to live for a connection to Logstash, after which the
+  # connection will be re-established. A value of `0s` (the default) will
+  # disable this feature.
+  #
+  # Not yet supported for async connections (i.e. with the "pipelining" option set)
+  #ttl: 30s
+
+  # Optionally load balance events between the Logstash hosts. Default is false.
+  loadbalance: true
+
+  # Number of batches to be sent asynchronously to logstash while processing
+  # new batches.
+  #pipelining: 2
+
+  # If enabled only a subset of events in a batch of events is transferred per
+  # transaction. The number of events to be sent increases up to `bulk_max_size`
+  # if no error is encountered.
+  #slow_start: false
+
+  # Optional index name. The default index name is set to filebeat
+  # in all lowercase.
+  #index: 'filebeat'
+
+  # SOCKS5 proxy server URL
+  #proxy_url: socks5://user:password@socks5-server:2233
+
+  # Resolve names locally when using a proxy server. Defaults to false.
+  #proxy_use_local_resolver: false
+
+  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
+  #ssl.enabled: true
+
+  # Configure SSL verification mode. If `none` is configured, all server hosts
+  # and certificates will be accepted. In this mode, SSL based connections are
+  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+  # `full`.
+  #ssl.verification_mode: full
+
+  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+  # 1.2 are enabled.
+  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+  # Optional SSL configuration options. SSL is off by default.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the Certificate Key.
+  #ssl.key_passphrase: ''
+
+  # Configure cipher suites to be used for SSL connections
+  #ssl.cipher_suites: []
+
+  # Configure curve types for ECDHE based cipher suites
+  #ssl.curve_types: []
+
+  # Configure what types of renegotiation are supported. Valid options are
+  # never, once, and freely. Default is never.
+  #ssl.renegotiation: never
+
+#------------------------------- Kafka output ----------------------------------
+#output.kafka:
+  # Boolean flag to enable or disable the output module.
+  #enabled: true
+
+  # The list of Kafka broker addresses from where to fetch the cluster metadata.
+  # The cluster metadata contain the actual Kafka brokers events are published
+  # to.
+  #hosts: ["localhost:9092"]
+
+  # The Kafka topic used for produced events. The setting can be a format string
+  # using any event field. To set the topic from document type use `%{[type]}`.
+  #topic: beats
+
+  # The Kafka event key setting. Use format string to create unique event key.
+  # By default no event key will be generated.
+  #key: ''
+
+  # The Kafka event partitioning strategy. Default hashing strategy is `hash`
+  # using the `output.kafka.key` setting or randomly distributes events if
+  # `output.kafka.key` is not configured.
+  #partition.hash:
+    # If enabled, events will only be published to partitions with reachable
+    # leaders. Default is false.
+    #reachable_only: false
+
+    # Configure alternative event field names used to compute the hash value.
+    # If empty `output.kafka.key` setting will be used.
+    # Default value is empty list.
+    #hash: []
+
+  # Authentication details. Password is required if username is set.
+  #username: ''
+  #password: ''
+
+  # Kafka version filebeat is assumed to run against. Defaults to the oldest
+  # supported stable version (currently version 0.8.2.0)
+  #version: 0.8.2
+
+  # Metadata update configuration. Metadata contains leader information
+  # used to decide which broker to publish to.
+  #metadata:
+    # Max metadata request retry attempts when cluster is in middle of leader
+    # election. Defaults to 3 retries.
+    #retry.max: 3
+
+    # Waiting time between retries during leader elections. Default is 250ms.
+    #retry.backoff: 250ms
+
+    # Refresh metadata interval. Defaults to every 10 minutes.
+    #refresh_frequency: 10m
+
+  # The number of concurrent load-balanced Kafka output workers.
+  #worker: 1
+
+  # The number of times to retry publishing an event after a publishing failure.
+  # After the specified number of retries, the events are typically dropped.
+  # Some Beats, such as Filebeat, ignore the max_retries setting and retry until
+  # all events are published. Set max_retries to a value less than 0 to retry
+  # until all events are published. The default is 3.
+  #max_retries: 3
+
+  # The maximum number of events to bulk in a single Kafka request. The default
+  # is 2048.
+  #bulk_max_size: 2048
+
+  # The number of seconds to wait for responses from the Kafka brokers before
+  # timing out. The default is 30s.
+  #timeout: 30s
+
+  # The maximum duration a broker will wait for the number of required ACKs. The
+
+#------------------------------- Redis output ----------------------------------
+#output.redis:
+  # Boolean flag to enable or disable the output module.
+  #enabled: true
+
+  # The list of Redis servers to connect to. If load balancing is enabled, the
+  # events are distributed to the servers in the list. If one server becomes
+  # unreachable, the events are distributed to the reachable servers only.
+  #hosts: ["localhost:6379"]
+
+  # The Redis port to use if hosts does not contain a port number. The default
+  # is 6379.
+  #port: 6379
+
+  # The name of the Redis list or channel the events are published to. The
+  # default is filebeat.
+  #key: filebeat
+
+  # The password to authenticate with. The default is no authentication.
+  #password:
+
+  # The Redis database number where the events are published. The default is 0.
+  #db: 0
+
+  # The Redis data type to use for publishing events. If the data type is list,
+  # the Redis RPUSH command is used. If the data type is channel, the Redis
+  # PUBLISH command is used. The default value is list.
+  #datatype: list
+
+  # The number of workers to use for each host configured to publish events to
+  # Redis. Use this setting along with the loadbalance option. For example, if
+  # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each
+  # host).
+  #worker: 1
+
+  # If set to true and multiple hosts or workers are configured, the output
+  # plugin load balances published events onto all Redis hosts. If set to false,
+  # the output plugin sends all events to only one host (determined at random)
+  # and will switch to another host if the currently selected one becomes
+  # unreachable. The default value is true.
+  #loadbalance: true
+
+  # The Redis connection timeout in seconds. The default is 5 seconds.
+  #timeout: 5s
+
+  # The number of times to retry publishing an event after a publishing failure.
+  # After the specified number of retries, the events are typically dropped.
+  # Some Beats, such as Filebeat, ignore the max_retries setting and retry until
+  # all events are published. Set max_retries to a value less than 0 to retry
+  # until all events are published. The default is 3.
+  #max_retries: 3
+
+  # The maximum number of events to bulk in a single Redis request or pipeline.
+  # The default is 2048.
+  #bulk_max_size: 2048
+
+  # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The
+  # value must be a URL with a scheme of socks5://.
+  #proxy_url:
+
+  # This option determines whether Redis hostnames are resolved locally when
+  # using a proxy. The default value is false, which means that name resolution
+  # occurs on the proxy server.
+  #proxy_use_local_resolver: false
+
+  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
+  #ssl.enabled: true
+
+  # Configure SSL verification mode. If `none` is configured, all server hosts
+  # and certificates will be accepted. In this mode, SSL based connections are
+  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+  # `full`.
+  #ssl.verification_mode: full
+
+  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+  # 1.2 are enabled.
+  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+  # Optional SSL configuration options. SSL is off by default.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the Certificate Key.
+  #ssl.key_passphrase: ''
+
+  # Configure cipher suites to be used for SSL connections
+  #ssl.cipher_suites: []
+
+  # Configure curve types for ECDHE based cipher suites
+  #ssl.curve_types: []
+
+  # Configure what types of renegotiation are supported. Valid options are
+  # never, once, and freely. Default is never.
+  #ssl.renegotiation: never
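+
+  # As a sketch only (server address hypothetical): a minimal Redis output
+  # pushes events onto a single list, again with every other output disabled:
+  #
+  #   output.redis:
+  #     enabled: true
+  #     hosts: ["redis01.example.com:6379"]
+  #     key: filebeat
+  #     datatype: list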
+
+#------------------------------- File output -----------------------------------
+#output.file:
+  # Boolean flag to enable or disable the output module.
+  #enabled: true
+
+  # Path to the directory where to save the generated files. The option is
+  # mandatory.
+  #path: "/tmp/filebeat"
+
+  # Name of the generated files. The default is `filebeat` and it generates
+  # files: `filebeat`, `filebeat.1`, `filebeat.2`, etc.
+  #filename: filebeat
+
+  # Maximum size in kilobytes of each file. When this size is reached, and on
+  # every filebeat restart, the files are rotated. The default value is 10240
+  # kB.
+  #rotate_every_kb: 10000
+
+  # Maximum number of files under path. When this number of files is reached,
+  # the oldest file is deleted and the rest are shifted from last to first. The
+  # default is 7 files.
+  #number_of_files: 7
+
+  # Permissions to use for file creation. The default is 0600.
+  #permissions: 0600
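+
+  # For rough capacity planning: with the values sketched above
+  # (`rotate_every_kb: 10000`, i.e. about 10 MB per file, and
+  # `number_of_files: 7`), the file output would cap local spooling at
+  # roughly 7 x 10 MB = 70 MB per host before the oldest file is deleted.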
+
+
+#----------------------------- Console output ---------------------------------
+#output.console:
+  # Boolean flag to enable or disable the output module.
+  #enabled: true
+
+  # Pretty print json event
+  #pretty: false
+
+#================================= Paths ======================================
+
+# The home path for the filebeat installation. This is the default base path
+# for all other path settings and for miscellaneous files that come with the
+# distribution (for example, the sample dashboards).
+# If not set by a CLI flag or in the configuration file, the default for the
+# home path is the location of the binary.
+#path.home:
+
+# The configuration path for the filebeat installation. This is the default
+# base path for configuration files, including the main YAML configuration file
+# and the Elasticsearch template file. If not set by a CLI flag or in the
+# configuration file, the default for the configuration path is the home path.
+#path.config: ${path.home}
+
+# The data path for the filebeat installation. This is the default base path
+# for all the files in which filebeat needs to store its data. If not set by a
+# CLI flag or in the configuration file, the default for the data path is a data
+# subdirectory inside the home path.
+#path.data: ${path.home}/data
+
+# The logs path for a filebeat installation. This is the default location for
+# the Beat's log files. If not set by a CLI flag or in the configuration file,
+# the default for the logs path is a logs subdirectory inside the home path.
+#path.logs: ${path.home}/logs
+
+#============================== Dashboards =====================================
+# These settings control loading the sample dashboards into the Kibana index.
+# Loading the dashboards is disabled by default and can be enabled either by
+# setting the options here, or by using the `-setup` CLI flag or the `setup`
+# command.
+setup.dashboards.enabled: true
+
+# The directory from where to read the dashboards. The default is the `kibana`
+# folder in the home path.
+#setup.dashboards.directory: ${path.home}/kibana
+
+# The URL from where to download the dashboards archive. It is used instead of
+# the directory if it has a value.
+#setup.dashboards.url:
+
+# The file archive (zip file) from where to read the dashboards. It is used instead
+# of the directory when it has a value.
+#setup.dashboards.file:
+
+# In case the archive contains the dashboards from multiple Beats, this lets you
+# select which one to load. You can load all the dashboards in the archive by
+# setting this to the empty string.
+#setup.dashboards.beat: filebeat
+
+# The name of the Kibana index to use for setting the configuration. Default is ".kibana"
+#setup.dashboards.kibana_index: .kibana
+
+# The Elasticsearch index name. This overwrites the index name defined in the
+# dashboards and index pattern. Example: testbeat-*
+#setup.dashboards.index:
+
+# Always use the Kibana API for loading the dashboards instead of autodetecting
+# how to install the dashboards by first querying Elasticsearch.
+#setup.dashboards.always_kibana: false
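+
+# Dashboards can also be loaded on demand from the command line; a one-off
+# run along the lines of the following (Kibana address hypothetical) is
+# roughly equivalent to enabling the option above:
+#
+#   filebeat setup --dashboards -E 'setup.kibana.host=172.29.236.20:5601'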
+
+#============================== Template =====================================
+
+# A template is used to set the mapping in Elasticsearch.
+# By default template loading is enabled and the template is loaded.
+# These settings can be adjusted to load your own template or overwrite existing ones.
+
+# Set to false to disable template loading.
+setup.template.enabled: true
+
+# Template name. By default the template name is "filebeat-%{[beat.version]}".
+# The template name and pattern have to be set in case the Elasticsearch index pattern is modified.
+setup.template.name: "filebeat-%{[beat.version]}"
+
+# Template pattern. By default the template pattern is "filebeat-%{[beat.version]}-*" to apply to the default index settings.
+# The first part is the version of the beat and then -* is used to match all daily indices.
+# The template name and pattern have to be set in case the Elasticsearch index pattern is modified.
+setup.template.pattern: "filebeat-%{[beat.version]}-*"
+
+# Path to fields.yml file to generate the template
+setup.template.fields: "${path.config}/fields.yml"
+
+# Overwrite existing template
+setup.template.overwrite: true
+
+# Elasticsearch template settings
+setup.template.settings:
+
+  # A dictionary of settings to place into the settings.index dictionary
+  # of the Elasticsearch template. For more details, please check
+  # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html
+  index:
+    number_of_shards: 3
+    codec: best_compression
+    #number_of_routing_shards: 30
+
+  # A dictionary of settings for the _source field. For more details, please check
+  # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html
+  #_source:
+    #enabled: false
+
+#============================== Kibana =====================================
+
+# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
+# This requires a Kibana endpoint configuration.
+setup.kibana:
+
+  # Kibana Host
+  # Scheme and port can be left out and will be set to the default (http and 5601)
+  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
+  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
+  host: "{{ hostvars[groups['kibana'][0]]['ansible_host'] }}:{{ kibana_port }}"
+
+  # Optional protocol and basic auth credentials.
+  #protocol: "https"
+  #username: "elastic"
+  #password: "changeme"
+
+  # Optional HTTP Path
+  #path: ""
+
+  # Use SSL settings for HTTPS. Default is true.
+  #ssl.enabled: true
+
+  # Configure SSL verification mode. If `none` is configured, all server hosts
+  # and certificates will be accepted. In this mode, SSL based connections are
+  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+  # `full`.
+  #ssl.verification_mode: full
+
+  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+  # 1.2 are enabled.
+  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+  # SSL configuration. Off by default.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the Certificate Key.
+  #ssl.key_passphrase: ''
+
+  # Configure cipher suites to be used for SSL connections
+  #ssl.cipher_suites: []
+
+  # Configure curve types for ECDHE based cipher suites
+  #ssl.curve_types: []
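+
+  # For reference, the `host` line above renders from inventory facts; with a
+  # hypothetical Kibana node at 172.29.236.20 and `kibana_port: 5601` it
+  # would come out as:
+  #
+  #   host: "172.29.236.20:5601"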
+
+
+#================================ Logging ======================================
+# There are four options for the log output: file, stderr, syslog, eventlog.
+# The file output is the default.
+
+# Sets log level. The default log level is info.
+# Available log levels are: error, warning, info, debug
+#logging.level: info
+
+# Enable debug output for selected components. To enable all selectors use ["*"]
+# Other available selectors are "beat", "publish", "service"
+# Multiple selectors can be chained.
+#logging.selectors: [ ]
+
+# Send all logging output to syslog. The default is false.
+#logging.to_syslog: false
+
+# Send all logging output to Windows Event Logs. The default is false.
+#logging.to_eventlog: false
+
+# If enabled, filebeat periodically logs its internal metrics that have changed
+# in the last period. For each metric that changed, the delta from the value at
+# the beginning of the period is logged. Also, the total values for
+# all non-zero internal metrics are logged on shutdown. The default is true.
+#logging.metrics.enabled: true
+
+# The period after which to log the internal metrics. The default is 30s.
+#logging.metrics.period: 30s
+
+# Logging to rotating files. Set logging.to_files to false to disable logging to
+# files.
+logging.to_files: true
+logging.files:
+  # Configure the path where the logs are written. The default is the logs directory
+  # under the home path (the binary location).
+  path: /var/log/filebeat
+
+  # The name of the files where the logs are written to.
+  name: filebeat
+
+  # Configure the log file size limit. If the limit is reached, the log file is
+  # automatically rotated.
+  #rotateeverybytes: 10485760 # = 10MB
+
+  # Number of rotated log files to keep. Oldest files will be deleted first.
+  keepfiles: 2
+
+  # The permissions mask to apply when rotating log files. The default value is 0600.
+  # Must be a valid Unix-style file permissions mask expressed in octal notation.
+  #permissions: 0600
+
+# Set to true to log messages in json format.
+#logging.json: false
+
+
+#============================== Xpack Monitoring =====================================
+# filebeat can export internal metrics to a central Elasticsearch monitoring cluster.
+# This requires xpack monitoring to be enabled in Elasticsearch.
+# The reporting is disabled by default.
+
+# Set to true to enable the monitoring reporter.
+#xpack.monitoring.enabled: false
+
+# Uncomment to send the metrics to Elasticsearch. Most settings from the
+# Elasticsearch output are accepted here as well. Any setting that is not set is
+# automatically inherited from the Elasticsearch output configuration, so if you
+# have the Elasticsearch output configured, you can simply uncomment the
+# following line, and leave the rest commented out.
+#xpack.monitoring.elasticsearch:
+
+  # Array of hosts to connect to.
+  # Scheme and port can be left out and will be set to the default (http and 9200)
+  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
+  # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
+  #hosts: ["localhost:9200"]
+
+  # Set gzip compression level.
+  #compression_level: 0
+
+  # Optional protocol and basic auth credentials.
+  #protocol: "https"
+  #username: "beats_system"
+  #password: "changeme"
+
+  # Dictionary of HTTP parameters to pass within the url with index operations.
+  #parameters:
+    #param1: value1
+    #param2: value2
+
+  # Custom HTTP headers to add to each request
+  #headers:
+  #  X-My-Header: Contents of the header
+
+  # Proxy server url
+  #proxy_url: http://proxy:3128
+
+  # The number of times a particular Elasticsearch index operation is attempted. If
+  # the indexing operation doesn't succeed after this many retries, the events are
+  # dropped. The default is 3.
+  #max_retries: 3
+
+  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
+  # The default is 50.
+  #bulk_max_size: 50
+
+  # Configure HTTP request timeout before failing a request to Elasticsearch.
+  #timeout: 90
+
+  # Use SSL settings for HTTPS.
+  #ssl.enabled: true
+
+  # Configure SSL verification mode. If `none` is configured, all server hosts
+  # and certificates will be accepted. In this mode, SSL based connections are
+  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+  # `full`.
+  #ssl.verification_mode: full
+
+  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+  # 1.2 are enabled.
+  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+  # SSL configuration. Off by default.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the Certificate Key.
+  #ssl.key_passphrase: ''
+
+  # Configure cipher suites to be used for SSL connections
+  #ssl.cipher_suites: []
+
+  # Configure curve types for ECDHE based cipher suites
+  #ssl.curve_types: []
+
+  # Configure what types of renegotiation are supported. Valid options are
+  # never, once, and freely. Default is never.
+  #ssl.renegotiation: never
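+
+  # As a sketch only: to ship Filebeat's own metrics to a monitoring cluster,
+  # enabling the reporter and pointing it at Elasticsearch is enough
+  # (address hypothetical):
+  #
+  #   xpack.monitoring.enabled: true
+  #   xpack.monitoring.elasticsearch:
+  #     hosts: ["172.29.236.10:9200"]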
+
+#================================ HTTP Endpoint ======================================
+# Each beat can expose internal metrics through an HTTP endpoint. For security
+# reasons the endpoint is disabled by default. This feature is currently experimental.
+# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output
+# append ?pretty to the URL.
+
+# Defines if the HTTP endpoint is enabled.
+#http.enabled: false
+
+# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
+#http.host: localhost
+
+# Port on which the HTTP endpoint will bind. Default is 5066.
+#http.port: 5066
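+
+# If the endpoint above were enabled, the stats could be inspected locally
+# with, for example:
+#
+#   curl 'http://localhost:5066/stats?pretty'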