
The logstash groks were running inline using the legacy method, which uses lexical sorting of all logstash filter files and loads them in order. While this works, it forces all data to travel through all filters. This change makes use of the logstash multi-pipeline capabilities using a distributor and fork pattern. This allows data to flow through logstash more quickly and not block whenever there's an issue with an output plugin. Fingerprints use SHA1 when there's a message and UUID when not. This will ensure we're not duplicating log entries, which will help speed up transactions and further reduce the storage required. Change-Id: I38268e33b370da0f1e186ecf65911d4a312c3e6a Signed-off-by: Kevin Carter <kevin.carter@rackspace.com>
132 lines
5.2 KiB
YAML
132 lines
5.2 KiB
YAML
---
|
|
|
|
- name: Refresh minimal facts
|
|
setup:
|
|
gather_subset: '!all,!any,network,virtual'
|
|
|
|
# storage node count is equal to the cluster size
|
|
- name: Node count fact
|
|
set_fact:
|
|
storage_node_count: "{{ groups['elastic-logstash'] | length }}"
|
|
tags:
|
|
- always
|
|
|
|
# the elasticserch cluster elects one master from all those which are marked as master-eligible
|
|
# 1 node cluster can only have one master
|
|
# 2 node clusters have 1 master-eligable nodes to avoid split-brain
|
|
# 3 node clusters have 3 master-eligable nodes
|
|
# >3 node clusters have (nodes // 2) eligable masters rounded up to the next odd number
|
|
- name: Master node count fact
|
|
set_fact:
|
|
master_node_count: |-
|
|
{% set masters = 0 %}
|
|
{% if (storage_node_count | int) < 3 %}
|
|
{% set masters = 1 %}
|
|
{% elif (storage_node_count | int) == 3 %}
|
|
{% set masters = 3 %}
|
|
{% else %}
|
|
{% set masters = (storage_node_count | int ) // 2 %}
|
|
{% if ((masters | int) % 2 == 0) %}
|
|
{% set masters = (masters | int) + 1 %}
|
|
{% endif %}
|
|
{% endif %}
|
|
{{ masters }}
|
|
tags:
|
|
- always
|
|
|
|
# assign node roles
|
|
# the first 'master_node_count' hosts in groups['elastic-logstash'] become master-eligible nodes
|
|
# the first 'master_node_count' and subsequent alternate hosts in groups['elastic-logstash'] becomes data nodes
|
|
- name: Data nodes fact
|
|
set_fact:
|
|
data_nodes: "{{ (groups['elastic-logstash'][:master_node_count | int] + groups['elastic-logstash'][master_node_count | int::2]) }}"
|
|
master_nodes: "{{ groups['elastic-logstash'][:master_node_count | int] }}"
|
|
coordination_nodes: >-
|
|
{{
|
|
(groups['kibana'] | map('extract', hostvars, 'ansible_host') | list)
|
|
| map('regex_replace', '(.*)' ,'\1:' ~ elastic_port)
|
|
| list
|
|
}}
|
|
zen_nodes: >-
|
|
{{
|
|
(groups['elastic-logstash'] | union(groups['kibana'])) | map('extract', hostvars, 'ansible_host') | list
|
|
}}
|
|
elasticserch_interface_speed: |-
|
|
{% set default_interface_fact = hostvars[inventory_hostname]['ansible_' + (elastic_data_interface | replace('-', '_'))] %}
|
|
{% set speeds = [] %}
|
|
{% if default_interface_fact['type'] == 'bridge' %}
|
|
{% for interface in default_interface_fact['interfaces'] %}
|
|
{% set interface_fact = hostvars[inventory_hostname]['ansible_' + (interface | replace('-', '_'))] %}
|
|
{% if 'speed' in interface_fact %}
|
|
{% set speed = (interface_fact['speed'] | default(1000)) | string %}
|
|
{% if speed == "-1" %}
|
|
{% set _ = speeds.append(1000) %}
|
|
{% else %}
|
|
{% set _ = speeds.append(speed | int) %}
|
|
{% endif %}
|
|
{% if 'module' in interface_fact %}
|
|
{% set _ = speeds.append((interface_fact['speed'] | default(1000)) | int) %}
|
|
{% else %}
|
|
{% set _ = speeds.append(1000) %}
|
|
{% endif %}
|
|
{% endif %}
|
|
{% endfor %}
|
|
{% else %}
|
|
{% if ('module' in default_interface_fact) or (default_interface_fact['type'] == 'bond') %}
|
|
{% set speed = (default_interface_fact['speed'] | default(1000)) | string %}
|
|
{% if speed == "-1" %}
|
|
{% set _ = speeds.append(1000) %}
|
|
{% else %}
|
|
{% set _ = speeds.append(speed | int) %}
|
|
{% endif %}
|
|
{% else %}
|
|
{% set _ = speeds.append(1000) %}
|
|
{% endif %}
|
|
{% endif %}
|
|
{% set interface_speed = ((speeds | min) * 0.20) | int %}
|
|
{{ ((interface_speed | int) > 750) | ternary(750, interface_speed) }}
|
|
tags:
|
|
- always
|
|
|
|
- name: Set data node details
|
|
set_fact:
|
|
elasticsearch_data_node_details: >-
|
|
{{
|
|
(data_nodes | map('extract', hostvars, 'ansible_host') | list) | map('regex_replace', '(.*)' ,'\1:' ~ elastic_port) | list
|
|
}}
|
|
logstash_data_node_details: >-
|
|
{{
|
|
(data_nodes | map('extract', hostvars, 'ansible_host') | list) | map('regex_replace', '(.*)' ,'\1:' ~ logstash_beat_input_port) | list
|
|
}}
|
|
|
|
# based on the assignment of roles to hosts, set per host booleans
|
|
- name: Node enablement
|
|
set_fact:
|
|
master_node: "{{ (inventory_hostname in master_nodes) | ternary(true, false) }}"
|
|
data_node: "{{ (inventory_hostname in data_nodes) | ternary(true, false) }}"
|
|
elastic_thread_pool_size: "{{ ((ansible_processor_cores | int) >= 24) | ternary(24, ansible_processor_cores) }}"
|
|
tags:
|
|
- always
|
|
|
|
# Set a data node facts. The data nodes, in the case of elasticsearch are also
|
|
# ingest nodes.
|
|
- name: Set data nodes
|
|
set_fact:
|
|
elasticsearch_number_of_replicas: "{{ ((data_nodes | length) > 1) | ternary(((data_nodes | length) > 2 | ternary(2, 1)), 0) }}"
|
|
elasticsearch_data_hosts: |-
|
|
{% set nodes = elasticsearch_data_node_details %}
|
|
{% if inventory_hostname in data_nodes %}
|
|
{% set _ = nodes.insert(0, '127.0.0.1:' ~ elastic_port) %}
|
|
{% endif %}
|
|
{% set data_hosts = nodes | shuffle(seed=inventory_hostname) %}
|
|
{{ data_hosts }}
|
|
logstash_data_hosts: |-
|
|
{% set nodes = logstash_data_node_details %}
|
|
{% if inventory_hostname in data_nodes %}
|
|
{% set _ = nodes.insert(0, '127.0.0.1:' ~ logstash_beat_input_port) %}
|
|
{% endif %}
|
|
{% set data_hosts = nodes | shuffle(seed=inventory_hostname) %}
|
|
{{ data_hosts }}
|
|
tags:
|
|
- always
|