From 5e968441238f388f171669e2b080709b10ff1ca1 Mon Sep 17 00:00:00 2001 From: Georgina Shippey Date: Tue, 9 Jul 2019 10:18:59 +0100 Subject: [PATCH] Duplicate of elk_metrics_6x to elk_metrics_7x Change-Id: I92a894e31f725a20c684165f93dd4c34b9c8b450 --- elk_metrics_7x/README.rst | 630 ++++++++ elk_metrics_7x/ansible-role-requirements.yml | 13 + .../assets/Elastic-Stack-Diagram.svg | 2 + elk_metrics_7x/assets/Elastic-dataflow.svg | 2 + .../openstack-kibana-custom-dashboard.png | Bin 0 -> 121999 bytes elk_metrics_7x/bootstrap-embedded-ansible.sh | 1 + elk_metrics_7x/conf.d/elk.yml | 27 + elk_metrics_7x/createElasticIndexes.yml | 251 ++++ elk_metrics_7x/env.d/elk.yml | 53 + elk_metrics_7x/fieldRefresh.yml | 101 ++ elk_metrics_7x/installAPMserver.yml | 51 + elk_metrics_7x/installAuditbeat.yml | 52 + elk_metrics_7x/installCurator.yml | 30 + elk_metrics_7x/installElastic.yml | 27 + elk_metrics_7x/installFilebeat.yml | 52 + elk_metrics_7x/installHeartbeat.yml | 66 + elk_metrics_7x/installJournalbeat.yml | 102 ++ elk_metrics_7x/installKibana.yml | 26 + elk_metrics_7x/installLogstash.yml | 26 + elk_metrics_7x/installMetricbeat.yml | 52 + elk_metrics_7x/installMonitorStack.yml | 297 ++++ elk_metrics_7x/installPacketbeat.yml | 52 + .../elastic_apm_server/defaults/main.yml | 19 + .../elastic_apm_server/handlers/main.yml | 33 + .../roles/elastic_apm_server/meta/main.yml | 35 + .../roles/elastic_apm_server/tasks/main.yml | 81 + .../tasks/systemd.general-overrides.conf.j2 | 1 + .../templates/apm-server.yml.j2 | 145 ++ .../roles/elastic_apm_server/vars/redhat.yml | 17 + .../roles/elastic_apm_server/vars/suse.yml | 17 + .../roles/elastic_apm_server/vars/ubuntu.yml | 17 + .../roles/elastic_auditbeat/defaults/main.yml | 16 + .../roles/elastic_auditbeat/handlers/main.yml | 33 + .../roles/elastic_auditbeat/meta/main.yml | 35 + .../roles/elastic_auditbeat/tasks/main.yml | 112 ++ .../tasks/systemd.general-overrides.conf.j2 | 1 + .../templates/auditbeat.yml.j2 | 845 +++++++++++ 
.../roles/elastic_auditbeat/vars/redhat.yml | 18 + .../roles/elastic_auditbeat/vars/suse.yml | 18 + .../roles/elastic_auditbeat/vars/ubuntu.yml | 18 + .../elastic_beat_setup/defaults/main.yml | 30 + .../roles/elastic_beat_setup/meta/main.yml | 35 + .../roles/elastic_beat_setup/tasks/main.yml | 72 + .../roles/elastic_curator/handlers/main.yml | 25 + .../roles/elastic_curator/meta/main.yml | 34 + .../elastic_curator/tasks/curator_systemd.yml | 46 + .../elastic_curator/tasks/curator_upstart.yml | 32 + .../roles/elastic_curator/tasks/main.yml | 103 ++ .../templates/curator-actions-age.yml.j2 | 65 + .../templates/curator-actions-size.yml.j2 | 63 + .../elastic_curator/templates/curator.yml.j2 | 32 + .../roles/elastic_curator/vars/redhat.yml | 17 + .../roles/elastic_curator/vars/suse.yml | 17 + .../elastic_curator/vars/ubuntu-14.04.yml | 17 + .../roles/elastic_curator/vars/ubuntu.yml | 18 + .../elastic_data_hosts/defaults/main.yml | 17 + .../roles/elastic_data_hosts/meta/main.yml | 33 + .../roles/elastic_data_hosts/tasks/main.yml | 41 + .../vars/data-node-variables.yml | 204 +++ .../elastic_dependencies/defaults/main.yml | 46 + .../roles/elastic_dependencies/meta/main.yml | 34 + .../roles/elastic_dependencies/tasks/main.yml | 238 +++ .../templates/logrotate.j2 | 12 + .../elastic_dependencies/vars/redhat.yml | 17 + .../roles/elastic_dependencies/vars/suse.yml | 17 + .../elastic_dependencies/vars/ubuntu.yml | 17 + .../vars/vars_elasticsearch.yml | 17 + .../vars/vars_logstash.yml | 17 + .../roles/elastic_filebeat/defaults/main.yml | 284 ++++ .../roles/elastic_filebeat/handlers/main.yml | 33 + .../roles/elastic_filebeat/meta/main.yml | 35 + .../roles/elastic_filebeat/tasks/main.yml | 112 ++ .../tasks/systemd.general-overrides.conf.j2 | 1 + .../templates/filebeat.yml.j2 | 938 ++++++++++++ .../roles/elastic_filebeat/vars/redhat.yml | 17 + .../roles/elastic_filebeat/vars/suse.yml | 17 + .../roles/elastic_filebeat/vars/ubuntu.yml | 17 + 
.../roles/elastic_heartbeat/defaults/main.yml | 16 + .../roles/elastic_heartbeat/handlers/main.yml | 33 + .../roles/elastic_heartbeat/meta/main.yml | 35 + .../roles/elastic_heartbeat/tasks/main.yml | 118 ++ .../tasks/systemd.general-overrides.conf.j2 | 1 + .../templates/heartbeat.yml.j2 | 951 ++++++++++++ .../roles/elastic_heartbeat/vars/redhat.yml | 17 + .../roles/elastic_heartbeat/vars/suse.yml | 17 + .../roles/elastic_heartbeat/vars/ubuntu.yml | 17 + .../elastic_journalbeat/defaults/main.yml | 16 + .../elastic_journalbeat/handlers/main.yml | 25 + .../roles/elastic_journalbeat/meta/main.yml | 35 + .../roles/elastic_journalbeat/tasks/main.yml | 118 ++ .../templates/journalbeat.yml.j2 | 796 ++++++++++ .../roles/elastic_journalbeat/vars/redhat.yml | 17 + .../roles/elastic_journalbeat/vars/suse.yml | 17 + .../roles/elastic_journalbeat/vars/ubuntu.yml | 17 + .../roles/elastic_kibana/defaults/main.yml | 26 + .../roles/elastic_kibana/handlers/main.yml | 39 + .../roles/elastic_kibana/meta/main.yml | 34 + .../roles/elastic_kibana/tasks/main.yml | 88 ++ .../tasks/systemd.general-overrides.conf.j2 | 1 + .../elastic_kibana/templates/kibana.yml.j2 | 92 ++ .../elastic_kibana/templates/nginx_default.j2 | 19 + .../roles/elastic_kibana/vars/redhat.yml | 20 + .../roles/elastic_kibana/vars/suse.yml | 21 + .../roles/elastic_kibana/vars/ubuntu.yml | 21 + .../roles/elastic_logstash/defaults/main.yml | 102 ++ .../roles/elastic_logstash/handlers/main.yml | 33 + .../roles/elastic_logstash/meta/main.yml | 39 + .../tasks/logstash_arcsight.yml | 62 + .../tasks/logstash_kafka_ssl.yml | 24 + .../roles/elastic_logstash/tasks/main.yml | 250 ++++ .../roles/elastic_logstash/templates/extras | 10 + .../templates/logstash.yml.j2 | 259 ++++ .../systemd.logstash-mem-queue.conf.j2 | 2 + .../roles/elastic_logstash/vars/redhat.yml | 20 + .../roles/elastic_logstash/vars/suse.yml | 20 + .../roles/elastic_logstash/vars/ubuntu.yml | 20 + .../elastic_metricbeat/defaults/main.yml | 24 + 
.../elastic_metricbeat/handlers/main.yml | 33 + .../roles/elastic_metricbeat/meta/main.yml | 35 + .../roles/elastic_metricbeat/tasks/main.yml | 335 +++++ .../tasks/systemd.general-overrides.conf.j2 | 1 + .../templates/apache-status.conf.j2 | 7 + .../templates/metricbeat.yml.j2 | 1297 +++++++++++++++++ .../templates/nginx-status.conf.j2 | 9 + .../roles/elastic_metricbeat/vars/redhat.yml | 18 + .../roles/elastic_metricbeat/vars/suse.yml | 18 + .../roles/elastic_metricbeat/vars/ubuntu.yml | 18 + .../elastic_packetbeat/defaults/main.yml | 16 + .../elastic_packetbeat/handlers/main.yml | 33 + .../roles/elastic_packetbeat/meta/main.yml | 35 + .../roles/elastic_packetbeat/tasks/main.yml | 103 ++ .../tasks/systemd.general-overrides.conf.j2 | 1 + .../templates/packetbeat.yml.j2 | 1202 +++++++++++++++ .../roles/elastic_packetbeat/vars/redhat.yml | 18 + .../roles/elastic_packetbeat/vars/suse.yml | 18 + .../roles/elastic_packetbeat/vars/ubuntu.yml | 18 + .../elastic_repositories/defaults/main.yml | 20 + .../roles/elastic_repositories/meta/main.yml | 33 + .../tasks/elastic_apt_repos.yml | 42 + .../tasks/elastic_yum_repos.yml | 58 + .../tasks/elastic_zypper_repos.yml | 39 + .../roles/elastic_repositories/tasks/main.yml | 40 + .../elastic_repositories/vars/redhat.yml | 24 + .../roles/elastic_repositories/vars/suse.yml | 20 + .../vars/ubuntu-14.04.yml | 26 + .../elastic_repositories/vars/ubuntu.yml | 23 + .../roles/elastic_retention/defaults/main.yml | 118 ++ .../roles/elastic_retention/meta/main.yml | 34 + .../roles/elastic_retention/tasks/main.yml | 104 ++ .../calculate_index_retention_default.yml | 58 + .../roles/elastic_rollup/defaults/main.yml | 16 + .../roles/elastic_rollup/meta/main.yml | 34 + .../roles/elastic_rollup/tasks/main.yml | 106 ++ .../roles/elasticsearch/defaults/main.yml | 33 + .../roles/elasticsearch/handlers/main.yml | 33 + .../roles/elasticsearch/meta/main.yml | 39 + .../tasks/elasticsearch_nfs_setup.yml | 29 + .../tasks/elasticsearch_plugins.yml | 26 + 
.../roles/elasticsearch/tasks/main.yml | 140 ++ .../templates/elasticsearch.yml.j2 | 146 ++ .../templates/es-log4j2.properties.j2 | 85 ++ .../systemd.elasticsearch-overrides.conf.j2 | 14 + .../systemd.general-overrides.conf.j2 | 1 + .../roles/elasticsearch/vars/redhat.yml | 20 + .../roles/elasticsearch/vars/suse.yml | 20 + .../roles/elasticsearch/vars/ubuntu.yml | 20 + .../roles/elasticsearch/vars/vars_default.yml | 16 + .../roles/elasticsearch/vars/vars_kibana.yml | 26 + elk_metrics_7x/setupKibanaDashboard.yml | 86 ++ elk_metrics_7x/showElasticCluster.yml | 64 + elk_metrics_7x/site-beats-core.yml | 24 + elk_metrics_7x/site-elka.yml | 18 + elk_metrics_7x/site.yml | 17 + elk_metrics_7x/templates/_macros.j2 | 484 ++++++ elk_metrics_7x/templates/jvm.options.j2 | 95 ++ .../templates/logstash-pipelines.yml.j2 | 597 ++++++++ .../templates/openstack-log-dashboard.json | 107 ++ .../systemd.general-overrides.conf.j2 | 12 + elk_metrics_7x/tests/_container-setup.yml | 73 + elk_metrics_7x/tests/_key-setup.yml | 41 + .../tests/ansible-role-requirements.yml | 33 + elk_metrics_7x/tests/functional.yml | 161 ++ .../inventory/test-container-inventory.yml | 54 + .../tests/inventory/test-metal-inventory.yml | 24 + elk_metrics_7x/tests/manual-test.rc | 16 + elk_metrics_7x/tests/post-run.yml | 27 + elk_metrics_7x/tests/run-cleanup.sh | 56 + elk_metrics_7x/tests/run-setup.yml | 53 + elk_metrics_7x/tests/run-tests.sh | 46 + elk_metrics_7x/tests/test-vars.yml | 24 + elk_metrics_7x/tests/test.yml | 18 + elk_metrics_7x/tests/testAPI.yml | 123 ++ elk_metrics_7x/tests/testLayout.yml | 62 + elk_metrics_7x/vars/variables.yml | 378 +++++ 194 files changed, 16967 insertions(+) create mode 100644 elk_metrics_7x/README.rst create mode 100644 elk_metrics_7x/ansible-role-requirements.yml create mode 100644 elk_metrics_7x/assets/Elastic-Stack-Diagram.svg create mode 100644 elk_metrics_7x/assets/Elastic-dataflow.svg create mode 100644 elk_metrics_7x/assets/openstack-kibana-custom-dashboard.png create 
mode 120000 elk_metrics_7x/bootstrap-embedded-ansible.sh create mode 100644 elk_metrics_7x/conf.d/elk.yml create mode 100644 elk_metrics_7x/createElasticIndexes.yml create mode 100644 elk_metrics_7x/env.d/elk.yml create mode 100644 elk_metrics_7x/fieldRefresh.yml create mode 100644 elk_metrics_7x/installAPMserver.yml create mode 100644 elk_metrics_7x/installAuditbeat.yml create mode 100644 elk_metrics_7x/installCurator.yml create mode 100644 elk_metrics_7x/installElastic.yml create mode 100644 elk_metrics_7x/installFilebeat.yml create mode 100644 elk_metrics_7x/installHeartbeat.yml create mode 100644 elk_metrics_7x/installJournalbeat.yml create mode 100644 elk_metrics_7x/installKibana.yml create mode 100644 elk_metrics_7x/installLogstash.yml create mode 100644 elk_metrics_7x/installMetricbeat.yml create mode 100644 elk_metrics_7x/installMonitorStack.yml create mode 100644 elk_metrics_7x/installPacketbeat.yml create mode 100644 elk_metrics_7x/roles/elastic_apm_server/defaults/main.yml create mode 100644 elk_metrics_7x/roles/elastic_apm_server/handlers/main.yml create mode 100644 elk_metrics_7x/roles/elastic_apm_server/meta/main.yml create mode 100644 elk_metrics_7x/roles/elastic_apm_server/tasks/main.yml create mode 120000 elk_metrics_7x/roles/elastic_apm_server/tasks/systemd.general-overrides.conf.j2 create mode 100644 elk_metrics_7x/roles/elastic_apm_server/templates/apm-server.yml.j2 create mode 100644 elk_metrics_7x/roles/elastic_apm_server/vars/redhat.yml create mode 100644 elk_metrics_7x/roles/elastic_apm_server/vars/suse.yml create mode 100644 elk_metrics_7x/roles/elastic_apm_server/vars/ubuntu.yml create mode 100644 elk_metrics_7x/roles/elastic_auditbeat/defaults/main.yml create mode 100644 elk_metrics_7x/roles/elastic_auditbeat/handlers/main.yml create mode 100644 elk_metrics_7x/roles/elastic_auditbeat/meta/main.yml create mode 100644 elk_metrics_7x/roles/elastic_auditbeat/tasks/main.yml create mode 120000 
elk_metrics_7x/roles/elastic_auditbeat/tasks/systemd.general-overrides.conf.j2 create mode 100644 elk_metrics_7x/roles/elastic_auditbeat/templates/auditbeat.yml.j2 create mode 100644 elk_metrics_7x/roles/elastic_auditbeat/vars/redhat.yml create mode 100644 elk_metrics_7x/roles/elastic_auditbeat/vars/suse.yml create mode 100644 elk_metrics_7x/roles/elastic_auditbeat/vars/ubuntu.yml create mode 100644 elk_metrics_7x/roles/elastic_beat_setup/defaults/main.yml create mode 100644 elk_metrics_7x/roles/elastic_beat_setup/meta/main.yml create mode 100644 elk_metrics_7x/roles/elastic_beat_setup/tasks/main.yml create mode 100644 elk_metrics_7x/roles/elastic_curator/handlers/main.yml create mode 100644 elk_metrics_7x/roles/elastic_curator/meta/main.yml create mode 100644 elk_metrics_7x/roles/elastic_curator/tasks/curator_systemd.yml create mode 100644 elk_metrics_7x/roles/elastic_curator/tasks/curator_upstart.yml create mode 100644 elk_metrics_7x/roles/elastic_curator/tasks/main.yml create mode 100644 elk_metrics_7x/roles/elastic_curator/templates/curator-actions-age.yml.j2 create mode 100644 elk_metrics_7x/roles/elastic_curator/templates/curator-actions-size.yml.j2 create mode 100644 elk_metrics_7x/roles/elastic_curator/templates/curator.yml.j2 create mode 100644 elk_metrics_7x/roles/elastic_curator/vars/redhat.yml create mode 100644 elk_metrics_7x/roles/elastic_curator/vars/suse.yml create mode 100644 elk_metrics_7x/roles/elastic_curator/vars/ubuntu-14.04.yml create mode 100644 elk_metrics_7x/roles/elastic_curator/vars/ubuntu.yml create mode 100644 elk_metrics_7x/roles/elastic_data_hosts/defaults/main.yml create mode 100644 elk_metrics_7x/roles/elastic_data_hosts/meta/main.yml create mode 100644 elk_metrics_7x/roles/elastic_data_hosts/tasks/main.yml create mode 100644 elk_metrics_7x/roles/elastic_data_hosts/vars/data-node-variables.yml create mode 100644 elk_metrics_7x/roles/elastic_dependencies/defaults/main.yml create mode 100644 
elk_metrics_7x/roles/elastic_dependencies/meta/main.yml create mode 100644 elk_metrics_7x/roles/elastic_dependencies/tasks/main.yml create mode 100644 elk_metrics_7x/roles/elastic_dependencies/templates/logrotate.j2 create mode 100644 elk_metrics_7x/roles/elastic_dependencies/vars/redhat.yml create mode 100644 elk_metrics_7x/roles/elastic_dependencies/vars/suse.yml create mode 100644 elk_metrics_7x/roles/elastic_dependencies/vars/ubuntu.yml create mode 100644 elk_metrics_7x/roles/elastic_dependencies/vars/vars_elasticsearch.yml create mode 100644 elk_metrics_7x/roles/elastic_dependencies/vars/vars_logstash.yml create mode 100644 elk_metrics_7x/roles/elastic_filebeat/defaults/main.yml create mode 100644 elk_metrics_7x/roles/elastic_filebeat/handlers/main.yml create mode 100644 elk_metrics_7x/roles/elastic_filebeat/meta/main.yml create mode 100644 elk_metrics_7x/roles/elastic_filebeat/tasks/main.yml create mode 120000 elk_metrics_7x/roles/elastic_filebeat/tasks/systemd.general-overrides.conf.j2 create mode 100644 elk_metrics_7x/roles/elastic_filebeat/templates/filebeat.yml.j2 create mode 100644 elk_metrics_7x/roles/elastic_filebeat/vars/redhat.yml create mode 100644 elk_metrics_7x/roles/elastic_filebeat/vars/suse.yml create mode 100644 elk_metrics_7x/roles/elastic_filebeat/vars/ubuntu.yml create mode 100644 elk_metrics_7x/roles/elastic_heartbeat/defaults/main.yml create mode 100644 elk_metrics_7x/roles/elastic_heartbeat/handlers/main.yml create mode 100644 elk_metrics_7x/roles/elastic_heartbeat/meta/main.yml create mode 100644 elk_metrics_7x/roles/elastic_heartbeat/tasks/main.yml create mode 120000 elk_metrics_7x/roles/elastic_heartbeat/tasks/systemd.general-overrides.conf.j2 create mode 100644 elk_metrics_7x/roles/elastic_heartbeat/templates/heartbeat.yml.j2 create mode 100644 elk_metrics_7x/roles/elastic_heartbeat/vars/redhat.yml create mode 100644 elk_metrics_7x/roles/elastic_heartbeat/vars/suse.yml create mode 100644 
elk_metrics_7x/roles/elastic_heartbeat/vars/ubuntu.yml create mode 100644 elk_metrics_7x/roles/elastic_journalbeat/defaults/main.yml create mode 100644 elk_metrics_7x/roles/elastic_journalbeat/handlers/main.yml create mode 100644 elk_metrics_7x/roles/elastic_journalbeat/meta/main.yml create mode 100644 elk_metrics_7x/roles/elastic_journalbeat/tasks/main.yml create mode 100644 elk_metrics_7x/roles/elastic_journalbeat/templates/journalbeat.yml.j2 create mode 100644 elk_metrics_7x/roles/elastic_journalbeat/vars/redhat.yml create mode 100644 elk_metrics_7x/roles/elastic_journalbeat/vars/suse.yml create mode 100644 elk_metrics_7x/roles/elastic_journalbeat/vars/ubuntu.yml create mode 100644 elk_metrics_7x/roles/elastic_kibana/defaults/main.yml create mode 100644 elk_metrics_7x/roles/elastic_kibana/handlers/main.yml create mode 100644 elk_metrics_7x/roles/elastic_kibana/meta/main.yml create mode 100644 elk_metrics_7x/roles/elastic_kibana/tasks/main.yml create mode 120000 elk_metrics_7x/roles/elastic_kibana/tasks/systemd.general-overrides.conf.j2 create mode 100644 elk_metrics_7x/roles/elastic_kibana/templates/kibana.yml.j2 create mode 100644 elk_metrics_7x/roles/elastic_kibana/templates/nginx_default.j2 create mode 100644 elk_metrics_7x/roles/elastic_kibana/vars/redhat.yml create mode 100644 elk_metrics_7x/roles/elastic_kibana/vars/suse.yml create mode 100644 elk_metrics_7x/roles/elastic_kibana/vars/ubuntu.yml create mode 100644 elk_metrics_7x/roles/elastic_logstash/defaults/main.yml create mode 100644 elk_metrics_7x/roles/elastic_logstash/handlers/main.yml create mode 100644 elk_metrics_7x/roles/elastic_logstash/meta/main.yml create mode 100644 elk_metrics_7x/roles/elastic_logstash/tasks/logstash_arcsight.yml create mode 100644 elk_metrics_7x/roles/elastic_logstash/tasks/logstash_kafka_ssl.yml create mode 100644 elk_metrics_7x/roles/elastic_logstash/tasks/main.yml create mode 100644 elk_metrics_7x/roles/elastic_logstash/templates/extras create mode 100644 
elk_metrics_7x/roles/elastic_logstash/templates/logstash.yml.j2 create mode 100644 elk_metrics_7x/roles/elastic_logstash/templates/systemd.logstash-mem-queue.conf.j2 create mode 100644 elk_metrics_7x/roles/elastic_logstash/vars/redhat.yml create mode 100644 elk_metrics_7x/roles/elastic_logstash/vars/suse.yml create mode 100644 elk_metrics_7x/roles/elastic_logstash/vars/ubuntu.yml create mode 100644 elk_metrics_7x/roles/elastic_metricbeat/defaults/main.yml create mode 100644 elk_metrics_7x/roles/elastic_metricbeat/handlers/main.yml create mode 100644 elk_metrics_7x/roles/elastic_metricbeat/meta/main.yml create mode 100644 elk_metrics_7x/roles/elastic_metricbeat/tasks/main.yml create mode 120000 elk_metrics_7x/roles/elastic_metricbeat/tasks/systemd.general-overrides.conf.j2 create mode 100644 elk_metrics_7x/roles/elastic_metricbeat/templates/apache-status.conf.j2 create mode 100644 elk_metrics_7x/roles/elastic_metricbeat/templates/metricbeat.yml.j2 create mode 100644 elk_metrics_7x/roles/elastic_metricbeat/templates/nginx-status.conf.j2 create mode 100644 elk_metrics_7x/roles/elastic_metricbeat/vars/redhat.yml create mode 100644 elk_metrics_7x/roles/elastic_metricbeat/vars/suse.yml create mode 100644 elk_metrics_7x/roles/elastic_metricbeat/vars/ubuntu.yml create mode 100644 elk_metrics_7x/roles/elastic_packetbeat/defaults/main.yml create mode 100644 elk_metrics_7x/roles/elastic_packetbeat/handlers/main.yml create mode 100644 elk_metrics_7x/roles/elastic_packetbeat/meta/main.yml create mode 100644 elk_metrics_7x/roles/elastic_packetbeat/tasks/main.yml create mode 120000 elk_metrics_7x/roles/elastic_packetbeat/tasks/systemd.general-overrides.conf.j2 create mode 100644 elk_metrics_7x/roles/elastic_packetbeat/templates/packetbeat.yml.j2 create mode 100644 elk_metrics_7x/roles/elastic_packetbeat/vars/redhat.yml create mode 100644 elk_metrics_7x/roles/elastic_packetbeat/vars/suse.yml create mode 100644 elk_metrics_7x/roles/elastic_packetbeat/vars/ubuntu.yml create mode 
100644 elk_metrics_7x/roles/elastic_repositories/defaults/main.yml create mode 100644 elk_metrics_7x/roles/elastic_repositories/meta/main.yml create mode 100644 elk_metrics_7x/roles/elastic_repositories/tasks/elastic_apt_repos.yml create mode 100644 elk_metrics_7x/roles/elastic_repositories/tasks/elastic_yum_repos.yml create mode 100644 elk_metrics_7x/roles/elastic_repositories/tasks/elastic_zypper_repos.yml create mode 100644 elk_metrics_7x/roles/elastic_repositories/tasks/main.yml create mode 100644 elk_metrics_7x/roles/elastic_repositories/vars/redhat.yml create mode 100644 elk_metrics_7x/roles/elastic_repositories/vars/suse.yml create mode 100644 elk_metrics_7x/roles/elastic_repositories/vars/ubuntu-14.04.yml create mode 100644 elk_metrics_7x/roles/elastic_repositories/vars/ubuntu.yml create mode 100644 elk_metrics_7x/roles/elastic_retention/defaults/main.yml create mode 100644 elk_metrics_7x/roles/elastic_retention/meta/main.yml create mode 100644 elk_metrics_7x/roles/elastic_retention/tasks/main.yml create mode 100644 elk_metrics_7x/roles/elastic_retention/vars/calculate_index_retention_default.yml create mode 100644 elk_metrics_7x/roles/elastic_rollup/defaults/main.yml create mode 100644 elk_metrics_7x/roles/elastic_rollup/meta/main.yml create mode 100644 elk_metrics_7x/roles/elastic_rollup/tasks/main.yml create mode 100644 elk_metrics_7x/roles/elasticsearch/defaults/main.yml create mode 100644 elk_metrics_7x/roles/elasticsearch/handlers/main.yml create mode 100644 elk_metrics_7x/roles/elasticsearch/meta/main.yml create mode 100644 elk_metrics_7x/roles/elasticsearch/tasks/elasticsearch_nfs_setup.yml create mode 100644 elk_metrics_7x/roles/elasticsearch/tasks/elasticsearch_plugins.yml create mode 100644 elk_metrics_7x/roles/elasticsearch/tasks/main.yml create mode 100644 elk_metrics_7x/roles/elasticsearch/templates/elasticsearch.yml.j2 create mode 100644 elk_metrics_7x/roles/elasticsearch/templates/es-log4j2.properties.j2 create mode 100644 
elk_metrics_7x/roles/elasticsearch/templates/systemd.elasticsearch-overrides.conf.j2 create mode 120000 elk_metrics_7x/roles/elasticsearch/templates/systemd.general-overrides.conf.j2 create mode 100644 elk_metrics_7x/roles/elasticsearch/vars/redhat.yml create mode 100644 elk_metrics_7x/roles/elasticsearch/vars/suse.yml create mode 100644 elk_metrics_7x/roles/elasticsearch/vars/ubuntu.yml create mode 100644 elk_metrics_7x/roles/elasticsearch/vars/vars_default.yml create mode 100644 elk_metrics_7x/roles/elasticsearch/vars/vars_kibana.yml create mode 100644 elk_metrics_7x/setupKibanaDashboard.yml create mode 100644 elk_metrics_7x/showElasticCluster.yml create mode 100644 elk_metrics_7x/site-beats-core.yml create mode 100644 elk_metrics_7x/site-elka.yml create mode 100644 elk_metrics_7x/site.yml create mode 100644 elk_metrics_7x/templates/_macros.j2 create mode 100644 elk_metrics_7x/templates/jvm.options.j2 create mode 100644 elk_metrics_7x/templates/logstash-pipelines.yml.j2 create mode 100644 elk_metrics_7x/templates/openstack-log-dashboard.json create mode 100644 elk_metrics_7x/templates/systemd.general-overrides.conf.j2 create mode 100644 elk_metrics_7x/tests/_container-setup.yml create mode 100644 elk_metrics_7x/tests/_key-setup.yml create mode 100644 elk_metrics_7x/tests/ansible-role-requirements.yml create mode 100644 elk_metrics_7x/tests/functional.yml create mode 100644 elk_metrics_7x/tests/inventory/test-container-inventory.yml create mode 100644 elk_metrics_7x/tests/inventory/test-metal-inventory.yml create mode 100644 elk_metrics_7x/tests/manual-test.rc create mode 100644 elk_metrics_7x/tests/post-run.yml create mode 100755 elk_metrics_7x/tests/run-cleanup.sh create mode 100644 elk_metrics_7x/tests/run-setup.yml create mode 100755 elk_metrics_7x/tests/run-tests.sh create mode 100644 elk_metrics_7x/tests/test-vars.yml create mode 100644 elk_metrics_7x/tests/test.yml create mode 100644 elk_metrics_7x/tests/testAPI.yml create mode 100644 
elk_metrics_7x/tests/testLayout.yml create mode 100644 elk_metrics_7x/vars/variables.yml diff --git a/elk_metrics_7x/README.rst b/elk_metrics_7x/README.rst new file mode 100644 index 00000000..cc0e40f1 --- /dev/null +++ b/elk_metrics_7x/README.rst @@ -0,0 +1,630 @@ +Install ELK with beats to gather metrics +######################################## +:tags: openstack, ansible + +.. +About this repository +--------------------- + +This set of playbooks will deploy an elastic stack cluster (Elasticsearch, +Logstash, Kibana) with beats to gather metrics from hosts and store them into +the elastic stack. + +**These playbooks require Ansible 2.5+.** + +Highlevel overview of the Elastic-Stack infrastructure these playbooks will +build and operate against. + +.. image:: assets/Elastic-Stack-Diagram.svg + :scale: 50 % + :alt: Elasticsearch Architecture Diagram + :align: center + +OpenStack-Ansible Integration +----------------------------- + +These playbooks can be used as standalone inventory or as an integrated part of +an OpenStack-Ansible deployment. For a simple example of standalone inventory, +see [test-inventory.yml](tests/inventory/test-inventory.yml). + + +Optional | Load balancer configuration +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Configure the Elasticsearch endpoints: + While the Elastic stack cluster does not need a load balancer to scale, it is + useful when accessing the Elasticsearch cluster using external tooling. Tools + like OSProfiler, Grafana, etc will all benefit from being able to interact + with Elasticsearch using the load balancer. This provides better fault + tolerance especially when compared to connecting to a single node. + The following section can be added to the `haproxy_extra_services` list to + create an Elasticsearch backend. The ingress port used to connect to + Elasticsearch is **9201**. The backend port is **9200**. 
If this backend is + setup make sure you set the `internal_lb_vip_address` on the CLI or within a + known variable file which will be sourced at runtime. If using HAProxy, edit + the `/etc/openstack_deploy/user_variables.yml` file and add the following + lines. + +.. code-block:: yaml + + haproxy_extra_services: + - service: + haproxy_service_name: elastic-logstash + haproxy_ssl: False + haproxy_backend_nodes: "{{ groups['Kibana'] | default([]) }}" # Kibana nodes are also Elasticsearch coordination nodes + haproxy_port: 9201 # This is set using the "elastic_hap_port" variable + haproxy_check_port: 9200 # This is set using the "elastic_port" variable + haproxy_backend_port: 9200 # This is set using the "elastic_port" variable + haproxy_balance_type: tcp + + +Configure the Kibana endpoints: + It is recommended to use a load balancer with Kibana. Like Elasticsearch, a + load balancer is not required however without one users will need to directly + connect to a single Kibana node to access the dashboard. If a load balancer is + present it can provide a highly available address for users to access a pool + of Kibana nodes which will provide a much better user experience. If using + HAProxy, edit the `/etc/openstack_deploy/user_variables.yml` file and add the + following lines. + +.. code-block:: yaml + + haproxy_extra_services: + - service: + haproxy_service_name: Kibana + haproxy_ssl: False + haproxy_backend_nodes: "{{ groups['Kibana'] | default([]) }}" + haproxy_port: 81 # This is set using the "Kibana_nginx_port" variable + haproxy_balance_type: tcp + +Configure the APM endpoints: + It is recommended to use a load balancer for submitting Application + Performance Monitoring data to the APM server. A load balancer will provide + a highly available address which APM clients can use to connect to a pool of + APM nodes. If using HAProxy, edit the `/etc/openstack_deploy/user_variables.yml` + and add the following lines + +.. 
code-block:: yaml + + haproxy_extra_services: + - service: + haproxy_service_name: apm-server + haproxy_ssl: False + haproxy_backend_nodes: "{{ groups['apm-server'] | default([]) }}" + haproxy_port: 8200 # this is set using the "apm_port" variable + haproxy_balance_type: tcp + +Optional | add OSProfiler to an OpenStack-Ansible deployment +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To initialize the `OSProfiler` module within openstack the following overrides +can be applied to the to a user variables file. The hmac key needs to be defined +consistently throughout the environment. + +Full example to initialize the `OSProfiler` modules throughout an +OpenStack-Ansible deployment. + +.. code-block:: yaml + + profiler_overrides: &os_profiler + profiler: + enabled: true + trace_sqlalchemy: true + hmac_keys: "UNIQUE_HMACKEY" # This needs to be set consistently throughout the deployment + connection_string: "Elasticsearch://{{ internal_lb_vip_address }}:9201" + es_doc_type: "notification" + es_scroll_time: "2m" + es_scroll_size: "10000" + filter_error_trace: "false" + + aodh_aodh_conf_overrides: *os_profiler + barbican_config_overrides: *os_profiler + ceilometer_ceilometer_conf_overrides: *os_profiler + cinder_cinder_conf_overrides: *os_profiler + designate_designate_conf_overrides: *os_profiler + glance_glance_api_conf_overrides: *os_profiler + gnocchi_conf_overrides: *os_profiler + heat_heat_conf_overrides: *os_profiler + horizon_config_overrides: *os_profiler + ironic_ironic_conf_overrides: *os_profiler + keystone_keystone_conf_overrides: *os_profiler + magnum_config_overrides: *os_profiler + neutron_neutron_conf_overrides: *os_profiler + nova_nova_conf_overrides: *os_profiler + octavia_octavia_conf_overrides: *os_profiler + rally_config_overrides: *os_profiler + sahara_conf_overrides: *os_profiler + swift_swift_conf_overrides: *os_profiler + tacker_tacker_conf_overrides: *os_profiler + trove_config_overrides: *os_profiler + + +If a deployer wishes 
to use multiple keys they can do so with a comma separated +list. + +.. code-block:: yaml + + profiler_overrides: &os_profiler + profiler: + hmac_keys: "key1,key2" + + +To add the `OSProfiler` section to an existing set of overrides, the `yaml` section +can be added or dynamically appended to a given hash using `yaml` tags. + +.. code-block:: yaml + + profiler_overrides: &os_profiler + profiler: + enabled: true + hmac_keys: "UNIQUE_HMACKEY" # This needs to be set consistently throughout the deployment + connection_string: "Elasticsearch://{{ internal_lb_vip_address }}:9201" + es_doc_type: "notification" + es_scroll_time: "2m" + es_scroll_size: "10000" + filter_error_trace: "false" + + # Example to merge the os_profiler tag into an existing override hash + nova_nova_conf_overrides: + section1_override: + key: "value" + <<: *os_profiler + + +While the `osprofiler` and `Elasticsearch` libraries should be installed +within all virtual environments by default, it's possible they're missing +within a given deployment. To install these dependencies throughout the +cluster without having to invoke a *repo-build* run the following *adhoc* +Ansible command can be used. + + The version of the Elasticsearch python library should match the major version + of Elasticsearch being deployed within the environment. + +.. code-block:: bash + + ansible -m shell -a 'find /openstack/venvs/* -maxdepth 0 -type d -exec {}/bin/pip install osprofiler "elasticsearch>=7.0.0,<8.0.0" --isolated \;' all + + +Once the overrides are in-place the **openstack-ansible** playbooks will need to +be rerun. To simply inject these options into the system a deployer will be able +to use the `*-config` tags that are a part of all `os_*` roles. The following +example will run the **config** tag on **ALL** openstack playbooks. + +.. 
code-block:: bash + + openstack-ansible setup-openstack.yml --tags "$(cat setup-openstack.yml | grep -wo 'os-.*' | awk -F'-' '{print $2 "-config"}' | tr '\n' ',')" + + +Once the `OSProfiler` module has been initialized tasks can be profiled on +demand by using the `--profile` or `--os-profile` switch in the various +openstack clients along with one of the given hmac keys defined. + +Legacy profile example command. + +.. code-block:: bash + + glance --profile key1 image-list + + +Modern profile example command, requires `python-openstackclient >= 3.4.1` and +the `osprofiler` library. + +.. code-block:: bash + + openstack --os-profile key2 image list + + +If the client library is not installed in the same path as the +`python-openstackclient` client, run the following command to install the +required library. + +.. code-block:: bash + + pip install osprofiler + + +Optional | run the haproxy-install playbook +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: bash + + cd /opt/openstack-ansible/playbooks/ + openstack-ansible haproxy-install.yml --tags=haproxy-service-config + + +Setup | system configuration +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Clone the elk-osa repo + +.. code-block:: bash + + cd /opt + git clone https://github.com/openstack/openstack-ansible-ops + +Copy the env.d file into place + +.. code-block:: bash + + cd /opt/openstack-ansible-ops/elk_metrics_7x + cp env.d/elk.yml /etc/openstack_deploy/env.d/ + +Copy the conf.d file into place + +.. code-block:: bash + + cp conf.d/elk.yml /etc/openstack_deploy/conf.d/ + +In **elk.yml**, list your logging hosts under elastic-logstash_hosts to create +the Elasticsearch cluster in multiple containers and one logging host under +`kibana_hosts` to create the Kibana container + +.. code-block:: bash + + vi /etc/openstack_deploy/conf.d/elk.yml + +Create the containers + +.. 
code-block:: bash + + cd /opt/openstack-ansible/playbooks + openstack-ansible lxc-containers-create.yml --limit elk_all + + +Deploying | Installing with embedded Ansible +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If this is being executed on a system that already has Ansible installed but is +incompatible with these playbooks, the script `bootstrap-embedded-ansible.sh` can +be sourced to grab an embedded version of Ansible prior to executing the +playbooks. + +.. code-block:: bash + + source bootstrap-embedded-ansible.sh + + +Deploying | Manually resolving the dependencies +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +This playbook has external role dependencies. If Ansible is not installed with +the `bootstrap-ansible.sh` script these dependencies can be resolved with the +``ansible-galaxy`` command and the ``ansible-role-requirements.yml`` file. + +* Example galaxy execution + +.. code-block:: bash + + ansible-galaxy install -r ansible-role-requirements.yml + + +Once the dependencies are set make sure to set the action plugin path to the +location of the config_template action directory. This can be done using the +environment variable `ANSIBLE_ACTION_PLUGINS` or through the use of an +`ansible.cfg` file. + + +Deploying | The environment +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Install master/data Elasticsearch nodes on the elastic-logstash containers, +deploy logstash, deploy Kibana, and then deploy all of the service beats. + +.. code-block:: bash + + cd /opt/openstack-ansible-ops/elk_metrics_7x + ansible-playbook site.yml $USER_VARS + + +* The `openstack-ansible` command can be used if the version of ansible on the + system is greater than **2.5**. This will automatically pick up the necessary + group_vars for hosts in an OSA deployment. + +* If required, add ``-e@/opt/openstack-ansible/inventory/group_vars/all/all.yml`` + to import sufficient OSA group variables to define the OpenStack release. 
+ Journalbeat will then deploy onto all hosts/containers for releases prior to + Rocky, and hosts only for Rocky onwards. If the variable ``openstack_release`` + is undefined, the default behaviour is to deploy Journalbeat to hosts only. + +* Alternatively, if using the embedded ansible, create a symlink to include all + of the OSA group_vars. These are not available by default with the embedded + ansible and can be symlinked into the ops repo. + +.. code-block:: bash + + ln -s /opt/openstack-ansible/inventory/group_vars /opt/openstack-ansible-ops/elk_metrics_7x/group_vars + + +The individual playbooks found within this repository can be independently run +at any time. + +Architecture | Data flow +^^^^^^^^^^^^^^^^^^^^^^^^ + +This diagram outlines the data flow from within an Elastic-Stack deployment. + +.. image:: assets/Elastic-dataflow.svg + :scale: 50 % + :alt: Elastic-Stack Data Flow Diagram + :align: center + +Optional | Enable uwsgi stats +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Config overrides can be used to make uwsgi stats available on unix +domain sockets. Any /tmp/*uwsgi-stats.sock will be picked up by Metricbeat. + +.. 
code-block:: yaml + + keystone_uwsgi_ini_overrides: + uwsgi: + stats: "/tmp/keystone-uwsgi-stats.sock" + + cinder_api_uwsgi_ini_overrides: + uwsgi: + stats: "/tmp/cinder-api-uwsgi-stats.sock" + + glance_api_uwsgi_ini_overrides: + uwsgi: + stats: "/tmp/glance-api-uwsgi-stats.sock" + + heat_api_uwsgi_ini_overrides: + uwsgi: + stats: "/tmp/heat-api-uwsgi-stats.sock" + + heat_api_cfn_init_overrides: + uwsgi: + stats: "/tmp/heat-api-cfn-uwsgi-stats.sock" + + nova_api_metadata_uwsgi_ini_overrides: + uwsgi: + stats: "/tmp/nova-api-metadata-uwsgi-stats.sock" + + nova_api_os_compute_uwsgi_ini_overrides: + uwsgi: + stats: "/tmp/nova-api-os-compute-uwsgi-stats.sock" + + nova_placement_uwsgi_ini_overrides: + uwsgi: + stats: "/tmp/nova-placement-uwsgi-stats.sock" + + octavia_api_uwsgi_ini_overrides: + uwsgi: + stats: "/tmp/octavia-api-uwsgi-stats.sock" + + sahara_api_uwsgi_ini_overrides: + uwsgi: + stats: "/tmp/sahara-api-uwsgi-stats.sock" + + ironic_api_uwsgi_ini_overrides: + uwsgi: + stats: "/tmp/ironic-api-uwsgi-stats.sock" + + magnum_api_uwsgi_ini_overrides: + uwsgi: + stats: "/tmp/magnum-api-uwsgi-stats.sock" + +Rerun all of the **openstack-ansible** playbooks to enable these stats. Use +the `${service_name}-config` tags on all of the `os_*` roles. It's possible to +auto-generate the tags list with the following command. + +.. code-block:: bash + + openstack-ansible setup-openstack.yml --tags "$(cat setup-openstack.yml | grep -wo 'os-.*' | awk -F'-' '{print $2 "-config"}' | tr '\n' ',')" + + +Optional | add Kafka Output format +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To send data from Logstash to Kafka create the `logstash_kafka_options` +variable. This variable will be used as a generator and create a Kafka output +configuration file using the key/value pairs as options. + +.. 
code-block:: yaml + + logstash_kafka_options: + codec: json + topic_id: "elk_kafka" + ssl_key_password: "{{ logstash_kafka_ssl_key_password }}" + ssl_keystore_password: "{{ logstash_kafka_ssl_keystore_password }}" + ssl_keystore_location: "/var/lib/logstash/{{ logstash_kafka_ssl_keystore_location | basename }}" + ssl_truststore_location: "/var/lib/logstash/{{ logstash_kafka_ssl_truststore_location | basename }}" + ssl_truststore_password: "{{ logstash_kafka_ssl_truststore_password }}" + bootstrap_servers: + - server1.local:9092 + - server2.local:9092 + - server3.local:9092 + client_id: "elk_metrics_7x" + compression_type: "gzip" + security_protocol: "SSL" + id: "UniqueOutputID" + + +For a complete list of all options available within the Logstash Kafka output +plugin please review the `following documentation <https://www.elastic.co/guide/en/logstash/current/plugins-outputs-kafka.html>`_. + +Optional config: + The following variables are optional and correspond to the example + `logstash_kafka_options` variable. + +.. code-block:: yaml + + logstash_kafka_ssl_key_password: "secrete" + logstash_kafka_ssl_keystore_password: "secrete" + logstash_kafka_ssl_truststore_password: "secrete" + + # SSL certificates in Java KeyStore format + logstash_kafka_ssl_keystore_location: "/root/kafka/keystore.jks" + logstash_kafka_ssl_truststore_location: "/root/kafka/truststore.jks" + + +When using the kafka output plugin, the options +`logstash_kafka_ssl_keystore_location` and +`logstash_kafka_ssl_truststore_location` will automatically copy a local SSL key +to the logstash nodes. These options are string values and assume the deployment +nodes have local access to the files. + + +Optional | add Grafana visualizations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +See the grafana directory for more information on how to deploy grafana. +When deploying grafana, source the variable file from ELK in order to +automatically connect grafana to the Elasticsearch datastore and import +dashboards. 
Including the variable file is as simple as adding +``-e @../elk_metrics_7x/vars/variables.yml`` to the grafana playbook +run. + +Included dashboards: + +* https://grafana.com/dashboards/5569 +* https://grafana.com/dashboards/5566 + +Example command using the embedded Ansible from within the grafana directory. + +.. code-block:: bash + + ansible-playbook ${USER_VARS} installGrafana.yml \ + -e @../elk_metrics_7x/vars/variables.yml \ + -e 'galera_root_user="root"' \ + -e 'galera_address={{ internal_lb_vip_address }}' + +Optional | add kibana custom dashboard +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If you want to use a custom dashboard directly on your kibana, +you can run the playbook below. The dashboard uses filebeat to +collect the logs of your deployment. + +.. code-block:: bash + + ansible-playbook setupKibanaDashboard.yml $USER_VARS + +Overview of kibana custom dashboard + +.. image:: assets/openstack-kibana-custom-dashboard.png + :scale: 50 % + :alt: Kibana Custom Dashboard + :align: center + + +Optional | Customize Elasticsearch cluster configuration +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Cluster configuration can be augmented using several variables which will force +a node to use a given role. + +Available roles are *data*, *ingest*, and *master*. + +* ``elasticsearch_node_data``: This variable will override the automatic node + determination and set a given node to be a "data" node. +* ``elasticsearch_node_ingest``: This variable will override the automatic node + determination and set a given node to be an "ingest" node. +* ``elasticsearch_node_master``: This variable will override the automatic node + determination and set a given node to be a "master" node. + +Example setting override options within inventory. + +.. 
code-block:: yaml + + hosts: + children: + elastic-logstash: + hosts: + elk1: + ansible_host: 10.0.0.1 + ansible_user: root + elasticsearch_node_master: true + elasticsearch_node_data: false + elasticsearch_node_ingest: false + elk2: + ansible_host: 10.0.0.2 + ansible_user: root + elasticsearch_node_master: false + elasticsearch_node_data: true + elasticsearch_node_ingest: false + elk3: + ansible_host: 10.0.0.3 + ansible_user: root + elasticsearch_node_master: false + elasticsearch_node_data: false + elasticsearch_node_ingest: true + elk4: + ansible_host: 10.0.0.4 + ansible_user: root + +With the following inventory settings **elk1** would be a master node, **elk2** +would be a data node, **elk3** would be an ingest node, and **elk4** would auto +select a role. + + +Upgrading the cluster +--------------------- + +To upgrade the packages throughout the Elasticsearch cluster set the package +state variable, `elk_package_state`, to latest. + +.. code-block:: bash + + cd /opt/openstack-ansible-ops/elk_metrics_7x + ansible-playbook site.yml $USER_VARS -e 'elk_package_state="latest"' + + +Forcing the Elasticsearch cluster retention policy to refresh +------------------------------------------------------------- + +To force the cluster retention policy to refresh, set `elastic_retention_refresh` to +"yes". When setting `elastic_retention_refresh` to "yes" the retention policy will forcibly +be refreshed across all hosts. This option should only be used when the Elasticsearch storage +array is modified on an existing cluster. Should the Elasticsearch cluster size change +(nodes added or removed) the retention policy will automatically be refreshed on playbook +execution. + +.. code-block:: bash + + cd /opt/openstack-ansible-ops/elk_metrics_7x + ansible-playbook site.yml $USER_VARS -e 'elastic_retention_refresh="yes"' + + +Troubleshooting +---------------- + +If everything goes bad, you can clean up with the following command: + +.. 
code-block:: bash + + openstack-ansible /opt/openstack-ansible-ops/elk_metrics_7x/site.yml -e 'elk_package_state="absent"' --tags package_install + openstack-ansible /opt/openstack-ansible/playbooks/lxc-containers-destroy.yml --limit elk_all + + +Local testing +------------- + +To test these playbooks within a local environment you will need a single server +with at least 8GiB of RAM and 40GiB of storage on root. Running an `m1.medium` +(openstack) flavor size is generally enough to get an environment online. + +To run the local functional tests execute the `run-tests.sh` script out of the +tests directory. This will create a 4 node Elasticsearch cluster, 1 kibana node +with an elasticsearch coordination process, and 1 APM node. The beats will be +deployed to the environment as if this was a production installation. + +.. code-block:: bash + + CLUSTERED=yes tests/run-tests.sh + + +After the test build is completed the cluster will test its layout and ensure +processes are functioning normally. Logs for the cluster can be found at +`/tmp/elk-metrics-7x-logs`. + +To rerun the playbooks after a test build, source the `tests/manual-test.rc` +file and follow the onscreen instructions. + +To clean up a test environment and start from a bare server slate, the +`run-cleanup.sh` script can be used. This script is destructive and will purge +all `elk_metrics_7x` related services within the local test environment. + +.. 
code-block:: bash + + tests/run-cleanup.sh diff --git a/elk_metrics_7x/ansible-role-requirements.yml b/elk_metrics_7x/ansible-role-requirements.yml new file mode 100644 index 00000000..4a5ad5d4 --- /dev/null +++ b/elk_metrics_7x/ansible-role-requirements.yml @@ -0,0 +1,13 @@ +--- +- name: systemd_service + scm: git + src: https://git.openstack.org/openstack/ansible-role-systemd_service + version: master +- name: systemd_mount + scm: git + src: https://git.openstack.org/openstack/ansible-role-systemd_mount + version: master +- name: config_template + scm: git + src: https://git.openstack.org/openstack/ansible-config_template + version: master diff --git a/elk_metrics_7x/assets/Elastic-Stack-Diagram.svg b/elk_metrics_7x/assets/Elastic-Stack-Diagram.svg new file mode 100644 index 00000000..cc360841 --- /dev/null +++ b/elk_metrics_7x/assets/Elastic-Stack-Diagram.svg @@ -0,0 +1,2 @@ + +
elastic APM
[Not supported by viewer]
Sensor Data
Sensor Data
Sensor Data
Sensor Data
Compute
<font style="font-size: 50px">Compute</font>
Storage
[Not supported by viewer]
Switching
[Not supported by viewer]
Sensor Data
Sensor Data
Sensor Data
Sensor Data
Control Plane
<font style="font-size: 50px">Control Plane</font>
APIs and Services
APIs and Services
APM Data
APM Data
Instances
<font style="font-size: 50px">Instances</font>
APM Data
APM Data
Logging Server
<font style="font-size: 50px">Logging Server</font>
Logstash
[Not supported by viewer]
APM Data
APM Data
Legand
[Not supported by viewer]
Filebeat
[Not supported by viewer]
Metricbeat
[Not supported by viewer]
Packetbeat
[Not supported by viewer]
Auditbeat
[Not supported by viewer]
Heartbeat
[Not supported by viewer]
Journalbeat
[Not supported by viewer]
Connbeat
[Not supported by viewer]
Beats
Beats

Architecture Diagram

  • The Elastic Stack is used to collect sensor data and store it for analysis and to facilitate observability.
  • Intra cluster communication and the transfer of data within the Elasticsearch cluster is done using a mesh topology.
  • Any component of the elastic stack can be scaled out independently of any other component and has no dependency on the version of software deployed within an environment.
  • Collectors will self-adjust based on application discovery.


[Not supported by viewer]
In control theory, observability is a measure of how well internal states of a system can be inferred from knowledge of its external outputs.
<i><font color="#545454">In control theory, observability is a measure of how well internal states of a system can be inferred from knowledge of its external outputs.</font></i>
\ No newline at end of file diff --git a/elk_metrics_7x/assets/Elastic-dataflow.svg b/elk_metrics_7x/assets/Elastic-dataflow.svg new file mode 100644 index 00000000..5ccdf433 --- /dev/null +++ b/elk_metrics_7x/assets/Elastic-dataflow.svg @@ -0,0 +1,2 @@ + +
Load-balancer Path
Load-balancer Path
Elastic APM
Elastic APM
Community Beats
Community Beats
Master(s) 
Master(s) 
Logstash
[Not supported by viewer]
Node(n)
Node(n)
Ingest(s)
Ingest(s)
Logstash
[Not supported by viewer]
Node(n)
Node(n)
elastic APM
[Not supported by viewer]
Node(n)
Node(n)
Data(s) 
Data(s) 
Logstash
[Not supported by viewer]
Node(n)
Node(n)
Elasticsearch Master Path
Elasticsearch Master Path<br style="font-size: 20px">
Elasticsearch Coordinator Path
Elasticsearch Coordinator Path
Elasticsearch Data Path
Elasticsearch Data Path<br style="font-size: 20px">
Core Beats
Core Beats
Other Components
[Not supported by viewer]

Overview

Diagram shows all components as individual blocks. This is done for better understanding of the data flow and what each block is responsible for.

<h1><font style="font-size: 40px">Overview</font></h1><p style="font-size: 25px">Diagram shows all components as individual blocks. This is done for better understanding of the data flow and what each block is responsible for.</p>

Components

  • Elasticsearch:
    • Master node: Maintains cluster quorum 
    • Data node: Persistent data storage
    • Ingest node: Accepts ingress traffic to be processed and written to a data node.
    • Coordination node: Load balances traffic throughout the cluster.
  • Logstash:
    • Logstash will grok and process data before forwarding it to an Elasticsearch ingest node.
  • Kibana:
    • User front-end used to explore data held within Elasticsearch.
  • APM (optional):
    • Provides an interface to monitor application specific transactions. While not required, a load-balancer is recommended.
  • Beats:
    • Monitors responsible for collecting sensor data and system information. Where possible, all monitors will send data to Logstash. If the beat is incapable of using Logstash as an output plugin (some community beats) Elasticsearch will be used instead.
  • Other Components:
    • Other ingress traffic will generally send data directly to Elasticsearch. This can be done by directly connecting to an Elasticsearch ingest node or through the use of a load-balancer (recommended) which will balance traffic to all known ingest/coordination nodes.
    • Other components may also send data to logstash.

[Not supported by viewer]
elastic APM Agent
[Not supported by viewer]
Application(s)
Application(s)
Coordinator(s) 
Coordinator(s) 
Logstash
Logstash
API Access
API Access
USERS
USERS
User Data Path
User Data Path
Node(n)
Node(n)
Server(s)
[Not supported by viewer]
Other Components(s)
Other Components(s)
Server(s)
[Not supported by viewer]
beat(s)
beat(s)
\ No newline at end of file diff --git a/elk_metrics_7x/assets/openstack-kibana-custom-dashboard.png b/elk_metrics_7x/assets/openstack-kibana-custom-dashboard.png new file mode 100644 index 0000000000000000000000000000000000000000..c778db2521d6973a3c1500ad71354eaf1fd3eb10 GIT binary patch literal 121999 zcmaI71yodB_dkyMD2jj#Aq_foOE;(tUDC}+*U;Sp0z*4=BS@#z&?Vj7-Q6)Xzwv!u zpXdFq|62dOSlqeip4jJ{eb4^vxIv%f#4*r_&`?lNFeD{FiYO?k{3s|-MxQ@MmgM@K z;vp|j>_18>KY#vwc3FN21?3HjB(OY+{lySnmZgYf;UcRx{WpJ6?Vkxl^sK%(_a z7!i+y_=ujYZDA2hP+<|5nysIGw7ZV#z?GL5kypBe14eN(FsPr&Jno=1e`4BBtcw-% z9qPB7!S=@f(KPY1%R5%z*1j+7P@j0jZg-T)#ezlAorZ1CW%+?R z3ew;6`kO*CXREAttu-T*N&Xa}pnPtw&8c(GH2Aj!1qifYU?_lvKl--}`jh_Oeww#@ zLYe+~CV-uzYEMDhiU_VXGhH>BXYccAHn1qsxvXozQ9NzWOAC2$1MbgamYdTRKl4&A zAn_O+a!eWEuf8{qJASl&ygwU}#PdS!D%?TW)a7gf$2=qZm-9+y`fnQ9D!MvZz8zLo zOMO=-=$g&A464Eg4QRry$xfC1R-57HXqRFq6%irhCC=W@=i^ z_?9Y1yPq=#KQ=seKxoEhfq3VnBA!axkSY#*RoMRGLEKm2(&Ei`fjM74>jz*R`^SdCx&( z%86ig;Wk`mwFE+@F!}x>O2iiwwPFEh_rAz?W-@kGeqMgEfh!yR#179UR@;s~d3r_4 z5HW`=L>k;f*sZ1xFdSVqJ!B?0Gd2RA^|G^SyK463_nWOdof@*T>P2SHac2Op!Ms3M zW0z`Iob6{sgW10_hfp*IT8#rsZbVu*yOa2GYnH+YdZXa=G}r)ioW{DrfhGEF!g1=n z`2|lZvtUdke*Z#Or%h#k^BTN5gk3_cx}BuEBP<%Kxr;4aNNb@NGtl^y)yoSs=4LU9 ziWie8XNMTn;@5!8s;DYryNmT>2W>LX)%A@B&0r*{Xk%Q>80+1HH56tpsig4-hH&W| zuDtBHR;UWPjNX(R9hsD+fsik)P5b*YY3LP$K$Peb5}4nNq6_?DIZVWIx~Pg1H?jk$ zC*v!kpT7zy=#!HgfZ0<%mynom?D1Pm7}i?HR+BByP7ex_kPU%xkj^AV+ZOrpph@(| z>`BNo(CtrQs}aOT6CLL8R&dg36%U)z($cZJ27QyzNxkZr&~`*F|H>5v@um@a-=$(qorPj!;WL4J zgV)h)9F~HTp83a(%4_2=aTzhq(67)$Gxo56SLR^#;qTSA#H)H&}- zVK6ESDQOA1?a$itR9|1f|CT)JsUU+91P#%h5W-V2Eh+LaF)|I0l5&M7Bi%#ijMZkH z?wC&Y#q?7E89E?mBR6}kE^KKSANE3GU{Id`D3_WNO9~*`Xi@jTmP>`jdud=bQaImW zs{u2vlebi6S)4yTY=cj38D-UNlsfLOe-cDIA|kRwzuxTbkCtY?=!|G9nvE;Q`zyyV zM>PImqq(}LBC{}H$Qf-!ttS6vRrZZBX|PmCu%uM%YeRu90*$fc?&CH3(a5sy!TEue zi&R{_A>wI%z6`ESF5G&6NOVeV&#g~6ysXU4H@(FmX=%N0!AdFe?%HuMRZd;!>Lhct z(~2R;oSxp|wXo8~DQAo@n6i=vsLz?QJ6Xbbv!0()Pdsa02ZfG*g7o#2fr}+pRUr9W 
zpYh@hdTBQ9>}1zjL?*z(bw%kRp@~!({FBzQ>gD;=dbR4du1foBzTjf!kQ6#AGm|iV zrBr^wCG=U{WNo>|$iRnK)`Jhe*%tjE5D~9vR;G!e5zR}ss<>Rq)D%r3SgH5J{a#ym zsKfmA)5K@N5|V>mJ#yGQzARtOY;WZP7Xvv8l~Y~thMu2G|Idj<0+%4JgH`DarE7k}N=R^~q-D{_X~0 zA|b8FM$hXg>E=Vp%U~ft5dLX#h^g$+eV7Br(SEgR30H8A_YnxB9VK=>kUDqAtF>>( z*24>2Nu(99ls~xNqpa)O_cvc92(6O@$x;PsN7=o+XshdGSL~r+yV%??rmW`XpfGy* z9~pbcQ|usZs-2z)t;TtguJueqD?G}C=4)WD;qb7Qd~V)$8^7)H$dQePwkDPM$sSK0 zhk*9$6vh1P>|fD52{NylV=O)2l?<2;`kgF~3>`7?ifHhEv&D0W-o#hQ4}$7Jq16^< zwq-VX6;&UKZ&P>@7`2J$XWepx$nesQsz>@BfB2}_=I233F*caQnc_mjEFd7DO_cgg zqnK-cx)Y;08mWxfkVh-C^xWcmP@77&Sljw8wVG3OPKUU7XO@a4*I2Rue^U!H2bEd3 zUvfr$ZmtO9>k!Y)r_Zmi`(jM+t7#~y+NahHcjZ2xWRHZjm_o17-4b}@e!xsjlJ$c5Hdvf z!y=zqR@MwE8Fb1iGezXMXmcy8&C;-}awc4o^;J63-l=P@dnEy0pGr2$Fzt0ER==)y z7IiUxTvDPeXiV{bOcZoGFi~I9>qHl{nAg~P{S&m37j;FBSEM#D$FCMEWn3uFeTgHZ}2B6F2xHRc2L`8fYeTi`3|T5`Z}pr273@6#~M-oaLkJf zJZ!+~&VVV;$@B2fa7s!}O3JQZl+Sq8iW}ci2sZf;$Sq{T9I+g=87j46W)8_Cujkqf znVGq#lbg)VdZhPNv)r%%u78P7ttVUPw>f2QzsbOqMP==4wsgP(7~n?O9)*I4-cSP5 z1HTU2)C#883O-Vl^i+^l3e-cVeCuZ%1U(LeNf7`?v>EUVgO7(W4g})%#1Yv45zr;n zoFCNAA%$Pxmc5kd`Ie|vkp51btU^$j_sd(ww5dpK=6+`ANPqk){W=Eoi)nYDcF-6N zJX2Y!-2R8aj(x@|drvDoL|O^`;?k~Reb?TQm2eWM_=K^~r$)>$(@b#```5@^R#11h zBCvy-wE*n*$l7on+jgA}K=<@q@AO&7&`7yOB5{|BzMXOAHAu>?hQr-E8ddE+oRN-# zM;M0AZz1p|_SySu{9$M5jteRWAg?HMFEjLHiCzg9iGeozl+wu9U(jwN7DnyLUV|zq zBP^lMW@muzuAQiJKpeHU)6(wJW*BEb>nG!!%@Tmh9kGDbu#EVQ)`{Vi;1NWy*cK`X zCV}|K(=c?fR_8(Au}_$ughI75U!=$8SOh^6*ZQ3fu+_}z=a3rjAn`K; z;KjV(99>;{g6S8A(Fd}N)iXYoL<<|)nx4>#?8~!vYO*y7?00o6%$$v)7~A0IuEtV} zu7P6Jl+0Gf2N%6#+TT>Z{p{)E9g#Oz`6@5P4AkLgJm?y~?Z$K-h^?1A?VaF1pgL))}=5s~c1h!l18q>qI*@ zn>p#qtMra<&8gikEALRP)uP%m4~CoUIUBqg_9%jSx-b}6x~7O?h+|R!>CTx#+;m}) z_DXm0*~wc>*?!A>d%t{l%1_tzjyG;`4Aj4BL=wON_)$xn2iVEU982=*N_lfusp3(# z0v#={BG0LSW$fWC4%~ztW62Ml*JXdB)#lM2IY#M0&m(`g;<33JX6)&U>M;%to~P|J z;+)-2AHASfitV3VOAH8Dt9}fWGuTYuklAzXQdyCEDx!I>LV#QDDKAqdK*LJ?cAR29 zx8?emvtoIP8I&lI!`G{XgBuXx-y}3sQ>_@KEkbP<)j#Pb9&n5iWn*R+e(mLc5Vn8( z 
z4pStOD92Xp<==Z8$+mipM|=clH6QwJ_+_D?wjH5o9pF;_qE)dx3R_=skaM$xvU=07 z{YJy1L7R%1bp3=H4Q)0R@8WY>ZF+@|_MFbM!m$Uk*tTuQp84;zG=svEbDtWi_>_=D zAu;fx2g)WfyEX3>P_^~Fmu>4|i3UT@i*=lQ2D|c(4N%kTWSH2L*^c9SIyD(rAS7hF zELm|Y^5gbm=bGKgdVsJ1vh9FjT|5?z>)t6`nb>v$ z?(paozm|8zdN2j5%oV={nzPUzqD4<7n5ssMf=rA=S4n|85T^TBPIbHwt_K`U$%rmO zcqk>cOcc!NBSQ4AVz!%UF9usiOgu>FMi|SR})F*W5I*bJCZo{lVjFovnaaOVUnS{da9y z(9X%@8u6cX$iwY1PX7+z^4bh-kP`ry{G=&Vc-ZmRe@-4R>=-oEma5mBNA97(&NG$u zqh+f~q?nDNlJX5N0|NtvsklfV!+9WoftOG@`6Hz;5i5^zxygHb8YudY*9H0#Lvt=S#A3$lNa3e=rY9&}A%LKEf~Js0}>l&*H4k4qYU!d0OLsvDYmX z7i7|rMUZWA9$zggdrR?M{IK4yL3^O$T0~2Kfp!E7DQ|0(T6QI;g++n9W(?N#h?dEm zz^Z<7B2@*#NVgDq9m@Di4Ue}qQ#=X_}{CM6ZTUMa)XkLoO z-CY>a^w6eN9tZkN8AKR#NZBq~<%|XEt^azqz-dJ2oc)HC??-yVONp-Mui0cinky?R zey#B98sL6QG2ixaLe{_g1)-fjbdEytF@e6X(E;m=GHf#`F@2fPcS6(xo$VBFBoHQ? z9V~k8Q^YSuAY}%=a1)xBP$&WQ8&=*o)O7qj{0m3&Y#=1uq2^c&hibQult)FjMq(S) zw81PuC;VRMc{)#HUI0cMd{ROvfvlp3i+FOWq%=&Qp%igT)fE-b)dE6SSkW6>+DvPn zRUgQQ%2}IJH+`-6{FasV&HxMTX+Zx4F{J4$OKZDzqs0}Jg zki=7`Oj`^*EY_7F*Tj7~|Ca4jH{q{HsN^{i+cx)Xg$FGd{1Z5}-52X(22V+O^M;N> zi0~<4=L+I_b6=%56{9>?4%@-6}cS01u~wK3J1`K znG1WNfJix~Uo87Ofj}~!?3_*_*P zbnV+Eut(t09hugc496+>O9LFq3^mz+zfdX9n>k~EAFH| z(tZ5mZ;KN}{%LU@2R9SLgSJsrN&)yAqn>nhmTO(kbDBZP?kyzbC6tOyDG-Jw!D76N zKNxJT5R=4w!&*j+MHt?tM~_*IM7KT4!4`J7xv;#G)s>e=*oY2yINlC!io=SZ0`&>h z>MZD3hd)RXb{o&dE;b<1y%t+f(o=;E7V951)FvBO+u4WM0E%W@)xR0~l0&>ogj+7y zb>wd#{3(>u+*W!m6_WBKudP35yi>&Q#N zry>d2cRIEhvyC)+!|i9T5;z`FaL^{2f1B-rLU;FT**et!&Kl+26YFXsshge4b!%(k zTI~{}`vs6gVaCkmr|(nqwxM@fiEitjPDhbPg498S{RGoHd-nd`fKTS|v5|7;6+{ns< zMpddtFv-YcO}}Sn?VsOefpshhw%`BZ4#lgQgivc;s|>))SrRB?0@3*kQ&7;Mq|)-} z`lQ3%42)r4%Vjw0-a5O4@Q>VwKHT=r4h|3)^zyr{!Xx)dwxR#_1f7|k1-tK8E}Qo+ zEC{`O_l}<4yf2B_J4<3>od?QqFSe}0avA57F-|%u57hk;RDqd|8zRwTaeW$Sa zv9{8}n(knnRug@qbf4j8pLg&mfmc`tRnwW>V*2~in?gx4eOS5Gsk*46?O|}0KJ1io zxLPaNT@LdCaB7UD5SC$tC>?3!io0@04Hiu9IC4y?i-?(^)g)_T5xH2%+iiN81k<&f zhgrM3_S&b0M>Kl$5?Rq1DEQb_i0!UJRwUPhg|K(u27^UMYNUGQv1<};eR@Vh#(;|Ko% 
z_3}FPQb(pV6Lv+te|WrI!f2gsLXVD#Nl=0`^TMAH9#Rq++2qD0pGs2ZN`Ru?trU~i-C$rH8U;#@h+#b z?eFhz;1b)A&bDxx&0wqkgUAv5t*B}@v#v9`ICCQSIvH7}`l3P(>xFkv z7&@-|7S}mWA(A`fnp}{MOsjaPBJ?X(f_J*E*pd4eBD!jh%hYpI_`%LeMB>{$WKN;W zb0j)Ii(8EExBLHZa#8Hl#A|tPYU9OalBMM*RV9^2nDqHXThaA#!3J~ue9H3TqoFpt3X)W^vJVR3F$^88{8ZL2WL~ZeCZ~Kb**Z z97bgDoo=&ELTz5MWa##VZjR*Ic#FfYM+MJEF0;aGRarcez`Arj( zYlw$&dR7igac;Qhp#g9P3mbQ&Zm4AmxH*`)Pzi95p4bsW)F_XB_Ck5)DBh%0aXa3h zq`O$Zy>1i{S4_$VFV6i~;leXo34+Xbk2>xRg=0dGH7`awF4c6w+*chiG(13F584j3 zxaGpxb*)(?IF+>_hrIY8BT*rGeoK&>B9jrBn`yPmM@}7F=kB(1m{;pwdvo!1Fv;uG z>U2^qpE3Bm*l?uR(Piwo^kQTYqnCT<`}PIRDqkpaNyd!Kobl4AGFA$>fxgBNjfRSx zoIp36>hiXOxAzO}5t@`ArYGb;$XC3N-#x(%rzJ`|r5(6QBpM>Bb1BD~DLsg^*+60V zm}_8VELPp#35lHNLDj^8uacE>dBf{f>nYFmB^)C%>Di zIJU%fTJQo-qQzL&!%0huSMBVq}z_bqX_fTpo9mXnb%Tnb!8K|raa<66TU3`)|F^Rm|}*R*yx)yuYQd~~JJ@M-@NxIyu9~ z8@D}!3dfzjrWoAL__2w%w$AC|`d<+sP-UvpN9Zz-VoNidl`u!nILE|9x1}nS8`d2q zEa{YY5aGRpe6i%IJKB}Z0h0~p6qh{-eAsHTJ6tOs?Ihy>>Qvcn8nQu@Y#o<{Di@~) z_#wNb3u_I?01RAg(=+$S<@@36T^%{!QlEkSy<`*TlR@H?;WoODjc|=AF9Q~STUQh5 zBZk2aH0X>)7Gc%H<>{3Vyu%a0axb0-F4QeO$4ojo_e)lGy;St6XE^bz?b`HRbU2)< zE1{GfPKV@t;~YFlHdnLX0K9t5toG4eMS(h{Ek+*7nX5@;$^Wo$AlQ#3K3ZqK>1w;ow%2 z-IEKFl9aNJka#+VNx8T4!KjXfaolMVI(Pli)oQxVl;#5WAI{fu`z^>bsZ>BD+;>-T znI6SX51pcw#e!PCdNV`3DHTIanGZGNu?V*z;IT(Y`A$=LNCrN zYBFT@8|Lh}@H{or6Yt-o>{sevjTiplzMYgK{Omy@C|F})9{csxMgvjANbcAw2 z2>ikbeOJtl5EB!2y(3y_`c?##T)X#!6tAJ*>0Pf3K8`DUFdrv|E8!og{8L+2)Tn>O zq{sN2S|2Yah@0wZ(^=w>m$RpshR*}Gu-{|j&&2dxDl&aHC7*BNjYp@ z7rl5BO|IohIY&)zx5VVxL|tsWS3w40%#}jbCR$u*Tl#Q)PL=wG$ij6Wnw# zeGba3CW?2~;&qI4Y56tvN@}DrMdQ?Q5+0wPWfA0}8{%oH(%D42uY;O?cO91ob~m

W-EfPjTTMTJtyccsyvgP zbx#c4Cm|tH&%)7k>e z1ug;!oa78Hee@&J8=ma*m#f3g-1gg*z+UQW4O~*nI%;aLD9-@sszg^Q$m2Y0WmRnq zl({tK^J>S1IsQNb)JDHU@W6RDl9OQu6xz;C$GmL6yypog2tb}(1DAWRdX5M;KG>6p z8|vTvC4OjDakwhbJ~A}DAZ8I9zw@g(G!a{NSUx+Sc_Zz$)MSv1!y3SSjGH;XAr=3a z)C|peDZuHeJ^aWx?@5^5aKD-!O|jmUn{*O2R>pFq>(=cidi^f&Kd8&tH>y(6B0tm;Y#}T6s8JtSh9@QmmuGhr8_kK|s?s$S0eRi0 z?)h}^mkThkR4{Ar&Ba~3+e%a@t$X(2VsttY;W*7qb<98RtWF*aHQmdW81yHk!U0Zz z(E5lym0Un6Dc9S>a+W4E#~tx85KqI^h-AG2h=p42>=}Zi3Z6HMs@-jO;u2?Ke5qc` z_3OO)Tll$EZp z!0_77i~V%F6Fafup9TDrKXUU%E7^uZSxd!P!gpQ+5OFzxCA@nqZUG zobbjm;)1u0?69t!rOniwUY5@L^N#GVpXgv9kNxkhco6*+HM z2VrLs@% z_SPc!00sBa0ea$X5dY2|Q0vCue)AxCM)&)c-Sx1`MSZca4>Prw;7`kGISw?wT#xW+q5&qhHb|6t;l|qh{i|xCGnxx8l zTelC~tq!A(3u%BsE@zym019zsS)-C|n>h0|UImKWZ{g+J{UtLCa|A>hy0!LOC1r|n z2UqnIw#Oy+M;!$UEcHi8%`%6!T!E!>rBK=M zakxQR3NPowT#Vjxtm~W4opRw&g5fhfi#d`6=rrny^WGYiU@%Kz`QEDhNrE7`myH7L z*%=2+#a;_*+4GHwI3erW`7MLus$@dgA}%S#GA=CXIB)?No|-zx`jlV1%URpp#;G&V zpHS@hO4wMzn&-(_uJN#wl0;(NbuLNR)mAgTtV{&o}I$cdX&av*=yK(y8 z!}JRkNmbFg#>4a@TYf+UizhYDaKeE$AJv_vEF2^kjrVRi-}EGDKy>8QPMe2CPAR4bu<@o(=bmb0%%ZIvQv-=UBhY7Yi7Q+1G1LPN z$>O<6O_%FpaQ-rAf9&4a&5o(9BfclRJpg7#?O+~}FP>xFN$l2t^@7^Ujt_XZyMJyW z@<6b+d)5MID)%Uzy!1B+$}QO2JvDmNVwO<~`Fi@xQw2us8CqwCX*s_voSpk4V}j93 z%v?HurmUoCgUM~9of~JS$|Tv`t1m7>t4TdUQm$xkjok2 zkTiPGU)R4;XngZzGU~MWe&ej-i5kwN`rXB9s&KtwS^Z5W9Y&Rv2TeN0M^7OHN|8(V zrKf&F^-OD;&N-L?X)`FX9bI1kjbJjOS;m`NMwwli1^8&cgl&X-24~bS;V6EqRZy~ zY}Glp>x61HzFTfn4x7Z}1XopjLNQfF_o@zcU0B^TPyXV{QxRZtoqh`*r+D}i*@`tH zyuRqbkY78x%FpDVs4_j*@p1T*C@27sa#hKSTl^)yuGi0akzyefcO2_B@pXegg16mi zNd+*&&v(vEtU`46S23^geHSU=Y);ej($qgyBxny589wokqIhC7~6 zIPpssByf^e9lM3mqa=%^Bh&$zU->t^dklFkR=i+gMy1u+7NjgSGM^5~{n**R)z8F8=KAwEHEfY|4yFy-6ZtO zqsdZ#z*{FLo}X_Ge`kpPm%hOCEdBo{jk9^{v#cTK2OxT1!la$0PHhA{?Hhpil(q5rnV)_O^Y2aTxc--yjYn9B`54 zF9yG&pgi_@?;U00Zv+t!&&k=82>ee+6p*=U+y5r5hEm2zWi^@2kNQRbks=h7=G-sb z5&!rOQ9@r&OiW13{K06*38M)}tBF28Ml2w`FoYbV-7UJ)6RB!$uFke*wxZ`a!LR;d zcq!1&L}6Pe$-Dk7B@`Ndo^6VBTy_TZEvk52#E`lN3je$4U&ygvZcOcrR9 
zyQYorVu&e3G$%-Wk3MaezJ6&Dd5}R4mQtiO&}Vs`yRbe60(5H&)2Nhn<0yB0ELxemg}BooE)PPI)2nS4%7nP zSwlTM9YJ`Mrvu$T-4OiF*=4Zr5gkZ#EzHp5uH)jO+(NdOoYPJw*;7GG*N-r82FDuU zjRQWgGxC(1Znrl*>I1pHf5hlDqhI7 z2dok~eUZhf$ZztzEtw|l^wRV^93)S@42+z*4hAPT48j-si^|_vyCW1~y$_z8bsz&h zmVa1l*4}7z?XyL1IO*qyc6Nbb;i5dT!`<@+>Bwf{t2!~)HFV@$$D@&6mi=2nbdU}(Daxtf?nI|NBXPjQ| z(Jxth!1rE}lQzmnsane^88&>Pp8M&lQ|K3)>!mfLmLmu}iG__=)|c^cFB|p_Jto$0 zE@;Z)^~o#Bk{sVV9lgp_xns&X6T~~NOdb(|ROhuW+;7vwtV3vI6IFh?)kj&GsBI!0 zI3hxYiR4`N=6VTHkm*{~th$mkVH`V)P{_D1xMty@b93`PeP_jGb<|tHzVRfV0|5^n z1UH&1t8`>UbB=ht-4nYSOiymKuj}J>^ zDEAxEygpr7J9#Yj%^f$|2e)<}Z{IIor~Go;nS0u<_gH9nUOH}pnEzBcdbcPtW3qUG z#l})aHqoBmgCq@FA?q!qd49OSH#cv+SSJk%k203^aNKid{pajQ5hmF%h9JF@^H|tt zaTu^p`h4*8vE;&ZuT%qJB{BlC)q~E8rsXcokBlBg=1;yln5ELPr~MjyYIiz!#i{b@ zwQl^Q`<1S$MJLC3^Q`sP z$%VUC4I2|h1u*jIZ^*cq(|HnOKQZD4-#Eo`zfnu)>Y!h)qxA-HZvZ{K6d0-ev)d@G zVUzRo268pXD9L<#7V$@4=vI@{=6RL{(!+`j`-DDEEUl}qsLpURWxWh2M6Y+NECx51ubXK+*|H7al9QDV zJ86!1C8UnxbNm*#|80d3&r0WJ!c5e(g;5Bg!$3EqIA7zA{cyk;Gcp+?4>H`JuvW?` zxLrH1vya2Obh_Vfed$*@K#{?`GHp{-`=N^P@NGQk-nF1}ThrjCUk08ctizVJ^;-V^ zpnu>vs;z_>>ocj=pILsPY4P=!FmU$x2tv*f>2hXlbQ7sks*?D1u%bAx5W*uH(RWi6 zVBKr78zFws*bdm<2$s`?3kgo8Qn}Oj@~dgqTAW6Ncoi7M#Njxv?awgspY6)oeP4+X zt`UW_NY3`e5s5fi1WBkQR#~h@$TwuEI61|xdsXz0n2N>Sh%?kq)*roL;p?8gI!G6D za~&utPSH4zB_|` z14|E`!BpO9Lc1oD1APN;#2Z(4S^g?w*9fte26>&%Mt{TtgQkfe{1@*ef&h4Ue!zQ{krtI+>i)xVdK zAo*P4k5BFQ{i`F??LhZTiZm-3+WzQ)(M@Q{5MgEM5 zgczBMd1u^3b#<`Pzcc85KO(>@EGqmzAFup>@I$=*|IAD(p1;#631Wo&bf)9R-ELw0 zj=NCdU$}9Lb$ql?s^G6j;Qw)WW@cu7UT~!J_c+Z*<#0F|zf)mS5^-?q76O4B z{oS@=L=csWiwo_q#|0nM@+)0PwdPNrBcpXzQAHkw1T7NI&dwGV1qB5RWTU?sOjSi9 zAs|fR$1uU(q$!6B=I_0B3Eob9(tjlvbKccgTIu+(xX)5T@?&=Fa6D8Ga@A>Mg=)o#DbrTy!@-wC;u7Et#e0S{it!X0SQ%7t?3{_PdjI1xq{6vGX_*#=$R}I%eAj6@< zYMOHA4SeUwLS=9*@DlJ{Q9x7{ykw4l&q?)=f|_|$tnKG>mjlZ#Rx ze-Sqg;vnmn8uKv)SLgU%P1m!=`aI*sj9anJ?`mkVbx74mqKO2(e`t|4WOM4Znliqc zGQ!e|*t{c_kJwqIhU(fq>ZYjT8B4i~x7{j$=W#lZxlL(eS!46JSuX%w<2A zFypO!{c8zl(|g3i!mZji#b%}s9C 
zCVREXa6cEYAr>$rbm+@C^53fM;m=mPBt+bO`_I8kzoSN;enA91*` zZUR};uld54zokr`r8o7&&2E8oZELnBkV$gMoFT;e%2HcW<%a&oxYvnm!~0g@o?ubl z>$GodPR_233TQ_p+yak+R=vN_Ug{<-Q6BPD%b_IV*x*@>=p|mfReB=skdvJy=DO60 zs>Qf7W7-iX@G$dI7e2Gm>K-z+w8ia70-c&MNIT$NxtuXCNzbC5wFn6x9bpoVn!>3p z*K|iFO7Jqz^>vRZ%7*v>idVOfpoN0$L7==C*YC2MOzTI-cIIX0y za85rv3WKH9yCE>cehf$fEsf0l7Nu>$v}4YE7`lqa?80s-4lR;ALQ3EPvW}`;R%ycV%1irBY#4^T^VR;9c00^l zZQLBO*RV_uH_jtg^Eztoc4zE`QSP-re~|_)60~0`FLT$__fQoT7l=3dt`dz5NK}7r zMJo|+U%-FGJNQkE$);+Oi|paV^r?7JWmU|_;~mG{*LCQ+3X_2SWu1paT6flFpWGrl zXRFrW_$=j#rkvlejy#0`8s?qC&my~rh4Nso@VO1~l0+A-8!&J>#2&eE`Sb4Pst|Rd zQiK{X)o+rcRx=AN`A}kvx=8|?C2%&o?tNkXQMpG=?B)_}1~W2oSK!(!XwibjWjgZ0 zX*qRODFf`m!#=qcytQ1@r?YHxU(Qg@^>pXZn!O&)6Tfs{_okZnQ9V&ju!R{;K)31@ zKl+3ak}VJNgp|_#CW2o_TM!8s&!htiwMv$XVCY3L-^J0T_rvnxT@M%ulOylp#lZ&; zv(!7JQw;{42YTU-Yi3D@u-U>u@AXYysdzZQB}WX@GW%*5*x)iVL-8=jl2)cSm*rPE z%H4MJd}>A>LDF<^daDdn6}6ubmAcftihE#$#GhwOj_{hCEZqEJfwbJBIHNu+@!}A< z4)2;gheBN*6@by-WNff|^J|VWAFpzz_LoG0I!oM-;vS5FBk-OOp`t|WJKs1#g`4j% zXRfr{LPZi%@@<*?m+q%%RKt4ttLv+=P4GThhQZ!w7EHFgP`NRWh3^#hlzT1KvyZcR zE4D-7hEQTZ&0UzvE#a9p;t?{tlM z(Nw~|GBqT7R?L~qR|N8tq8IxL0%pZdC&k?qR@D;Hdek7Hu}7Rn?ib2=w+-~DmgGoo z8oiN@iP#2Lbg{Roy|~EirDN*LP>a0U$zSuimO}^KLeiN=-Fieh3=|2?G{6P*bc|Wi zHhZmFA*<|L#bsFGs&H^9`1@qCs)~Z$ZAPNYjC;`@BNGf>8oP<~iQ-p|#a)o>X%f}V zO^0@VxNu?`v<_njzi6%34%@y8D|3N{dQ$Q7a zs%rFz9t$w2;G9lM%@>Ljwu%E-9R6@qq}obc;fY+QOR|;oGbB_)|NJ_&m#5YtB5R>j zsz_>px6k2?IYmbCg5s;#FE%Mv#-ooLw$T`2VPUpW1Cccr4Rb>M0vi0zQi}_*WlzLS zuLe_{%PkpZul%<^S7*1?-MrE`|4~V5;;VoX(>gtnx}3pPn#J zD&n!K7Ex*~f+w^AAhAukTpSpC&nd z*;I}7{7LX`vg3qgFUX|8(!ddKjAXIBC02r>X(E#fxKbx;$gqB0Vz(NWROt{iwwvUu z)E}Wst^Q6yhTCC$HotLL?iTi>G?<>6Q5VkacCKCG1Kw=r9tOmj30dY0fQMXU@e`Pl z9HJcI{`&C-fU&ReX;xdK7aO33=od5z8j2;!xpLGI0B74I3AKQzCt6IN9j?Jn>j_^i z?ieE-|4GE|KU76PnlZOdNwHlMViK10vC!Ni_=mgPaDwF;fB&FHy87Y#BoBxK-ZLL@ zeIyRcdDZyt^sdETgYG7xr1AFbA>1^`v~Y4|3s1LJ5t@QrzhIkRgNzPT2D=v@U2+G8 zOnwQNt`(^LTTMJcs)=)jPfG_jtk#1tIG3Fk3DYtTtURKG`gE<$u|P%+>$QY&P`J*E 
zlNFrv;_@Z7$?q@W6)u%p3pWXySD}8U$xkAn+Qy8Ib5-t#7$zM7hi}Fnn0oto_Nnv= zYJCd{|KtM9KQyL6q1?k;^yrTx+K~GvPhP<7eG(-MUnS-%GQUH9>v1dPGP#=bt{pcwMoLJv*D zuWddR{WOTzL#M+b$5Owc*5%t8uREmOP*+)5X;qHArl+?o@qRm^z5HrwA>7}{CTtw@ z*Qgq@<`Ta%^7~aaH8pI2K;&28*G9h$qVWA(V~f>h3#9tHP`IMK3sBN(ZxKxB zRJp+15I(Z2Iy|Pd!UWYyE(i#v#{5QNGA%!rqK0&}vTf4VgTy}}O9+Zwbadt^NS~nh z7`0?iz80vRg4!N|N%yTa@H!J(GLl&?qS??L9eJM$5#Q&Qxc%Prp^*;A%p_8Pap0@^7Y z?2tt$>>65HT44_8zkmNt@6SGm??<~*kpAnfM)`LBBjnF>3IA~rpnMtrMI`w@ zw+fH9|22NU4Evgx`C|pm2Jl8v5~qP|{_RzmHr5E@T%=X4 zH(&_ln@GwCOX25rI#|=$v~E0^-d>LQ6OcjfMPQq6Xc4nc5?BGh?l3xXe{3I@pEt88 zftcT!4e-|NUnBI0r%$I-wTj@Qm#B{6g)P-Aos-?A&Hl&ID3P!-Cbk%0 zT!t-uSDmNem`sL_3u{Zc1rkNRmofNKIP{RJ!ZdQpeP=$)+pUSAU;oM)fvy1iWNCjv z?)*EGvI7+Zqbe;2{u}D?;1rTr$vG!*HE$bjMGB{8BTfWVCCB^v&;j`W7<9M#*=yDzRA zLH2}^Z7MLZY;`1Y(?dCKZHi)S_u@s@CMh*>ZS+&jOO)(*F9Y%QC=!ukX^607>CD&R%mzF=3<$r@1ULsRgY=DAM$tdP;1}f!(|n(bO{%O>lNdF5#co549u7DE=g2cv-E{40ziCbz ziU|KOmXxd?CyOGeB){EI_mLt7;+&Re+syOvJ2U5TKO4f0KwLmYb~+9ZWMG6%Ml&V} zYB)A~0LE~eD7G0kq}vcqTF0h7N&-2j(%%;=NB#hPKRSWtT7pgF3&?e$=;%TR4=ulL*>ag% zSd{-mNa38b;e{#DT%MlpYP?5_95X!-2pClF)#%~RfO`hqao#`dU1_Ce5(89= zqssADsq0!+{~z>MO)c;h!z#&rVmqu&yU&l4whz14Jij13(r*goY(YFh?g)>Y&#(8q z^O)~m7s+7R!RssCw;!oXX4vhWU`F#t<;F8(dTd6?;d~>!R}k# zQ7AL%`$~7NY8qZ-@0kmOY7FN&s{7e{E@x6dHtP`jeEq%RKbU=O0b9F72`SWl)~5gZp-OnnN)=FG}swgB}P~ zv%*U8kEPBJan)}rvE?*D%G(Dj^If6+1ZioJzh`wmG&7I}(Ad{lj8FGhzZTIj2>B9~ zlt|S1t3+>qN_CsQD6cHLjC@uzBB;}U?dwRnLUW6+Jk`c{p^XJyX<0Va4Tttv%YuD! 
zdz)OpbfHJ?@|Vy}%o>auGK=S{B5M9IL-iNy6IW$WC{kwHqpde0Q#oQ_3!WX*9#%i@>R{X7$NWKQ)mcn?Kv3eut7zTI!4{7W>7HE}kaqKyxq2u{e!8(=4 zYl%L*G+c*8(Ly_fzoIfCv3}NjSW8p5(T~Epkv$}mI&SQdyTmNc}0FI%=NemK#h z%AZ221Th2~&ygZH5^-_+c~52b%tr_~zTP%Smmcw~@DLy~w(tWZ1|cGd)k+p+HrJ<; z>;L@jtwoYih#+*-rQWf`iu+U-)XZzfJnlAV9J@_uVYg`A6HmrC*w?3)E&eua6{_sG z6xFlGwxyToQu9wd_?-KE$MkWU+Z!JDH{D?PERXaGp$daj{iOr)G}qyRJc%$1mHZF* z>a$3W9g?C;#AT3;OS~@kL^b|w4DgiuAR#Wi?QkIGIUt!S>$v_{`1Q`+9d!oZJ; zCu8b*l}jff=kw9PO7#9MSLgQ-{(lNX4TzyO9kJPzf{t7F$(^s_=AuQG_XxpcDLc#6 zetI3L?`p5r^3KmIEmDt`KJzmNhu8t3Q=$5<#gB`pFB&&{k}fG&_AlK1 zR48TT-=Y{BQO6EwVG5E@#igefD{aN&Xv4{14h+|8M02<}IKxgZdN=R)#02jrHZH!o zG42#UxJWe;@e&{xP8C4+*VosZ6VO1Rp3aTY!d;t4H+LMA+}uBHc0oOi6Wxp`u{CHg zaP|VT_KfXWA$oi<7>vQ@uv)zO!jupO&ax}q`TjaY@r1Iv*V=Jfi?M`w88#XbR!>Her(I70 zS!BLeVzFkGfQ(LS{DOpv8eB7bl4P9s22hjkSnY-dg4*h~%Xl02 zDrJ!3LN@8AwlyH)nb!8&Y}K<>z~4Ld#r>yezx}Gk!uO;a#op@OQ{29h^de{ZRwj)@ zut9sZsy~6>R!15v9mnaN-C|ix^OzBC!QN=?i!R$DvE-3>M4I63vv6_O{EHL>y4cby z*2+0AtgId!kZr+})Z6|qk)SpNW>Ry0wzG>ZkD;}1~Ns<~S zH&+d+DL(V`+0qg4zMl)3V?<~qLU{3#TweYYt4Y8}5~2HqC9$AS#3mAm6npwR6s`f*ycn-)gyCJ*^%5HV^FlZU^67#ybgLh>F%c)U6)ab*e)rY~}kx-zh2z zn~2>7B8~CB0D#I4)Fp@@>#5KhId_qG_=r^-(Gj(?(yf=aOS|*L8+wl0b&|Mp> z*$SDAq}ca%Bs3mj$#f}v0JSB51p@vF9yDAd^UwO=lj)Q$JEG5@D-Cz4a~-Qy9jk8% zrfY)Pqq#qVx5}!z8l`qT`?9G8yiStUo0snZ zfzJ#gFVlJGc=+`q)yC2%0V(QnoEUY;5Qqe@=C7PT_2oPn+ZR!z!J=c&Mj zIEys}jUErd;GO)pOyO!)#&NV4ox4Amegp&iR#q{e!Y7b>W_kWdh<)o8m4H2)8lH(n zE`~4J0Un3XrAxtP@q=*9*dZ|W%#IKhcs)VtV^J^kn+~6S~Kf_V2oU;)qq`x^C7dQ1~`dCfzusq=G zh$Q{y$H+dq34`a;+Mg_Vwq9>-O`Jf?S>)a^cALM`o7&HFTO=?)9d9=mea%n&tdetNm5G%@NU> zzi{Raq<3MwI&)j)lfbOs2QKz++A*Y9FP#pU`HdVLIxeVMjN8x;_BUU22a9(v1Wnjk z5VYYK?o;j1K%MR52+b@~h;YE*F5ua!k^KuTQL>`;SjyG<-6?thAPa^9uozV(s_IrF zSL0a0M}+fA8AAr~e5usJsaT*MOx0rN(eFvHT5-SJ9uHo^>uTtf*Xe!fkJjsESnFNu z?>a7<+?OEXHt+n?i_NrnwTWGH%L}0ZNa$Y5qIq%xb7JQN>v&f-w_7&Zh7=mcZcs@% zKm5gw^-|u5e8CW!T~`YE`^20(CUsbijDLmOI}&YwI~5?lU+wi_IMhV7I$NP-3N{sg z)IWTgb5PvdwJNlGFca*R+r27eX0MUAWt$&qNndl*U8!_@LAcF9!$r`{K^jn1Kd;=s 
zPXy6}(9hP?b5U^jWEK8a$3-DiGOQ<|%za&W<#hKp$J&e%(Sko|bay#@UVC#c1W6_m zozhXeb>=U|4rL7F6_|~e$CFCo6$s}rG$0&hiAvE&eftp1mpP~ObUUlfJXc1UTbw}G z!QBBe2+ho1PqH9XsVr7yrI!bjDCU_+7_t_e#qgl&a!~Tm87m!+jx8_e0q*EDKYRG8!I<4bqO1o|5yp@k+kRMYP!y?b2+LD<0TXbk{@FSra z&DP?2Ax=pbiDl@9VC~XmK}Wg&pSbNv8T^!sL-Mjfm;zb)y`!x*cje2QWF^qDGu;+F4 zT|#r9CyUi^uccRoKJxhtL`hU%5>|Vb|D9gD9qUb!Ces%u=q#Pap%TK1tTAV{;x)xf zi+BZEVUIVL)?Av-lSf$8Hh%#LI2dh&PSZ? zZTmi&eYk%s zT-r(*q0H`Bval&19kgRI>rvyWF|%Op&F^3a#c@(@jo>xeU-n)nwqcE&*ZtL6ZFfu! z)cnkLdz;9B!Gzd?xH}ji-&MyyS==55{dv}YCyNBKy#j}--IuxP5TB1`9ZHnk^0lvV z9unNHUwfTfuTLqbmYQy+dUuRart+GrM+Zp^sZo;_>@*48?@Ssd%U27EF;-RMbWzii zGmlYsH&#}VBPPxyTp+#++Q1F5kDadolc1IZT?J1@D1p;tE}I+Oxq2{^Kk)dw04kMS z6BqW~s)hkMj{o7Pb=2p!KQ&o5UN-&c&=J%|N+G$m3_aVWR82NyW)<2Ram2JVa=>C~ zjFrSRV4=)1fm{?U{CGcYj5RPS(M0a_%J_qY4Ybu#j-U{0f2IyW7moTws~y|`io^7Z^5T7JX7 z(HVzgzIMl|^E*U~Xdy*OHZJFDUa4KQAMQ;$^%oqz4w5sWKwR9?94YmQfhD`4)aAcmn-idBk_{yXnu}Nv4teG{ra$&=JRln;yQf0ndr@t zk%Xivz|B(caI&<~ae=b?r+Sd}r4?4d@Tm7SeR_7F%E{Cv#}_@w=W2_~)f~7D)`Pjq zr=Um1Z4cV!>d1d^iwlxk}*T*37A7|4Ho^S8EIVCO)dW4oVn%cjx`|xsdTA zmvazgdvJ4wK;BnAg1v@_!J2z{=R*Kw!a5cV^Wgnc4mf{)`up3JrZgP=Cp*V^Nq}tP zR1Di}P2<*`1-e8)b>l`Mm-OD2*}b*1qWm$q)#k(D^$vXaR{>mFM?*tm#!nk|Yqn*x zSbB)IRfIMI581PQ_I}n#DRzxtUDv6-^ezj27`!1N(KmF!uxI21YO$;a5k`iZ#zUqX z#j?%V@bZ=R*4HPhF~~?V%f;l%ACJndtXmtzhD<9gXHrW`3esBowm9)brtYCvAoOm?IF5*=*q;|2gUJZf>A!IeLq-Ld!;s{((~qG&&<#z zHsI4zT~@r%s?t3lQ|&XUJ{`wTkf2~2;4^>_De?F7pf#kCUKx%=Tk0;Hi#sn8paOJHRX;x@2^$OM=B%LjTXV+vcRO-}HP(0|7XjfD zt6Z0*KGWU~N~hbyKYoA2u!0liEqNOLLd}W7@t#yhR_+?iov{q zNjZDRYCT99sO@DLv@?6qbRbMa!sECl^gI0|(L+%idhO}pe)5hTTB8BsQL8NZ(=P-r zO2qLt)q>DjkCjl7h|YM1AJ1EPnkE2|l@>KJUuRf4WMe)FUxOapHAhE=fhh0^GTtE5?LzWde5oFj`swaf$0pUpr`tsBqRuU{ z+}eMPY&gisf#(WgX2Wu=_225JjV7j9aywBkzixflW?v|>vV!eHy&75P=YxQZ-~Hh* z_%nDmP-(9q$>dl@v+|>x)YeAHJ}*i>hi8KB6w^;agRlbrSqoL{R#OXO^~=Cx>z&K- zTTFQG_9$XPcOR&T2A`uDfc^ScZSnAVmpmV{uCxZj#8CK4mjj)ZbTu3@R%=;VUV3%w z$UXDNdU;-_$`5fqDR<$rZ<{CC)nrr6a6RTNgZg%D34G3vZ^y@9<8#`(c?tyr{<~UR 
z?`FWElK2Cj&7}Pz9|6q(o2;jx(EQ5NVNO`nqL(yB$gPDOF-ZSs|4=Xl@r>kvy0B8E}lAk?20FaOy&+VtgsiEO)^pZM9dH;Co=|)7qCuQZ>jTalV(;6OCD|_hKn(q(syx-zuXFd03UO!9$xY*y-f`ZO1 z6~l+SKHim{@^f8zfh>#j#)H@A*AYp3?gR;Blo@SZ>R`6rtwMO8S)QVtfZMAEp4w;v zL<@`kTnpmn?L)fVDzh0g1SWSjj=0A^2Mg4+sCJ}6#`nurJ2P-LS3kc9cVmIt2%VG9 z?Um=Zas#Wi@afP+yWafl4IvHzUvZ(W$OG$9U{_2hh;`N@KYh`n%snvuF zTRc8vEFSF$n<}#A2>2piVRNo7J6M8r?F~Pu4z*D>|91ZFFm<%(u$P0F{VrFa_=KDV z%W4q_yFf}j?B&+zb|{65suWqN9ew54P)BAlu^lwZ&o3ur`EM-KXK0A7YD`f}r8|_v zK59Y+>B|}zKp4@IsGq0B+|NL{YDh!vRKYl4_{%~P03lAh{jBk7uM5QV1K7Qa_81Fo zWFqUj>k*NL(eq@fNltYXmyoU!c($2D?6AsijT*HL68|jFT!P@%BJ{@wq9>&FxyvuoWrpDDEG}d($AMf1n`YiTaFx_Bb-r`m z7Q4?HFW~xx@0VTEVbz-JrVT3P>3vL8g{ZDk7UIc@cO@*vX5n$1v|a6)c0=Bq z>7117FE%;iqUuI9(FuQI&#y67O^)imx8=q>q771uon*B<-p@fZB5K^&mB!tx0x_LE zFW>zrzpcL7HpjfO#U!+JMsjHl~ExJA$@Dd@CC!9aAK>K4I-!RY6t{GKqDhmARW8m`Jco=#Q>T7&5KTp93Sq$4cN{U`&P+06!PU&K6sU#i}5=lR5*jSu(g)YmqzZ1S?05O@?#93Tx zS63(VLOdmsewlJ<{V{TU7aZH+^%|2WpMgzXbZ)@_t((W+-NW0>+!%2dVB?MazYJ0l z4StA>!%5V~JsedOqdUT_hF)TYar1E}d{Xh?s{K5HR!*k)eGLeW;%PL{1_HhX+vC); zHZr$TtC|X4ED4)L#%3b{RPIA(^D-4X>a6`u`q}QJr3nUm6b8! 
z&lw4ZY>G>Wx&rIqlDp6P=ftp|XgxY6^9dM^O5Wh~&lN!*69;uFW`kV8V3K+vW;uNf zo56{S@c|)8-FVdY1f}_y&3@W#obv%0mYly#Cgnp}wB(WC0hK~pq+Mnfl7At@&~Fg& z_+VMef1>fnu6X2&k0bz2iUi7nN%-%8s60?rrC zmMb9B#Y*ya<5&<6AA_c>92r`pPBeOchpJ_TPI2?9C{%$iIm#Jm%PQuPH=T8TgI~|p zUk@9)V;pg3WBB;ux(hB;tHnD=F|TSnoYuRDZ6+Q9F$_9&s)ybGjAfyYo~da3&A`PT zOFztGq;Ki=nnI;g*UD4lA2~ssd80|uoC4Zx9_qMMq0a5rn??891!ZBqfh$0_nEC5Z zIuffX{W<5qY+A&)Aia-#nkD2=j``Pr#!#MaHW1_hd)ug>=c(W7vTi#zjGHhu_t7!d zf`uy{o>_!U2!Vlc}j|HBl$~^ z{p$WPjW&_C?o3#mN*|I}8QlND>F)cd$WhX4Ns)slM2YUBYqQgd_Y(dkK;JY^e&ELn zmNDa28?DjwAeLadL51V*hOG0JC1RRW7UkqtY*r0KqpOsIPc-c>tIi# zhWI}~J;GmIj>1l32Sn|9m5ot4EmSvXf%tdGaT)KfWW3*>txhA)I%)H~`>mB%sr9Gp zQt9bzrYRdRUZw-tyPka}ku1NM~0 zV--jwrP&UkS;jaawIM{_46G7hLmE|U#ylM%AZPnYju5g;{Z|0%IEhL^BTAvcEtk3h zIkVTQ4Z}-`IYvGgo0W5!k#1f)C1KZD&+ind&i%Hw10?P zU2{KLiF;FH-wf&5lpK_PRcY=)cCn&2 z1I1c3w?s9`W`VxH1iz-!nzK1FBd`h+?+XsA3^V> za7`FutH<@rQrj6z0tV~O@gacb|h_H82f{*sU3w`AZp*<$iiF7CInt~?`0 z=v*J~e!;lW7H2tFVN?T_@p9$v8e$&{DZ>J zDnqa_xE=v{pTih&y2)Hfa_nw9(7`zm||0E4_9w8WBNn2%+4WOPyRB4Hh{Kw z9p?_6qx&%kLcQofq~T#s1P{7aOY?Z60Z4K0(mgM8L{P-;-n$8paV zi}{Ksuh3BAdC^o03D#^O)(DBP+m!U~5K=z;`_D3`?}QU4c45}_&$30@W$!-(fb+WD z!`5-HZY2%PeyPJ+?i*L8A`@nzPl1C-spLK@nGvMe3#+p$y^l53@|o>`O0wc!ccXv+ zpn_A^lts-!KG(T~zI0gk@-~B;Ax+q%hLsB+vds^hHmS4pqVSgF-qVd#cg>NlNzEia zoads-w#pP)9{Zt_Dfwo2gMsK5E=`dsu&*)j2FQeI5h%JJ)oyYHfx1Si(DZb8QtbP+ zZDeQh@blLooR@XLip>M6PN*rzbot}dY%pddQ4kg?C2+0Ol84ZCT3LG@JPw+1QrunzMYfea;aq7)n8OxW357 z@nhl|>584`{q&rL9k*5@(@kmr=mNJ{Nv{4LT>#(VCpnSlim8(#RUN_kY3p;PDqTT# zD_E2*9+HdtWxpJUS%2$t?4oC7?h75R6SHDT|0TfU2(@pGu$t_8=ho_)W!6iZh!Dk} z00>ll?Q;ajhfsFB*;Yf{`vdXy(3qf7?--g^{>f%j3-9PEU~u3r!f%10m&6gBI*vXq4UN9EpcAHDCf(;jqgg%ZUw+z{e> z*Y9p~TI=~AAG3iEcAvLasPlPd;e%X%WR~|EUpk|$;~v)}R<_iSZXw6L*%NPydE{Q9et)q~;b`(DpJf zw;BasuC;#g%N?4^El!E^qeMP7GxzqQcXmY+D%MRqk*3=kRW5zjJ^aKVQqjQGX2o7z zn!@|h?t&&B#m{=SMU2RWmt21BA3l8edzm@9>sV7$qfhvj$zHAaD*bUQEB5j;_7@+) z$7P*VeqL%@#3E-dEr!6(#HggbGL3!Ne0bu&*42B1BRxwF$1B2czJY43Zr2xz_yaCS 
z%@;b>57{6I$)X6#QC2t-zxI!4L9To{*UJciYj?5t#5&yzXlSYT`;VaOmDmJD;D<2_ zkM{+5Wt;;tc{49U)nqA^rxF~_oE90AcnJcPWTJ)rD{YV)jO5iW>s9>oW5?{Qx;-OkQCL=es|&ebp0-)>1C{-ot>l$8yXf@1K^lt@{+npOJ~B5FDM0yXJAip~7?FG}r1 z-h{7yW9h;4%f>Fb)N*qd`B0r0G5`ER^F_!T5YxCV*TqrGpjXSqXhfkwhkryV;u8$N z|IS%!rgi<^B@izf$QY*NUC_str2PF*steCrf&IVQ49ijE0`*W>%y;uCj!K31G`%^$ zxBZ0rZxlxkXnFhYmI_wGB^=}-u*LL&!dU-`Sh-w@Ajs5~^>CqD{J23I-)(>o$vAyewL5S1obQJgJ<5FvZ3xjfVY)b}?Gd-R?~{kKUzLw6+}s zD-XfiYlW||h0sMJ3R03x9+TQR3S;T5lZ#L`J2P1w>UXfHS0e5i7M7!M&nEVNd!GIq za23fgV{OFa^-t@Ew6d13oy2mDO{p3Tkn<>iiJYF%|CMF@KgJ}_9ui@Hx%Zk;)%jmJ z7{6o{GK~zV#p+Rg)xj;>+Etc_N+T93RX9>nEK4ZnU_$MRGqmETK*p9d>5Wr8(SKiq zB?8Dse}YpEld9NP_h^IY)3Uvv61cReWOOnhxL4V<@q?#B+SX@zW&Ew4lcaw@pr*my zECX`-Fwrabmox>#t&jWl=kd}~M2WX|gy$q{T~K`PfWc~p!px%8l2VtoB*k0Qp!&*9|R#M5NZAuUU+7 z_~B;i!nJVkngPPA!Jzy8VklG@g(^ckS{=Yg8rhI5@{GUS4x#>=6pA8$zKsy<#i?nF zZI`VwLV^4~jQcyu7l%gfzjESWT}#)B{aZje17%1{^TQ-7N(y3ezW@Gn6GW#BbFv^^ zWcn0Qs@uTE^qC0zt5Zo${ptn!d~oY}xYlyrCam9o_ZaG;<%0|q)<62+(t9wrm6MW*+J^6N=E*gv}Z_uchQba=596ramZJ* z@TD&$4Dy3g(Lg4`Vqr(`%Qo`Ok%#3})P@l*bwG-X#NryNpAR#;9 zIh1LE=Z;FCnR&Ae~p`AUCnD|zcmLgHebFv=;~$3i-6O< z0k|rhfKjH@Ov``ywRR3Nm_{dYDbLM^*zbByn)Z%dwXh5g#-~uA)HO2rOJQ}YYbMG{gs=3Fb%iJ8 z<$e|s6`E)Hs)TBN4R2o-a}S-IcA()&MXIeTnaAeYhvYOJghA$`kwYm10Y@E?q*Q-KeO3h(=as+~@5KmX)thINGn%lg2Yq3#fLsBi__w1y*n)B)-UI5zD8O z5tsY0t6a~07t96#neqC+`|NE>QPtJIkDI@uD`Ir`sjEw7v#2_iXr@>*i?rL@!5&jm z7W7F`EvA$EiYE4vG#0Cqee<_Lo$SZ7$USQk)AYi_AEW6 zr8lmyH*BmHJc7yfg{F(N({N2InJm4p8f_-uIQiVhjgH@MblfRR@T!8NV(GS#jgN{6 zx$msz|F_~seB2!aY8&9u@vQ8yEJrb&UaXav*nX`}e?^EdHK{utbf2gyD1C`~)-!{O z4(;Tzbk2RKH_${P>}l^>wR4@fhGlp4KJ^I0=~2z@<5#&=%djI;(6H&NCm8;yMaK(a z!;?`{!-=1t{XMj5J2^cKBq}RTSq!}{fH?2p@Gf(J~< z=G1-IK`a7yDIr!hpe5sCs839xqAKrwrf{6#$K<1j?BC^7w7MzfG!g(zH{Qo8mWy68 z^#zyGa2bCjAra-+HeU~j{E|5)o~=MkoL-Xs5f#_t@hxFnM2F7ioT73y0>^zSBuOUj zI0cGQR6jt2Y$zV(y8K@{8#ccZMj5rN?MckdpN;9-$A#-g@&7HlUBhxG8U*=I1W@HS zF3cQFp%oc9?OX3a2B{Mg>BLq9ruA`<$GhrBwV>0)P>54$UkLJ~bPzUxt5Qzo0UvX~ 
zASFEiU2~v!skaA{9l%xG#ll0$`F8TVKy3j236hVH3q5;AgPn{f5zfC*?;mbTAFTt# zqHyVNUVPUR`uig+8Cjs-n#sCu%&y)H0z&!f9+^G z8d#co`gH`vgk{HdF}#a)y~Tkyi(RYC=U93W*Vy4|@PJ%r!K~$N`T6DnHQ%G!-!Di} zE%$M3(&OP~$N1U*-zl>^b2>GKPi%a$=zUjt41`?1s8Om8?6)0J(YgF}O$X}{Z_vda zMqTu=8Hu7zs^Qu`LiER34c@TpUF_Nr16q*#g}!j~nBk|0c#EL)GQG&ADz)LLl7f86 zYM)E| zz|#fNyFv2yJ3J#_tz9DE6W9;C3JY_>$T)s!BGdu!NM!*qp@CyUD|7Q>AC z14JddWfRWLA4R9q4lZet?|+aB@?1QYOl-@#__#-6q01Z23EM1CRQ>uj=~3_|of8x) zPk`)oI;=OJYe*Nzog#a5qm_%!oibw5T6+uaeH!5@y>qw=K%T~83=_Z9QvoIddWd`i z?y(7j%!l-QyQ17ETqoC-mZI5|tAU<}2*dG@Enb4B)0)>!?0KSqxz7N!T_5OEmYC|= zX@{>u=YG|~=A=A4F8DCv&}h-i`>gQUa|JO>C)751m@$1Z?tOISr86ydZm0bH zT+Uh3Li;NKQ^JjP;l~3N*P{84DSSQ6#wyEzf` zxTpGz%ZI%0-?g+2`-sLzT`!45(1C0!HInCUr^GM=M15!h+0kHW*TKp!F^z-r>%sE! zIzcZ-Nb~w5wV%0#ZAJNlIkA%PH{c?^j|RL+_pfh_u4R`luZo{My+$&44IjNFZZ4?O&3uNu?Yj0|E%YAskMD{Nc7_VPW$YVSu- zs7DEF?+{kwJn9d!p$^R-dVRb0_3gA7Gs6 z4=aOS2#mV-Y;=>i_Y)IW&74nAIhjVlr(lgA9Cjs_MDZo8JbEJWqLcU?g|scN@2nn^ zPSZ|%DCqZcQBFkq1IKmD)Me<@bZc|V#$b;{Z#nsVO}1y@hvB|;m=*wvUE3Xd5#cT51WHMYe@;rs1iBta^Mx8%LI1N z13Ra*_q3{F((hSOC4QKIyGoj9>B+tR6t(|7?SEn*z8A_ZY_$$-W2Yqzq=t_9Zm%2c z?T}D%K(|TkzeOHb7eW+FE&(Y=+?qWBuju9YgR^KcBQW;ByRG;F8 zKs+I8sRjR$k~+>gu?M!uA&!_m>bQkbYEIz_MsX0*Ejsx=nzM7MFhh&-ZpK0JIJC&} zjRD{nb%Y;6op71|Q7~&Lw7iTq*3=7JO5SL7U|`1XUAzt@nNl8X!uw<&S9VVBuO*o! 
z*mS7ZH5aNkl>?m&@u?LMjm~Bp#;_QP{zp%uOltIb|BQlwf&ft`Ssk-&9PSBBKJQby z%e!aapxGIkNT}(}qoQp+0h=hu_jF`{N7h1PF^vJ^WNYNtNuAnPoo-v0i%b&j3apOs z%cASZQP%?}u%1jCJahpZ>#!r$(0~bYF`uW+7T(dk$Ccy4r`6y-ZJXr=r}YDYM)E$P zSLZg}%3L1R3GL7ALj zq0SWUNh?Ebv1?yxX??J(eS^@F+rDt^TWl}=##S;Jai#*NhVgNT(w3Tki;&Ejgcj>y z#4lP5do$hiAAo{$wTGh}!S*cVXs{POLn>Z^8xI^%PooXH-<%$=OF)-mVkkMl&&kge z0}q}fnOdG1*5O!I3h7JBf5O}%cj`z$k}%Yxx@9ZmHi;2J_3esrJ;U1Tao5Z^q`9o_ z)00DLL)I*N&s00sRQHjqfPj{O;N#tO`h#t)2kIPyu61$#y9B=3${@Hlq#NmXNz!5L z{3Ujo+wVOAjmH74V0n~YJcfc%!sx;uY$>v|U3FvTpCzQKApr(TKT41ao0d(qNXKvt%i`Uh-&G6hok_ALm`9Yxo-T=9e$pPnjD!Is;ZFV^3caC6spLpdU!eQjrzm9Yc&Xqj{(pZF4Dvz2HZl;@&~ieKLzsa|AsDan6< zi%z1-AcA_lF*FPsDsL(oTol7AZ8D^Jt{yQl42n8au&a~UZP*irmcZ81g`RGIa_#+iB{e{he)v!Q z$A6Xd;drY^8OT5Sy-dXvaDr{@WVd86F~qOi^3+q!>hw}t5Elw&JKG#}b!Bz)R`4ew z0{}Dgv2btUe#@Lz(EZ!Z%iXmRLdpMsX-KiI7;88i>6#)SHvBI`*&;q*g=uqz&RIJ& zYDs}>hd!(e)a{o?ld?P3%wuW_2bRZLq3GT7C1XDCF6PMe77J&NvhGdZZHL$k`JG}H zA7A^!%Q9n!Dxp>9ZWUeU>0MjfIxcs3y*fMLEjc&+$C#yfT=k#zT!O7hp>J9|$MWc) z<5ag80MXZ0x8=w)NHh0O*ohkJy1Kdz#F*@qWBqAU#ifX{n#Mr;o_+dqK;9^=c@A zY3gtnZ=L-9x!VTtUB-OJR|`SW6I^~=hE9BC>U8o5nAUQoyl!Z^OE24Qx|}iqtHb&{ zOp2m*TT~X!wmy&28=E1JmzS#ja5vVf8ZdFSdjc~CHB_;sI(d||l1-~J{owr=j!3}Z za#md1z~8(oP2JK2?|T|#K?%4-^zI96sv7t?#XBGmV@UTsh8-)Pi8<%Rxcc4lwYO*g z`7Q{gZ&vL(Y(6?Wddq_;wmf)y9UX_T^Ze4urs0$X&k(MSYtvZuClW0dx66}LXRrv7 z+*^>|3|G(|(vhkqW#D_hgrS)@j2&--pBj}*L5$eIp0u5F`I!Z+Fb0XB&p)o%)}u!+ zSQ_nD&GRND^CzPKJ!abo|6;3*E+seU-71khr*tIlTT4nL8cbdV`*5JPoi#@Vr_}Dl z-EzgBHv_GLDH|!=pQaCYowh{(r9b@9GnijUe`Y zSG98O6LA=T8KR?A4)i6SA4ZGxg}iw9IkM>E)XdW^SGU^fCmcf#YWuokpEtJS9Wj~5 z1UF}X2_MK+Nzql$-#jwjw`t4&PYCmss?<4L0)H3pSr6e$9#Y%f*-1#)@$RmvuTV%z z*m>R$QGd95B+tC6^MJ_Df~zh~??Ft#YVc7C`eJ3Za5Z+w?MvamC2_`H2@ z`gPYHAB5MD4NHr&I}Ypf zX0C_>J5SU3W)F9ZflM=Gz8IvpuzTEZ0A$kUbEm=kc3a8E$MUgC>}-efv4$ABNxXBUZD_`8| zp`lG^sXeL84i!MYK(|&)d>j{hI=t3ojnkNl%yYBf%FI0Yd9e|PVt!*!qA!%D?z8+N zAoBGzf2-YbWd9&a@k-&o)sMCh-_4$Q4-xw(0>BxdLLhXuvO zVMR~^?wqsJyIIWDFgXNP>>(#YE{cdGt*O7TOn{_ubMYH9>=(EgZd+Q=i1uRG4dQwn 
z3&531R>lWRsNk;Y*3+JFP6!&2_brU-5jF1BA!WKx?gEu$-`!?aUuQSA2QpPMpXN!c zPqE5ahVDIA!A{2K$G-n!8J^v|WMyd!Wt9tP^;)TJr_=G1J!Ss7MF|9J@pB$(qb}U` zdD6o%hDS|1TSvSgoc$kUYMCg$6T2~p8T#|zOx`^$0driDm+k$1HDh15v&#wc{`4uJ z(qu8gv^Mm&haDU!K*qqg!`(z%L`6>`ol$i{k+qq&PZ>EJio0R zThsC?VO2Zs(X&qsAp00dP>_)HPnyJygp+VE$r9vIkeFMWR)0XXsQmTScG$O}6d{T8 z-W6KZv&9OZZu4+6c4< z%l-Indgo<3?j(_B$l_kR7BGS5L&h>SPaVnvboO5Y4cyBIr@+6P94S)PLpb zfb#p6t0_2Q0a3$z`dRd=PWo@#={J+cz%U=_X^*0@tvogm7thoVDpNBXY8ji1eRWNL zyN?PVL!GSyip>kmms)QpM(EpG?3#023L3}CkmLH`H*tKKOFH*XoZqjD|84|4%6TuJ zOVj`_bSR^(g9*m-I0TS>>(5VtR^3Y*1Ts?O9k-Kl+O3~TPM}8=Rv-xw;&CX7zAvMNv%04$o0sBpbrvaUKl9S&C&!L5B zeKd3~Pdd0DoH57SFYE$#JnwQpUZ4kZ`RxYl)ntkw0V8Rn>UlP2>V4!I6-X)fN7~)q z0fIXr4ZqLMkpI>}7+PeTX~iq!mNLED$=5;U_yT-fSwT8tZArRxxv9rTmSr%}B^Oh( z*&mz?p&5ev|2OWhC$<3ZJkEu&o|Dtd+xFHo-{c&3ex%yer__7Cxn3~6#l3`WtP%$F z&y4?<&BDIEBo{d_v!(_M5c^k^lxr#r=zw=3#CfldY4x=HAHu!@DyrxGdkqu?gq21C z0qO1zW$6+rX%M7Sx=|38mXHn!L6DH{5&`L0dg*SYV~O{!V0?e)f6n_HkL=yOGk0d5 zn9uXfGjr$Qlc!v6{%&@p$BVJXk=H;!nL@-VK1h4GyH`cOg%~1JCJ2FqD)Lpj4kOZ& zMPwqlL^tf^~XV3vF®dXk%cK_|TdE4v4_tY?Xn1cD%mJtH$zX+ zP+(E7)u#<80>T1&VYQ}x6B!-PA9>BA<+p7DoKlqzib0)Hy#_*mk>0nsfg_H6FFf4R z$*qd{U<5!C4U+vvxqyJBqnmS8a`&k*99U2RWv<7x{^Dx`QAjTEBsre%gq*C9FZn?&LHH91%RWGj!7^GFg(%+Jvde3W_-aOIfX2=pkZ9hSYn|H>NceTw zPz?X)K*q2z>nOyRCKG$aJZs!(!!P??SRz@wS2%QD+xakp1DcwW?&VeiyAYosE2y{NGJgO4tQL2P zsnTV3!+GwpF+F^;z3R9Jmppd*5)WzJB#(2$PS`i=F=F%M4bWiHe(hG}+>Ub({~a2L zz^UTly;fx6g!7%&Godm_M)vF7$^7k>8G91$sA@q$2$pZc6JQ+lXezV|Dbo(3r7?kO zYhWfWG3ueb6)_4 zfC%osd0q*(OCXZxd6;`UlCX)1RCx1X-1}=AN#{)f%9Y4u_eqGUQ_D zx=zHxRs;s^yEj&pZhVymrYmnUFctxTJB@1NI~*<`zT4^P3m2o+yGNf_-Dc*d>=tE> z*hUXU?mvBcKU(Cio``zK?As4?fxSf5rTs%?!(}}iJL3mJhX*cJ<|%M^b;mPbgpo!AjfRd6wTPMQmK3VG}`Y;!Gb}C_(AtdbcRnzpS9^Qz^FL6qoHn6dL%y!tw&6 zu{*9e0;4I(GrN1s-AWUtN!IyT1EpqiQM>wEb?Tw0!5N*oU~2bd+v{)O1&i69Rg5kFJUJQoZYe-_nnkajKQtz ztvZMfZ-t%&?iA*&Yr*n>b_AQni_x}`9$7GCb!@E;h{h`Ht?X5=RT15GKZ(k?{-MJj zXbOicOqP`0x*>~uxc>e(!?Z9kDo5RJ=p>AN8r2}bUdZ~Um?#uGYBu#_Cp`ey; 
z8WxF$#))XaPXqx&1b8SSnI9gbAB}Gyv^Q1s~qhsW21gBxUb43QFPX>{@LzsHP z-&;EwVjk@+o~Yq8-~Z;GCe^USU+1#xtw3t$fpU2B20~ z4+iKs%R5=lI&w8k{3dDB{@96hGf~L& z*{dlUgV>ha-SLab2iGKi*cxWQBSnFu8$b=sIAU_7?1%C4eYx8!28(#IItD>qsLs_2 zTT|~@^pl&DA^ggu6j-09iau3Wx-@`W7+;tK1l-Y=2C)>G1g4>K+kglrfT7@IEOh;d zs|j{7A10(LTY8SWe>``CRFjIl7$$=O``8MAa&(_{+j6eq`q#}(>!Itw=Xu<`GLyo( z?&vb&dSQ>BxZv%|_@YHm{bkrgR^^}tA8Ie&SX4@yedk?p_Lg{Ch}5hJGf<$(^Vtnc==K}$=fKMgTpY&sNiWL~>g zPK0mR9@^o1oy1nUh<%Cd&y4e%ocLPo;Pzp1&~aOz(OM|kuEWV+XjIZTJ~MS~>ujDH z2uRz&x&qs|4?*Ibt`^rs=I0tweX>V}hN`E|r#lnZA|#GM>=Wn@ZHcFxPMR8TRywSQ z1VaEFXlmiOQHiZOw23&bN_0%Pf6gjxNmk4V-b5o1Q#{B! z3#l%B#qxw|vhr;FL{b6a z_Kwm_(}R-oEp0E0x%rp$c^OfFv!aGEPfGn4SNiPI&e4Gqtn)ibT`E zWu0)Y zCR|kd>QL`3P1<-gyTk$!z&~_m7)M~HVB&}BkqP(Zbo@GxniKf2az2Kc3nC^Hj0GbI zM5maV5k8-p2aau|dTQb7+9=1Zl#c>ZIik7g9Y#s5VJs&@WcXg#(sb?h8d;n1O;*{a zyu;JOaA9<|?(xo|`B>lg88<;7oBg*pFy!kFwn7TdW$Mj?v@x2Ntj12U2|b+9;{5tt zcIFRK?wK{Rg5%DoRuoZ9Uc%S)Lt*+R+Nvs$NcB7k)}(5)7ZkomT9AS+>4GYr8OFVQ9fPhY#J zsb7tIa;uP%QZBEC8K_hQ=Oe(*r8_DKc~E6blS!A46jjyq?tW9)KhJEYtWOv9Bo3L{ zlfz%ipi8L&)!s;_F?-o3VD5hMXpTV0X?LhK*n5iOI++v$8!*f@=EHDdcf+5lMqYCX zX0n~rb087>XL1xd=666g8?vw(b5o<-3AUqW^_o)y{hJ{}X?atk3{i$EmJqIXw{%1T`aCcf>>RM7rh}YTTR9uUmW&n&@A@1us z7uCph5z@hF3NWZUi$*$FrP4tM)rbQ6qbJ$a#Nj0PGr|=(*0p@|crsb{UY!;_dHGV8 zOsEur`$NFQG(L|S>5MqCWxq|D{l(K57M&7zP4L8MjOdMslX*uAvq#_5gy50M?nRGq zg5n{?b??S!jL}#e;nzf>6jff$?9=k9A2j-}Zr}7gsdi1V5?AUQvR z=w5=ZxwGm{1njXA$zYLVH#jgJBxs!$br{?dDBK29OtYt(WafcU?YWXieoilqXFdHo zWYtFOM1_yjs|RrAdC2GFoExAU0FuC!VS9Q%rxp8XTZy!k&!bPVcpv4|Y63e#AN7fD zQb~IniyEMFkh@}v424o;>ThKsz|c{13Elr`4{3dBn5kb8>b2J&e_OB{%iXSk_aUw> z;snW|D8lKKdiM~ccCC+nbctJ_l$u-4 z0k{JeQyeH+p}$psQ~mw0%jr?j6foPQeKdg=A$($n1eP5tpWff_v-W@!;p0a<17XQ6 zn_l;pVUx9MjtG9=DUKBm*r>RDIj^zT(h%|4m{-?;mxZMhV|dl?E~U~9w`f>k;xS@M z(E$9xV(%)Hsl(0D^Xr-Tx_x_+JtQ;LpKj3xnEJ_IFJt!9?tEbOH+0N{&UYo~x+7kT zePX%>!k>)QirvKOg1NyM@l`sZ>1^?on@h zT>TXH;7jCYp{7M@Ah2%~OFHm=qySgxY5ucjcK1!}g45ngr&?%mZedx~@Rt`1)}9wB 
zQ)lI*3F0Pf<&@OS?(_a+cThlmNKZ#A)JU|c8(d`1s*CID%5#-1j-oI28=2z;H^_non3xcC-(HcS1G72{qzKPlJ><^9r;kW3 zrqsdfs>jfe9=|O>!Kl~8c{OG2g_x~PF%@>_@eQY*bZ|Q8jO!EydCMqN_b~{ZJJG){cU~k4R6#`x-|7Dgcat32>el2#|0uL{$hi>RhVxEF91psq-T2& zb+}hxcCi<7=%q0{NU#%gw3?yP#(HNQdF-J0EwO+O+Md{bZZgdEEi?Y@okO^zGGP_> zeBBOC0MPB78Yys0Rj`{LI>s$Za=h25rd?{?5|$9hHAlk8#XG-U5HWN3MfJ<(_U6Te z^`V+zx|;x**PWoQvh@Jnv4{whuPELfFeID{CdkRb39%bTw5?Co4+!9F9}e1YepyLW zjbO>+u^KALNOca7xfAOrrsz~gd=3l9b4W_H&e_-vSSNH^VkW9O0u)=6+I$ngrrWh( z*ZBtpAafEM3B;w=RCrwq)mEPtCBd+$89MV@?m{xMohz8a0#)ZdrOcK;t0zSFp2~Qh zZUwlw1Q(!q)Ayq{VOdFlyuWanYdp8~=sEDnCkrIag{xZgapv)Ou3%)xnX>xPiuT!> z6J(y)YQf$YY@GI}hOzH4+l3OEV%ICb9)4d3OzaKs+qDnE04v6ZBL@GdH!&10$eZe{ ze_9cjW5gcfo#uB=x{+xb|HeDVv@SotN==vd^psR#c*yTmrCViN_}pq8gS%H74WuSI zH+LVDKUka6i5H$qKNct%M*YGtRSq0pg)N8( zbsQe5R*K9u-y}*{`$5njxZ&%YXf0be%prF^2?!BIj9CETYFZw&A_2290kahPJuHDR zhMQgJ!gbJ$TTWCFhj4r>FH`@frj`eB{#-%5=Xz;<;&`1PL|s>Y)Oah!)p~)Y1`7na zokg6p?XWrvk`T%bfa~#~V z0D#x_oKZ_gaB&v?NdMiEk;!d!qXz201vcUeBM~(Q+ytl&?0X%pmJCMJRwnd#KWQwfL!v3$X7jWC7-BF$s@3` z77WLfRTOdBI?A{?+|ZlWH}4E^yFk9L^d~XZLpjP^0Y}f%XN$MrWg^N#EGqSdrG>1; z31+6Sp;8{Q0|0JdEYn{u5mZw?G3m}3XLtF)ZJmwmG^2u>}W z)~JC4ofX4rmkhjJ^02#dp}LjZdh#^sf>&TP+5BXw-ev-yl<0gI1@Kz|q|9Q|qy^M# z5R_F&X^3`Iw|yR`4vW|%te>w8J15GCee!PG;KJ#6$$)0~I>_53ji!m6Bvse1hxlQF z&3@KVHwV??5~j9v4BH&Nm*#oHlHY~HHN6aj0C5mOl$&r|?Mo@6ieiG?7QCJ~c@G^U z7n3G=x#XW`E0~G^umvefDk?KlIgJYQ6;b>1f1uxWDEV&u3S&2@`=o`tW7A77Ru>19 z=F_eW1eh}3;nFr5!NHat2QX9CHd@wz)Vn#CDsK z+HjqdS~(@wzt$$cEMz!|@k%N>foa{o-q6qsY?dKoG1urG1OVTWbzEI5bj!XrO(UWeAHB(ZG$L~ANL%H6 zwi#fNUIXB`iT_N!XQ^Uvb7wcPWlLj&Y4hT|{` z+I7>Q-|*i6wGTYUQGrBIGj?4JU|MD$&&V$c)P+V@7`&8firJeW0*CR1yN8T!pP@iz6iPE;&+VgO)ZgBb$N6mi^!#+j)U)Q8H> z2%UCQS*Yf=%&^19U3Rrv!!X&DUq6|7>virqmLZk;1K5j;83OEfB)HW`cEvnTPsK5D+Q!zIIe$ z#ZSg%{&ID%I;lE^bg?S0Ae~aEdU>o{F)DGixMSE|=u57^kqyoL8z3zaC1uWWUSRP& z%`_1Dm|8=Pp{vI7$w@Lrl68$&W??h-W=;2)8m#iARLd}Rfz^s>N~F4xiktNhJr`gH zr1z8O%5o46iPr$%=OzqnxTpc@njnxvnb|32BD2TXs?4>Scs7$?X4l2ey!8Htu?h?ah|mEbP%uX7Eu zbm|@R90B48@@s3oU-kHzG{| 
zL7k?R9e(X|^ShcH-QZL9p-g!O8^bI(>Ga&i5WoxaujQR0iP*E7xbmbLs9Gxo%5Icane;Y8R5 zf(TRVc;lGkPtwR%9KXC@Y;eDrdBk*Cr~XSD2L@}1cR3ucLru<_J;zw)ET$-?h$B`? z6{5%qAGMQJDy3_Dod^Aj!wZ5?b#cgTx&RZ<6O|911=&7b#cE;acK{is`Dn(aId)X% zZ(F&bCghLy1VnI0+3Hss3o!WJsEx$PU9m{Qq6Ox<%a0-%$cV`$g#+(ny!wbmyZq}& zYT(jaGBrMva1#hFToO5XP8u8X+?`)U{+6!fPhEz3fL31;2mKHMPJO*3$XyjMuo!$vKoJCa0tzyN5y`epv#j{N;P-`w0BqY2eX94Ln>sG>4AJNsNo z$w7Myo((K#nB~A$=J^0lKGMj}S+(K0kB#l1U4PpDQrUvyT^bEuQ1j9*ikC~H+w2Q` zplgr#_OvI|8SuUguI?T);K@uC2c{jl3ng@A=9oRgng+I6gC4~vy3sE09_Omk8;EY zphusbvSk|4kb(bU-|%@lFFT+HsF2oOfIZ&T`M?E|9zEC6H~Gyly%Cdf2V6x9E_k+5 z7VzT7We*@+fuywHoSg0L?K^kw#ALS@?Yy|jDhF(KSAms$`}U1KpAc9&L6V+fpF?e4 z-bQB}J1s4344+?--c45Ybl|O&f`Y%y8}cSB4-UDG&QAO4i5OnLc0FX&3JWzhvn}y1 z^4H68G>czcxJOW?Y~zw?%2thc`2)eiKUz}p6b)E<{zX8eb8_x-!ciBkCr*ov@0rJ; zl;>%w-sd^#KVd|SIe-}}^|~jx6}oz#?(-#Q8ceL9m=1m6Ejz1CvF>(XPQLyHDtXu# zqkGy>sOqt5TwltvF{dYy;##(%gvs!uVv*sN>#R%Vr~Jw+o`b~LFyXQpS*=7Wa*f5r zh4P-ZnyM6yser1H)-Lth^&GFUrTQRP^_QWxB;9xtr)F6G4D!`G0D04V>N9w7ypGaA zsu6BH2-o#+3^KM_a0;A*2w+&}emRlp7cF0C*5PeX`N?77Ifcl66NNUHIj*X1V?Uup zEgI4K{%dBp)^5XF^zh_#=JUe`7`U~ii(iBb5z{9*!lXMAZfdljZ|y{Bd*Q&n)~1`KU4&zi)t_e*kK zQqj=?O59{Ev-Qa0o^#Ffug?3#D2`x-CbtL4QyT-#sx`Sgp_5ggJk1V+ddj=&E*5L( z7bauEGGuEV2GP~Vz@p0AG8hgKRps3%Gu&SYKK z>?|Vier)bett{;JI=E>7-@dc{dRCiNW?Gpnh^YB==;eAKR$F3=A|{ofDM(%x|l+!T4ECa+MZ5o)yf~ybdg(pR*TWz`JQsRzOB~*Wce1|a=t4_vctzdW^ zZ={6bPNY;VE%@~#hH)4-XRVxsXyEu7Fwl^%_z0Hfc3p?=cIR|kJunjb0UTpZJH=9 z1lGI=Pwp_C~IS^KRG*HNkdlq9ZHs+-B zQ``v0StoysDdFL;QhmMM_*?to?kSx;d4yC$WtQ0nYZu&BJfS3q4Mz96@j)=|ihv_e z>Q3>L^jd*(PWcubmc&ZEIqNRXWScfsxtmRW@3zy*#Kd+G;+U*6yT0?c0Hx#Px%Y|4 z&`S?PARSEdf*u1dW14WA=LN1t)zHoGinhsVR+LdkaNjI8W3qP!PLvt*xvh3btq9+&83;pMdbO<4hEj$aQ8QdjEO z^3;3cC%IG@qL3#nVAKq}Gh^x_)Fh5QOItaApWjL7OgWFr)`i{rVt~hO>HOUD>sitW zxBnB@rICYO(Tedu9sOWL;72pleO>K|jLZi?`7pzSA#R6ZR_pN+o^UJh9d~!-uctIC z!&B#OlZK<8GZxuC~xmyz#_r zDd0@pz5e?8~K5p4lRZ~W%Uo&@W9|&)1_!X)?&1Ea&p(T83W)|hYu&VWN9TsNF%ehgnMxq zCRbx<^jE7)Y+aP_Clq@WsaubqG61eaZbfL+JTC(DUcFe8i*pE_Ad!ZK)hytrtmEY2myin;V~SG=DJ->JUX 
zPR&@yRQ%$$R%m_cK?!A7zr)Of8tLJa*GsEQ{x)s8z|@5#ab`IjAKo$wpJ>Mp!ChUA z`pv0}v>-|M$;9@A-M43X8fmYlf>aW;Cr(Cq5AjFx#u5h?9t;WY%VnIW~ui>wN{Q|j(h>+?agh$Q}$qph=e=yzPT$@1mUGz=B$Q`zQda_^L|v%|9o?B=vl|>&?VSh7{jNxy#G-9aG>oICUecW zyN{i7Xzl6gWSi5ZA{MnPaI%zc_`(Of2tO{-%i(_XbYNg$esS?Y(7sY@NvK28SJJrj zyJ=p5;C82ZEBT(^@um;6nqcW5?gOWN%hXA#EYjQlLyEHG#BE{w@b#dC4bRj%d`b0yro-Uf^DT-6Z&mHY%eTTzxh?j?W!r!LQ< zWSb?Mko$M>3TRQCfE))TiOehFD*gvwenO@s{33E+x|hiK%_RYEuc5mOcy*Wbi2U>2 zU+Ig;ODKAdDcj#-*Z%_Ef1&h~e-JL`_~rBf6n0ho1A-+}16lr7^Oo)|up#%7u)xR& z-Y){6YqQ~BtbKDZ<1dJRqJl#Sc)C{vf&5zGzPt*5^P%ac4g@e!(vaUZcz?!Q=Bu-Vzu~Gq z)K{95ka$_tZWWvSz)OEnRe0~DZK@s)qa}}yU5gH_Jmlkdfa_-l{OLVb>1m!@Ik5>$ ztRr5iVBnz7ZOdbTan^hkv6(vZP6^gc`-6yai$h>Lzu-`@;jr<`zF^rmX1PAdcU`sp z^rcfultNbpf1L3uGyUWNm9u4DC;ep6AjTIgXCnp4yD>Q5fxH6h$&-fH7j_Dcxku7s z5zzYl)iyuPx+0bd?edjMISf^Q6-bA+&G0t?y2cnF{HSsru*a8!)9~IVg=Zim2)evU zQtz-H#asMJ^Q(!b*H6Q~Q+Gt`?bS&+kA*Rv#TlpSM5PENF-S>9pr$Ib6u#}2E+S|j zyS`Ej_y+fbOuyK7-C->Ep{cK%__z5orqdoDsHq@gyf)rgtd^){5Z?MNO5QXJTP7=; z6YByy-PNz>Ft9T_<1B0Tbv(lC%j|FN^mG?<^3~ucZSL0g?YJJwT5Y; zB^fK_1v=WtAD9)UcE_#CIaRlQ20@`y8+iUI_t8qRIXB3MYTkXZX5Qwm7QGNk_oE6V z`r|vIV{kH2ifajh!Hi!lCOZoqd)hF|HZ0j8kGUw$$Hmpw5*0G7E|Q81t^KeT+bd_+M4fzsr7tP9jsgin_O1*i5xu%p& z>LHBY+9h?hkec|O{}G(hdiLiTNc`b~Op5whg%H=cY4Gz2bJ7(!7kbmuF7OV^4^dIR zte>uZ2Al>426Q00@QapH5Jw3Gq{}z~gkE!Lw1rth&gN=LHd>Zuqq7fQZ!D3nJ2}*C z%S+_Jpk14>Y#9#QTpL-pFe4m0L&l&HXC8r ziXYZt|03U=$}c68hGX>Q*D{VRbf>!wgMo^rm2nGKBJ8bi6ws6WELo6j+;qB~V=yg{ zWRrpA+u6dr+l>6R3XN?IhAX>;Rj5p%_aFOa7XQZ~?``#J8Jl_M4^=sh*Z7pw^imaVr=ZqMDYj6IOQdgpSPT3c@FdrA%0l6WgXlpKj_*rqJl!} zttOa58INB#ez|bV8^1_u54dN$un9Z{CVs3D;TgmuJk;B<}&7czE;IT^XXRQb( zasUxocIoVwzFQ%mwRWEATdgWk!PD@kKBh@~$A8rwQ256s+37TolWZ$dwt-^)+))|T zAAYM7F6CbCzUC4RD#fB5b)L~OJvovmM_(gG4vflis4zSx75#vfmrH^S1n;~Uh1pFC znY(XSE{sjr?eWsL53AItY{qc#*0(w;$t20_0H$C{3l_b}556`U{19nR7qDmhBjY$zd;N zEy-uUR@6393rrTf)%0ID{`>)W{dN>phq6T7+#vM83G!Dn-M@I_Es1N8@>*g$ivL?u zi%Di&CV2B9OCsOBnmRS+YW=s`M3!%u3ZNCWW#6am4vO7oE$;u^E1qyO)YsS7{f?GX 
ze#sorEm>?aFx6-%p6i^h?$~QSL9bRgW96%$jv~j}x=QiK7cD5-w?&fxqRgQm z%J`jY2D@_~hyo$$o5J7F6uET6;O3z}e}H08cE;t;ptm&$(y86OEd?j2r0n>uA3O#k=n?k!?B7hJ0p1&prnJ5!mfJ_Az!44$zOKq6p?S8txn{lRT(jlOew-5 zHRFwNy_z1Z4GA63XD%vE7=HPS;6`SpTeT|`=%p2k1E7+Qrcxc5YVp+3##u4ZjusR4 zkMhXjUV3LF<%pzJxsCI9jB0Jt!;yTNWs~I<3_ZD&;JgMF#vg)C67CucYB1tj%zo+OcsO-*R?y=Q(PhJr z<$LUEiwAADxa*&(s~^8Bm`FM6&cv>QLV2i|bjKBIv#p6N`v(@d%PjeA6$ua4JzMwM zHMN)p4(7*7Rm{H}O2*H96P3VSc%$-VD4usl-XLmK2O&9CrcWT0QjnLImyz)}m=;3> z&}cMG!h$#8CjD~v1(?fUg|;>uRdy)K6vEQvFp_#DuMaE&1&VWWa#mIvo6j-Dtc@FU zHnI3t{xN*H21Qn9szZ6eqd3wzufQ}AVf~AVadL8=ms|mNZ>R`xB*Z2Cio!NDICzeX zZtF`_6oc}k@d{pbwcWj(f4w{UAgq9Q2?|zr!>pWiTQEf4qH0!lcI`4KoX;>VXlX*L zDpRpbki-GwSOK<9ytzZrKX=EMA8p#~*xg+YiD;*HYBqY-gooZEDKt|@ng z5+gUqYBwv11@HL}ZXGN!s+T@4IDUKM+c6bI3f|_G1pq-ny2IObuPvFI)ID;Hb*yrs z&}iktwAWHq(4#e?xA0BkuzbOpe93`s51uo~;Ly5?u4XI~`MDoHNnSo;CuE73g?fVZ z)Y|H1Uxnb+2zx^}9dt_y;CTqhOJK6t_?&O%XfirHrSY-aQCV%Ul>0@DdtE;n#**Or zivnYn$0$8zMls*ZxT_aV=WNyKeq&CMWH1He(l-yYC-H(*71@-)teN`@xZT_4u zfDsw8G~e`aoZV?A9!wxQT@o&l*Dd{62~1CbF)|;vlGZ3p>mA$LuDH2S{6+PAsPq5w zJ1O;LO+l-xt2!gnE2I>DO;*S4S)vE2<@-IahS z(fu}9h@XSI>SSP{6zz;W4bkAg9_N=hs>t6+1~|oljlD%Uid6`hBaL#>p8q!c-q41w z^{YN&(JHH?n*3gC06*dM{WI@lfq_(|%iDkzjvP9%-|z>Dip==nq zGlIYC7*9Fn{u&Dq;I|>S1zi&4)(6%icrK>9tR_b3@^!C4Mo#xAb)pro!6Pw(n zge!_!!*@^MZtVDHa_VW5UxD2Pl2uKLFKk6kBcRYtfv?+1c;L25+b3}kSDf6$bMS_c zM`R!g!Y+pc^jwc%B|5P`%{2=OCHeZ8pMk3@++xim|J7F8H-&L!I*H*eGdZ-q>Ioe# z;h>1C_Od`|>O$BKqU>9xcqz$WlGfWaJA;=dh1X3qS%tN+x19~H5Qol+7e7H}em;}d z*5yM%P+-ACLQvCU%Il`1=En|ta;pNyZmMev!tj7b3N$n{#Pw&jS6zH6rsT_8mOc_}$wt@1xGPcZmb? 
z3ckCP%b{=Xcpg1@*$wF{>0d};I6JD4vAkN+A^`TYkdc?&Nufdevm6D%g`xks0D-|O zIE%lhNr10nf55Oe^rPP&G%LdVchQXY3Pd<$Dgb*4AAzJGPn0jm`{p3WpD9-a()%md zPA}Rvd9V50#`v06-qB5k3Z-ASYQ3Qfybri!oXsd8a+LiilHN5k&&bo zN*4$qvtOTq|H#SBfXsm_+I#-)uk^8f&S^$gbw$7`qSW75$gRJ9``<-5@F41cWbJc~ z^xtwfKfL=B(0KYE@y4O}Gx35Te+TQwzbk?N2sE&t=(1se#s&hyeZ@qiaGS*&RoIDq zUN%l7cHJ_PD2CYl-LOBv9iiAys{U;H1VcH0Gv(z z$gO&@^U1Yf;9{8B_3*KL@jFh6-!(|5w{l6F@Y2^gD|B~|N{&V`1VN2t+9ef+z9k`- z-~jSAf6~kqXtJV|-!WF@wBY^_PFpR3-)2y6 z{#jEJ=;@D_<)iCd2@{zs-F|aA{(n~GSkj*L2a#*E2Y(7OzFD4BP2zPBE0Yq{X?U2Eq*z&*nQ!0XK<1etsX?+y(${2mQt52Hm|sI{vb%-2 z;B*!9cDFA-+2tLeVPBz6nzV9a3+Wg3QxkU2#S(#bF21PNX>T{Uyam{cMvK%CEOLz< z-vRc7i<(r*rbS+db=j8jzfXcbm12&6+^yjfX4yJf1$&dxFNP^gFMrSJ>D7bs11_aZ zvO@TD%^c1b(Xqs=vz?t_)x9-Zn%)Qa*H<@cVXMgEkp|s$g&+8YXd9C{4XnfAY5vTl zzgsL1h?B;P_4tCb5(PF+a36oS^Y{K9MOS$8@YG~N<+64AeTSttAzLL-$?gy|qYN|h z*PF>~y!~5_KP9i~5Peeo;)M$Xor(G`eME}$@k&ZvBp&l-G)z!xc;bVdt*uJ=d=%aw zzZk@N)UD_zHus12zWwSkr2n|IVT!cT!~$1PrQvbhiJ-^p-UPc&j)oXO(RqJ$x=PNsGKy<3q{+L=u4DuZ@Gx$^vbq~`2O1J_NyB5cYkCE zghi&T@~P_&HvTtstlY%%_ZcF-f6eJ@hraO#?mc?`ZxJHD|E6^0 z6#WGUH~-0a_$NE$Dek*p;O8QxwmK|mwFdkA;?*LL&onxX59FEceO#jT4Ax#V7OMUc z5fYBJe2(LH9EkoYs}3to)3N4ir-jmu5KGlEOPfe0x$+}v4R_8#E{sgZ@zUl~1}ZLs z;y7g=1wzv6ID}ER$r5e-j?b=r>tqoydC%)Av{mmn!#8}KY*)9$=O47D^~*YQy!nHP zKH}a2du3dyBoK;+5}bMho8&IXM+6&PvGWga#`LiG{`~R){oKAvS?ewH`jTU-w$$UR z+igMsJ_39dF@!KzAi~|$+`GxFIVx(&$@{b7jbg<*SzB~eh!^a9eXjawWg@MttPB)@ z#ARWI#D#Dnm~zPbzr-7K&90&CjuI8}T*(CS$5z#PzR;n1+o7f1w%J!fOeFcjf=*(7 z6f3=J4y};qF$J>WXq+=b?37m%tj$L6MW*hyDgCCxgQL&M?yd0=RDluOy*CJByoAS$ zuSRJ2u3#A!c$e{zhn)Bd+ksdi$kPvgQNtsjsjVRU-^ot^s=HzR8@hu+o?XEd+4sCU zMvsYp30yn6R|c>_g+BA>j6(-mhxeV83dGS=G?y6PUA?qLI2*(&z#a!=^U8VmH#<$< zk+q9t{YLaAZ=b~e$<``)bM3DT)F894v@rH5QpT2o>~R_(w1H$TX%IeSUW}gm9)LEz zzigk-8{gYJ)67T*QPFXBY5Xi83=c&P2?6OpvPXb^i_h|}y7l+u{*DoRyo3JRNdo1u z{9FWbDSuHfs^~9HU0oto*7vW}-uk6|-fI!}m%~q4nw>5&D0|7scR@1)=}zz-gpjs$c~T%GsrOg9gq45sTxbL-#&AlX!9C#MRzmM0@hq4j;l_O;P|7tK;Is?dT 
zUwa|DwT?nML4vHO=s-e(@0SH@-~mL*J1mtyAHkzQH(jL%aCG{g&V8l_q&pAvdsG(tn1&aqCcm@Aq>6hi!v4NRUs8jh#u$T%lnJ z&84(emzgCeZpMI~)-#RA0PiQ7e-$1&dkb=|i%$~g)k6^;I^&1DNUq@J?k5|M{c*AO zO_)jN!3vg@?ED4iPUCIT&MIDag?k*Kyi%-8lx^Ym%llOZwizc`brKH8}ymJOmN-DuxDPztg{V>}80lU1YLQPU0HN2`u2%OI8px+-X z*?L13cdNNurvUmxg0yB%zG@;F2VZm-bK)64UkH1~aW8+qYJsh%_>iStXZXE4v3!%e z`Av+O1C_7WiDM$7OQTNxLq8KIwK&@$sxd<3>f%hxr(9(omJ54BQ71~hTbwlyYgdx> z_dR~Pp3Url3BPw6s5-RVQ@464NvT!7O5W3X zad~AZC_EH3mQUwUlUh@OizADn8jGY3nrP z)Zf!t^Ws!~x9_AWBHZJmDC6LU#Q3~0aMtxLVZiWd7|2N_8DH$x-m@ifKCEHx?*l1W zsqQeUQd%|UmD6@5jTb%<#?kDq8oXx^(^)k2I?^M%iG*|o>Ybh5DZ631!O8d_yBH@xUNWGv7kkAS6rvZ* z_>oomTR%!aH4yBsSUMQ^o-u-~f|k;BfjUx&UA9|zgI|E2;+=omNBZ{8lIm#2gDUwP z@zKPt5L3$Vfr)-OfQ_Da zW8jTwcQHA6OU6O~dpa-5NBsY`SwJ2|#2g2%<<=hH)a_0)tAY^P`#uq)A7n4vFtA+} zCM`bc!IiJjOFH(CvI}F8c!6(>rFUw%T6aGOgG*O>?-azaZhlcVDoB z4>%d)niBMY^dCFpn+n-pWfie=O^{Ds#R0Rn@LDO%kOudHx0>apVj~}NAEmSMr2&c* z`ai|?5QGM3CUUZguV;R!haJ`AN%u1rcWD)We2{#=btqPVC{ABHHV`2r#oqR4PZ zJ4?PyGEs{sTjP0;t_^*S*B#gN&RAm6Zc(TI85lpw+rT*_uf)>|v+_zSyRd~@$B)Yw z*%^sF!b9)Gl22v|*G#T{h}C@9wox#gav;t}q9@(=uMvL3f>VD4z!h&S)^bBb2;Z$o z&pUy2scPmGs!!>)5>lb|91R0$XTPN!7}*>8EcDkuKGF8SIEuk^7YBn4`gh#eECjHw zpFOjnk8!G3(H(xY&CT3X6EN>z*Zbdx@nC{Bwmk><_;<-#J0)AMLEa8;2;lCn;F>yk z*^uzhOIJE6x+87uz5mL(#I_W2YR8fdP~g=>1QkaU-n}sJy_da7((JKravJ!swarg{ zE6sU_^yJBZ#a!{!6Yy?0TGoUtyWHpVqn0IaUx#={%*Q%3tk~ryzN#7F5{`#+F5ms1 z2L=SDwc_;>P*Sxy9oPr)Hd^4Yw)7PB8>0|!m+`^z7RC*fzQe~}i(AF@5NIx={lgC;?+%aV|UgFN5>{8(SK3Kgm|mBZzE7CE`b$&bG8gd{PJQo>*a? 
z*5sM0hqll6sTzS@s`ElrkJSHig+}o{09-&fcgyPjA6;)97FF~9kE19UgtWARfOK~$ zsZt^x3)0=O^dd-?bc52}-QA5eEJ)|lyX5Y7ebgu3zt3;3{o`Ca=ggUzGjqr5jye9( zGzH6+2yr_(_s_Y{_+h)sC+%hBJ`ijX;fVk0#ectvErypKh1M&JP|ok+Av%#Q5P9yN zr#q%7C`x|+-4g8}7L)@;%k+_kqv<^-n|C*>Oy~-JsNOpN+nypE{&NfowfQ4vw`a}# z*h_nX&J=tHVU9v5-ueIYeb_G&@$Zb0RZdLqLhz2At6(oT-$Iq{T+)`)P15oHYLKZJ-as(s5y z^WLlE7~)EUI-NV-@c$0s*-sgyci}8xMynauA%wdA_T0@_2OXvP>wl*F2Qnbk^a*Dv zs<+7h?5U^rSf<$jM};kheqbRY|X$`4Ss@BPoJWUujt zx@e_Zh1iqTm`0|>ao$AWpq%;q&ugD*z4CQYqeUiW|L^1CLBXrT2HrhKC9)`EbpM&q zCs@KY-h}jjmKgFYOZ{gTN^zXQ{wrp;&-_;<^?g3@6cWAjJ@evfldC6 zrX;n+{E`2^68Pukkyf@VlDWdCl9r2Q{#i%s$>9Ddap zi@vDTvhS1ud#}lFD(o+g3PJlOh;w%fyg*e;CJmRB*aGqxEH3Uf z-Ir-m>sHnj>W0&a-dE(D^6-WmIfp2>8V$L+^XY^f*oaQbWj`IIRwR|l-OMPUbBjyI zadz*d35KP&9k}!U9H+=)>mIDp0PnHAzRx3R?0|B}aW^^WVk*s_CxV|FCIcgd-N#S1 zJDJ%yzw=20(5M4;A0kSBA%e3b2!QE(4u}?+Ub((XBwPMeu$;bq;8@;yc68X@Ac{mr8i{(RjBNI<&@x#u*O4Ca~0OWq@>w z+obYlGpF0|u)Mq&5XEqu2@bqipXS~>vEH@isna5b@|@)m2%WdBo`NomPsOey@eV3Q zPO{VLDUs6MH)rW{cOk)Tufwz}N$atH4#C}@?k zkEd}k_6rWWWOYYZQMn)6K%47sw+|wOuJ|3sc0a7j>J+|Zxwz8A3YfN$dlyvc*h6Aq zLfhS2z!^HRbq=k%FPS~0RpqFfpx#8Ke|Db;qCkvgAUFT(0Ed%N4aaz{Mmd+#*Kltb8g*n5COQ;uNZ zS(8l<^Q2_dX`0*a^pZ*`x%se$NZm*6_qC;^m&0GKIzQ;+p=Jgjr-?#@(%04bxQvPq zS6yNKlh>)u$BbL*`lIMX5Ryl0i&mAgEz!tRQVcRw3>uBqB2ZK&1NMt(Hu4n-x1Wf&U}RPR{i{YCU7KU0SVQv_!FFWAsj}-Woo4bFj9A2 z?FPqwhKxk?t~oanvFZr@WyT*Px7o8{fOUzS%x)PwwH{BDL<2?7*TqgRWRQyftHN+{ zK^`Dv@f#&FY!DSRD7y7i{kKwLs%2qofu1<$>Zh;+=Q(USM#r;~!Rqf}|4S}{NA5F1kC{1D#ufd^78Vn(ewyMrr$j+2|}I^x-*#@q4?_f`nE&4h6Je}8gY@B zjHI~!=E`IE%6p`g zV;-=4C<=t6WW3~E_NZ+C80g6N`&QcMnLVe*CFF^voF*e=#TZLIBooWapyO4^JQ^Yrd?$0$hHygx!GkMQ=h=e}S4QjPD<#bARYlf`I{b-t5a5*$ z_&fM=;HeARA8RL9$JpW4TaVwJ7`x~DJUHL76m{tkak8vyy-#uR?Ur@)PynvD640_1 z%L=gwG4%e^<2y^$Gkz2^jl!G)%fev1omamrv}J-HoX(o-L==9r(UWi3lT#a=pee^w zZ(FcGs%`axHK`eHMlIfqzEFUrEx_i}s0*}(7AEunwg^ud&D~|w<6yPTMT9zdb=#ul zCr9~NXvKaE8_WZLTr@77sx8F#QJRPAs-NU1uEn-~hxe3{5($<6_>nl_y1k*cx3947 
z2yqzEyzOo@aR-#DHbAu3!KZNYbi@fA?ZoA8UlM1EkqRtQ`ZO#Eim?%G~)HpW2^8;=`}M?qBr;h|69FJz<<3qPl6~Yy6He%$@&Cp3#h>|XGj)$_R6b=G@iL@6LE-g zE4hWY9;P|~i_TK{tVia`SUj&@JuFp=nRS&>Ew%!emo7DO@l*v6{hB|dlZr~NQvf)k z#R1UKwq;g%ST}jI`z`z|vYRCpbnQoZtAZ0bE2)5Aq}g~>E2!h4=Ao}qApokO@+=?U z@~S@Y^BjL?xf&IlUI`y%UFKtzimamn#moLN6=>p|!$9%3jGejE}CSBVFN8%z7nF^TBjHh&dw)t@h_rAg;dr8o0saKB;0Zg;7R z(`}R6Z$p?j4>BS>eH1tqa(xT`)EyvTC29;T4#Z#*F-ooRf2sH753!a*66N&8cOu5X zgBPkCuhIUt-G0SU<0bB>&U^9UZ!?YeUij!5(&rEl&8fC98fRdgLxR2UH^FQ>?{X;l zc4Z>!p54;zFr|{0t8=-9PT^vBnilsq)Pm#X-$tZYe8RmzQLP%AAHWubx_!d3Tlme9 z?tr{(2y%A~m3o@oQbnJUE#K}x2AMZpg{!Yeg=r&ue?a zYkF#YML|Diq}^UPk6*2htNkzo*Lu1S@1)5zH&2Qj&ra%UQp_}6H)>fdupph0ZE`)J z?ug?#;Gj<{J&5Cc7re2-KXbIObeKlO4rtoOsUINnaUDswhV@;F##uheU#xH&nVPc4 zd^vNT@_S*oh4nkQ!75dT^YhEuGx6)byVE<}!gxwUDX(%a6;wuyl53WdXKQQ*v2_PM zXf34=ZErztoSXfJRxQI^H^a!NphGWswb}h;SL5*wvCN_ld{|KH_547WdSp0nt!2Kj zXK!@AMN6gC@T+QRuNxcmxZxlw_VT294p+Q`)tlXP8eBfJwB308b`=B%AW$jiJE5{@UY;jEPr_(DLnrv;S7TvoN^ zIE>wT8D`_LM!G!PaP1P|Nanf`>uUl=TmnJrlOC@6pKvVBQs9%xb`c^~eOzz`eLOq^ zNeQA^@`V=lhkZ~ zHr7|k9QDbnaxYN+*4gMF{2+PxViOxkxq}l4-b_pA;={oS3JN){I>Q=0-Zk^u=tU<} z03oiI5%8Rik%Q8d@cQ#{Y4X#z1lfnFd>r z4E+OcV3WEw2yLEnC(OD6K`k>ZxZO zt`tDQ{eUz+ZOe6DwYHd~O5-`Q%s%O`MhmbN44LcXwI&o_RKr-YEEs=;b;GJ ziWSPQ3B;dVU5h@fW1f3KGA&jN%%~QHqo5Zo27HFx+M?8ho>$t65ejrbsbiH2U z!HLA$N>2OUwj9R7a~^JHLum;ObkR&1E`N+Cr>8o8(@TlCc_Xc*HM;Q;%X#(#dcmPx zi}UJqfC!GD_FA7!Nea8(TS(iRg&O~P|3XUBg_hhZ!)4cEC#2?#yYrijqG+>^?x?j~ z-?XQhO-kT$&VVjequ;EW%q$=8PuqMC?lLJsXqvBF%&ae-1BDQsHt@GBZ*K<9b>+E( zOui=t(3EJm?)z=QVZ1DWsP%v}FD@MkF|~2K)wZ3w^LXT@iDS++n&*nVSV+4$DW=7V ztl`z&>$}lkA?=a!a&QWLC}U5_K23ST+~08Zyyjt{%sXshcT1=KR;H=gqu|`LWm5#^x|f`6(VA=h3(9 zY@>CXOwjScCv47iFTKTrev zn(uJ|-PZ4==iQG_K#(2zj;j0F--h*9q!|8h69}G_;YuccR&$kmM}r5Li7EaQjN4<( zMIxEi+KSi(w<0ajMr=<*@0>nLkfp^rc57B6kIHz78Y_$Wx1M1dIBv=%zpGqA8*RqW zs4YtSk7A;!_hrwO@GrLwWJl{F|K{Wrxy`IkdaTkk|VpRU9eo|34S%0O@F zA1&x?$l>s+zaZGv5+|$B+7p`ePA#kR_cWXZvDaMX@K_HIFOQ2{D4q1|&O6Z%7!kTl z`2K#}F@!f_Cz-$5R!#1FS~i;A-NH#u%6+#SlR40I)ODE9s1^ 
zH!iG{$trAd1k`l?P*UMCZi|YU1IwtAtKz*1ax7_Qz0_ptU;VK)V1MdzoY10SMc>a8 zlE7TEj-Tv$;XX>w-CEGVcWGMepdu+}HlM#FsE}WBpyizJ$9=BCAGR=%>iEP0|MIIirPMb{M4tZ1Kv>-cCTmZ?m=2uBeH&Ja=324;nVrl{7Asp6a zbrEf1Xf%-NY_rN-NuIMkzd1$arUF};*E;77h=L*EU7qX>zJ07I>jI~m!nt@ER~FD4 z9WCWA21M0fWm!v1=AC`N$L+nBCendi(ui-)X7xMv_he?YYPL%IG$VCuTHTKZ&Fr8?i4Mj7$hX9? zH233=qm;7l<`;CN6s5>v_FJUeW)-sn(#zaC=RqZMI;}=>!Q2+&C02(yH&L0;dop7m zHqS7->vI5zCg+15hM5#w>EEt%Go^WSA|O~eJUbFgJl9QUk>jkswRL&zxNZs39YQ3{ z_Tu+qH83?ZBVMa@*c>9R^*87ZA5IknZugFijI=E?AUQ`)%)Giv!_p28PIP8jR@=zX zFL!g4r#TndKMjy>`tq!^9|(7z{c7&3qW{aFpLqW7f`cP2`j1$Q`Trw1`?a(Fn~LBl zx}45BZOxV&L}LAx>&NIry6xrm)NbH#i-x$Q2yig@*nrCf4MXw1>ZlE59KHJVe_(pWakfr;eipXdxLZ# zjFu>79a@TPY!+I!Zt?L;$?h)cu(GgHt10M7-I!uIuHx02jwLioB&{uB-IK3PmNVN6 zKA9XYDVXS4j1o%LY%QNvYGCda=&4Wz0?ag0)w2ZoL8oW|DvRf_Mhm-SMuLy_sR}Gi z%r+F0C6j~`yJ=k8713fX3s{yR`-F>LHTc%@I^y+=k54Tv0IdXOuEE!}y}H8)$+uExQ%vM|R^hdp zJuD8GzO7<-LBII(N<-!^uPIuEYdG^>u;EOs6EfMY+ENd5PDKU5W=<+;;lyBYL-1qg zxfx>(ZFyM1aI&h)E+ZqXv3nBIzo;G+Po*zEx*WqoRWcAZHEbTYL|tPT@>a+=xtj@q zIW=_+kW<<8aiWc|P?M~mob*thwqz#WhGS^dG`qzcdAce#e79i%AA;jd6s`F0&&hAS z`e+_qDOgzcqSjr_u2^fh^1gK(i86H23+~t<~3E=o+iF>GCqk!CZS} zW)S{k*G)4(5_BTidb!>e0y{sKl*TTts8t}Z#pe33RQ>$hKv%{uMpp-U8qf4rn2COk zsEhlkIKaVQ*PS@946J?ov0bQ098C=43^bCObLhzcki2t5Fip8x3Ha)3*@E<-1qNa z606dfN$QLQ697%LUxGF*p`9A+Bfuzx>*SX8N$OnXjOy8tf73!(sj_EGbaaNc&cRs4 zW@T$(bR(r~#PPWRjI4Z%xmv@Dod|2+3>o`4gwNi)5DvdcB%g9B=0~I;1955C9BJ~_ zUsaE6F?d9P*e!Arwo1L&I_EcvZtPCyGaB7tEe69Hky-axIo=yf2m?k^H%4zSh)gOG z^q_p^k71p$=r%UtC%r3j3G@bgkR9@?MGaW>VYQ`;^I$wh!tMO#{Cc)cDMB|DCTbcX za-42~h=4^d&XuHFx#*uN6nGt*tO2k+IpnXv@ZDXIrxW7P>%6cT09?;WcWqh8pCE7a zf=QhLT+;&&+z&57m${`k*Sd!utCf`mkuAQXL_){K1M$wy+}GeH$)4KQsLCH3K*Y^S zgx5y%UZg@#P+ua^Da4(A?J+Tqozg7eKq zyc5W4r@J%VZJou!HqZ0WuZQqrEm0R=1X{|vpLf%(Re3n9g}>l>>XN>C>&)rPQ_!t^?xOx2$=y6+bnE%+kp zSPwomDh*AjC}~5-2aJkGh2Uq0;S~I~E2&u591HEm%O!cjwjUA5Bw%Lp1CAKh;(#9w zC!Ia`(oz$#w*hY@zV{TwCr!pdTeRJt@gF_)XN}7WJ-KGt8aX-ibO_iON8e&ZhA`ds zl&1HMkY0=GKpIvLxjhHYd=bLd{SP0?k31yfo6;bus6C``Le2eL$guu0w05DF(V@P> 
z4!f7{z7OgiiC}YQ@5RAMkKy%?iF9qYL$4OIjonMNhD!o?te1ZP(9gE@iZJ z;89t`>thM|fI$mN#e=EC-Tr*@F6;WMDdt7k&?{*8Z24b(>F+1Sk$;o=Er@}mhnwom zo{*(5V2@I`#-hrmb7OR@rYn8+Qg-fBR;&zjyeNi_i8LCSrcK=%_ZSHuNpR`;ok!r{ z7e@Porbl6)4+R8uKD9@9B}nBvFQR!AI9oRDJ)&;C5*E751sxd+6t>)I%ZYb_n{$o9 zkT;K75LNVebtaSY&i9uOX0^W+x943#6J4(hq|g>sbTx0Hf%8Uboz~=@*=qRE&(;r0 zyZRo9J9|xzgb}omT_a%UZ3)sjd#}oi)^yX~xR~3HgsQrI4{L5Rh-ShReZbcp`YoUD z1ng87{4IK(D5^Y@th!vBWY(iMFAjlgJ?w`)owtz$?@!NA z4J_kB`G%L;16(pNzmFAH(8~Xa`O;IseP#nLPpUYEgE1No9>OiIUMn5A@Zzd@?TO5n zP-ONY*>~?naW@D2K_wLGTphMtB7Tp7nPwFQ*aFQQ2?$AsBUwuZC~!P-BtI zWN@LTN*KP|rjY86SVMp>qlI--0oUKRKyEkiT*;x3mWx9~{rY(>d}r7pVl zFQ^|-Z0grjO(vl;cR16))bj_WMpTjFM2}2e$QEXPgL@Ry`h?< z=X?%<`10%tPaJ+$;7`c_09_5f!N*Y~9eiBN&-jka7%4+~0u)<1i>7UuW{q_B-lw83 zFq@e<+i|Nu8kpvZkDqeI9a>s4+mkstz$8UQhnv)srlhH0I{9{9J|>f{MI&HgDUy3r zndJe6qUBad6=S=ZJ}ay*aExEbfs~3QX=p0eIfLGsiq!AacmXK$BPH6;MVG@nuWM8X<7*j z$l>WS(Q2u-`=`XTo2I0ViC1mgbsF?(R)cv#K}5sSaLr8^G%2{9ptw@IB=;Ge zq?C{-P83*Ur1%?7@byRE?5|A1?K^L3=NdTWaAd#Rc#TJ<>FnIdePlUE7^&(w;dsP66;1x591o@XoZwPV)(IZAsv4Pkno^qzuxSHP@tzV`&nfG5U zg$qg74$>F$5b8L;&isO~2YU?cppUmW&hz?eRrqs?Mu8e=FPJkpQaOt=heNHWV9d)wdaCj%Kbw;wf4oBqxIoB)$=E`Zzt3 zqXJlksqeNJ3-WlZ=Rg|5Xzr9-*~Vk*-XA)rW{qTw_p9ohxjDlI%buGK!_P1Y?mJh1 zt1@hDWa{OdMg{2|&Q(PN2rgd(1*x@s6;Fm=KmgR%6pyT`8%Y*xZq}<}Ru;Y?kICTv zgx=un&{j<=Bh`%h(B0|5BA~NYWv?>X89f!J9hi(MKT2X&z{tG7*#B^M+}Od*l)A>r zv@_ZKZiw7|#kFmV;7gMe!;5*%k3cj_MO!OhSQsEI9%w111Zbm=nq_K-6hv{eT}*SE2OAqX9UW{- z@!tlGivfjkv2mJ0OEo^j-^!DtD-T8niZ=)qDw6=Fct}+zJKIG`brzBBG^Msb$}&`o z1%KbP^SvjSdErE&3!tQ(SYeG!t?)6?=j&u?eooUsesFH%QiXA*RbD7fnLQA21P!l# zXs{~?L?#gczg$R$X?K&i>b6Q6j^E0Rw%A@=M9r*G3`RuPbefDnSOJKdH}U2G?y{l% zjmPMX!bb2;h2?rzaWE*UQU-3};H+)C1n3nkme~yIYm)rnS)!0JZ1>niI%JMy30R{b zA(0Wy5f@6F#!n+$-{^d%+nboVR;21srJ-ECCt;iSyxi}`#TtPv<7J;Qy6Bv-CsJK{ z@L2$;lJdr}$MRGovLsSV*g6j>*(f>1GMe0?Rqk5y%vl-ZOc_UKE>YEd#Vi*%IY@pN zT%e)^iF3Mx*o9zezOw=!zUyOV8Q|cJAyhN*zp4Fb3ZM#?TOgxaEtyl-j3*W{b4Vc1 z6JWHMd|aa;X(*}T_a?s5-W~YbPTt_HL4srv;{bz%QyiL=yqd!i73!ehLj%6PvFgvu 
zhB#XB4kq0r%_ETgsJoSJTrL4wp9!%>(A?Xl@@G}!b@(p)dI6Qi-;LPk91el*gzNJx zM1xgajSby6_E-z{j~-2Hl4whoIFX)Mg~@fJFGt>1v^2d~T2!gDlM%Ph52VTc`muNz zP+5MHmxRm48s765c)yd zYiYx%LN~RKR({f`16m}EP?L?VXJ1CS_Kz15K!~e`H@NwRjNvU`oJIeabD`YRW(3<{K(6S!E$3 ztI@Xhp0znq2(zTH=9W;hP!(0BTYcK#M$a1?#i>XfH|6r6KyHgpWJ0kJP0DhK=4&v4 zG4i_a-ls?rotSU${FYy1+k-7i?kHs1nI~x%d z?Ug=FWGER}^**H<8d2sAtrV!%GsV^w;YH=?0yJiKRao!QN&|;S#)0-9Gs1yk4;oVZ zjL%;&3Ts-frOu--OuU=c>|LC-y_`-GGKYtnj>#JDsH)(cjs+au2}t7;&#h_n+FrDI zoBUqH;|3MKiC|4z3hmSin$nvtLt?<>X%=*Xre8F33OXKzi|AG_yWJ#xbrSD%zTR9f)LFTxk0ltkM z7dis67Hsb-ZTVNc2eVyeoa2^a>sjb%vWj17u|ujigW7mRk2IrW*>=g_%od8TKuAW2 zfz1v>oMIB%)s1qb1HAlM z=}+=_fW4;-J&;3#+kj4{uRDBo??gbtl)rZ$cdm$l#9F!8s-ii9)ihNEH`$(l z)>5_SiM`Llgv99P<3x_elT?&>7JZ9&_T%@%Hp^&`grUYV`Um9zgTK`E+{plsU!g~y zY+F3Nt7P)YU7vbw+A@2sZKU?|nNdHSP(=bw>dT0hd++fU$bRwWXmk~saqW`$F~Z>5 z;B9;e^ge51)ovl8dAHC6E_kTBygxL838|=N3EsQfWIi&9!}O|^36fpYFvqalWtAm{_^b1~>2rnd-DwPG%nQ>2mf>77+gwQryimXuJQX@{(* zM9L&H(c-Wt%NbV&25PKFxw_=Ry;nXV;xS{0HTd|t+KdkBHKc1})Qoc12}?L->Rc0j zEgmzK^2Bg&5zcR%=x4@rMO^e$fdWF4pBs?ZwHc5%)%MP--O0CbP*u*`lTlNboJUPd zdB4Zof=o;U>A9m9F2AyhnXG$I*92Bf<9O4*@F8wVT}iQu-+eER;PqRPX+2EOcLwW| z6jT`kgTOa>lAnfm<_~2bsVlzS-VBWdQ&aET$HkL7S~5M*(OSoK%6cfx=ZbC-dX7e_ znYp{D&E@Fm59=gQ;T2xXg7EskZbu3pt> zoZDnt;F!m$_vi>GJmOv||3Qr%#9UT9`vi*1|4CRVKNr+z96am5V>y6hrvd?HW5Ost zd#>2r9YsiTG#YkwCLzVQ@npYGOT^F*f&*4V+j{8a+v&i201K(cp8{JQKCMZQRi-~Q zH87#KAun9lGG@^F-E7Lk#shI=Hf6zDEHE#Y7w1Xh%olUih|Mu8lTV*py~1unb`U0q zx0#cChZ2XSW-3wfK{R9h^ZB}7Rs$Kj!b`g%et~)&b zIqbVNwn;5{RuV~s{AROt-MsS0LDU-Kq7Db0esi&^ifkCms$~OI zDx;@Iy(C36%aNxXZAA&E#B8E`W&ELUXLTIHtPKBUrozeX24+~pxhfw|?b_FkG6bnz z+Ps&j_f@9*yv~vnTrhwHMcT{N0F}3c2(6TezBFg2d z0?1%Rtu!rF7}nNdeTWU&6LEx zAxL>@(Rve|!nt>fh-f7mMNW0JSXbnzJD&8}V`q&*i##_PUTY-*cMyGHp9aaGSM2gy z*a!ZB%}K@36277y%*|jX7>}(OyOg**efTkN#Dnio7|m#MEVD6AXhW>oW`fB+T)!+`oZKF~oC1ueiR%5R31;t37Sp zd#dd}nqB$DVA9MhYIrv(8`Uw3D~&kELQSLGG4g6s{_7=OY~6)5!NNE%C@EO{npXJu?Fj0 z8ls@A3$gHN_G({UtU`&e+Y1!gKZZAZY)#qZi1@~u2_9pyQUvM~PIeaD?}yP<+4r-K 
zy*IFR7XT>6&Fm4Vv}gtu&|Ue9H{Qckqf)O9mpR7!@iFI%9x3da81T)$OL((qUEfhz zfQpKs9QTu$CmG73dKQy*bTs1Kdu~MuLvAVe%ud0>&@VWpS;5+Xz7Ges8T$mp9SKYn z*Lfz-+yK1S?+EDJNu}=v4VcCzd0)7{5#T74)Riev)ljK4Vwax^jg?gQ)dvPy>EYoW zlzdf0-S3lVWHe>!y7jex6!IuwAQlXKV2&ub|HknCR8d`M6zRjMo?xPHLMVo~_Ih{2 z`sHCRuFHY`18=FQuCUTANp-DyRVTHxlFR6}&)yZUn8&vE$)d0j2X$#!r$Nr2=9B1N zOcw3!+q>}*mD`wbWLUp1W`3z;MXKak{QNGnwuN_7S3zO5z~T!|@7Z+a8CM|73Z-Sv zVjg&; zPSlOoWf0<4>|2}NA~J%x$)Dl5+NU8hHMslpG^<BETza%htejMgbc~d%y@F-X~jR5k@9Qr2B zQ|%>b@5XNL@o(A)6u{jM%#XG@hGw^0%O*=x-N>C6Q?3tY_s?s=Q!2O8s(x!>JcTV+ z6t4s-;Prh15|grWn5|yT<-QsrJ=zK4erV#5Oh!sV8a zr`&->+B79rQ7_h1$a`8<2@=ht^-a;nMb{m&nN z)x^wtT%K%HPv7~wwOqwL+(jvF3ko z^J7ltvAXLK!~Ca14}7$vnbyIsu;o*W;%-g*PW11XXXx<)dk2<~4{5rXp62$=NeznP zrTyO!ALd@Flr?Dsw0MGTLb!O@EsDh+HuVzz)9DfbOYu~9OroyjC<|;%jYF;DM5s(P z-eLwi2Ficn@C<~^mqH=Z`njJmDQuufqgv;VQL5Tl`6VZI>B7@u{A9h!oZpl1Wuv{j ziRJL)3lfZ6+vV_|BkQYBFDzZ!%vHGA-|v`8e~fY|WOrbk3OZKLVy+<3ev758$20w; z<(s`qoY7vBPdH%eX)*51!R)vjq{6v)s!r;1n!@4o@c+9BFiDSFr#(y-&O2Zeoh4(f zigf9+%XVseNc*i9$H^Pp3Ju^0=(7umhTscX3@J9H>+yGsEpS7*_^9QNRk(CVu1*NT zKIyWnc)^?8YyFr=sb7gT9B_4!Y6X&)@`+T}-(T;upk#BZ{?HZ{KzVt!W<%Wi@#6gs z+nn;)C3fqWC%EZA*dF4vKDf6bY6-12oU!3r$|0x^NhcFpA--MC@3OG5;&f)yLir4ic6XgxqKU7X0<&ICemQ%< zjX&oSbVl*=3Oz=bfp*tPzyF!-X4qusGImqVLAcgVx9HPIchTcdy~2~UoTBzjlVCge zUIzw!QvMeT@CuJMt7*#LmGwv;fHC8V4XtIJ%HEQOltIu3K- zB&u@SX2%UiFfq3TvlJ^C3-7VEt}=cfV7UsJh`D9TEwd$8G%wgOwKZLjRm#=jKfXo! zkEW}H1>=6K=T=!FJx?r)!Tl0kkeLIi+q;_i?Db`iIta@D49pkAO?NMNy1jPyi*7IU zRoTaCp_#_JCu+vNiqig!Zdy&b48dPmwg=B^QamCGb>8XJ`GeJr~R5sub@? 
z&P)YL3QN3#nkG3b~i?$-7M(TmAv6*qAr(fsn_JO$|_#)#GNCVMu=fngH z?6ken#j5L~$J98PA&ST{K-6~hN_owknOQ7gUWy9s@U4i7oD5FxDrg|C1jZL{E={hX z?hz7c*9T7NSBuP;Uqpe12rrw9becj&7yk9F4dNWuI=JXKV>Jv)d-ftE z{JI=X&wRK$u>O#|*7IN(P{VV+U+&%l-CSNy70${-Ya8O6Zj5kRtS!{|vUr9W8?L+s zDOu&6z~)_ls8u3(delPX2vu0Lz52u&3O{HTB$dcf6sz4XEtJuU>}cy2UX}rMENV6q z*xQZ5CMcDJ;loL2)1tTq!1F5t#s<$G%h;Nflc?G*08ny2D&yhN7Ow%~rmK%tR|HyF zz9({Cuhm7*b(-R^{4F;kApIIJnOkgi7&Z6ukrI5!ce4c#akYRa>h)tZeV#FCfy4Dk z37d=`K04Y9a^FvMpj^17$87wr4Uk#)WvP4?=h^5JetViMiYS$aUzZ}Zo*kWF#<7AY zUJ<6|aR*7<=7Llnyj#$nIsgN~uF^l0l1gqDqb@I9!=#kF`cJ!r`LtMC5nibU&mL58 z99c5qrmoguoF)p_oBGH}zYMrf)qZb`p=W!0?>q4sM)aQYMvpKgl+iFC=*7WhpmT$S>u{Lp_{RXKe#g(-R_-w|$?$>ImTs zZO%J!aa7cGyxz)>y4%?Ai^UTVZj#`fG>Y`gAtnFj?rSX^>Ms1yx-Uk4BUC<}hIf^C z!=(_sz0G^~$Z}RVevqeJlfGbU0?Wld&*1N?Wvp%KG5yeL6SA#3%wZ5pH&c z+S)yDxruP^un?yC)qRTdK8(J5sK;W|v4hpHtNn(oUjF%6d64bo5W?j0+bBx(Ygiu5 z_O#MO;B51O&l|hr*PoCIg~&E*y<07x)MxZfNk|gLqtv%M2e)o0A3IA-{dt3zR8Vpf zRbPM7H+{D%yfat`DY4|x93T$Rd$&RbB`@?1 z2aO|bcCFw%;22|j5VZXU#$qhVp)!kEaF-PGezc#gRQ1BL&Arv^vPEqQ&vzdVqi%3! 
z_-tbh9kyxFf>3a>i&K176Owi5GW3uV|I*+s?7{=TuK60YO*CfxQc_sz&d4@AH*UhtLM!%9fm;_m$QQKf|iUJ$c>Go)=@{@!ojr>uqBM9XhD~ z?1cfL;_ibxI% zY*Gj|*pQ+ybx}~isKx0e7?&9FgEVXC>bEzXPCYdOHQdP|n3~P^r?ogDK9!!T=z08% zG(^5^{3PJ2@$+ay3iVAHo7C*dmr&DBW}KrBmN;M&_EmDdnPJe<6vrz@i`Qd98gtRo zi`~ruljBb#c;-=)_TR{%I%P-p##fCKl-LQnyi2FK0b#;(CVJ`x^G&f=}t& zA4*<|y&kT_s0g@T5FZh6Ztci!DiwSVc%s%{?y>hNHx1(9wCq_rvQ)@_njmIz_k*$y z{mVv?Y2~E_HKLmhq{zCvK5_brQ`3t9-YaF|?(Ux6Ih18k?=xWPtRAX~VwH}G;)wF( z!5x$`AvTn+>~Ua|&gVIFE7m+^s0GEX!dRZ|cYKFHPbZ86I(ux{)og=ja>RmC#nQn#O57)kM<^BfKgN#K8B9+2U!sidCOc$?m>Vkn$m9>`71~L@hjOg_j_J|oxRbTE z1ad6WRKq1ph4j=!S$A5U0QI@J$MJ^fFTV0_Pb$qNSUfX3M#V;xZUqc>3$+{80Pu|D zu$ss8Vgs|l_}b-0nbczZBl$R=3MJWb-MrJO7Q0Pw#Z0i~s!`f=ldl-6vqS4@PqZiE15DIB^AR{H)rz1GyeK0@KSy z6OU*fEQdnmt@#pfA8@CX^F80K&+vU$gvT?r{5nIQUYP@L1yQD6_caBJ>M%$v{eu#7 zM{Y-^TkjXNS0xGqCSn4F8eCsi?>(G5^~$>u(bbR>8rZ{MR;#=h{DZc^=vBIov)mo; zG3==0XXjCinz|}qO^bPF;&ct%pN%Vl{7!~R#GZ+=FpT2NrJ{5u`t^ zc{B)hCb*P%{vJ(QfddzVgKN}YQ^(#>a)!PqBQ0~##+X5cUZg=Raym~JM~+<1{5@~= zaiv2xuSk8f;}W&I*Xzp6`zV_8&jjMpC({C~VNeZhCE8^r9X!dA;k4vLIixo@ilM6< z?>cLmUpQ*FjP%6g_xk--U3L~@=HOv>UE#(j*-+uBXFRX7&``YHw0gtmKhWf&#x@5i zI({mRJ@TC;OdPZ#n3S--oaCt5pp(XxYHUs=EV|!7mYlcWFXQy+#*j^~YJvXCUdrcB zXNGxP_GY92{V+y)sXO)J+Z@>&@ilfUQs^FzqaR!KS}I1;l$I5BPU0H&bX2+SpnR9J zD#rY3J%6Y2aTA~4Ca&lidE$r?QEnx0#*2k`s;v1fW{gy7`moH_2h5m;*O}kG;~#lA zyY-KoWX4u1e>ca(!C z%wK**@m78P?bY~jo4pWQn_b)3&H6ao8vpM9W9%(};@H;j?ZFe=A-KB*4Nmaj?(XjH z5+JxkaCdk2;O_43?)FX2x%b?2?(cq8|0)V7=<3?NXLe8Twcho-bB3{A+RWSr^JcJj zUjE#iXNUN^{H%Ii+sHkX=|kXh7F4}qx$usC0rp^lNYNBkR8(pZ0Mxu}3IG6lgBQI1 zV!J%CK@?I(FUKSh63|zR4cWJK)4E8>1xp_x+6z}b79DWm<7<;RkOYX56auXhE*LMq z5X3^06euUSO0S7_IdsHijxjAcr30wo(BDdf#Jx~j=PbCh=HtB&$T7(V#0a(Da==Oy z=$cW);W%KUzcYlqsyC2@^6blNHPo(sGcZdy7> z2A=c%H1FzENfM49@;=N4DJiMfj^ZbpT7 za4>K{L!>Cf5G&is6gh@SDsFJ+FGJdKM2V_vm+>S<1j4|3AL4PDf({si>ShuU)=BJo z;*-XvRv)9Ps?4JJU{YN!qs=Jbiq1c|QB?Tf61Q*{qW2pPCqDX6j8kz!MX4(t!DVnw zd9_m*52{0`P8Sc7PbNPMwFkN8sck~18MSeo(B7hKT#rpmOfVCS 
z?&!%@7~c7&<^$#Ht|m<)aG7U353y(dOuSWiZjN0y8G}v95pxlze$U%RPMtT4 zG%IpVKHp?3A82)g4F+S?ubB#@=^cJrn?{X5n4H$rdF)o{tKW041EQ3b1gMI!43L%A z-2t7fS@=AZ)d2-MbQwVbyrY8}aB|k9E7h=;HG|P%y+vR?Lu#G5pkUpsDy7p`qU{O3 z9*lfps9%I!bEq{yxn$T=b(myi)Rqp$_FyiHN~4j=;lG^^TMs9<$7$XpXMY4dHeLU| zB4>0pCz8xng7}2Gb}!-eD5YbJQn_|{_qd8Ls!oX=u7Wx_6y9bJBA?RavO`E?lZrre{*Q34 zxcYY=K@^GJF(738{wH>9j2lK`N^YNHcXK&90sHq+!jfro)JURG3Ves=)g~YB$|O3C zjV{<^ry4T~%=S(j6jhbhOT`H-4tEU85?2{jS#kbBm1;;3#-T1Ww3a|(2kQOSSV$`x zIrv&Ka^6xSkKqrRo_rEPBh1#9bu8`h51~da%3A!>R)8#|<}Cchp%>l!Z2{*degqsa z@zcN&(Or7wDQ++#2$`(F(>C=BNUOX${B}CIZA=wOyl8o71J5={RbX zdGWo>ah-S7D|6h<&&=d?YLnbTGSh0gZr1tzZF(*!P=iw_CU2y-A zC?I~n_R=YS*8?YqLSptqNKqi}l#9CNp5p7%*Bw$vul2Hhc+XuO?2#_=ydb2e-ev{M zreEIvC$AE^2*tTka++$JrsG9hv^U?7T07-ZBkJ-$}J5| zIA(*GBS}(j+oh`}o0|>#0&-MV6fZTvQdmT4H(PBO58qJyWbI&0T{AbNKr7eF3%oNh z^~d+_jB_1`;&^Oko(N52X$-DV8}22&w&xc_oSLbWV~t7j)(VDRHCxM9%!gUy$4Q43Ur3oLl1sA(hfDU8Ih&Llj8yj4uDE%%bM5U%{H)+{ll$r{n};qB z2PS=6)usSu>~&?IGd0MH&@*2YR` zDH(JMnzeYtQEpWu$rx8f_3|i|s&c&Ck22C^rNaDLyUA*h%CzcAhc+d}@51->jOHa_ z{Lxe|ml*tSFVott8+oUZZ5r$H)`=po?vZdq+O59)<&^r-;Y0c9^lLoTAJ3~au`lq{ z|FSp?EM4!XI_O)k&(zGT5|}+M0)st!PF3d4#{Qpp*E}vLQPWn3e5MTVkR%vD&IST?ju^R{`B6 zBm;dLQ{vRzk4RLhyw@8T2pQVlJHIWI@`$!aUJv(*_Z2(C3JcU9wA*4StxC_b2E9dS zLrKO64&GMlJ(KKVD-i&wSNvv%t}LAJwJ~;(&alpo#tc~68qeQDJDJm*94Ha zD@?yqkt^5BJ*ysWnTVL*S9;fz;iE?;m$=Ws{(W zm`bGg++JhD0q3Sm8v`P!+Oe(5L&_#sdwSlN-p}dQ92Yaw_w_sDC}h9_4Udh544D|4vsTkPy8y}9z0LyZlCuH~lJ5lWZ zhahHJ36JyPAgus0Gbi2q3gPqV7hi^AcS` z5=pb72rWcUeD5(45CFSAIuT2uP2auPS#TxA6p5w$r2Y0y_N!;6nRz_Xh1d1Eox#Lx z1;XQq7<9E!!DO5hxSbmgvDvll`b6Y(X`z=M(_}i-TGqs#s8Gnrd^>;#Syys*KW?1H zUhFPkG;-nKrcPWyw@2XFWlMOrh58SCH%9mO$cR-?;}>n$yO$i=aYQAb%D!u3cqeL> zWv)$p+kL{J2`=#vw;#MW_d$CcONBIb_me&D$EFw_L+83_0`+;j4AO4?$N@T4bOz?~ z%*w=P{J>8&22?ii2u=q87v~9r^k-%R=rlarJS5`jY((?np6a5^<>4j~NLW;MA*Be5 z64a09gVA?Xta1QA_2B+sdHd~%e|MoJ_jM)lWhV@jx)iU?c$2k|<$hcOo@}C(+uGe} zzHy`)wo&Bj^=Uq_G>dDn2V$9RrJbf4gFfNjI0*hI|z7_0l`TI`dNXmOe%yV+V 
z`;(=FfD^@{N`l0h`{PV|b)94GaKbsz#p*u2aG%J2HH2eQ9n*pSat1wKb(x^egNXv==Vg%KuWEP5c(XJz%Cc{rzkJsD4fG~?+>;Q~XiR?Di@ zyOL8(-1Gieq8VnG(B@s3xe%=;*d$ood^^&BL+%vtO53A-#|oZa*DyE7#mSDP>*)Eq z3aKb1G-#=x&=J})b$SL}D#9rLi)vQovpHnwg694>F8hX;Y1Y@(yS##oO49*6YM#{% zHaUGkL<0d|WecR8Vu8A|yY+YX^&923eSF>)2In=->g(VPd<%9qoAJo;6HkW%tmM)z z;}LvCmF=d7OmF6X<|A4uaXM59K$%`x1P_y<>$+_F1R>_hgd{BFRsgW_$o%|v(eYcN zzMwJFMNzP8bDJiVMU2w}P!5fSEFtXA&s;ucm1u-i@a4mZ$8!?8;j!mR)|o~c+GMEQ zwDI%r_Q#**b!_;zC<8@DIHuw2{VbXIs z*H5i^6eHtH3R^>1npHKf%y_S0Hh=)Z-OFZX2?$rZ{+Ff=*E@Km-p;{nbGeonT7lk& z_xw-s8xb&j*O?RK{Ib{i3Y2P>r@&djqHz(#;N~%LUxfkba4kVh9*KdV0G%9S2kVKl zAlnM^F1X+^{1id524wiE)Vp`Dao{Wt@R)ib0(8!pbx~*LP%=A(1-c9fBX|pf6NP0B<9R6CorA|M8~Ge%O~bcwJZTbv(?H zBdoFY^$3|s*>?;;XZM*iFP9lmQpRUB`J+Nu0xY4tZ*IoygU%igRgayWW+odz00h_= zWKNxFq|@J2@;~*H* zhCZe7h+F+)8oE>xK5$CMEFu+1#l62V)*x(Q#l;Re9&yt$5;F~h`))l{FBWYhibPqk zA`0?L5^tcq9s5V&D+;^fN>DuS=~~JyDS9cVcDU?a0k%$Z)Jx>B%3P?8tzNhh@f%Xa zCdy=yly_lAXlUcZe74D*#du(hk8-abT<=tVR)faIYW??1 z?^V%ABrrNQD4dEUFk$R7lgVj{a9boS(mBV4WoYTi;iWM~K^%Rz*yt^||M9oFEe)9_ zP{ijt;)wi^KK5-T6fA*`jURp&ptZ(-4BYF5;(T5vXUgTrx_&VBA z(18f|Rl;rA2KXt^fU+|$bB>r`Os-c*QMk9cqXTD?n$O1GROXY&X4e`(+b<0lAF)Kt zD%F4+@j$-0vHqy{C;BP=kK&pAovV7R5ZvGplqQnJ2{$|r+x2-avxV!0$+?)_p=1nb z0N*roE>&$KMSA)KLME{v3 z=l~eaFToai#B{5XHivOWcOE|iQQC{de#D(40T!7f$nZcp`OfRb!=%OU9rv(&3G)uX zW+XuI6+Z9L!U6or4-66-Fjb}T)sUH!Mwqs8bLhF+fAQkO4Wy?V6}CMb39T2P@5E-| zfaKW7{pn=HV2SDTIy_eFsILBlZKQh`6Jq2*0bKsw-5pO*l5S7B;F!sjY(NoE7sdRK z=K;SMZ0;fSHv!h;4#)ILV=Jhgc-r-7%i-=US$o@T+41;fdVlQvL9&O(9ZZ9Z{1T7Z z)9!hzcVvzmVfE~^1}|Tty`VK8Qcmvf#m`6t22fl3)qxJdtXS7Q7Ktoz$eJM_0e`C-s+J?5y%%DD5Rt%VR~c z#qByBAY}eyNB~wf+1uI8uv&?b4`>Xmpa4F)HWTHeQKwIJEn%SSJ37(KpEgOU!JCDc z`0Mh1#MVn*TKGty*{*&oiWiN`@*V)T_!-%abVj$H&Vr3dHz2_@FiGF7&3oFF-;{0B z7X^EPNE*S22Q!S1jfxOfvA52k5PNL0pIEfzU@-C|s=Zcj0~1?9R|GcS4SD@KmH0sx zFflWk3+X72&C98rcwbB?L2fA~Bd=PbM0^SP33OUd^J1GSzh&GqG2hUYE(5urzr=bh zoGxM-^Q74;gL9~a28^4iLH^f3_ChZrm;(eqU{T@0WNh=Z^b?t3zDYAEA#l67E2^F+ 
z_(j*+M^%m(0)Xi3NMyDZMIWGaZMF7%v=~59cU}ZpWssy+LeITWSQtr~l#t7jET*p~bAalS7?$ag5@WpFD*V;PJiGysOI20ts13B4d9 z0|&|#4yUm`X06v9;xxE=Tt~Vp&iLaVJ28+rjQwpP*5k@TpGZ|cit6e@|{lu#f z5Rf%+2I};#RmE#kcH0KA*8r#|((i_Oq0zW#r}%=3l(7wt#HB2@bl((-hXDo#0` z3p?tVTN3spo<%SrVL8YsnMEr58*NNYTN;HvQP-`u#Q;}uQ?giQrQTd)!_O6D8H{oM ze%~!vmQJC|GnnrM*u!U!z^xt2qTMHNSJPPHpGip$v z`GYD0rz~)Hifem|n>q#|M<J8`}j40pxH@n08LC@{9R_0 z)i@&>6adsnfot6a4OR5fFm`WjoQ7|BMb%{SYp_H*x1EdIc#TNeCa`qH%p7=ZpR{UA$rxf$Ut_jzMPx->{jlR;H9$ZQF#+IYWng~1(9?Mzlc zskqLLfI_S!%K&yB^W+uM-Dzc-cL=THPV_I6V&F}xv~y=MF#ulHdR6Mx)Ct6gm#yF2 zAY4tX8x^4*lapuH(VuBYN6vx>LI%2=p$M`W)u~aLDkxtOkg4XOs=cK+EyS4lb4gFp z7mo8?MJP}Zga?gFYJs%CsjiF2w0dsvK)8@rIJK6AKyFvbv_zJQ8A5>>uBp1AZm->o z34kqCdD?4V|@X0#IqvJ`i#wnp~!)A*MJi zBj$BBG6GsVk{*!19BNHd20I)qnJ-WJ2|fBYkN`Z?H}jLJ8nQiF3nH&8^N34Cvm;O_ z!jPPA4paPSi$wQ1b*o5%pf_J`M0Dsl%3aSF+_WSPO{=~kLRbwGT~?>e#*lRUqQ3fs9_-qFYnpY;&L_q;Mo>>M<|Jm(Qew_vcTMaWZ>Gc4C8;FQ9$?_Wsd4W1?pG&S#r*|1^>%M(>`U0 z*{j^fM|J8>jTc}kd=UQOUy}WQYUSgkv|T&K*UqUU&|%MNP6|N1yn%&7pWHpMM2Cj= z`-r+E2zyx1?wOKPRasDBwRIYUX)t}Kk_vOWGKUW9*-bL~nT94Dj4SjzhR!dAi*O9D z4-JHhb?Tl+e?4l85FjJ6YbYcD&c#W^JL|e+Vk% zZ7w*S)!gr}FZ{aZ&To`%-z=p8o^H*FY31|sSkUqLqC|OoEZtp4x8WWNh^}vbUhp07 z1uVe_ECa?wYs)`xM6 zug6XdZ7Uy|1ybSJ9CARf^!MS4-=;`& zcZZAelcJiDr7KHj6~`^pto|y6XYh{@Rqmp>GVkMuAb3m!%F=Gd_kOI7h9D%Q~KfE#j{3_m+^nt|~&(<8j<8hgdg;oF8VcmDOEv~v(FT~lg z(#k$44$;?MJb*1FWn9*GJ!$u_-wFmi3m>=P2SBDNIkGx~h1Z&TK&jmI69^xpXZfcZg(6VOBQKJm59+;Tw5OOiOh~(+SSK#pN6_vf0h^@B6%-OwXyMD#Jt-0E7(onkSF5IJZBjKhGKnSxKZSah=KZjF9QPo+!8P$PTe-r1dispc*?3L)` z0S#od`Lcqxo_t@7Gixl*jduKW*S8eTS;u;$Dt5meX9qe@Kh;^fZp6?FNzC6bti?eRUgEA^9&)O3vF+&rMhVX z^CUA=%vs81*F5vD;>YgSy*p$mY;c^fN7*W}#0C_Bf;O$Sp8a%;x0C57n%8^j9o71z z<5aY;Ze-1yxr;Rc^$gegma`OUC4R*}vjc`iXLY94EVWL zGc8*J-m}XcF1v9p&7e}5O>MLz`lzQIB@CwVlY16!M+@a1JJNZxNsSp4tD8G#)9P;> zk*B!T=4?~lZsF%D9it(tE#Sf zTuTN>1H%mn^Kln_*w!l!(~`T1z~<)N(hUF}VC^|8C>(yPZkeJA*6*L2Y0mR%p=d^i zIX1wm^|*@d2V47N%3HGY+qzW z9gkumo3zsY#4NAIpBMj{+qLs7bysDL@4`{C_IOLE-2e2Z8fn)Lv?@i&Vd3Y6*%j5Z 
zx;v;!2i0PE-*1%hTuh(!>FJ-n;g3`#6`JofZcJRD8^eZHN@z!y5Mho3m#aQ3ov0UR zhrAcPq=w~>wTupp!z3owXD`u$gs&lCs+IUxFob?BLW-uQ7jCSMDzmtvxt%DXe4~%$ zxHgN2ZyOTfL)@W{l63F57))ecub(s1$l2ijr;c4+rE={%!Ra4z?97giH9^6=Z1EjlgQj%VpVIilzl3AdJ+sC}UPLG?yHO|WjIUt- zTVc(JpNYXtiuk+oyL44)L3MCvcNPr-CL|RdA1f)$f60ux$5g!f_Ew&KM6}7xKBZOl zk;R2Gr6o=~5@WtXS)R?zWcTy`iPtRJSg4vqM4=h-e4f}2jLy@nh48UhVabFBZ!Wy4 zq|QcYtikL5F=mvzh&tbx(xUzET7c8jbE>eQo{#X**8x(AEnt~M#ZQeOzeTL{J@!sa zLIvy5o*d5!ud(8OgK7UtJdOIR=diPuTKDdf$J-EsYy1LV@!z9O8XxBm-}4R4Ku@mQbIi5g`VEt z#?2t^P2V5Oel3vZDBe50#5Y*i994K*8{MEPM(fjsqm8ZFnY!Ju^m$-|#df$DPNylo z-U7~TE81cv)#30#y%iP$S7jm z@8urP10Lqv?~T?YA>Yo&nO|2pmpqP>)~snxo=T;-Js0lYUtBBlf<5c4JMKkyn%1@M zi^f-{0FO*i=Nvb7TZ#0o3<;~IljHx)odmw%r@!<+<9pnkoIbyoo5JvUZGG#1vkr(1 zeuV9LS5JE-3BERS1EM*C6@Rj3#;UudI6?U!HLr+HPOn@t<~Q-n~xi z=GW&|`y^m_kg+^Z>wZypc#zhVP0Lgho}GM5Z;>)TSg5Z(2jXXWKd~p6cHB;Vd^aEM z#vDReRytiKS)8%CaDR75eS0xhZvX&*RfxZXVMFV(8(Zn;dA#{FD76Y1a;uMST5r>Ooe*K&8Zf3>owV3 zQaOPMOw}xb5S1;|3%PzGWkWu`fN9S>{9ca1p7P~0dpKkeTfYG$9t76Mmm~5S4!j2YA1ytVMBU*V7Xd%<`pS~vM zI%#5e=MkV$*m=1vpt+5gK5J`Z9MMS5Z!v5?R?O$%N$*M2O)7?KI-Ia_JT_+jDn}{E zb~sl>C$FZf%ZrN2cg>G4)Y8xrfzM`37-56W2C4wvKtLm&tS@^H1hO=3XGKQUhM@_xS zUA(-sa*B3fq~E^GCII7z^QgWSuoP|2CNnxJODLwHCQGBmSvn7(J(31p6mJT~_7gmj z#&KDPV_FT0FkILUUr?IQ?xMW;jqUFa<^EHx#^)Q%fdyNfDwXttI?l3W;n8_s0K?w5 ziFPc-lHyQwp%X=C;1U}P^2e^$E62ik?2oc7O?kJ?!;TGTxWCHn2EmNP^}!=sA$%NY zFuz^O42({ zw@L!4`n^1#qm|)V!|@vT3-+D4!TIXAFAg?wJJ@aBOYHCKnL(IU{l|au7TXBWJtBVm ztZ;1g{@QUb={@WIc3A^szyD3KA)U-2g;=}(>{a$ei-jiatObZmN!+b{zjv22|PGN&mY{lgq@bUX7AOHjs%}nTYa5#AVK-;@&t#Pk**O808nX0E?Z>+pQirHm;|>-(jm|?$+c^`)5_3HbohurtG5$V#P7_g zrLy_p zrOCBqf?!fj%p&6Sxl{!&qX(uV<>z}DNHM=Yw zP!lwLhnu82QcF1qTNB{Z0QC55G*?Ce_;?@DBxS@fu*VY}Iu&kVkXGC_gZbC1U)Zv4 z*z*2*aK0&`z^q#PXTu35iE{b6z~Coo1k4*XviYK+aQBr@%2mlnb2g7%M>Fvx)c6?x zwkY(t9&|KSBd<`WTrV~fH_pflv^KmR%?S-G-jD1=7qfP*ZQ1%Xk@!yW*6E|5TWv$r zboHFi-*ne@+TB!wMW8pUq$N!F&4PqgsTDFs0CcyAO1JqTb=uqf--cW=`QVQq>CJQN z8mew2&=tx0yDZAPTXY$G4Lo!zst|>?hsdt|mf1Y5N4wI=ga2?v88G7TCdpci)p-}u 
zs!>{SP1y=#tf@AHNm8K5nK)v0Fx~B25?0_@=C#bf`{La~&tVZOtoja=!}B{U)+@H< z-+ap^8dx>@9mP9rK0D9JRscR;Zq>HuV!EN-IiO`)gOR| zaX2*R?X5Nra@6<~R3m*=>K+f*x~qGnp~MgX*FDIRZ~oV-NV>wb{P;8OE3^@fKLtr01y}m zM49X3VgY^j`=Aj&_P8-m#!d8AT_=POyvC)zQHoL#D;f;tl>dgdMsH3@u}Br7~}Cgdx=4$jKI)%oZiH#7QiQtEx3P`nU7V^R^&TXcwN&A1zXtQcp{?^IrTf_ zG|Ow1IRl3UmzVFeUo)LsOtdOUzyG&z#`R<9^o!&w9N;K|%nY4?x~@6D7!Vq{ba0yU zpm4~J#?{lQ0PjNxJNY-$%J2PX45{Wp!q2kSWks0k@LrHJ=tON%9|ZF$2o@;zX}r0R zKQg&9!qf1m1AR4srY|DjWfv~qb*{CXQAj>DA}BCMG%{#3K-ufcXr>T{iZ8Tg03b`j zf`^ODvm?v!-zpFdGU@dnw3uMmK*S0;PRnD1*02~DpQ1`L#87_t5ul1Q_sw_#2UGC< zxcmRawZQ*luD)&Ms9C^b*h3V!5Cgjb0yiuT_v|CE*mwj`*E%*g%XKd<$k2ct4NI!Q z8}OA;NGCo+0|LT95>^hPxV=>5umW;{k2$MST`pzOumCqrgJBZI&h&buIOrvvEB`ktILRcpBPp8bih`=89?O)#$hpm~(au zwu%<_u6!xHWXHC?1l&NTCz`2j_<{h!tHvP#92@=fG_vO}4j2#s4gf{os2p>vpN^?` zj5)4y@6V?dC$ir$mF?P80=Z9apEDvFynjo?T`4c&B7P`x4E}j0VA;4wX6N zJwQ!EmuhEfzrDNUUDH=}R-F{qBklzv2~bb{c6xrvlKT&yh!k!D0};bj z9~p^=7EB70yGssLTt{nc`Pnw92wDUa`ZU|F5`P{Zd=vKj8n~w_H5|(71(F}Zz1%!l z9y3hsZkl8zlSfNKMWaGj1QILT?{xRGE?W!fptx!71mn(QQ%&EZv>7Z~6ZiXB@eV9o zx?Jf8fR2cC@sr;xe=23%^ud5o-2tWbA}xB{%*%Sg+ao zMWvO$C_*HJzA=mcaBF{1$U|m{Tsf06*!VW(p_gSqjMjpl1)Cc~#2dOwXKz*5ie2F$ z7LtA2Z9!w+n>SRVn14>p?t7&`x+Pc~iiPUKzzg$rx|vGXOKz{U$zzQAG44b71GvUi z3+i~jxfO|-McD4}BW+$yFbkk!L2Ei!zbgj?GRT}(-iA^}IAh1Eu>(K>^x9-v{cC&c zuU}$C#^l}8Edd*xYMk}FP7dq!y;<4DO;*Q$&$Sp3(l7p(=ZOAm$5<*u>wsZ>ir^7g zjr^;)_}l6SMNTcA|71y2W&Ytc3eM)3v7&wag_YL(o}8EByX&Ikv+aYlelRMH2gj72 zW2MP@crP>!qECdGl+EG3>ZXS-WXggn1MK3NDYLP-ImJ+t@MVsODmhRiD~CcoS9H5R{TNUQbv~D`-c`1`xs=B>o*OKW-M0ijrf9aC%3UJr;ydzw#dYWYJdo zodSa~3SRbx3LvAF(&EnWrFi!%%no;bGz=yD4Y0WFw>_uIp)OPc@X1y{g#hq@yg)(l zJ;ef@Py#n6N7FC|hN5hn@-e+stPjExYN0hjpiSCS#Cae4uLt@1wr0AvYLAF12RVy> z*%l@diut1SCEwJ?RDc%^1FKBh=hyQbB-1Bsm}mYnAw<$7rvhRk{9wA93HqRL@#TvEJXtT9+%h=SRq&{+sC7}` zq5=bht_}M@PMK!5Sdn^9JWN8j?YAM39eFKhb`CUN8d;soW!-Ew+8tJW_7W|+ zIy6$_DQRl7#3@wS&4`>eGRHQG78>8rEZPw>hOolLfGf6GJFiywM?wvuemEWNoFu!t z=+S5cf+CCL(!@`xo*5Ym_5)U$H(gY6jreX~&>v>nV@tCiq86ZIuNcLwNLUqG1zQLJ 
zItD$z{_Tv?$3nv@gbwI+DapGi;!;;MiL&Wd9K>o89ba68B>culr8QNo!*&LuSh(-b zlr>(WSWcOpEzYsF@YPyl{2GvHp+#M#Y?)D7>{C!q`EWD7MmCph#)-H%yUA-{mv0nF z^$ZWZ$qCinRZjXiGKli<7wB1Ka9k-&icGz-hB;RC@9H}jah+<2X z{m|qbk)WxM-KeKF;s9~hKS>Jt($9Pr?X)eBkHj1 z)N2{=osEIJA7U8d!AV^vgYKRG~ZOmfvNRT2=CgQvJ)CkcQ;^ zy6DUz=5YKqKDDN#1z;J-tMcr0uVo>0w+89q-VatD z17JjIp4+s&8}Aoh3nIA?!45dk&p>}hr+R*Vi!yw&Sg`@Pyy+#299rNJ! z9*FfEwIT~f$G127%Y;ov$%_Gd5^saD{1Q_OrsSmXZyn55_A?QJFM7X2FshhNz`WQU z40>fTl3pJu>=&O#&Dg)SihKZ3S3Rr5$p<>n)bZok&%Yh$uq=mNjLJrAOby@h0{bXq zAVDqxNN)U-AXgUlKKuUjJCE;Wok;cJP0{9XxsLSh4BHPM@$?iB)dL#4o3T1i7EO>IguBT$NiN*xuZZ+#duv#eWmz404GyViUcC zWL*T1_@P;q8aEG zo5DBVgD$M)H0ZJMw94c*| zGsKy<=06B><$n_7=Ke{Lb9G&B3>*?CB62%U1`_1V`{$ z&+ajcI4Vr0Esp+9kShTan$#6;8-F2O^}{+E`I8`r z^)G@P4#97N+`MT59m zTm=D;AV*F8n;^&bCqXXx4}u)VhGRL9AQykvUizCL#{!1|T!0_g7T@;9bC%Cm3#ZpN zTA#OUcK46xD=tPv3*fqvICEFJlJXDFZYvv;6v?_8-E-E5 ziad3>%v%felvP$YV2TZG@hI**SPOL89Q6-%>n>Z%YD?At66Bu$MUb0({v0R5%7d}^ z{f`_K!_0&>X0kN#V2vym>uakR=p$uf!2$XDmm0g(#zN|aGyDW^BXbA~ix`R?@>h_x z-B?#Cw*cm46))BUsRe$p<|_pF@;_UppDZ;Fmz;+7H6%}tf<@47K85Y(P zP~j$_7G_sP8G_#N#&4_T$?xu`Y8OFUFFU_@s}j9{?MS=MTCR9AG#Yo)rBBOVGfJWR zu`QK6Pq}>@47Tl~C4c0nC1XF&()MYN2s7^q#~9}@TRgeOgSWHsUdG;!z`X~pLT681 zUK|{XXFR+Jw4^8pW2cY|S0HRnPd2p=N2 zK~}ENnl&lr&bomNgJwH@KaOm(z4oM@Lr{f#2>W{+>VuyJ?cCF+Wy|GyOZ6(d|1up# zQ5KzEpKind2N|vt$^z-HXx1OUu)-{_0qKYT>)?euo(j)P`sZ-NpzkMEma z>Oj{~ArgjZ&5l^%+6W8YGKot4x<~XQer?=CwqT{L%vdNG88$UnDR?Xk_M;o6+mJ+* z3xzE${}eVX|Ga#W_Xp;#+HmuC@Q37g@CVzz?GKl8AYp$gaHSlALWARa^?LCZh=U6N z)x8?xsEq~5i7vq&4so=~)}Ln&)lX)YaK!NDbO_iQlwqy{nJWH>+-bQr~nGv879UZ^@@Eg8&{D z2iBpCmmM))=n?xxs&fiZkFua_X>9}jsLZ9DxUTU;|dAkHZE^s%1ao*jp zKX_3+)|^XgJ=aJ(y%K)IP50c=pEP1R4*U)T!_5J~aO{79;c%aYAMjCPt;61rSf?}0 zh0eB&MIv0t-jt~>L)TZWR(dU<5Z$EDwwqhT=}gUW=^y`1h713b495#3!vWhA0LgHc ziG6Q`7op3%wg_!EV8nLk{X92tFf^nE9*}eL?NV&SOBk9B5`y`&a2id!SKGs(71`q0af<^_!)%@b zW@p(QB$A8ccF-{MNO}3m5s)JN-d8mluR9~W-@VtL@fA_B!BtEpXYYjd$!H3|bF;Au zi=;h{60|?O64ZzwpW5@a+9I1veLyYq?=a`{0$T>_@|(RB^G4ZWGxne4`xhK87zl@> 
z{tFIwo)JnK8ylPW{xA;P5XwrGdC<&JWwLW(K>V~WsQ4!7EbQWBYRP&fpt}FkGEXH- zJ;G(=l=`;2h~IjJ@^C-ry$l+V)FqAU&VAW)?rF`~(4XD$(7JsQyE(nxVeu?(K3^7~ z&3pc3%iHxO$aogN;cYoZ$Q}km8|%x7C*jr5xb@oum^1LfeSg7Q>npdUC*2KNTUF53U-NCxV%bqB?Kob$iI(1`r*4TO!m>Z1nIe;* z#$K++$32eSy&oRFPOd~Oc17ZQZtlKcnZ4{6Xt!PzK=GFACZ?2N2YXByqW=o9PB~cM zZGH8Bf9m#rzx0Ly*Xo$PN@`Ib$VSO@lgapZEkGSvbfNd{IP}-Ur+K)R`#&5RTFH5D z=(nTU60Gq2tLXjUj$alXhh!cD=wIhB(!+(a?Qs%yc{~a5B`Xg<>{jxT`~;}=V&Z~v zbNHw0UU3({#Pz4y2AnXY<>G>PniqSdW`sZVvj<8G?z_Ecvyip3L>sH!q8Ic~P~1`` z>T4J?O<9B--@>2LN(BIG5<-+ixr1P1f&2!p=0f`B>x!ak(C5L}h}s~nF@{y<3}CZ# zRt~9qs8zS0S8m8Fg_LPS*udb4*9ueDrBd7EX9Mt!#Y%UYp7UA2_)pw?h2m@p4pyxB zq+2ckKzd9u8eg0(^?=lHND1>{=0>r(Mg4ai3fw}&U=ZE-{JV$|9~tHk4D}+9_Lg{97jb~>=q&d5(wwQb1Z&f?g{gOh_ zyrxr{U%7ROh4vz~7I|BzM0N!Y1!E|GpJ#RQG{l%s`|+ztvoj@iOwpE?3>vVW;U|p| zWAQ5x0*8nnu)BA4ARptFxD(mf=UYOSBEZJY5B`I~(i2H6#aDqM_bL)=(aip#B?q!i zsVa7W95x!kjcyM?g5RB*H!`gG2AX6$XDc&gUeZfa-HXANUi%+ zN_lHGiWXETA_~2%{K~jlpuvx!2ujXHL4#;)T~{i2i3VSi0;DJ%q9jly!iHk$sJ6$3 z%xQazEX6BPeBE<)GTP%sy7@%FvL&_jx_YUYs?f*LMYP*s+f|(xaVAL06kD{FA(&c$ zz2gw&A}wA@3?#Lf+do?XlN2xsuWDk%-+a`PI(tR<4Z&!W_(%!K-(CRk{~u*%6%|LK zM(YLwBxr(5kU&CkcMmSX-QC^YLh#@gJh;2NySux)yWJvlX3or8!5ey@Jht2y5h4&7{HwH%vU&gI+MV{$KYEd#r;(A>C#vwXgQ2*pMT$8}Q} z=Phx}d@g&?(qHN~2Z8|^?<+`1!iIDjh~w1ZfXYe@UiYeXIPq76+oWD=4#oB;Und4h zT}4t@3QGlZ7#o&DY0!IA98IV9cBLVkRy?GpHnv%aQz5zch$B_8QxYm`&OHS2RFgMV zA(@Sz8s(1R-#@nlGtFFHm7(Yv zZ;gXy4n;hhSy18%knQCYU{Sy)cf2WgKh9l6zFjO^C)fyWeUALN?h#6Pagi zjz-$I(eTCw*waViJyGN&Vfvc5%<>NpFZGh}G=M2J{FEjbK-k`1Yc{g8cW^*t9{9={ zLzVX;hUY+zAk2Wcp+Q~%jiny~0)(Y=%Z!ac;|AtX?}a0OcyrrN{q{X$&qHe%BinmM z-=NxWHVurZA{)U(IC6;zO;h`&Kb#jVezLbp>`8Nxkbv;nNx0bBs+8Hy38ODu(m24} z1TuAqLCIR0l!#c~YD^n8f=bB>jB$Mlt_U!oio2_ajtHz;*&ErVRhr>D zeK2nJSfn8E)XWDIYJg8C@L7pzo=MX{e>{0t<4b{d|E5V91kwoJ#crhsBfPMhAX>O!AmkWS`*4~(n%w{Wnr~>~wSy5{mi1WKs+RWJnAJ)a-&28M%?4$g zK)FB^m&g;C$SPqudgn#At zazGF9W;d-cK8`j|5leR+v;MZZ);lf4!c+evB6N&qPL@LFHY%m(mFC{7Solzw!m9{p zIqT#U04={&LIZr$ar9yu-fG#ct5|_!bCSoB=4$Zp?AQxK3PZxsg`KIKMX%Y}Z+mn_ 
z!%WGrOh2CkG}=I3TRQoLroGsJ5&o`B3-~vPFt8hE4BBMMCbM}j2e>~laT>@km^i4Z z<;35ZxQ3(11w~qi&kQjym^h_L;ZIX<685M*NL8Zb#l1PPAV;Gl&i3wcS)0n9kQF`2 zJ^8*wlLr>IA9)rR7Q!C`m%;pn=SmGxF64Z2vLn4g{1n1Y^cRYGnhMm#F6uN22sjbM zM3koLExcra44iVdal5svf=1%F%#X5|dJ9~EOF4z~AHBcH-GFJ!-Z3-cD!x-Kky9+K zHtG2c<3-DS7ZI)*mg*EUeil(qRwqt_7M!T&PA5|+ayp9|SRwuxXgEKa1!bp#-dv){ zX?Z+!<`%lpS)lI#4YIZUVp|Hzf(TvS#wo$g{@R56n-Zt?Q$y(H-T-9XkcAGTFw|Z| zn5iN-h7C~SfaBBth|$up%W%@LKrZiR`IN++^b5mK)LUwYStxT-Q1+eDlRL zXyC9a&xoM`w<#&u?o2D3qe*Q! zv2kOUh~FZ_sOt&2+)N(%+Bbj8d`MYJj8b|#-BW5-&#K}%R?)(fDsB>S@ow{%DcVU0 z#kT^c+?D%rsK<=ojP2MRJ#~mn^`SWz5G`Yso zyoH()>61MF-(3hiFN%k#{ma7$u&5d0_*5`YnnS*5$ z8qtlfpq@+uFU9m!cVJs^L@}O z>az!0-1l(bO8W2Y@&o9QFNnA&VbQ-3ajkP*fwW+tJ;-W-QiVa)JF{H?5!d(|5y$pl zL>&JMA};YyMBL?XMBJA$k~fL+F0;zX(64&vTa2YAl{_*zGO{dtI)~vUzH*OlO}5<_ zi<4H0m*&DWbYIMIi$fzB?dMN#1Fq2MDixRaXED(2SNs=HR7@`JwG?eHNxgWuj~pAA ze=1UQVOkazcij{dj~-;(<2>?YPbsc3>*82-EyZ`*bm#l$!uSJ>ucSTFIwCZ-^I^g!@a z1&xxY+uImqcf3;V0=?aA#$iusa)-E~I#@KM#s)duJ$URZx@}|Wa0x}k-#S+9`6=QP zRTKF@3P-}CUQ<0kE&8wW>3Fw`xN$aw#yN0F$rnN#nl94x5VUMJ+Bq~GOjO|0YY4&+BGdei1X3*%>eKbx$9}SpJ`6*a)3gs52UucG*3swFD4#(M!@B)WZ{tq0Emli!v?kHz&Y<(vjTa)E0&`2Xsn!grk*yQ$a+7@O?Pe3nmIir>3J_Su)QeoZ{D^X7 zOBIDgRjuADmpRlWM3T$oa~~v-d-xKFg%*PN-qqKF&04$qX2?kNxb$8E&j}xD#EyIc zlKMGt+-?iz#C2M;hM&FKZ2ROhOfa`ljIWRB9SGT$d#1G@G4HLV}}O5FQvvVX(cxC5LO)=UO6Q_>$(UC`-XNJ5)~V z5=xta$EPlhj<9I6I+O0LR`U8kr{9g)Q44jJAuZajrzX z6-kfvh#nm|Qm8q#^tTV)-uvqCu`XM30wgZ*sm2XVbNDDv5s{HYYx)|l|b?Kw1#N&Uh2cCv9gZ`G^4hAyU z*(CcP-Uj1FbyG2DaZ>kY_7|HLEc{L%7jq2*aMCLkT>KMjFtyh}3C+YoT2Dtf9!1r? 
zZ)H`w%Dm(O`EsC1kMVaURh>Pyazrf zSFg-`woD*fMdq=(t~z?wP|p}GcMX^s3sc%4)dsXC79{Uw?5IW~WxvS*HHvd8+^fnY z=Eh>ZOyi1b5-t+_^6R&EW{K>my0A$#iwRVNF(E>3OxSnNeaN^}=9B44BMW`=i( z3eaj%AXz7w7(*7QR27WwPRTwO9+~Me%47Nv$la_$@BpTSv(^1lu>3j{WJ>9OYi}519Eu-BuaX>?fY+9>*fbC3PR#}2ud5a~ zehs#mVPRP$xNEkMDzc4KlR;<%{4?A1x=g8s?Vq8yZ}IpU3){ z9cy$y14My;gsyaDO5p$FI{YmRm*9PP190$^3}}NEVDuO7KG?xNu4a*y+v}bKL^vv+ z;Hk7;dM23W;mu;y8+~D9tDyKgG2Y^EOI;47ads*FbKOz2!_O*378d;GISVtSUFNj| z7Ul_y>`6BHCCU8@#%bRDr&HkTKKMP*o@Sm701}SO(u_>?1qrA6)9HY&n~GBB>>)I5 zd%A0>tvH1hs9BPp9SjC94K=Om#jX_Qj~8#09BQ`Bxc1_e;6vBrB9$nNv`hFL6eS{| z+i7gvu&Y-yd3Plb5aB9CJ+rd9`>fMx^Qq4}^p)f`XRPBivZs4L%4wbXA7)~i*`M1E z6@H88W*k2Z!)&-dMxRjHnNjLFa^0Gw zI#zYu-O*VO0woEGC%Wc9fVMPTSBedilby{aBCFg6zX#tKP;ip*;sPzq;k)c3tU>Kr z*&ACFO-~&LL{3eIJpqx3b|{ocM8nN%?9O-9ExgmEC#P<2UCuC+guLh@sM`E|3iFS_fua&V|L`T*lJ6_IQ$C{PW3kt&XFN~ z?ig<_p9Jy8J%rV-56kA)2pIGP+x}nGKfWJ}NqU;w4oUK6H2zTkEQpL~;x2|U>6!cx z)h}}q8qaXnT!fFezz{}<9e{EV^+JT>iK`R}ttGf?87UijA;J~$y%6E9n2g^P(=?(VBStSYU!6Ku)PlY&m+bT0|FZ_R!QHX5s_ zBb;Y25lg*}+2g5VgF`2-YEc(hZt|cXD1y7*ol73g~WY+ACSI`eZcH;(F!Xe zV21t&5YA=tN_HMN^`?N$#&gX(^g%XLC&1$~c~4&~3ahWH^VA-RP9^2N%yus6mqSOq z&6w-mBf1etKJl#CYJztMwUSH%o z#0ofJt{Qw3}^>Dm*dWz@Tu!! 
z!G(jzGxhv2Q9~TYEuvIJ@;>_2#jlDbYhiP2UW<(gln;iT{MdkZR_5O~PZ2$Q|K=c5 z-`nlT6@PD>*SN#YJbTaMv6MQplXX9$U+A*3sNOhCzD_PDZN`vSw8e~keYsiN7$F^hfpp_pE;_k3p<2yJ z+q-L%Sv0$80|`!p@vuW#SG`t?Ok98bo+*QS0Wwc~Cft7unCOk`F3I<;n%zFD(9H}s zC%b>@v$9&N+iQeR0qo;>)yx#PlGI=ofi$d81G1Mj>DKAI)HA6GxNkaw3S~K$7mv&L zBPhFThV)lt#uavt%yCMD=T?sqSpFvC&oLChyti;-1<@bAOmVIi7%NnJwpeTk&dt<7Yxg!k5uPjKRTx$ovL9~p&*kfq%lywdklkx^ z*$9i2s1u_mO=k4Kn?By+ zSWdmqDYvI?R1dl(W@|m(%e6JfR-nB7o$+pm=ZhXfK%d<+^se*j5rfS1HTeU3ax75Ab8_^ABGE>DItqr4aQn)+jrn{Q#((2 z*kgjiOWs(8smyWW(}k2;w~IeSD2s~f8)*nafm8#jc@HNq0O#f2%Pba;eMeAtKuT$2 zt*ry0#zvR*jY*%uB)DV-lP%wC>t4fJ$~#$pdv+ipOlMQ#(V-wBI=YabM#=X_EV$3> zYxWty{g1%A{%Y9}{jM(Qvw(5$+ZP=PQ=z9wtQ)xOEl-Y(lg8D7Sp709PlAL%La-Ty zP2yS83o$!+4e_qBnJ_!>zU0ip1#!zYpuldp7DJC~7CiQKuSfkiy~bDQpQj8N*YLJ6 zINv4hpZfV;MO>Y|e48DxIP9$21R)yf{~Zm7{RbLOo7{4BBo6O=uUlaS0q^2aZe^Wc z!LAUI_6gkR`Vcqi4js2~M^c3!l(7QS#@drr(r4Xia1a-u7iWPthT|W@!S9n!lHed$ zi$Egta1<5)wj#TkCp{Gf<&{d1e_2`S6fF?~G-o z)qP`jiLKVkH!YHHE?0XTBaASOkCqa)vMTU1B;@ee%AN6w)j_NTLjjf1j0(VhS3nCY z=BR1A0?Gx;2!5-LT(Yp4=A(wh43O+8_4LhQC)dNE4M#C55GcoDxl+Z56C20RYo){s z90_qxLTa>xm@ zdaV!r+mvBMRDV}p7mw}j4(kn8H++KXJ?5-?mcSJ#g&LsEu8&mf;%1#zPVXBarq-i+ z9okHcKYZGx)Muz?1N};eO$&mQl=(}po)eTP7zTHCQaIuHJ9X;Ofq2TP7h3ncr)sKv~{S|&MFSiIjSMw@9+e<%N? 
zJBacrLEeDng$#$yqW2dWt{XcwwF4PfuE%V90XxrYB@9g8d&mkP!^vGPC<-VOefkcA zp#lIIQ`F1IF72y-%((>rNruz=8yQaY6v^ukGF+@sl$If&gujsCuA`C{0Ww_EpJcer z7c$(NMjcs*2fcrg;l6#z1p>5xbObzm5{vkB!j6>hn`(~TsTvN4-uXpWP~;X&n4;c$ zXHdOM1N|~nNWhvfQ+sIB=3UD7p-!qG`69Mipd8$Rt^4|`s9mc>?$0H#YPI;7=vr5i z_ppUc99*UJ;yNY!zOF~a#bt2U0Sn^q2t~DHdEiQz-h%gJwq>6oggQ+zk5vLJk?+V?P(Sq;0RF6%!sj$1gj?$!`s3}!?IDBoiBx0??Oxi-DMjZJDJeWr+W+`y zn4MkHe`)4l#xTDI)>8OhL}+gH>W}?=DA-*0_B3Gr3tB=I$S4?CD`Rq+) z20AGBNoCt|*!m`=3^}OGk*vJvg1h<7*~=d>&k;~+9SuK<>1~}gHrWe7Fwq>O-wXJ{ z#lP>Ftg(DhgEG&}%n<1|(!6UxD2$=on@Q#@N0a8KWZW|_iXK{{kE7MeWeH?pWw#}c z(5^g4ieY>9N4+8_RUo1sVEBuxN(vz;x`mXm$96KP7;+kXZ+{bM@ z<;o$(V0t`pw<%Jq<(EzbkIk$3fd1*2YAuzB(SoEy;~3oV4P{a3^0yWZd@rO^a)I z_1ip%I3pvXP)`I@W-^7nGjS-WoT*8h|$F!IcFU2So~~fu2fy0e1ayt zVwFLTrPq8G5p|6YC-aTo=gU>NY_SY4w75ED>uvPbg)+-`thlng;{Tw<@j|(n_tDX} z`k2CjByHJB-Xp?UzNapGb^>s5KGOg$?s!J{(f$K$i-zI`Lzw`@XDyM>DuFmDXLYTZ z*3=}7;o!v33O{ArMG#vG+&I4=L+FZ?OiCDEQq8|v_P~(+!Bx(6B<<+TaiqX^OE><#@&+Jk|+9A-HSlSDELNq z5RK=?!-@7d^S=F-$vyIeo+6+&P|g1zR?PllSDtUpt5^)tQ(2-gZ5a}M_D#E<#!~KY z6>@zr#WVBVgoWP3#wg@yFkz2}Ytm1|3a?J*hty53Mh(P|nJ6l@4F*5cnU7nk>YODG3bxx$HjLc>5ug;dyh!n$cT~r(=%-_ zU#{p#-h0nF5TX$c>jY<@(avz0~nYkK#PSKKnGuid@(Ct^Ff&Jmuq;RkdZgiaSk zh!Kv%*mWBg2(hUevt8Sco2S6;n2;bTx#UtvO=n8zcG7gg?98tU5aIoYSDd7R7isbV zL;d=N`{FfQB1}kwUlJLS`zl6hmxk{5Ie()i=jJMj!Z!NqUnl z%*uaKtjt$+=8G^ftJbe*2F7 z05d6qasOXB=PSh@T$@tPgb_ni3K#bcaE1GJ%4gw1fmB;YIJ~y1%Ms)R`;=jus zj-Q-+D3i!0NrJ>@GHH0&ZfFDq1BTGVxpeb+{-3VTZu;&wZ-DlSPq5c90uG1st1cTS zO8e`XOh@k!q&zOTr)%BNo2?7@0ZQD~|5xI{P>ty=TH6WWq3@LS&$9v^hU9=o=5_tg zv(~V|F6amMx}OH`)oYICq9|b|k@y7G)HTpiHb9Wi8t% zKx%wue|j8{lMCzjGSV~JN~vh7yZi3BD*akh>xSX}6fJk7Dyd(6N_3}(v#Wgf{l=_q ze2Sh8Yh#<5rIkS#IdU-iJwoR|GmSm@tF?-NX9 z7x@&Xb%#LO5zAD?5x}&|$Db40F*!oZAZAKoq&4HSvmI7C{%LgM5?Uel-t8uVuGrlv z)Y(Z`jz%zSJ0yA248ZkZsjd#ij75DaFy0Mcq%+Z#wRUEX1-{e1Ongv5cwBT7geow| zi5VKk^%%;k8CCv*NUoHZ?}z~quUHl?b|E;p9@0ZCDzKP{kDC-9MYb>Zr-M2Px~&`* z?WUpfz865m@tf-*&(Fs^E>jkIZazPMJ}a0R5EDmEFZqi8XnwF#ibK^jF@J)LJ}4P2 zaR<@RpZ$tuC=GR^ 
zt8YAa?fZWc5misNOs!m=}mMV zMi*48uj+Ze3=e^n5W{|7RA~J;mC^RRB3m6D9xt0s&KmQ7goFG?DiGl9IM~Q90!rK5+d13P1f=UkE8npgRU1OQC9~Z;^zqGV~<{6d2{-*N3=CGIQ|NR7!C5J%90rXFu{}qWBptApStr%f% zrR+4YCDruK4A`)oIe&R0+Jv>2ynASO?fprWUimZ7`m=MSwbf;C zvx-%L77Fb#LH*FQ#Dl?Y@mG_GU$+<&(ST<{0Iv;V1mya*M z%dmhz=A;L^Z11&1y=Q5Pr}5sOx`-gX@ekiM967$ee^k%jwERf%K1LMs{@O*RK{v4A zeE=tI+pTQJJ`N`4={;d?;#lscQaeNO$$mD%9aVvCfDvD*m~}lGVlkjGX(#b#vN0tkDuNF+fMnmS8DCq9y_J>TLta38Vrl zl#~hds|vN-7RD(4TT`ZyT+~T6`_fde&Z%&A-Bw1BNBL(%`(*ZY`+fN|a#{IcinHJn z)JaqdD+%Ds;8RK;xvdyz7Pi%o#8f~%oodM>xS%neqHvU9iP}5WR-umv*FU~)f@*Rt zcZu@37z_Xy9`sZ`#r&kGu^00R5+`l>CHZkZ!R?YS0T={4gmGBn}xJl~?-Dc>VKyblwCUgl^ArCOVq#@=-|w2^u)D?d;h-!g5zuEUiH|XBgu4 zk)4x~u;E!8_zoK^K+2h?U7dtqes|>BV6FwuyO;; zet<5bpn|cqG-jz!0dg$7z0ob4KIQBh=PKmKkDEr^W@?)lyDK@FJBWGXuAnBU)?tCR z{&&QFUn92l4aUo)YiqjM;o=VR++a{ z^lRr4{#%rZRhQjmpc2cw)+fcPWGr~{SJoQ)Qalaf0ce9`IsrcAf@aIfvGD>mdXRJA z^igAN;R;wZC023P9;}TCYaXr7 zg628g_27rNbYIH*NlFC%%SRY*f=nf=SyvbL($!ksj0OYjcebdtRD|+LgtWCozA5y1 zU>w+oQfIBGJxZ;t>Js`CG4(3>_zYE;^N2h(-&*30gRpuY$TV1;9|q&rHJI-iU=N1?7en~E*j#S-i@FLD}jr3Fe*?a(UywfP^Y5`MqLP6 zPE((%&Y2E5#CEdI*v;8dUsh}xn7~u=)^1V+ka+R(U@c?8U-K5X-c!RI|J(w6uY$?l za+nknIY5pOJrCOCr6-Icq>D9&j#>8t3xN=Lh{tljlKpv_XuWR@I0f;zm1JD4@gcrJ z2+c)jaiTvm9Z8XeN^?=@lFwM->@0Gm87|+!O#h{7(3NGIhb=!|*bRD|)xr4DP*t4y@vGbQwxC}z z`-StajQv?0-tu*u1k^lH2>T5X62yi6RTskP=6Wp=%5H6bni|6B$!K!Ie=dmnM5^d~ z#~huvnm6?LB)D{;cGqyd#x!JrP8E@N#4Nf~CFFjeLEwX*{%A{ttIK4xyz1`a4@Sz{aA9>oHer=aTm-J zzCI0eDTj;;S3LuxE`B*D2--TUN??Fux$yD%t88FxJWI;DrvAKJF9qXagkLsvZ5Ub zA2ex|V4a!5I5(QNSB!A|tBh%FQhS)&Vr>&{T*mHBNrJ69G+rf8HcKWT><8 zV=^vxhi}ew`YiV}YG|Yg9GK2O1Y{pDYXIcYu-Epje56mD5SZM#kZ>xzeUxkeC&ay1wWL`lC$o_*lL{ZK&>W{5=0uHpQ{69rdUS zt12yXNVZ5OX~S2P!l$WTj06gA()fJG?`KoyQb)pgzh0C-}ydjJ(EVL$T=`7~0_2M&NKYKm9N`KnDX4 zCL{$3*B9TJneKpmcxxl`C3h7oU8m=DOS#syqVIBkDyUCG;G?}cPlubjRXbG^t0z>E zn5SaMHCjGdWq-Ov^!;`=;zs{VZ&ti)TfPuw9O=<^wm%u9PT%CRP}^(B$su751sj=F zYw34J&JZUu-@&`iq~uFx!=w^+mO55SsGs?#Fd^MRluDC@c;b@_y5K?)hy7}Z9B%I^ 
zaZK2(&Zq!mW24)NpkwW0ESgZ`tG8<1yS*rrk+G(q_%KZfiit2JYvxcjvQ2Q`k$jC> zJ);bcCgopHP8F@_HW|w-Epc^~H%sc@P_^Ez%axGR?dENjrJ;kq7j!L)ommrQKO}9QwzJ_jn70F@khH3DwQqeU;OA&_YVt1*gq>c(dW+7pR_!S z+^)$J8!WA^B~%b*RaW|ZZhgKtdqClLn>dTw%pEP(R+R3tpWL`Df$dxMZLqB}1QdnZ zpqIxl;($g*AxAsGb8=&n%#450@3rylQd#fKI~>Zt;MwNSkt;nG{}yuy&kM9gc2oW8 zI)2@j32wJ3edOL`xryWVF3JK#i0&sK_>OSL- z9z7_*sBH1g@79AZU$xpnyt|O4+`y!3BqX_3=y~GK6&KaXcZ?G{1A5QChh9^PJ2RcJCyMxPd8pj`W6h`jf1) z{O0Q$dIjE2Hj2%$%oiic4cc?;h4&V`pG2#bdy^ue_ZoeZ_PYWDz>ew$gNlT9zPM;0#3#-8 z8GVx?=E_jOFi5~6t!tcpC#P>#;Vseb7fO$9xnGzRT<8*wi(X#NbSm8;Be8w?VUV7hX(^%6l%@R@!kjHK;S5g9x@)ZZ-Px{kb`i?E zVFer1n!TF=Pu}>ADEs0?Ghlp~bNgad(n$yU;lQ!Yuv*5nCdT=^PQqFTRpn+@^JNJ& ze~xaLFm(ibY<6*oU$HVcf~JntlBv!|!HWrfOW**DiZg*hT1%Str-{gOE0wvY{Xre! zD<5xGPi-24YK=9nJq`(^n6nXn%mRO6HA2$XW5e~rt!WgiZ5qF6TOc|(aQP+g7O@Lk zGY@}I(nma}69yZpI~vtj6i;N`Z=vwZtoBu%k=EeWEFVK|pBvn(UXAXTY!79=G5T8e z#(iq@7EyL__hJ<`T3G|LWrwu}lkb;q}Qfe^F#$yxBI zEJ!pMJFIUR&NRd(Xt81y+uQ%Lhhr*T1uAOgDx`ozqmJw&rv!Poa#5XhIi|Bw_@h{U z`Fm9@>0Sa;ZvjJMt?z=U)?+Ndq0q{loKWl-i3d`b30u0-XHkV-@uQf4<1CDq`RLa} zF(S`_x?;y^$%>)Ak1WExu#GFM;*|XPv8JH0ACvyYM8+D2BE{qTC5fUsqq3b-Fk`^E zBV}+U$(P%Wg1NGOV|Aj07!l7DX_`g|^ZK?=RX0cywy5L*-$sJOYT=+kZ{N!rs>-cG z32M+H!O_wWTPv$u?*&5~&$x$a!d+6Sf7&?v1wJz83jvIMbb~;kA0a@=RTnEO97q!v zCJxeuIO#t4`-u`!xr^eBOBdE_rOl0=LCAASZe~o+v3kLNkBM=NMXx;^UCuaAWKaao zn_shR>@bsvUXq)qoLyKL5y(Gu$4r^~7 zV9|3Ss477x^{ftuC{KFo7n^?bF3`=3Y|PJQ`=vHYkv<2H@kKEUxBS%A8BD$+hxQf*v3hkIR_cFR6!t@iQA)iROeG874Wm2)AA+ zRFGs;U|L@Dwi7OhPXQkFnOr!BlW$lcG1e0tqzozQ{Yy@;oCE<5H8dB1;8t_d(MI)n zS1$glOI+hFh6E)i4OJ&_!IguernZi7-iRAI;oJAZgwEIml#JweJFm3~9zgh>-bpN+ zJN=Rafoyx%38;h<a~Alx$)D0)_^kTY0a47gUN_f#iA^!kN7yHjt%!aiiG-;`$w^FLW;H9^|dy7mT|JO zWsGs82R-+nvj9a;8dq<$8&D{55k8-@Mw)Htsbz{=_nwY%=v7rYhXtjmlzoc5i0wK* zv!m!S9!s3<>0f#6J2%@?GM{9@#jc&q@rhM?@neYb596W;6f}da@{v+Y+_@v<;dfNf zx*ktJq{s?7c^N)-XnW;oj{#Gz$vrHR{4tGfwh__>jMb%ZRxpL7aeXA80tBhYkdbEW z>`%;^)XB{B{>RS>wCtNS?h40LHQM~V+rVEpR%G3MA z-GnzqoT+#B76cQ7z74F6BQC4G41of91HS5!XqykquZChk zrDbNk24>?^71I0fxD#% 
z;|LZe)CQ2<{wi#$SV4>Br*>p@W-aw-SDf#vh9~+0Rq8Xaztq!+s5mYx8D0}CF!l(G zC7~rHg>{9Wu`}*6q2E~467gpc8tx=3%}7Rd^cr880s_i1)Fad8{!b3cqAik3B} zcp$;k9lI{$oj09__@Qgg`XUsgjOlcz967H)_yhz&M^c1X99eK)?DSUQfO-1zEof9ng=F!L%^q{?` z?E}-PLqb)0HBssxb&DikmZulU25v`=M7V{Ef-*?H3Y^~wt|=RW?~xjfQv9pf9$xWc zB#y5~@|jp!nnhrgPgFz;O?%%!-0NNRdlJ-alwsN)d!v6Z%9Okd$$EI^~;HY>;A z(f>?$P9>bFQJVd+L0YZ9Y(e25K=}##x^0ZH7tB+d+YpBSc5O2@f~WUBQZq}GJEb-; zx4&s$lDzy?CQX2Hy&Nt!%_g+xf=ZWEgdPUZdW8hK?A5{CUVd(iaa41eh6d)r-!1|APT4t>uJ(DZA#MFW2a!r!Z?Tw2Rc zW)US{NjA@qn7vM+j5&BjlpwfZtzgX=Tdkb07@bOJH<#sDJg+8LQn^3f zzFE%uL$2mishA2;M1qWDbinpJX$l5WNW24%mY!KRfaZfSf2sZzwd@~QG2nclm7J

(GTVk925hec&6=s&czojsR0?D> znW`JEWX^n>OAh5mhR=O<`1P}n?2S#@#~-r|>#S8$xt4SEMZ@#cpxT{&X8vpxBSk4+ zUi!%_c#0ms3MF}@pDwie+8!-KKFpC5J#j`E)`HgQJC2ai11Y8H2GIBll4K6efvv9< zlcm<{cVmj3)_(I-k5P&*+=;c>Ny3taz3@Z(Gb}MmXq)83!0#!MD zG&as-?V_(ia%Hhd z$|Mhb4Ox!q_WITYth|U3tRk!&+USDC&2~sIq$|X%h44EV91DuM2g+#rpD<%_HnN2T z3X>G}SB|e{8w${fuzyFK#!3{FBgaxy;KFwzy0)E2v`^iyMgD1*!(rKN zI%yKO@_YIW1)XVOR25S-T3Lm>$m~#eV1BGhnKW7z8d{}Bqn`6n^u9edlfurY=C!&h zm9DgAoq9m}W`Xv58}IG%X``?4TyeCO@cRhG`ceH+hsrriiI+Xc>;vU979s~v|P_Kjm`c+dCNP8QeTk*z3arh^Xnq4E8{-#zkcX11?%2*^pqH}S88M35t5Nrzlk2S20#;Wmx zCB(OtaU*v87UzC{LV35gkZc^%yxL5CAD+oo7(snN8Y+UwbvZLyN*ba%veF-~aug#O zXTZddC3k0QOxx3%bW2Em6w+lmJ zDNXktvzwQReJ@TTQ*1usJ+{&)r4y`vEon&S9BwS8q&oK5y;1HmCkut0FvPH+uQaEIU; zAVA{|!6is=m&Tpo?gS0NokoMZyY}`wGiN6A{`c(J{jgu3>Qh}^=jp2ZlY8&;;eyX$pX2FfrGGFiq{v$ci`uSK3XQL_73XhW`&nkV{8q(gHc55*L-cNjXLu*q zyWK0ycOp#Abb)aU=_s8lqU_G@(HpGQ-%^ZpYXn#nQLCtzkJzM2C?q1wx%1-uB&rs< zy8e07f8r^jDG43hBB7Lqr)Z3~&*~qlD@nIejg=QvOx>IPY-bk)XS9)1+`p){xB=fP z2Wval_~iTa@Zghbb&;|O=#p~$V3xwDT)!A83lC5Ybe$SXGl=#NM_lU2ai`%Gvz&3s zQiiTw%FU&z_o`>i%(psY06|1&ckJcn__{<^F7?Vr)xg0vt*n_MXP44#?2Wp(r%8U% zi1n{6uD8~OYK!fcEgb`-s&f(j_vf)iQb=D^f}3l#X-y;~Vmk^ZR|UC{4TM$wK7tXe~j~wzkkl+;6Cg;$XQIk&FMVaYpJ=B_d%GjKTXX`UjYf&`#9R6 z12~-py}n%ZiMMw8ibC<9bJ(9&$*Jk&<3D)bEPYI=I-T0qXS84_S^e@}geY2W4Hvit zjN-fJ5x(h(av^MW^|f%H<4EtLm}*t}EUe->nTg`s8jz|JU`)ISmXe(Qa6`|)y-9A9 zD+FG=#Ug{;H<%8jkDfq$eH<%4-TsVj>^O~o2@zR5K8d!0GC3R%jIRl_>!&T(f0}(! 
zP*>qISL@$yBK`6ZBK)>uS$ev09C^jj;9hrNzLAZFekZ10qAH!ts92|9G}lz{7(0ea z)`s0WEcJ-4KHuIV-X!!Rv2j_6p|NsxYz|{hwF=zCTgzM$p`{L-e(j&adxHs0_ zC$Cf5c4hboUqZpdl-_-y){^0Z@E|WLXBchubH(Lq3~~Z3rm(ik-j~Mtgc42zP!(-4 zG=^QsfUc+}Rw0~^wPPbXg8FDC+Y;U0OGr2cb|;*JkXT#Y&$6}+e3*bRKLlr1M_Ch_ z8m}I!jbg0fs4M?b>GzvUDu5EvuOoTK^^6qU<5(draK6(!HHb*VKoP6=y_KPQ8yHPl_XT?rll>hwu7f8PCV*m*;Z>m4Y?q=uy!1}2eS1T&OGanR zKKyJ&)hIi0p^kn7zeb&0 zK0zBjS`f&z9S7L)t&#$^;I~>Ejv$fGER;Z}yvU72Nm#LM#dj71AquPLr z5KHtT=Ps-j39Yt2MZilP5QCz%XQUQQ6{lS}#04)lA<>_=AugX+5e0C~A3GvRQ`Pyb z@Z5@8jeN7)TswDA)n3g@W_v99Vg5q;KQEY^KGfkPfA_dvqONX~YQ^{T9 zTUe3_{U8q25>c-mNRZ&W3l&a9ML3y3$i#1;HEZ*b1wSS?8I$KxK9zT7M=g%j2ItnV zK(lo!L`gsu9%3E_pz^`Q)W=YL(UxvL)SjogChB#{ic&5+6F-<+n0eBrot6Ufd`o0VGAAv8X}`aYNFb;Bmsytn2A?JfStyo{}md5%08BN(>I{7>h2A$ zozZ+xZvxbm14Oz$@na4eDk)Z8Nn>|8&U(v7X9*-P6?;qTY);*qd!L6<)6X4yXrRSx z7qLLJIvo(8!J49~wUhvM4qry+{Ry^4CF(hC9Xa9Gah%d>gmj|JV4^^m);_@ccGL-% z+c#e&-`y{&dGSd@!~?yldC9IF^JPj8ZbzvMf)g3FuvgvDJ6Vnc_jLYEn(f7~IrqZO zbtJCB5jU-yosT8JFBDcsNi-D2WZNmp3{Ip<&mv@S^X14BEICzc*a{wy`+kU)0^o-y z8j|IM*lW!rT`6A!7?hLY{>X@*EW%ydLZ?yJ2{f3qvO90SM;0+H3--^|9x_XNL5EP! 
zG_fUp{F;u0)MUTzifLk3`Aq^^LG={;tO1e^+I2Is6oZXdk<7~yzXFcpi(T4xojj(w z_{}CNoYtW*8UBFd`?PeZMrxS)Pp2eGcA@tnq~{q5#h6R<_PI-p?vBP5Gic(poQ|kJ zDuW?%My?UwGUn93z-4iM+amM1Xkw%%2Q5atGuA(KlWklhyqp8w`u2Y_kvPaI*S-6o`Ug))=vgu3z z5QoftLif~JA+C2iB5U4K;}-m9BtNeAy~AhdY5k=bvos9jOzGQYbiQ>sGpgx3Q^-4Y zdME#e>x94i*FuyHZp4%qS+o0YSQJu95Gy45KeWK1NoD+xp6gUv6vV8Gf71f;+6_4e z6M;4Y2DB)-&leRt7+2l-=6U0PP&meU)p&0ww7!vs72^HEwF{9+#}e|avH1YVUW;jj zGkV02xSGcQ2M1O6WwvS7cBV#O2@UD^^ za*!Bb?K0@l+5Bev5VW0)6ckaE#e3j1z;X8Z=zUt;TOF0?MXyv@i~jOYCPT{D~n`+_s^c9A&J(H})hoo$kqd7MauX~qc((M)g=aFNTiWfAI-NnUK z`kOT>F8mE;AC3wrbT*4uk(XDn#j7yN>UbF`>aJ&x$y%u!rOHPta%o0gd2H(AX;4#n zIZH)z(^imEBnl_im2r}v!u*N^q|6J~>seh<^O1xoBm1G1J#NxPij~#vD{gLs45Ljgpl!8 zrX&vIB%KBAu3&*zM@{|6cVS8C17Y7wzQF7ve9f&TPam;Zpz*(6&~>dVNK#=IHq_%U zr0i{<%M6A7Z&(s}^(G6?TN$>O{<{W^vKxZ8}*phtu3~9 zXU6;wPxq^t0;G>FK41*(sl0^{D_@>nwzH)q9{snt1%orSb(AbAd!>wOz>7{AV;}s+ zDuv<&HK6%DZjcI;nx8&~HF(h0gB1m8v-P5w7F*U=_;AYCK|JZ`=F1Jq;d z5LhMg1O9v>#&_FG@F(z3LjPhC@h4OtLVW)>JfCZ>f%(4fzc9X8j`_?Li5w}yAAA7{ zCBQF@gy%Gj_0NZ_=v-x*0tZBdK7 zY<^PtDc?7!xll+a)v~DnO;t3MSl3317Uyb$n!hO1UNmOw?BX;~w1^~m7i)&F?Bf`ZW1FRl`KV-_#ismrw9z=zxL%!dC%IR7(0 z#W&<^inCcp@?}EmL3t_LsxYX41s(Wc%A-->&d zOG`9OgVsD5wl7G#E8UakqV6BisU2QvEgW;eFBwx%~s`4a&E*s`V;78XTQ zx#1z}EFwt7{HiZRlxg5fs#eK~PwxZX`N`LQT zfGU;(F3Uplk(z3hJln>39gVPwAPU__p)I8JsXb>M&U#|Gh&M2bLCz2ZVB(*dEmlSo z@g{EZYo`j4Qfx0|LPXl~`0%jqAh$zV%zeLxz+qUf)7zcU+o3{Sp4Oi=exSZ8gGv-7 zC;g-#1I;zV%qS0DfjYJ6`DZ|Nwquu`IJ?{nFc0Q3U#oM zJ{YVhm$!;9FQcB;F9n9=CnAIvo7awP*EP~0Ni~z&K5Hd~Mc~0AN<2gdB)=Hu1BtsQ zSbG|iF3Z|+<4Dol{kOhjpc3O6aNX#Zk#eCwhjAvp%6qh=btashKgC>VVJMnI6*R6; zrM?>TFRpJym;ena!0ef{TGxF(O=)nf7BfNSi)4LF?)IAtc!I<3S(UZms(++3yD49~725;~O|_YDns!R~*oi%oaBlQ^~EcpXM!N z1?9G32o)IdgD_ixOgO;kUX)J7qAF#CyJ(2-Ntzx1Y~sC2>r|I_k-9`&mwEKqCU-0z z54hVDwdQ3mDggi&H9N@A)98a|dqVr%)WN&RmZxgRiGjzJZzX2t?ST@-!0xDfz2j%; zpL66giM-AWhiCS$lC4L;-09kKqWaVtm(z)|NLWHyR$kYe5d#Z!9arn~z351CZ&3ls zQN*Ae&inxb(A3fRf$N(uX7B(Cp{0qoli#a#jYo~(H(xIo$4(}W=FR5k96WIWlXk+s 
znm#kRQQ(pjpBuhp@R2^Js**dcp4JSf1Z!_j&<#tQOXhdCC5&W_UmaPvbKx(We zDi=}T3XH=|@D;^@YgC7P&gv0uZM762l}3TeOWwDXvnGkImEGV}E`Qvc!zh%aSg<&G zx$}T*Z0<5LYjoU`VrS@72JDyGo%N17KDJ#jE^1~iDy<#Uy=KO_fMk)@ZC@Y(4nxOu z6$1M3EH_JHW3?vKUx~I-LyscRYI;^Xi$PKDcP4r7Tfv1#nm5iQ%uC&k>WCVHQa-xi zSTdh;wT0!Rz=s=`3^s19(+~vFuPlNMMoZ1N-N$45F40GxlftO+{*^)wG|p$uQP+V& zJ(g=C)PlgQ-}7PdA=e_f06N8ETP&KPV3i*tU6qCBK^qB1>?+N^;=3epW>NjC{g0NWf|*~uWBBT?}A?|1$#GpHP|Xvs&0-(s__P} zBsZ_^Sd<3vG9TrIe`KQv^L^Xcn#@qBceC~gPSp=x#4=c?#m(RXi6XN<|C&OM?%?`z zDTmz2{Z{+5sA~C~*drD_AB6CGjQWK_-EBhTstICz#AGeU55e}|1fgE9HF>z$JPJ_a znVJT4f0j6et>Y`k7XkfqlZ~4h$tp9@t^BS13)cc7`}o;J*Fn8I_Rel@Q*}UgYbPBv z82B&UO&&Nd*1l4HdNO<}6Sn)Fezz01`E?HqGW_IAb2s0T?d@7dyG zSw%uAD4(lT9ocdy&sSmIcklwLh2`KC2WK&1&`nx4VkyJ=z1vO=`A4=WB*o-5Pl#hN zVi43)t-F3D3NX_u=#zniBhH;WBG6`$OakMOm2nek5h}#L4q@jDs($4{EZMtwM91Nv zX_`ZUli!}-uBPvtvof7cpI=Qx+bnztPA<=^AQJ-6z`ItqY0!Rway4T4E^8wSXYP4R z+pfo;Jbd}_*(4m)ScbY*Qg*h;Gba7a%<9u_3e_g4z>c@hnl|t_7Sm}xKIBWyQ4oA> ze`)7){FU^8&GqPc^t8f)8$pEF)u;9&5D`Em{A{{=3SMCwp`-KJwexDJI24M1V)nTH z8(+*0_>;>%;eYKXfAT)(Ng~V_f4G7WCkzjoP;8fj)wb1k*%(!Ar!TD-btbOw6-Bs6 z10B29A49qhr{t@$fOg%wF7w@&gNuCTlROO`XaIPdI(qNb0RU;la(${~YZ9D;+WQ1H z-Cwr*ABmW`_vjawxoI)@4|61!IA^+OOzCn2@9WQ zaXA(htYp?k>H3r#nNvz=i3+QiE!Bg{3w?q``=mk&fA&iuO7!H1*esbuY3 z(l$%{`uNhU$O<0tU87ZA!VZqb$yW{}5|KK)HyGTmw&{xE`Jd}wYrFtdJ{CPbQsw&T zmV#Kii&+&(r|ih3t2}X_N{JIDD0~+MU`z^}%gFsVg;nj|*$uG?2azstWAcoQsT4)y z&fk1nUO2OUy8wyVWQUt+=FmPOyen53mAa=!(*UCYBoaE>r)ZF+G5JAcE|m&!ceUxM zMvd()R_ufz=hV>bG2t}nUdFt<3G3~KPjvsGtOFt__G@p|ZHv&TmK&-jOf>-_-#UFw z-Wo3do0V=V4n9_=?~av6uR7}j6>Zn`rZ&h*UOY?{E_6#N#)QEsxn-#C>0HVx0$-!w zTSu}DeqWoJ8yFDo8;oGst>kJ66}4ujr|~Isg@ij}unf&wIZunc2-R-q^h>$|`WOM0uW)qvoo=bV zglb6jX0Wa*uq`!!-^8Xe|A+mnG?>%lRqcwp7Hd#tl?MWB4B*J&$o;Ds!hp+Nd9!_V zxYgh*dOV%s=qKS$*WkJfzM^}Ton7r+Z+yb>VQrEUQoWIdg8(>%zS+^Ju8Jizpx7ez zGa@V6H{1uR)K-_1zoOTdz{uClavhL{%Oh0(RFRkyKDKN@u} zCR;m@Z`4!Akzl_U!Bs@7vImUDv7UY0+K#*%s3+I&e4puF34I(J6hIx?;RKvkd&wLX zhGa!OS-~2KM-Ty-f>ldOg&F$%P(*BZ9bdzEadQA*cMlE#2&`L`zCZJgZSyR&0*D)g 
z*&QC&=L>lXyuPV6IXZlyl4!(2$k&o_x+a1oVNUUIUkP-!%^1&;({hfQzKk^T$@niZ zBqhc+Cd@x3bm|qvR|99Y(LrtS6y+au`jaUmWs)pSPzoSPL4W-#Q>en}&ZphQQ9`zY zFy>^{=$bv|bb`y9Cjg*6$oY8>kGLw7fTtEe&ATVO5s-MkQ8=~9?LkygEiW|unmKAAZ(X?8-`HB4|VPDGrre0EaM9PZB zoIK6yMOeQPyYgZ7vMYDa`V%@@$s{K?cbb9@(|-L|qIxOOPg2kuU5mVW{#FHrAj{Uy zk^u%}Knk$}!?a6=GJBW^7HL>ys61QIr(f|ffn~YsRX80xnK!wga?1DLd#FsTlBKkU z&mfvn=3MoRkEOVYDieEkciOQPWr=_Qs72EU?T!jbu6u0X+2xs+2Z&n`vKw8UO>u6KCC-I!EC2SO z$S;5M@blYqTk1;MA?nJA;bTSoZ%wRE-8CA!&Zus@^_ja72P}}}=RAbAr#zoAXBRB} zXJrY?VQ%U~;ySlXh2q+Sfw$EVMQ~@RmTO_Tn29;G38Fs#6ZMWePXSDH$;&GKAUI_> zZS>(@+$t|N)YAgpPVdYjtmBKpJdT^R4JYo~pXeoz*#k60j-{&t1x~3H#HY172pUzA zGAv*JI&#stW!3K*nEtKbZ5n8#y=DpO@ei4!eD!FGC}e;C3vqrvsl2Yc#Q*IU(u&7W z@mxby)CFOjkIC4(lDFtiWH74R=UIdNKMJ0p)h3oaVxYP40*Cf1yrIe+Y@1`;&os?* zEUz#}I7{_}kuv%X=>j)H>D{zPF2xRw&Yoj3T|&PXV!#1yMb0O2bjypWNjc0>VmRpvN%HkcLe@eE?e4j2CIfi{a6tsg=a znNgYRpUDx;JS$;Q2m%DNYEoq=48LDw%_vG%8JX6y|2$ZMk9KlKU}D7*ZO@jKV35gN zqg^zZwkPLDil;JSo?hLHr_0;-YB&HEdIX!`k_LXBp{S6;kPC@SMXF~B?HJ_`_U7xpMa#6q#t*&K(e0J@fj_~dg z$zt$mL6k8LwuzCkCtKTSmB-;7VSgwXC<`|qc}2ow4>dD5g@5oKAzigTCW2-@kKg4> z4V*mCVopv6%Z$s?_ZuzSzQgkgW)Si@)^p6e&iS=HYbG6(6Mk2HCbsOpy;}eVUhhvm z>wD3Ow7DXj9fZ*Uyd2^Z@nlN(l`XWFsG0H4n&ImSiVT{vk>mZf`(P+LjDV#{v{yg& zGmY5Qhc|@*JWUpw^V#7Qw<bM(@f{VcI!MvL6ghzL*jaEf4r6g`52=IVE&b7tDHgJc^BQ&!4#K zNCPL!2dJ8$EL8Nu`Ub#u3@mS66r!;3cS{~-_VvM`6*I6cA+tjBWb~?3h(()H^c^nP z-+9({CPQRF$VUPA4JS2aC!nFR8p7v?4pspqjLqRf`N77p3^jBYq_4eWz9#9!sMSnP zK4kF%USS*r<%I!}b1SZ$7zEK_;S%CF44^^CGT+@O53YADTniEsg{i%LP1mAtVGBg` zK<)%J;w}3@37)G;x4o`Y;?{4SS8shk5&ADoj2QW(3MxeE4_Ev+*}Z z%Od8A8RA;$b=d+z^IT5a4tOV67R?EzEaCuZrKm`H&UV!T;7^4sIWRtvly z^$!{l5tZa=%wSPUXj=oum6Yz#-L;rfza+7X83CM{llhVVB z=gx<;CmB#g+-p@h4#U&RSC^##J|S=*!s4sb^A z%AK+L{yI`z6%e!6N+vYlu%Qt^u`hh$IjTG+{N)cF^8b?;-qQgK0IN+UUX?w-vfc7q zssvxy@&Dk3U+Pr1cjZ2~#$ijVpe2%8K28v6a=#C&eeWjtDth)fj}f(pnOrPZx4pMt z|GKwMNn(-ICx8-Tbni?ix4v zEejojMS|H4+8C6z@u?n|HJ7;z2|*uAt+kU{8PHUQ45=lx7U=i@%4jGG{&a?IkE$@g zw=eFnGwLo!GKUMqjWpaWDVvi{#k%pADRto^AjWHmSGER2{By?jUcj$ReO+%Zd@a_g 
zStP;bckJNtt;&2Dlx%$(IuRCteT+WPW;>*hALhz^@6>oc|NOM_c@f3HT z*YYucK)HS=8Cw>q$@gy7Fq;MT*rkksQhC8u&AZ;W4%2N1D9qLZ4KlZz>P$p0Z%J!@ zYeA4ivD$W+br*;4)q?7tFFs^({1jQL@-DyV>TtRT-e~cltFrZ;w0jJM8|z4+n$sV5S|1`od9ZBa1LI(Uvz6JiNXOaS7m3Qjo#OiDcG z)0$t1#C4jZ7D`jE9ct4B3O@|gP9@_P5wedsW@iiha?L-!(nwm9?{lH0MfT*0FeCAh zJY(=}0)h}|(X4EKRO>l4X=vdf94j!)JCec!OCn(*fYx6jfML#tncyNNuhoM$4gvcx z9>k=8sl9otJ`0AIf^8}Td@_07Ftc;GG^**6WF8AkP9-j)J>5xh>4xjUl50nMSo#ia zRC7|EuKRoW!aK(?c>ElBNF>odRMk?{2lQSv|L&dDagQIQJ`|DN_~>9vUD>t)@7L`i zp+7%jlN-ROYs)2R5J78{1d+gz+}?ZK7W&bS(fXp@EVaa8kh$P3-m}n3w|u0-@{pwN zjiZ10q>rtW@_k9o!!kW>-m1eAqfU=OyX!M}XzY163)$y=T=TWcViv4vu7jOx)^L4v z$9W*R_k(YT_F!1SJ_3S^MZ+z<1$TLD{nLnP^r{zY`}TfUSaJvR0^dcHjqsPXu+H1d zG%oVin?@yQAhhUwUe(}+!-uHISp_~fa=5OU=rjtsj$4h5 zPgx7G6sV8y<28Gtb^dH0FR7t6c<}2tQ`2;z{_x82PwKYybP%nb6{=H%TkLKi&d%0mj> z0;<<~pNA?i%4)_D-yeCvg9_eo{o`{px+|Ztj1B{@vrFA z-RUz~zQxby*eTYLtSHqzJUm0G;m8^?Hj7FJrMOmWT2y~58~VBPe`(V9IH}XTk2;!( zevf8I44o$T4bZCf911*OJUO`--@6P?CRZZ|~Q=rDTeH;vF8<4=)=@mBM(S z`q3^MVr|F#SzSyJdh*8rnUGAMB`!F?h@xGexXJs4#D=DUqtNXA?6E0&OP(a?ifpiI zY{v@4^lL0h_W(uVcjy=nq_EPOQD^4W&1N z?Q1_%`ILh4Zt`#SUhjto#tUD?`~TYkW7?aX+}{2J)5gG2Wq{>zVN)A#1ro9y@GqaT4)X zyb0t*-~7?z=6wwvARLeu+J6&9U%Z=T2V&67-WqBpm9C|*|!hnAXbMtJ~o)dL0BM{ovNM5m*y$=G&n?ePLqEfkbG*(`DkcU4_Ajt?X*SbH!Es zO~q5*>Me8t+KfgHtrc3GSxOBNfXNOkFTW~1j;vgKt!#I@+8=u`y#J-uNBO4(Vmwag zD)mG9v&?xG^ODyHDHqlX@L_1g7o=?G>E8auMPX!%x$|K{x6^&Y0(!H`7D+EGpR8m; ziH#YJSqv?>LuU5z*U`iO!&Z@#Xx99At~EOoT>+)KUNofjflhY5Tx~Yz`mW=0weVq> zeDb6SQU+5anM&@c0rq_ee|iwg^IPZ}0QMXGkvCI^4|11I{@RE0)+45J?mmBf3|j|} zOO6!qrw=6O&EI_{efw!?8h=?x8k1=*o{@cjKgt{4AN6>R5g9tz;(g4Ty(M{0n60u* zqL%a3DQG>xlNC$$eK;%U3BFYdFf} zU0($<*F5|)jReJbB~2FpS*&sKPF>+DQljZY7`Ga6LopwNnqdU$0s24oJ7;Sfim<`{ Wf*!$*fgYx%0%Rl=B`U;>0{$2K364Gh literal 0 HcmV?d00001 diff --git a/elk_metrics_7x/bootstrap-embedded-ansible.sh b/elk_metrics_7x/bootstrap-embedded-ansible.sh new file mode 120000 index 00000000..19c54324 --- /dev/null +++ b/elk_metrics_7x/bootstrap-embedded-ansible.sh @@ -0,0 +1 @@ 
+../bootstrap-embedded-ansible/bootstrap-embedded-ansible.sh \ No newline at end of file diff --git a/elk_metrics_7x/conf.d/elk.yml b/elk_metrics_7x/conf.d/elk.yml new file mode 100644 index 00000000..342e8d03 --- /dev/null +++ b/elk_metrics_7x/conf.d/elk.yml @@ -0,0 +1,27 @@ +# For the puposes of this example, the kibana nodes have been added to +# different host machines that the logging nodes. The intention here +# is to show that the different components can scale independently of +# one another. +kibana_hosts: + infra01: + ip: 172.22.8.24 + infra02: + ip: 172.22.8.25 + infra03: + ip: 172.22.8.26 + +elastic-logstash_hosts: + logging01: + ip: 172.22.8.27 + logging02: + ip: 172.22.8.28 + logging03: + ip: 172.22.8.29 + +apm-server_hosts: + logging01: + ip: 172.22.8.27 + logging02: + ip: 172.22.8.28 + logging03: + ip: 172.22.8.29 diff --git a/elk_metrics_7x/createElasticIndexes.yml b/elk_metrics_7x/createElasticIndexes.yml new file mode 100644 index 00000000..dd04128d --- /dev/null +++ b/elk_metrics_7x/createElasticIndexes.yml @@ -0,0 +1,251 @@ +--- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Create/Setup known indexes in Elasticsearch + hosts: "elastic-logstash[0]" + become: true + + vars: + _elastic_refresh_interval: "{{ (elasticsearch_number_of_replicas | int) * 5 }}" + elastic_refresh_interval: "{{ (_elastic_refresh_interval > 0) | ternary(30, _elastic_refresh_interval) }}" + + vars_files: + - vars/variables.yml + + environment: "{{ deployment_environment_variables | default({}) }}" + + roles: + - role: elastic_retention + + post_tasks: + - name: Create beat indexes + uri: + url: http://127.0.0.1:9200/{{ item.name }} + method: PUT + body: "{{ item.index_options | to_json }}" + status_code: 200,400 + body_format: json + register: elk_indexes + until: elk_indexes is success + retries: 3 + delay: 30 + with_items: |- + {% set beat_indexes = [] %} + {% for key, value in elastic_beat_retention_policy_hosts.items() %} + {% if ((value.hosts | length) > 0) and (value.make_index | default(false) | bool) %} + {% + set _index = { + 'name': key, + 'index_options': { + 'settings': { + 'index': { + 'codec': 'best_compression', + 'mapping': { + 'total_fields': { + 'limit': '10000' + } + }, + 'refresh_interval': elastic_refresh_interval + } + } + } + } + %} + {% set _ = beat_indexes.append(_index) %} + {% endif %} + {% endfor %} + {{ beat_indexes }} + - name: Create basic indexes + uri: + url: http://127.0.0.1:9200/{{ item.name }} + method: PUT + body: "{{ item.index_options | to_json }}" + status_code: 200,400 + body_format: json + register: elk_indexes + until: elk_indexes is success + retries: 3 + delay: 30 + with_items: + - name: "_all/_settings?preserve_existing=true" + index_options: + index.queries.cache.enabled: "true" + indices.queries.cache.size: "5%" + - name: "_all/_settings" + index_options: + index.number_of_replicas: "{{ elasticsearch_number_of_replicas | int }}" + index.translog.durability: "async" + index.refresh_interval: "{{ ((elastic_refresh_interval | int) > 30) | ternary(30, elastic_refresh_interval) }}s" + + - name: Check for basic 
index template + uri: + url: http://127.0.0.1:9200/_template/basic-index-template + method: HEAD + failed_when: false + register: check_basicIndexTemplate + until: check_basicIndexTemplate is success + retries: 3 + delay: 30 + + - name: Check for basic index template + uri: + url: http://127.0.0.1:9200/_template/basic-index-template + method: DELETE + status_code: 200 + register: delete_basicIndexTemplate + until: delete_basicIndexTemplate is success + retries: 3 + delay: 30 + when: + - check_basicIndexTemplate.status == 200 + + - name: Create basic index template + uri: + url: http://127.0.0.1:9200/_template/basic-index-template + method: PUT + body: "{{ index_option | to_json }}" + status_code: 200 + body_format: json + register: create_basicIndexTemplate + until: create_basicIndexTemplate is success + retries: 3 + delay: 30 + vars: + index_option: + index_patterns: >- + {{ + (elastic_beat_retention_policy_hosts.keys() | list) + | map('regex_replace', '(.*)', '\1-' ~ '*') + | list + }} + settings: + number_of_replicas: "{{ elasticsearch_number_of_replicas | int }}" + index: + mapping: + total_fields: + limit: "3072" + + - name: Create custom monitoring index template + uri: + url: http://127.0.0.1:9200/_template/custom_monitoring + method: PUT + body: "{{ index_option | to_json }}" + status_code: 200 + body_format: json + register: create_basicIndexTemplate + until: create_basicIndexTemplate is success + retries: 3 + delay: 30 + vars: + index_option: + template: ".monitoring*" + order: 1 + settings: + number_of_replicas: "{{ elasticsearch_number_of_replicas | int }}" + number_of_shards: "{{ ((elasticsearch_number_of_replicas | int) * 2) + 1 }}" + + - name: Create custom skydive index template + uri: + url: http://127.0.0.1:9200/_template/skydive + method: PUT + body: "{{ index_option | to_json }}" + status_code: 200 + body_format: json + register: create_basicIndexTemplate + until: create_basicIndexTemplate is success + retries: 3 + delay: 30 + vars: + 
index_option: + template: "skydive*" + order: 1 + settings: + number_of_replicas: "{{ elasticsearch_number_of_replicas | int }}" + number_of_shards: "{{ ((elasticsearch_number_of_replicas | int) * 2) + 1 }}" + + +- name: Create/Setup known indexes in Kibana + hosts: kibana + become: true + vars_files: + - vars/variables.yml + + environment: "{{ deployment_environment_variables | default({}) }}" + + roles: + - role: elastic_retention + + post_tasks: + - name: Create kibana indexe patterns + uri: + url: "http://127.0.0.1:5601/api/saved_objects/index-pattern/{{ item.name }}" + method: POST + body: "{{ item.index_options | to_json }}" + status_code: 200,409 + body_format: json + headers: + Content-Type: "application/json" + kbn-xsrf: "{{ inventory_hostname | to_uuid }}" + with_items: |- + {% set beat_indexes = [] %} + {% for key, value in elastic_beat_retention_policy_hosts.items() %} + {% if (value.hosts | length) > 0 %} + {% + set _index = { + 'name': key, + 'index_options': { + 'attributes': {} + } + } + %} + {% if 'beat' in key %} + {% set _ = _index.index_options.attributes.__setitem__('title', (key ~ '-*')) %} + {% else %} + {% set _ = _index.index_options.attributes.__setitem__('title', (key ~ '*')) %} + {% endif %} + {% if value.timeFieldName is defined %} + {% set _ = _index.index_options.attributes.__setitem__('timeFieldName', (value.timeFieldName | string)) %} + {% endif %} + {% set _ = beat_indexes.append(_index) %} + {% endif %} + {% endfor %} + {% set _ = beat_indexes.append({'name': 'default', 'index_options': {'attributes': {'title': '*'}}}) %} + {{ beat_indexes }} + register: kibana_indexes + until: kibana_indexes is success + retries: 6 + delay: 30 + run_once: true + + - name: Create basic indexes + uri: + url: "http://127.0.0.1:5601/api/kibana/settings/defaultIndex" + method: POST + body: "{{ item.index_options | to_json }}" + status_code: 200 + body_format: json + headers: + Content-Type: "application/json" + kbn-xsrf: "{{ inventory_hostname | 
to_uuid }}" + with_items: + - name: "default" + index_options: + value: "default" + register: kibana_indexes + until: kibana_indexes is success + retries: 6 + delay: 30 + run_once: true + + tags: + - server-install diff --git a/elk_metrics_7x/env.d/elk.yml b/elk_metrics_7x/env.d/elk.yml new file mode 100644 index 00000000..286534ce --- /dev/null +++ b/elk_metrics_7x/env.d/elk.yml @@ -0,0 +1,53 @@ +--- +component_skel: + apm-server: + belongs_to: + - elk_all + - apm_all + elastic-logstash: + belongs_to: + - elk_all + - elasticsearch + - elasticsearch_all + - logstash + - logstash_all + kibana: + belongs_to: + - elk_all + +container_skel: + apm-server_container: + belongs_to: + - apm-server_containers + contains: + - apm-server + elastic-logstash_container: + belongs_to: + - elastic-logstash_containers + contains: + - elastic-logstash + kibana_container: + belongs_to: + - kibana_containers + contains: + - kibana + +physical_skel: + apm-server_containers: + belongs_to: + - all_containers + apm-server_hosts: + belongs_to: + - hosts + elastic-logstash_containers: + belongs_to: + - all_containers + elastic-logstash_hosts: + belongs_to: + - hosts + kibana_containers: + belongs_to: + - all_containers + kibana_hosts: + belongs_to: + - hosts diff --git a/elk_metrics_7x/fieldRefresh.yml b/elk_metrics_7x/fieldRefresh.yml new file mode 100644 index 00000000..de64c886 --- /dev/null +++ b/elk_metrics_7x/fieldRefresh.yml @@ -0,0 +1,101 @@ +--- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +- name: Refresh kibana index-pattern + hosts: "kibana[0]" + become: true + gather_facts: false + + vars_files: + - vars/variables.yml + + environment: "{{ deployment_environment_variables | default({}) }}" + + tasks: + - name: Get index fields + uri: + url: "http://127.0.0.1:{{ kibana_port }}/api/saved_objects/_bulk_get" + method: POST + body: + - id: "{{ index_pattern }}" + type: "index-pattern" + status_code: 200,404 + body_format: json + return_content: true + headers: + Content-Type: "application/json" + kbn-xsrf: "{{ inventory_hostname | to_uuid }}" + register: index_fields_return + until: index_fields_return is success + retries: 6 + delay: 30 + run_once: true + + - name: Get index fields format + uri: + url: >- + http://127.0.0.1:{{ kibana_port }}/api/index_patterns/_fields_for_wildcard?pattern={{ index_pattern }}&meta_fields=["_source","_id","_type","_index","_score"] + method: GET + status_code: 200,404 + return_content: true + headers: + Content-Type: "application/json" + kbn-xsrf: "{{ inventory_hostname | to_uuid }}" + register: index_fields_format_return + until: index_fields_format_return is success + retries: 6 + delay: 30 + run_once: true + + - name: Refresh fields block + block: + - name: Set index-pattern refresh fact attributes + set_fact: + attributes: "{{ index_fields_return['json']['saved_objects'][0]['attributes'] }}" + + - name: Set index-refresh fact + set_fact: + index_refresh_fact: + attributes: + fieldFormatMap: "{{ attributes['fieldFormatMap'] | string }}" + timeFieldName: "{{ attributes['timeFieldName'] }}" + title: "{{ attributes['title'] }}" + fields: "{{ index_fields_format_return['content'] | string }}" + + - name: Put index fields + uri: + url: "http://127.0.0.1:{{ kibana_port }}/api/saved_objects/index-pattern/{{ index_pattern }}" + method: PUT + body: "{{ index_refresh_fact }}" + status_code: 200 + body_format: json + 
timeout: 120 + headers: + Content-Type: "application/json" + kbn-xsrf: "{{ inventory_hostname | to_uuid }}" + register: index_fields_return + until: index_fields_return is success + retries: 6 + delay: 30 + run_once: true + rescue: + - name: Notify deployer + debug: + msg: >- + Index pattern refresh was not possible at this time. Either there are no dashboards + loaded or the index being refreshed does not exist. While the task failed, this is + not a fatal error, so the play has been rescued. + run_once: true + when: + - index_fields_return.status == 200 + - index_fields_format_return.status == 200 diff --git a/elk_metrics_7x/installAPMserver.yml b/elk_metrics_7x/installAPMserver.yml new file mode 100644 index 00000000..53c4b645 --- /dev/null +++ b/elk_metrics_7x/installAPMserver.yml @@ -0,0 +1,51 @@ +--- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Install apm-server + hosts: apm-server + become: true + vars: + haproxy_ssl: false + + vars_files: + - vars/variables.yml + + environment: "{{ deployment_environment_variables | default({}) }}" + + roles: + - role: elastic_apm_server + - role: elastic_rollup + index_name: apm + when: + - elastic_create_rollup | bool + tags: + - apm-server + +- name: Setup apm-server rollup + hosts: elastic-logstash[0] + become: true + vars: + haproxy_ssl: false + + vars_files: + - vars/variables.yml + + environment: "{{ deployment_environment_variables | default({}) }}" + + roles: + - role: elastic_rollup + index_name: apm + when: + - elastic_create_rollup | bool + tags: + - apm-server diff --git a/elk_metrics_7x/installAuditbeat.yml b/elk_metrics_7x/installAuditbeat.yml new file mode 100644 index 00000000..957ce0ae --- /dev/null +++ b/elk_metrics_7x/installAuditbeat.yml @@ -0,0 +1,52 @@ +--- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Install Auditbeat + hosts: hosts + become: true + vars: + haproxy_ssl: false + + vars_files: + - vars/variables.yml + + environment: "{{ deployment_environment_variables | default({}) }}" + + roles: + - role: elastic_auditbeat + + tags: + - beat-install + +- name: Setup auditbeat rollup + hosts: elastic-logstash[0] + become: true + vars: + haproxy_ssl: false + + vars_files: + - vars/variables.yml + + environment: "{{ deployment_environment_variables | default({}) }}" + + roles: + - role: elastic_rollup + index_name: auditbeat + when: + - elastic_create_rollup | bool + tags: + - auditbeat + +- import_playbook: fieldRefresh.yml + vars: + index_pattern: auditbeat-* diff --git a/elk_metrics_7x/installCurator.yml b/elk_metrics_7x/installCurator.yml new file mode 100644 index 00000000..85746723 --- /dev/null +++ b/elk_metrics_7x/installCurator.yml @@ -0,0 +1,30 @@ +--- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Install Curator + hosts: "elastic-logstash" + become: true + gather_facts: true + vars: + haproxy_ssl: false + + vars_files: + - vars/variables.yml + + environment: "{{ deployment_environment_variables | default({}) }}" + + roles: + - role: elastic_curator + + tags: + - beat-install diff --git a/elk_metrics_7x/installElastic.yml b/elk_metrics_7x/installElastic.yml new file mode 100644 index 00000000..748834b7 --- /dev/null +++ b/elk_metrics_7x/installElastic.yml @@ -0,0 +1,27 @@ +--- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +- name: Install Elastic Search + hosts: "elastic-logstash:kibana" + become: true + + vars_files: + - vars/variables.yml + + environment: "{{ deployment_environment_variables | default({}) }}" + + roles: + - role: elasticsearch + + tags: + - server-install diff --git a/elk_metrics_7x/installFilebeat.yml b/elk_metrics_7x/installFilebeat.yml new file mode 100644 index 00000000..d40fb154 --- /dev/null +++ b/elk_metrics_7x/installFilebeat.yml @@ -0,0 +1,52 @@ +--- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +- name: Install Filebeat + hosts: hosts + become: true + vars: + haproxy_ssl: false + + vars_files: + - vars/variables.yml + + environment: "{{ deployment_environment_variables | default({}) }}" + + roles: + - role: elastic_filebeat + + tags: + - beat-install + +- name: Setup filebeat rollup + hosts: elastic-logstash[0] + become: true + vars: + haproxy_ssl: false + + vars_files: + - vars/variables.yml + + environment: "{{ deployment_environment_variables | default({}) }}" + + roles: + - role: elastic_rollup + index_name: filebeat + when: + - elastic_create_rollup | bool + tags: + - filebeat + +- import_playbook: fieldRefresh.yml + vars: + index_pattern: filebeat-* diff --git a/elk_metrics_7x/installHeartbeat.yml b/elk_metrics_7x/installHeartbeat.yml new file mode 100644 index 00000000..d7228fa6 --- /dev/null +++ b/elk_metrics_7x/installHeartbeat.yml @@ -0,0 +1,66 @@ +--- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Set heartbeat host deployment group + hosts: kibana + gather_facts: false + connection: local + tasks: + - name: Add hosts to dynamic inventory group + group_by: + key: heatbeat_deployment_targets + parents: kibana + when: + - inventory_hostname in groups['kibana'][:3] + tags: + - always + +- name: Install Heartbeat + hosts: heatbeat_deployment_targets + become: true + vars: + haproxy_ssl: false + + vars_files: + - vars/variables.yml + + environment: "{{ deployment_environment_variables | default({}) }}" + + roles: + - role: elastic_heartbeat + + tags: + - beat-install + +- name: Setup heartbeat rollup + hosts: elastic-logstash[0] + become: true + vars: + haproxy_ssl: false + + vars_files: + - vars/variables.yml + + environment: "{{ deployment_environment_variables | default({}) }}" + + roles: + - role: elastic_rollup + index_name: heartbeat + when: + - elastic_create_rollup | bool + tags: + - heartbeat + +- import_playbook: fieldRefresh.yml + vars: + index_pattern: heartbeat-* diff --git a/elk_metrics_7x/installJournalbeat.yml b/elk_metrics_7x/installJournalbeat.yml new file mode 100644 index 00000000..9d239795 --- /dev/null +++ b/elk_metrics_7x/installJournalbeat.yml @@ -0,0 +1,102 @@ +--- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Detect journalbeat host deployment group(s) + hosts: all + gather_facts: false + connection: local + tasks: + - name: Add hosts to dynamic inventory group + group_by: + key: journalbeat_deployment_containers + parents: all_journalbeat_deployments + when: + - openstack_release is defined and + openstack_release is version('18.0.0', 'lt') + - physical_host is defined and + physical_host != inventory_hostname + + - name: Add hosts to dynamic inventory group + group_by: + key: journalbeat_deployment_hosts + parents: all_journalbeat_deployments + when: + - physical_host is undefined or + physical_host == inventory_hostname + + tags: + - always + +- name: Install Journalbeat + hosts: all_journalbeat_deployments + become: true + vars: + haproxy_ssl: false + + vars_files: + - vars/variables.yml + + environment: "{{ deployment_environment_variables | default({}) }}" + + pre_tasks: + - name: Check for journal directory + stat: + path: /var/log/journal + register: journal_dir + tags: + - always + + - name: Halt this playbook if no journal is found + meta: end_play + when: + - not (journal_dir.stat.exists | bool) or + (ansible_service_mgr != 'systemd') + + roles: + - role: elastic_journalbeat + + tags: + - beat-install + +- name: Setup journalbeat rollup + hosts: elastic-logstash[0] + become: true + vars: + haproxy_ssl: false + + vars_files: + - vars/variables.yml + + environment: "{{ deployment_environment_variables | default({}) }}" + + pre_tasks: + - name: Check for journal directory + stat: + path: /var/log/journal + register: journal_dir + tags: + - always + + - name: Halt this playbook if no journal is found + meta: end_play + when: + - not (journal_dir.stat.exists | bool) or + (ansible_service_mgr != 'systemd') + + roles: + - role: elastic_rollup + index_name: journalbeat + when: + - elastic_create_rollup | bool + tags: + - journalbeat diff --git a/elk_metrics_7x/installKibana.yml b/elk_metrics_7x/installKibana.yml new file mode 100644 index 
00000000..087160c1 --- /dev/null +++ b/elk_metrics_7x/installKibana.yml @@ -0,0 +1,26 @@ +--- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +- name: Install Kibana + hosts: kibana + become: true + vars_files: + - vars/variables.yml + + environment: "{{ deployment_environment_variables | default({}) }}" + + roles: + - role: elastic_kibana + + tags: + - server-install diff --git a/elk_metrics_7x/installLogstash.yml b/elk_metrics_7x/installLogstash.yml new file mode 100644 index 00000000..02a3e6d9 --- /dev/null +++ b/elk_metrics_7x/installLogstash.yml @@ -0,0 +1,26 @@ +--- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Install Logstash + hosts: elastic-logstash + become: true + vars_files: + - vars/variables.yml + + environment: "{{ deployment_environment_variables | default({}) }}" + + roles: + - role: elastic_logstash + + tags: + - server-install diff --git a/elk_metrics_7x/installMetricbeat.yml b/elk_metrics_7x/installMetricbeat.yml new file mode 100644 index 00000000..66e2df43 --- /dev/null +++ b/elk_metrics_7x/installMetricbeat.yml @@ -0,0 +1,52 @@ +--- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Install Metricsbeat + hosts: all + become: true + vars: + haproxy_ssl: false + + environment: "{{ deployment_environment_variables | default({}) }}" + + vars_files: + - vars/variables.yml + + roles: + - role: elastic_metricbeat + + tags: + - beat-install + +- name: Setup metricbeat rollup + hosts: elastic-logstash[0] + become: true + vars: + haproxy_ssl: false + + vars_files: + - vars/variables.yml + + environment: "{{ deployment_environment_variables | default({}) }}" + + roles: + - role: elastic_rollup + index_name: metricbeat + when: + - elastic_create_rollup | bool + tags: + - metricbeat + +- import_playbook: fieldRefresh.yml + vars: + index_pattern: metricbeat-* diff --git a/elk_metrics_7x/installMonitorStack.yml b/elk_metrics_7x/installMonitorStack.yml new file mode 100644 index 00000000..ca4320a2 --- /dev/null +++ b/elk_metrics_7x/installMonitorStack.yml @@ -0,0 +1,297 @@ +--- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Detect monitorstack host deployment group(s) + hosts: "hosts:all_containers" + gather_facts: false + connection: local + tasks: + - name: Add hosts to dynamic inventory group + group_by: + key: monitorstack_deployment + parents: monitorstack_all + when: + - inventory_hostname in ( + (groups['nova_compute'] | default([])) | + union(groups['utility_all'] | default([])) | + union(groups['memcached_all'] | default([])) | + union(groups['memcached_all'] | default([])) + ) + - ansible_service_mgr == 'systemd' + tags: + - always + +- name: Install MonitorStack + hosts: monitorstack_all + become: true + gather_facts: true + vars: + haproxy_ssl: false + monitorstack_distro_packages: + ubuntu: + - gcc + - git + - python-dev + - pkg-config + redhat: + - gcc + - git + - python-devel + suse: + - gcc + - git + - python-devel + - pkg-config + + monitorstack_config_enabled: + - check: kvm + options: '' + condition: >- + {{ + inventory_hostname in (groups['nova_compute'] | default([])) + }} + - check: memcache + options: >- + --host {{ (monitorstack_memcached_access.stdout_lines[0] | default("127.0.0.1:11211")).split(":")[0] }} + --port {{ (monitorstack_memcached_access.stdout_lines[0] | default("127.0.0.1:11211")).split(":")[1] }} + condition: >- + {{ + inventory_hostname in (groups['memcached_all'] | default([])) + }} + - check: os_block_pools_totals + options: '' + condition: >- + {{ + (clouds_config.stat.exists | bool) and + (inventory_hostname in (groups['utility_all'] | default([]))) and + (inventory_hostname == (groups['utility_all'] | default([null]))[0]) + }} + - check: os_block_pools_usage + options: '' + condition: >- + {{ + (clouds_config.stat.exists | bool) and + (inventory_hostname in (groups['utility_all'] | default([]))) and + (inventory_hostname == (groups['utility_all'] | default([null]))[0]) + }} + - check: os_vm_quota_cores + options: '' + condition: >- + {{ + (clouds_config.stat.exists | bool) and + (inventory_hostname in (groups['utility_all'] | 
default([]))) and + (inventory_hostname == (groups['utility_all'] | default([null]))[0]) + }} + - check: os_vm_quota_instance + options: '' + condition: >- + {{ + (clouds_config.stat.exists | bool) and + (inventory_hostname in (groups['utility_all'] | default([]))) and + (inventory_hostname == (groups['utility_all'] | default([null]))[0]) + }} + - check: os_vm_quota_ram + options: '' + condition: >- + {{ + (clouds_config.stat.exists | bool) and + (inventory_hostname in (groups['utility_all'] | default([]))) and + (inventory_hostname == (groups['utility_all'] | default([null]))[0]) + }} + - check: os_vm_used_cores + options: '' + condition: >- + {{ + (clouds_config.stat.exists | bool) and + (inventory_hostname in (groups['utility_all'] | default([]))) and + (inventory_hostname == (groups['utility_all'] | default([null]))[0]) + }} + - check: os_vm_used_disk + options: '' + condition: >- + {{ + (clouds_config.stat.exists | bool) and + (inventory_hostname in (groups['utility_all'] | default([]))) and + (inventory_hostname == (groups['utility_all'] | default([null]))[0]) + }} + - check: os_vm_used_instance + options: '' + condition: >- + {{ + (clouds_config.stat.exists | bool) and + (inventory_hostname in (groups['utility_all'] | default([]))) and + (inventory_hostname == (groups['utility_all'] | default([null]))[0]) + }} + - check: os_vm_used_ram + options: '' + condition: >- + {{ + (clouds_config.stat.exists | bool) and + (inventory_hostname in (groups['utility_all'] | default([]))) and + (inventory_hostname == (groups['utility_all'] | default([null]))[0]) + }} + - check: uptime + options: '' + condition: true + vars_files: + - vars/variables.yml + + environment: "{{ deployment_environment_variables | default({}) }}" + + roles: + - role: elastic_data_hosts + + post_tasks: + - name: Find clouds config + stat: + path: "{{ ansible_env.HOME }}/.config/openstack/clouds.yaml" + register: clouds_config + + - name: Find openstack release + stat: + path: 
"/etc/openstack-release" + register: openstack_release + + - name: Find osp release + stat: + path: "/etc/rhosp-release" + register: rhosp_release + + - name: MonitorStack block + when: + - (openstack_release.stat.exists | bool) or + (rhosp_release.stat.exists | bool) + block: + - name: Ensure disto packages are installed + package: + name: "{{ monitorstack_distro_packages[(ansible_distribution | lower)] }}" + state: "{{ monitorstack_package_state | default('present') }}" + update_cache: "{{ (ansible_pkg_mgr == 'apt') | ternary('yes', omit) }}" + + - name: Refresh local facts + setup: + filter: ansible_local + gather_subset: "!all" + tags: + - always + + - name: create the system group + group: + name: "monitorstack" + state: "present" + system: "yes" + + - name: Create the monitorstack system user + user: + name: "monitorstack" + group: "monitorstack" + comment: "monitorstack user" + shell: "/bin/false" + createhome: "yes" + home: "/var/lib/monitorstack" + + - name: Create monitorstack data path + file: + path: "{{ item }}" + state: directory + owner: "monitorstack" + group: "monitorstack" + mode: "0750" + recurse: true + with_items: + - "/var/lib/monitorstack" + - "/var/lib/monitorstack/.config" + - "/var/lib/monitorstack/.config/openstack" + - "/var/lib/monitorstack/venv" + - "/var/log/monitorstack" + - "/etc/monitorstack" + + - name: Copy the clouds config into monitorstack + copy: + src: "{{ ansible_env.HOME }}/.config/openstack/clouds.yaml" + dest: "/var/lib/monitorstack/.config/openstack/clouds.yaml" + remote_src: yes + when: + - clouds_config.stat.exists | bool + + - name: Create the virtualenv (if it does not exist) + command: "virtualenv --no-setuptools --system-site-packages /var/lib/monitorstack/venv" + args: + creates: "/var/lib/monitorstack/venv/bin/activate" + + - name: Setup venv + pip: + name: + - pip + - setuptools + virtualenv_site_packages: yes + extra_args: "-U" + virtualenv: "/var/lib/monitorstack/venv" + + - name: Ensure monitorstack is 
installed + pip: + name: "git+https://github.com/openstack/monitorstack@{{ monitorstack_release | default('master') }}" + state: "{{ monitorstack_package_state | default('present') }}" + extra_args: --isolated + virtualenv: /var/lib/monitorstack/venv + register: _pip_task + until: _pip_task is success + retries: 3 + delay: 2 + tags: + - package_install + + - name: Create montiorstack config + copy: + dest: "/etc/monitorstack/monitorstack.ini" + content: | + [elasticsearch] + hosts = {{ elasticsearch_data_hosts | join(',') }} + port = {{ elastic_port }} + + - name: Run memcached port scan + shell: "ss -ntlp | awk '/11211/ {print $4}'" + register: monitorstack_memcached_access + changed_when: false + + - name: Run the systemd service role + include_role: + name: systemd_service + vars: + systemd_user_name: monitorstack + systemd_group_name: monitorstack + systemd_services: |- + {% set services = [] %} + {% for item in monitorstack_config_enabled %} + {% if item.condition | bool %} + {% + set check = { + "service_name": ("monitorstack-" ~ item.check), + "execstarts": ("/var/lib/monitorstack/venv/bin/monitorstack --format elasticsearch --config-file /etc/monitorstack/monitorstack.ini " ~ item.check ~ ' ' ~ item.options), + "timer": { + "state": "started", + "options": { + "OnBootSec": "5min", + "OnUnitActiveSec": "10m", + "Persistent": true + } + } + } + %} + {% set _ = services.append(check) %} + {% endif %} + {% endfor %} + {{ services }} + + tags: + - beat-install diff --git a/elk_metrics_7x/installPacketbeat.yml b/elk_metrics_7x/installPacketbeat.yml new file mode 100644 index 00000000..b83b2b7f --- /dev/null +++ b/elk_metrics_7x/installPacketbeat.yml @@ -0,0 +1,52 @@ +--- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +- name: Install Packetbeat + hosts: hosts + become: true + vars: + haproxy_ssl: false + + environment: "{{ deployment_environment_variables | default({}) }}" + + vars_files: + - vars/variables.yml + + roles: + - role: elastic_packetbeat + + tags: + - beat-install + +- name: Setup packetbeat rollup + hosts: elastic-logstash[0] + become: true + vars: + haproxy_ssl: false + + vars_files: + - vars/variables.yml + + environment: "{{ deployment_environment_variables | default({}) }}" + + roles: + - role: elastic_rollup + index_name: packetbeat + when: + - elastic_create_rollup | bool + tags: + - packetbeat + +- import_playbook: fieldRefresh.yml + vars: + index_pattern: packetbeat-* diff --git a/elk_metrics_7x/roles/elastic_apm_server/defaults/main.yml b/elk_metrics_7x/roles/elastic_apm_server/defaults/main.yml new file mode 100644 index 00000000..7b38493e --- /dev/null +++ b/elk_metrics_7x/roles/elastic_apm_server/defaults/main.yml @@ -0,0 +1,19 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# APM vars +apm_interface: 0.0.0.0 +apm_port: 8200 +apm_token: SuperSecrete diff --git a/elk_metrics_7x/roles/elastic_apm_server/handlers/main.yml b/elk_metrics_7x/roles/elastic_apm_server/handlers/main.yml new file mode 100644 index 00000000..b61ae69b --- /dev/null +++ b/elk_metrics_7x/roles/elastic_apm_server/handlers/main.yml @@ -0,0 +1,33 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +- name: Enable and restart apm-server (systemd) + systemd: + name: "apm-server" + enabled: true + state: restarted + daemon_reload: true + when: + - ansible_service_mgr == 'systemd' + listen: Enable and restart apm server + +- name: Enable and restart apm-server (upstart) + service: + name: "apm-server" + state: restarted + enabled: yes + when: + - ansible_service_mgr == 'upstart' + listen: Enable and restart apm server diff --git a/elk_metrics_7x/roles/elastic_apm_server/meta/main.yml b/elk_metrics_7x/roles/elastic_apm_server/meta/main.yml new file mode 100644 index 00000000..4191ac34 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_apm_server/meta/main.yml @@ -0,0 +1,35 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +galaxy_info: + author: OpenStack + description: Elastic v6.x apm-server role + company: Rackspace + license: Apache2 + min_ansible_version: 2.5 + platforms: + - name: Ubuntu + versions: + - trusty + - xenial + - bionic + categories: + - cloud + - development + - elasticsearch + - elastic-stack +dependencies: + - role: elastic_data_hosts + - role: elastic_repositories diff --git a/elk_metrics_7x/roles/elastic_apm_server/tasks/main.yml b/elk_metrics_7x/roles/elastic_apm_server/tasks/main.yml new file mode 100644 index 00000000..835c5c8d --- /dev/null +++ b/elk_metrics_7x/roles/elastic_apm_server/tasks/main.yml @@ -0,0 +1,81 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Gather variables for each operating system + include_vars: "{{ item }}" + with_first_found: + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower }}.yml" + - "{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version | lower }}.yml" + - "{{ ansible_distribution | lower }}.yml" + - "{{ ansible_os_family | lower }}-{{ ansible_distribution_version.split('.')[0] }}.yml" + - "{{ ansible_os_family | lower }}.yml" + tags: + - always + +- name: Ensure apm-server is installed + package: + name: "{{ apm_server_distro_packages }}" + state: "{{ elk_package_state | default('present') }}" + update_cache: "{{ (ansible_pkg_mgr == 'apt') | ternary('yes', omit) }}" + register: _package_task + until: _package_task is success + retries: 3 + delay: 2 + notify: + - Enable and restart apm server + tags: + - package_install + +- name: Create apm-server systemd service config dir + file: + path: "/etc/systemd/system/apm-server.service.d" + state: "directory" + group: "root" + owner: "root" + mode: "0755" + when: + - ansible_service_mgr == 'systemd' + +- name: Apply systemd options + template: + src: "{{ item.src }}" + dest: "/etc/systemd/system/apm-server.service.d/{{ item.dest }}" + mode: "0644" + when: + - ansible_service_mgr == 'systemd' + with_items: + - src: "systemd.general-overrides.conf.j2" + dest: "apm-server-overrides.conf" + notify: + - Enable and restart apm server + +- name: Drop apm-server conf file + template: + src: "apm-server.yml.j2" + dest: "/etc/apm-server/apm-server.yml" + notify: + - Enable and restart apm server + +- name: Run the beat setup role + include_role: + name: elastic_beat_setup + when: + - (groups['kibana'] | length) > 0 + vars: + elastic_beat_name: "apm-server" + +- name: Force beat handlers + meta: flush_handlers diff --git a/elk_metrics_7x/roles/elastic_apm_server/tasks/systemd.general-overrides.conf.j2 
b/elk_metrics_7x/roles/elastic_apm_server/tasks/systemd.general-overrides.conf.j2 new file mode 120000 index 00000000..9ddff7cc --- /dev/null +++ b/elk_metrics_7x/roles/elastic_apm_server/tasks/systemd.general-overrides.conf.j2 @@ -0,0 +1 @@ +../../../templates/systemd.general-overrides.conf.j2 \ No newline at end of file diff --git a/elk_metrics_7x/roles/elastic_apm_server/templates/apm-server.yml.j2 b/elk_metrics_7x/roles/elastic_apm_server/templates/apm-server.yml.j2 new file mode 100644 index 00000000..b0dfaf3f --- /dev/null +++ b/elk_metrics_7x/roles/elastic_apm_server/templates/apm-server.yml.j2 @@ -0,0 +1,145 @@ +{% import 'templates/_macros.j2' as elk_macros %} +######################## APM Server Configuration ############################# + +############################# APM Server ###################################### + +apm-server: + # Defines the host and port the server is listening on + host: "{{ apm_interface }}:{{ apm_port }}" + + # Maximum permitted size in bytes of an unzipped request accepted by the server to be processed. + #max_unzipped_size: 52428800 + # Maximum permitted size in bytes of a request's header accepted by the server to be processed. + #max_header_size: 1048576 + + # Maximum permitted duration in seconds for reading an entire request. + #read_timeout: 2s + # Maximum permitted duration in seconds for writing a response. + #write_timeout: 2s + + # Maximum duration in seconds before releasing resources when shutting down the server. + #shutdown_timeout: 5s + + # Maximum number of requests permitted to be sent to the server concurrently. + #concurrent_requests: 40 + + # Authorization token to be checked. If a token is set here the agents must + # send their token in the following format: Authorization: Bearer . + # It is recommended to use an authorization token in combination with SSL enabled. 
+ secret_token: {{ apm_token }} + #ssl.enabled: false + #ssl.certificate : "path/to/cert" + #ssl.key : "path/to/private_key" + + # Please be aware that frontend support is an experimental feature at the moment! + frontend: + # To enable experimental frontend support set this to true. + enabled: true + + # Rate limit per second and IP address for requests sent to the frontend endpoint. + #rate_limit: 10 + + # Comma separated list of permitted origins for frontend. User-agents will send + # a origin header that will be validated against this list. + # An origin is made of a protocol scheme, host and port, without the url path. + # Allowed origins in this setting can have * to match anything (eg.: http://*.example.com) + # If an item in the list is a single '*', everything will be allowed + #allow_origins : ['*'] + + # Regexp to be matched against a stacktrace frame's `file_name` and `abs_path` attributes. + # If the regexp matches, the stacktrace frame is considered to be a library frame. + #library_pattern: "node_modules|bower_components|~" + + # Regexp to be matched against a stacktrace frame's `file_name`. + # If the regexp matches, the stacktrace frame is not used for calculating error groups. + # The default pattern excludes stacktrace frames that have + # - a filename starting with '/webpack' + #exclude_from_grouping: "^/webpack" + + # If a source map has previously been uploaded, source mapping is automatically applied + # to all error and transaction documents sent to the frontend endpoint. + #source_mapping: + + # Source maps are are fetched from Elasticsearch and then kept in an in-memory cache for a certain time. + # The `cache.expiration` determines how long a source map should be cached before fetching it again from Elasticsearch. + # Note that values configured without a time unit will be interpreted as seconds. + #cache: + #expiration: 5m + + # Source maps are stored in the same index as transaction and error documents. 
+ # If the default index pattern at 'outputs.elasticsearch.index' is changed, + # a matching index pattern needs to be specified here. + #index_pattern: "apm-*" + +#================================ General ====================================== + +# Internal queue configuration for buffering events to be published. +#queue: + # Queue type by name (default 'mem') + # The memory queue will present all available events (up to the outputs + # bulk_max_size) to the output, the moment the output is ready to server + # another batch of events. + #mem: + # Max number of events the queue can buffer. + #events: 4096 + + # Hints the minimum number of events stored in the queue, + # before providing a batch of events to the outputs. + # A value of 0 (the default) ensures events are immediately available + # to be sent to the outputs. + #flush.min_events: 2048 + + # Maximum duration after which events are available to the outputs, + # if the number of events stored in the queue is < min_flush_events. + #flush.timeout: 1s + +# Sets the maximum number of CPUs that can be executing simultaneously. The +# default is the number of logical CPUs available in the system. +#max_procs: + +#================================ Outputs ====================================== + +# Configure what output to use when sending the data collected by the beat. + +#----------------------------- Logstash output --------------------------------- +{{ elk_macros.output_elasticsearch(inventory_hostname, elasticsearch_data_hosts) }} + +#================================= Paths ====================================== + +# The home path for the apm-server installation. This is the default base path +# for all other path settings and for miscellaneous files that come with the +# distribution (for example, the sample dashboards). +# If not set by a CLI flag or in the configuration file, the default for the +# home path is the location of the binary. 
+#path.home: + +# The configuration path for the apm-server installation. This is the default +# base path for configuration files, including the main YAML configuration file +# and the Elasticsearch template file. If not set by a CLI flag or in the +# configuration file, the default for the configuration path is the home path. +#path.config: ${path.home} + +# The data path for the apm-server installation. This is the default base path +# for all the files in which apm-server needs to store its data. If not set by a +# CLI flag or in the configuration file, the default for the data path is a data +# subdirectory inside the home path. +#path.data: ${path.home}/data + +# The logs path for a apm-server installation. This is the default location for +# the Beat's log files. If not set by a CLI flag or in the configuration file, +# the default for the logs path is a logs subdirectory inside the home path. +#path.logs: ${path.home}/logs + +#============================== Dashboards ===================================== +{{ elk_macros.setup_dashboards('apm') }} + +#=============================== Template ====================================== +{{ elk_macros.setup_template('apm', inventory_hostname, data_nodes, elasticsearch_number_of_replicas) }} + +#============================== Kibana ===================================== +{% if (groups['kibana'] | length) > 0 %} +{{ elk_macros.setup_kibana(hostvars[groups['kibana'][0]]['ansible_host'] ~ ':' ~ kibana_port) }} +{% endif %} + +#================================ Logging ====================================== +{{ elk_macros.beat_logging('apm-server') }} diff --git a/elk_metrics_7x/roles/elastic_apm_server/vars/redhat.yml b/elk_metrics_7x/roles/elastic_apm_server/vars/redhat.yml new file mode 100644 index 00000000..3fac8c52 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_apm_server/vars/redhat.yml @@ -0,0 +1,17 @@ +--- +# Copyright 2018, Rackspace US, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apm_server_distro_packages: + - apm-server diff --git a/elk_metrics_7x/roles/elastic_apm_server/vars/suse.yml b/elk_metrics_7x/roles/elastic_apm_server/vars/suse.yml new file mode 100644 index 00000000..3fac8c52 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_apm_server/vars/suse.yml @@ -0,0 +1,17 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apm_server_distro_packages: + - apm-server diff --git a/elk_metrics_7x/roles/elastic_apm_server/vars/ubuntu.yml b/elk_metrics_7x/roles/elastic_apm_server/vars/ubuntu.yml new file mode 100644 index 00000000..3fac8c52 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_apm_server/vars/ubuntu.yml @@ -0,0 +1,17 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apm_server_distro_packages: + - apm-server diff --git a/elk_metrics_7x/roles/elastic_auditbeat/defaults/main.yml b/elk_metrics_7x/roles/elastic_auditbeat/defaults/main.yml new file mode 100644 index 00000000..25ab8922 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_auditbeat/defaults/main.yml @@ -0,0 +1,16 @@ +--- +# Copyright 2018, Vexxhost, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +auditbeat_service_state: restarted diff --git a/elk_metrics_7x/roles/elastic_auditbeat/handlers/main.yml b/elk_metrics_7x/roles/elastic_auditbeat/handlers/main.yml new file mode 100644 index 00000000..bf751755 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_auditbeat/handlers/main.yml @@ -0,0 +1,33 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +- name: Enable and restart auditbeat (systemd) + systemd: + name: "auditbeat" + enabled: true + state: "{{ auditbeat_service_state }}" + daemon_reload: true + when: + - ansible_service_mgr == 'systemd' + listen: Enable and restart auditbeat + +- name: Enable and restart auditbeat (upstart) + service: + name: "auditbeat" + state: "{{ auditbeat_service_state }}" + enabled: yes + when: + - ansible_service_mgr == 'upstart' + listen: Enable and restart auditbeat diff --git a/elk_metrics_7x/roles/elastic_auditbeat/meta/main.yml b/elk_metrics_7x/roles/elastic_auditbeat/meta/main.yml new file mode 100644 index 00000000..9faaba56 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_auditbeat/meta/main.yml @@ -0,0 +1,35 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +galaxy_info: + author: OpenStack + description: Elastic v7.x auditbeat role + company: Rackspace + license: Apache2 + min_ansible_version: 2.5 + platforms: + - name: Ubuntu + versions: + - trusty + - xenial + - bionic + categories: + - cloud + - development + - elasticsearch + - elastic-stack +dependencies: + - role: elastic_data_hosts + - role: elastic_repositories diff --git a/elk_metrics_7x/roles/elastic_auditbeat/tasks/main.yml b/elk_metrics_7x/roles/elastic_auditbeat/tasks/main.yml new file mode 100644 index 00000000..2e6cf744 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_auditbeat/tasks/main.yml @@ -0,0 +1,112 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Gather variables for each operating system + include_vars: "{{ item }}" + with_first_found: + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower }}.yml" + - "{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version | lower }}.yml" + - "{{ ansible_distribution | lower }}.yml" + - "{{ ansible_os_family | lower }}-{{ ansible_distribution_version.split('.')[0] }}.yml" + - "{{ ansible_os_family | lower }}.yml" + tags: + - always + +- name: Ensure beat is installed (x86_64) + package: + name: "{{ auditbeat_distro_packages }}" + state: "{{ elk_package_state | default('present') }}" + update_cache: "{{ (ansible_pkg_mgr == 'apt') | ternary('yes', omit) }}" + register: _package_task + until: _package_task is success + retries: 3 + delay: 2 + when: + - ansible_architecture == 'x86_64' + notify: + - Enable and restart auditbeat + tags: + - package_install + +- name: Ensure beat is installed (aarch64) + apt: + deb: 'https://object-storage-ca-ymq-1.vexxhost.net/swift/v1/8709ca2640344a4ba85cba0a1d6eea69/aarch64/auditbeat-6.5.0-arm64.deb' + when: + - ansible_pkg_mgr == 'apt' + - ansible_architecture == 'aarch64' + notify: + - Enable and restart auditbeat + tags: + - package_install + +- name: Create auditbeat systemd service config dir + file: + path: "/etc/systemd/system/auditbeat.service.d" + state: "directory" + group: "root" + owner: "root" + mode: "0755" + when: + - ansible_service_mgr == 'systemd' + +- name: Apply systemd options + template: + src: "{{ item.src }}" + dest: "/etc/systemd/system/auditbeat.service.d/{{ item.dest }}" + mode: "0644" + when: + - ansible_service_mgr == 'systemd' + with_items: + - src: "systemd.general-overrides.conf.j2" + dest: "auditbeat-overrides.conf" + notify: + - Enable and restart auditbeat + +- name: Drop auditbeat conf file + template: + src: templates/auditbeat.yml.j2 + dest: 
/etc/auditbeat/auditbeat.yml + notify: + - Enable and restart auditbeat + +- name: Run the beat setup role + include_role: + name: elastic_beat_setup + when: + - (groups['kibana'] | length) > 0 + vars: + elastic_beat_name: "auditbeat" + +- name: Force beat handlers + meta: flush_handlers + +- name: set auditbeat service state (upstart) + service: + name: "auditbeat" + state: "{{ auditbeat_service_state }}" + enabled: "{{ auditbeat_service_state in ['running', 'started', 'restarted'] }}" + when: + - ansible_service_mgr == 'upstart' + - auditbeat_service_state in ['started', 'stopped'] + +- name: set auditbeat service state (systemd) + systemd: + name: "auditbeat" + state: "{{ auditbeat_service_state }}" + enabled: "{{ auditbeat_service_state in ['running', 'started', 'restarted'] }}" + when: + - ansible_service_mgr == 'systemd' + - auditbeat_service_state in ['started', 'stopped'] diff --git a/elk_metrics_7x/roles/elastic_auditbeat/tasks/systemd.general-overrides.conf.j2 b/elk_metrics_7x/roles/elastic_auditbeat/tasks/systemd.general-overrides.conf.j2 new file mode 120000 index 00000000..9ddff7cc --- /dev/null +++ b/elk_metrics_7x/roles/elastic_auditbeat/tasks/systemd.general-overrides.conf.j2 @@ -0,0 +1 @@ +../../../templates/systemd.general-overrides.conf.j2 \ No newline at end of file diff --git a/elk_metrics_7x/roles/elastic_auditbeat/templates/auditbeat.yml.j2 b/elk_metrics_7x/roles/elastic_auditbeat/templates/auditbeat.yml.j2 new file mode 100644 index 00000000..c0c164e2 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_auditbeat/templates/auditbeat.yml.j2 @@ -0,0 +1,845 @@ +{% import 'templates/_macros.j2' as elk_macros %} +########################## Auditbeat Configuration ############################# + +# This is a reference configuration file documenting all non-deprecated options +# in comments. For a shorter configuration example that contains only the most +# common options, please see auditbeat.yml in the same directory. 
+# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/auditbeat/index.html + +#============================ Config Reloading ================================ + +# Config reloading allows to dynamically load modules. Each file which is +# monitored must contain one or multiple modules as a list. +auditbeat.config.modules: + + # Glob pattern for configuration reloading + path: ${path.config}/conf.d/*.yml + + # Period on which files under path should be checked for changes + reload.period: 60s + + # Set to true to enable config reloading + reload.enabled: true + +# Maximum amount of time to randomly delay the start of a metricset. Use 0 to +# disable startup delay. +auditbeat.max_start_delay: 10s + +#========================== Modules configuration ============================= +auditbeat.modules: + +# The auditd module collects events from the audit framework in the Linux +# kernel. You need to specify audit rules for the events that you want to audit. +- module: auditd +{% if ansible_kernel is version_compare('4.4', '>=') %} + socket_type: {{ (apply_security_hardening | default(true) | bool) | ternary('multicast', 'unicast') }} +{% endif %} + resolve_ids: true + failure_mode: silent + backlog_limit: 8196 + rate_limit: 0 + include_raw_message: false + include_warnings: true + +{% if not apply_security_hardening | default(true) | bool %} + audit_rule_files: + - '${path.config}/audit.rules.d/*.conf' + - '/etc/audit/rules.d/*.rules' + audit_rules: | + ## Define audit rules here. + ## Create file watches (-w) or syscall audits (-a or -A). Uncomment these + ## examples or add your own rules. + + ## If you are on a 64 bit platform, everything should be running + ## in 64 bit mode. This rule will detect any use of the 32 bit syscalls + ## because this might be a sign of someone exploiting a hole in the 32 + ## bit API. + -a always,exit -F arch=b32 -S all -F key=32bit-abi + + ## Executions. 
+ -a always,exit -F arch=b64 -S execve,execveat -k exec + + # Things that affect identity. + -w /etc/group -p wa -k identity + -w /etc/passwd -p wa -k identity + -w /etc/gshadow -p wa -k identity + -w /etc/shadow -p wa -k identity + + # Unauthorized access attempts to files (unsuccessful). + -a always,exit -F arch=b32 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -F key=access + -a always,exit -F arch=b32 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -F key=access + -a always,exit -F arch=b64 -S open,truncate,ftruncate,creat,openat,open_by_handle_at -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -F key=access + -a always,exit -F arch=b64 -S open,truncate,ftruncate,creat,openat,open_by_handle_at -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -F key=access + +{% endif %} + +# The file integrity module sends events when files are changed (created, +# updated, deleted). The events contain file metadata and hashes. +- module: file_integrity + paths: + - /bin + - /etc/ansible/roles + - /etc/apt + - /etc/apache2 + - /etc/httpd + - /etc/network + - /etc/nginx + - /etc/mysql + - /etc/openstack_deploy + - /etc/sysconfig + - /etc/systemd + - /etc/uwsgi + - /etc/yum + - /etc/zypp + - /openstack/venvs + - /opt/openstack-ansible + - /sbin + - /usr/bin + - /usr/local/bin + - /usr/sbin + - /var/lib/lxc + + # List of regular expressions to filter out notifications for unwanted files. + # Wrap in single quotes to workaround YAML escaping rules. By default no files + # are ignored. + exclude_files: + - '(?i)\.sw[nop]$' + - '~$' + - '/\.git($|/)' + + # Scan over the configured file paths at startup and send events for new or + # modified files since the last time Auditbeat was running. + scan_at_start: true + + # Average scan rate. This throttles the amount of CPU and I/O that Auditbeat + # consumes at startup while scanning. Default is "50 MiB". 
+ scan_rate_per_sec: 64 MiB + + # Limit on the size of files that will be hashed. Default is "100 MiB". + # Files larger than this limit are reported without a computed hash. + max_file_size: 128 MiB + + # Hash types to compute when the file changes. Supported types are + # blake2b_256, blake2b_384, blake2b_512, md5, sha1, sha224, sha256, sha384, + # sha512, sha512_224, sha512_256, sha3_224, sha3_256, sha3_384 and sha3_512. + # Default is sha1. + hash_types: [sha1] + + # Detect changes to files included in subdirectories. Disabled by default. + recursive: true + +# The system module collects security related information about a host. +# All datasets send both periodic state information (e.g. all currently +# running processes) and real-time changes (e.g. when a new process starts +# or stops). +- module: system + datasets: + - host # General host information, e.g. uptime, IPs + - process # Started and stopped processes + - socket # Opened and closed sockets + - user # User information + + # How often datasets send state updates with the + # current state of the system (e.g. all currently + # running processes, all open sockets). + state.period: 12h + + # The state.period can be overridden for any dataset. + # host.state.period: 12h + # process.state.period: 12h + # socket.state.period: 12h + # user.state.period: 12h + + # Enabled by default. Auditbeat will read password fields in + # /etc/passwd and /etc/shadow and store a hash locally to + # detect any changes. + user.detect_password_changes: true + +#================================ General ====================================== + +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. +# If this option is not defined, the hostname is used. +#name: + +# The tags of the shipper are included in their own field with each +# transaction published. 
Tags make it easy to group servers by different +# logical properties. +#tags: ["service-X", "web-tier"] + +# Optional fields that you can specify to add additional information to the +# output. Fields can be scalar values, arrays, dictionaries, or any nested +# combination of these. +#fields: +# env: staging + +# If this option is set to true, the custom fields are stored as top-level +# fields in the output document instead of being grouped under a fields +# sub-dictionary. Default is false. +#fields_under_root: false + +# Internal queue configuration for buffering events to be published. +#queue: + # Queue type by name (default 'mem') + # The memory queue will present all available events (up to the outputs + # bulk_max_size) to the output, the moment the output is ready to server + # another batch of events. + #mem: + # Max number of events the queue can buffer. + #events: 4096 + + # Hints the minimum number of events stored in the queue, + # before providing a batch of events to the outputs. + # The default value is set to 2048. + # A value of 0 ensures events are immediately available + # to be sent to the outputs. + #flush.min_events: 2048 + + # Maximum duration after which events are available to the outputs, + # if the number of events stored in the queue is < min_flush_events. + #flush.timeout: 1s + + # The spool queue will store events in a local spool file, before + # forwarding the events to the outputs. + # + # Beta: spooling to disk is currently a beta feature. Use with care. + # + # The spool file is a circular buffer, which blocks once the file/buffer is full. + # Events are put into a write buffer and flushed once the write buffer + # is full or the flush_timeout is triggered. + # Once ACKed by the output, events are removed immediately from the queue, + # making space for new events to be persisted. + #spool: + # The file namespace configures the file path and the file creation settings. 
+ # Once the file exists, the `size`, `page_size` and `prealloc` settings + # will have no more effect. + #file: + # Location of spool file. The default value is ${path.data}/spool.dat. + #path: "${path.data}/spool.dat" + + # Configure file permissions if file is created. The default value is 0600. + #permissions: 0600 + + # File size hint. The spool blocks, once this limit is reached. The default value is 100 MiB. + #size: 100MiB + + # The files page size. A file is split into multiple pages of the same size. The default value is 4KiB. + #page_size: 4KiB + + # If prealloc is set, the required space for the file is reserved using + # truncate. The default value is true. + #prealloc: true + + # Spool writer settings + # Events are serialized into a write buffer. The write buffer is flushed if: + # - The buffer limit has been reached. + # - The configured limit of buffered events is reached. + # - The flush timeout is triggered. + #write: + # Sets the write buffer size. + #buffer_size: 1MiB + + # Maximum duration after which events are flushed, if the write buffer + # is not full yet. The default value is 1s. + #flush.timeout: 1s + + # Number of maximum buffered events. The write buffer is flushed once the + # limit is reached. + #flush.events: 16384 + + # Configure the on-disk event encoding. The encoding can be changed + # between restarts. + # Valid encodings are: json, ubjson, and cbor. + #codec: cbor + #read: + # Reader flush timeout, waiting for more events to become available, so + # to fill a complete batch, as required by the outputs. + # If flush_timeout is 0, all available events are forwarded to the + # outputs immediately. + # The default value is 0s. + #flush.timeout: 0s + +# Sets the maximum number of CPUs that can be executing simultaneously. The +# default is the number of logical CPUs available in the system. 
+#max_procs: + +#================================ Processors =================================== + +# Processors are used to reduce the number of fields in the exported event or to +# enhance the event with external metadata. This section defines a list of +# processors that are applied one by one and the first one receives the initial +# event: +# +# event -> filter1 -> event1 -> filter2 ->event2 ... +# +# The supported processors are drop_fields, drop_event, include_fields, +# decode_json_fields, and add_cloud_metadata. +# +# For example, you can use the following processors to keep the fields that +# contain CPU load percentages, but remove the fields that contain CPU ticks +# values: +# +#processors: +#- include_fields: +# fields: ["cpu"] +#- drop_fields: +# fields: ["cpu.user", "cpu.system"] +# +# The following example drops the events that have the HTTP response code 200: +# +#processors: +#- drop_event: +# when: +# equals: +# http.code: 200 +# +# The following example renames the field a to b: +# +#processors: +#- rename: +# fields: +# - from: "a" +# to: "b" +# +# The following example tokenizes the string into fields: +# +#processors: +#- dissect: +# tokenizer: "%{key1} - %{key2}" +# field: "message" +# target_prefix: "dissect" +# +# The following example enriches each event with metadata from the cloud +# provider about the host machine. It works on EC2, GCE, DigitalOcean, +# Tencent Cloud, and Alibaba Cloud. +# +#processors: +#- add_cloud_metadata: ~ +# +# The following example enriches each event with the machine's local time zone +# offset from UTC. 
+# +#processors: +#- add_locale: +# format: offset +# +# The following example enriches each event with docker metadata, it matches +# given fields to an existing container id and adds info from that container: +# +#processors: +#- add_docker_metadata: +# host: "unix:///var/run/docker.sock" +# match_fields: ["system.process.cgroup.id"] +# match_pids: ["process.pid", "process.ppid"] +# match_source: true +# match_source_index: 4 +# match_short_id: false +# cleanup_timeout: 60 +# labels.dedot: false +# # To connect to Docker over TLS you must specify a client and CA certificate. +# #ssl: +# # certificate_authority: "/etc/pki/root/ca.pem" +# # certificate: "/etc/pki/client/cert.pem" +# # key: "/etc/pki/client/cert.key" +# +# The following example enriches each event with docker metadata, it matches +# container id from log path available in `source` field (by default it expects +# it to be /var/lib/docker/containers/*/*.log). +# +#processors: +#- add_docker_metadata: ~ +# +# The following example enriches each event with host metadata. +# +#processors: +#- add_host_metadata: +# netinfo.enabled: false +# +# The following example enriches each event with process metadata using +# process IDs included in the event. +# +#processors: +#- add_process_metadata: +# match_pids: ["system.process.ppid"] +# target: system.process.parent +# +# The following example decodes fields containing JSON strings +# and replaces the strings with valid JSON objects. +# +#processors: +#- decode_json_fields: +# fields: ["field1", "field2", ...] +# process_array: false +# max_depth: 1 +# target: "" +# overwrite_keys: false +processors: +- add_host_metadata: ~ + +#============================= Elastic Cloud ================================== + +# These settings simplify using auditbeat with the Elastic Cloud (https://cloud.elastic.co/). + +# The cloud.id setting overwrites the `output.elasticsearch.hosts` and +# `setup.kibana.host` options. 
+# You can find the `cloud.id` in the Elastic Cloud web UI. +#cloud.id: + +# The cloud.auth setting overwrites the `output.elasticsearch.username` and +# `output.elasticsearch.password` settings. The format is `:`. +#cloud.auth: + +#================================ Outputs ====================================== + +# Configure what output to use when sending the data collected by the beat. + +#-------------------------- Elasticsearch output ------------------------------- +#output.elasticsearch: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Array of hosts to connect to. + # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + #hosts: ["localhost:9200"] + + # Set gzip compression level. + #compression_level: 0 + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "elastic" + #password: "changeme" + + # Dictionary of HTTP parameters to pass within the url with index operations. + #parameters: + #param1: value1 + #param2: value2 + + # Number of workers per Elasticsearch host. + #worker: 1 + + # Optional index name. The default is "auditbeat" plus date + # and generates [auditbeat-]YYYY.MM.DD keys. + # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. + #index: "auditbeat-%{[beat.version]}-%{+yyyy.MM.dd}" + + # Optional ingest node pipeline. By default no pipeline will be used. + #pipeline: "" + + # Optional HTTP Path + #path: "/elasticsearch" + + # Custom HTTP headers to add to each request + #headers: + # X-My-Header: Contents of the header + + # Proxy server url + #proxy_url: http://proxy:3128 + + # The number of times a particular Elasticsearch index operation is attempted. 
If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Elasticsearch bulk API index request. + # The default is 50. + #bulk_max_size: 50 + + # Configure http request timeout before failing an request to Elasticsearch. + #timeout: 90 + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. By default is off. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + +#----------------------------- Logstash output --------------------------------- +{{ elk_macros.output_logstash(inventory_hostname, logstash_data_hosts, ansible_processor_count) }} + +#------------------------------- Kafka output ---------------------------------- +#output.kafka: + # Boolean flag to enable or disable the output module. 
+ #enabled: true + + # The list of Kafka broker addresses from where to fetch the cluster metadata. + # The cluster metadata contain the actual Kafka brokers events are published + # to. + #hosts: ["localhost:9092"] + + # The Kafka topic used for produced events. The setting can be a format string + # using any event field. To set the topic from document type use `%{[type]}`. + #topic: beats + + # The Kafka event key setting. Use format string to create unique event key. + # By default no event key will be generated. + #key: '' + + # The Kafka event partitioning strategy. Default hashing strategy is `hash` + # using the `output.kafka.key` setting or randomly distributes events if + # `output.kafka.key` is not configured. + #partition.hash: + # If enabled, events will only be published to partitions with reachable + # leaders. Default is false. + #reachable_only: false + + # Configure alternative event field names used to compute the hash value. + # If empty `output.kafka.key` setting will be used. + # Default value is empty list. + #hash: [] + + # Authentication details. Password is required if username is set. + #username: '' + #password: '' + + # Kafka version auditbeat is assumed to run against. Defaults to the oldest + # supported stable version (currently version 0.8.2.0) + #version: 0.8.2 + + # Metadata update configuration. Metadata do contain leader information + # deciding which broker to use when publishing. + #metadata: + # Max metadata request retry attempts when cluster is in middle of leader + # election. Defaults to 3 retries. + #retry.max: 3 + + # Waiting time between retries during leader elections. Default is 250ms. + #retry.backoff: 250ms + + # Refresh metadata interval. Defaults to every 10 minutes. + #refresh_frequency: 10m + + # The number of concurrent load-balanced Kafka output workers. + #worker: 1 + + # The number of times to retry publishing an event after a publishing failure. 
+ # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Kafka request. The default + # is 2048. + #bulk_max_size: 2048 + + # The number of seconds to wait for responses from the Kafka brokers before + # timing out. The default is 30s. + #timeout: 30s + + # The maximum duration a broker will wait for number of required ACKs. The + # default is 10s. + #broker_timeout: 10s + + # The number of messages buffered for each Kafka broker. The default is 256. + #channel_buffer_size: 256 + + # The keep-alive period for an active network connection. If 0s, keep-alives + # are disabled. The default is 0 seconds. + #keep_alive: 0 + + # Sets the output compression codec. Must be one of none, snappy and gzip. The + # default is gzip. + #compression: gzip + + # The maximum permitted size of JSON-encoded messages. Bigger messages will be + # dropped. The default value is 1000000 (bytes). This value should be equal to + # or less than the broker's message.max.bytes. + #max_message_bytes: 1000000 + + # The ACK reliability level required from broker. 0=no response, 1=wait for + # local commit, -1=wait for all replicas to commit. The default is 1. Note: + # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently + # on error. + #required_acks: 1 + + # The configurable ClientID used for logging, debugging, and auditing + # purposes. The default is "beats". + #client_id: beats + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Optional SSL configuration options. SSL is off by default. 
+ # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- Redis output ---------------------------------- +#output.redis: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The list of Redis servers to connect to. If load balancing is enabled, the + # events are distributed to the servers in the list. If one server becomes + # unreachable, the events are distributed to the reachable servers only. + #hosts: ["localhost:6379"] + + # The Redis port to use if hosts does not contain a port number. The default + # is 6379. + #port: 6379 + + # The name of the Redis list or channel the events are published to. The + # default is auditbeat. + #key: auditbeat + + # The password to authenticate with. The default is no authentication. + #password: + + # The Redis database number where the events are published. The default is 0. 
+ #db: 0 + + # The Redis data type to use for publishing events. If the data type is list, + # the Redis RPUSH command is used. If the data type is channel, the Redis + # PUBLISH command is used. The default value is list. + #datatype: list + + # The number of workers to use for each host configured to publish events to + # Redis. Use this setting along with the loadbalance option. For example, if + # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each + # host). + #worker: 1 + + # If set to true and multiple hosts or workers are configured, the output + # plugin load balances published events onto all Redis hosts. If set to false, + # the output plugin sends all events to only one host (determined at random) + # and will switch to another host if the currently selected one becomes + # unreachable. The default value is true. + #loadbalance: true + + # The Redis connection timeout in seconds. The default is 5 seconds. + #timeout: 5s + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Redis request or pipeline. + # The default is 2048. + #bulk_max_size: 2048 + + # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The + # value must be a URL with a scheme of socks5://. + #proxy_url: + + # This option determines whether Redis hostnames are resolved locally when + # using a proxy. The default value is false, which means that name resolution + # occurs on the proxy server. + #proxy_use_local_resolver: false + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. 
+ #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- File output ----------------------------------- +#output.file: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping html symbols in strings. + #escape_html: true + + # Path to the directory where to save the generated files. The option is + # mandatory. + #path: "/tmp/auditbeat" + + # Name of the generated files. The default is `auditbeat` and it generates + # files: `auditbeat`, `auditbeat.1`, `auditbeat.2`, etc. + #filename: auditbeat + + # Maximum size in kilobytes of each file. When this size is reached, and on + # every auditbeat restart, the files are rotated. The default value is 10240 + # kB. 
+ #rotate_every_kb: 10000 + + # Maximum number of files under path. When this number of files is reached, + # the oldest file is deleted and the rest are shifted from last to first. The + # default is 7 files. + #number_of_files: 7 + + # Permissions to use for file creation. The default is 0600. + #permissions: 0600 + + +#----------------------------- Console output --------------------------------- +#output.console: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping html symbols in strings. + #escape_html: true + +#================================= Paths ====================================== + +# The home path for the auditbeat installation. This is the default base path +# for all other path settings and for miscellaneous files that come with the +# distribution (for example, the sample dashboards). +# If not set by a CLI flag or in the configuration file, the default for the +# home path is the location of the binary. +#path.home: + +# The configuration path for the auditbeat installation. This is the default +# base path for configuration files, including the main YAML configuration file +# and the Elasticsearch template file. If not set by a CLI flag or in the +# configuration file, the default for the configuration path is the home path. +#path.config: ${path.home} + +# The data path for the auditbeat installation. This is the default base path +# for all the files in which auditbeat needs to store its data. If not set by a +# CLI flag or in the configuration file, the default for the data path is a data +# subdirectory inside the home path. +#path.data: ${path.home}/data + +# The logs path for a auditbeat installation. This is the default location for +# the Beat's log files. If not set by a CLI flag or in the configuration file, +# the default for the logs path is a logs subdirectory inside the home path. 
+#path.logs: ${path.home}/logs + +#================================ Keystore ========================================== +# Location of the Keystore containing the keys and their sensitive values. +#keystore.path: "${path.config}/beats.keystore" + +#============================== Dashboards ===================================== +{{ elk_macros.setup_dashboards('auditbeat') }} + +#=============================== Template ====================================== +{{ elk_macros.setup_template('auditbeat', inventory_hostname, data_nodes, elasticsearch_number_of_replicas) }} + +#============================== Kibana ===================================== +{% if (groups['kibana'] | length) > 0 %} +{{ elk_macros.setup_kibana(hostvars[groups['kibana'][0]]['ansible_host'] ~ ':' ~ kibana_port) }} +{% endif %} + +#================================ Logging ====================================== +{{ elk_macros.beat_logging('auditbeat') }} + +#============================== Xpack Monitoring ===================================== +{{ elk_macros.xpack_monitoring_elasticsearch(inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }} + +#================================ HTTP Endpoint ====================================== +# Each beat can expose internal metrics through a HTTP endpoint. For security +# reasons the endpoint is disabled by default. This feature is currently experimental. +# Stats can be access through http://localhost:5066/stats . For pretty JSON output +# append ?pretty to the URL. + +# Defines if the HTTP endpoint is enabled. +#http.enabled: false + +# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost. +#http.host: localhost + +# Port on which the HTTP endpoint will bind. Default is 5066. +#http.port: 5066 + +#============================= Process Security ================================ + +# Enable or disable seccomp system call filtering on Linux. Default is enabled. 
+#seccomp.enabled: true diff --git a/elk_metrics_7x/roles/elastic_auditbeat/vars/redhat.yml b/elk_metrics_7x/roles/elastic_auditbeat/vars/redhat.yml new file mode 100644 index 00000000..59cc129c --- /dev/null +++ b/elk_metrics_7x/roles/elastic_auditbeat/vars/redhat.yml @@ -0,0 +1,18 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +auditbeat_distro_packages: + - audispd-plugins + - auditbeat diff --git a/elk_metrics_7x/roles/elastic_auditbeat/vars/suse.yml b/elk_metrics_7x/roles/elastic_auditbeat/vars/suse.yml new file mode 100644 index 00000000..4144a896 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_auditbeat/vars/suse.yml @@ -0,0 +1,18 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +auditbeat_distro_packages: + - audit-audispd-plugins + - auditbeat diff --git a/elk_metrics_7x/roles/elastic_auditbeat/vars/ubuntu.yml b/elk_metrics_7x/roles/elastic_auditbeat/vars/ubuntu.yml new file mode 100644 index 00000000..59cc129c --- /dev/null +++ b/elk_metrics_7x/roles/elastic_auditbeat/vars/ubuntu.yml @@ -0,0 +1,18 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +auditbeat_distro_packages: + - audispd-plugins + - auditbeat diff --git a/elk_metrics_7x/roles/elastic_beat_setup/defaults/main.yml b/elk_metrics_7x/roles/elastic_beat_setup/defaults/main.yml new file mode 100644 index 00000000..998b689f --- /dev/null +++ b/elk_metrics_7x/roles/elastic_beat_setup/defaults/main.yml @@ -0,0 +1,30 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Each setup flag is run one at a time. 
+elastic_setup_flags: + - "--template" + - "--pipelines" + # - "--dashboards" + +# Setup options are cast as a string with, one option per line. +elastic_beat_setup_options: >- + -E 'output.logstash.enabled=false' + -E 'output.elasticsearch.hosts={{ coordination_nodes | to_json }}' + -E 'setup.template.enabled=true' + -E 'setup.template.overwrite=true' + +# The node defined here will be used with the environment variable, "no_proxy". +elastic_beat_kibana_host: "{{ hostvars[groups['kibana'][0]]['ansible_host'] }}" diff --git a/elk_metrics_7x/roles/elastic_beat_setup/meta/main.yml b/elk_metrics_7x/roles/elastic_beat_setup/meta/main.yml new file mode 100644 index 00000000..c529fd00 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_beat_setup/meta/main.yml @@ -0,0 +1,35 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+galaxy_info:
+  author: OpenStack
+  description: Elastic v7.x beat setup role
+  company: Rackspace
+  license: Apache2
+  min_ansible_version: 2.5
+  platforms:
+    - name: Ubuntu
+      versions:
+        - trusty
+        - xenial
+        - bionic
+  categories:
+    - cloud
+    - development
+    - beats
+    - elastic-beats
+    - elasticsearch
+    - elastic-stack
+dependencies: []
diff --git a/elk_metrics_7x/roles/elastic_beat_setup/tasks/main.yml b/elk_metrics_7x/roles/elastic_beat_setup/tasks/main.yml
new file mode 100644
index 00000000..2a359510
--- /dev/null
+++ b/elk_metrics_7x/roles/elastic_beat_setup/tasks/main.yml
@@ -0,0 +1,72 @@
+---
+# Copyright 2018, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+- name: Check for coordination_nodes var
+  fail:
+    msg: >-
+      To use this role the variable `coordination_nodes` must be defined.
+  when:
+    - coordination_nodes is undefined
+
+- name: Check for elastic_beat_name var
+  fail:
+    msg: >-
+      To use this role the variable `elastic_beat_name` must be defined.
+ when: + - elastic_beat_name is undefined + +- name: Refresh local facts + setup: + filter: ansible_local + gather_subset: "!all" + tags: + - always + +- name: Load templates + shell: >- + {% if item == '--dashboards' %} + sed -i 's@\\\"index\\\": \\\"{{ elastic_beat_name }}-\*\\\"@\\\"index\\\": \\\"{{ elastic_beat_name }}\\\"@g' /usr/share/{{ elastic_beat_name }}/kibana/6/dashboard/*.json + sed -i 's@"id": "{{ elastic_beat_name }}\-\*",@"id": "{{ elastic_beat_name }}",@g' /usr/share/{{ elastic_beat_name }}/kibana/6/index-pattern/*.json + {% endif %} + {{ elastic_beat_name }} setup + {{ item }} + {{ elastic_beat_setup_options }} + -e -v + with_items: "{{ elastic_setup_flags }}" + register: templates + environment: + no_proxy: "{{ elastic_beat_kibana_host }}" + until: templates is success + retries: 5 + delay: 5 + run_once: true + when: + - ((ansible_local['elastic']['setup'][elastic_beat_name + '_loaded_templates'] is undefined) or + (not (ansible_local['elastic']['setup'][elastic_beat_name + '_loaded_templates'] | bool))) or + ((elk_package_state | default('present')) == "latest") or + (elk_beat_setup | default(false) | bool) + tags: + - setup + +- name: Set template fact + ini_file: + dest: "/etc/ansible/facts.d/elastic.fact" + section: "setup" + option: "{{ elastic_beat_name + '_loaded_templates' }}" + value: true + when: + - templates is changed + tags: + - setup diff --git a/elk_metrics_7x/roles/elastic_curator/handlers/main.yml b/elk_metrics_7x/roles/elastic_curator/handlers/main.yml new file mode 100644 index 00000000..ef153bdc --- /dev/null +++ b/elk_metrics_7x/roles/elastic_curator/handlers/main.yml @@ -0,0 +1,25 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +- name: Enable and restart curator.timer + systemd: + name: "curator.timer" + enabled: true + state: restarted + when: + - (elk_package_state | default('present')) != 'absent' + - ansible_service_mgr == 'systemd' + tags: + - config diff --git a/elk_metrics_7x/roles/elastic_curator/meta/main.yml b/elk_metrics_7x/roles/elastic_curator/meta/main.yml new file mode 100644 index 00000000..c1e7ffbe --- /dev/null +++ b/elk_metrics_7x/roles/elastic_curator/meta/main.yml @@ -0,0 +1,34 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+galaxy_info:
+  author: OpenStack
+  description: Elastic v7.x curator role
+  company: Rackspace
+  license: Apache2
+  min_ansible_version: 2.5
+  platforms:
+    - name: Ubuntu
+      versions:
+        - trusty
+        - xenial
+        - bionic
+  categories:
+    - cloud
+    - development
+    - elasticsearch
+    - elastic-stack
+dependencies:
+  - role: elastic_retention
diff --git a/elk_metrics_7x/roles/elastic_curator/tasks/curator_systemd.yml b/elk_metrics_7x/roles/elastic_curator/tasks/curator_systemd.yml
new file mode 100644
index 00000000..3c2b17ec
--- /dev/null
+++ b/elk_metrics_7x/roles/elastic_curator/tasks/curator_systemd.yml
@@ -0,0 +1,46 @@
+---
+# Copyright 2018, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +- name: Run the systemd service role + include_role: + name: systemd_service + vars: + systemd_service_enabled: "{{ ((elk_package_state | default('present')) != 'absent') | ternary(true, false) }}" + systemd_service_restart_changed: false + systemd_user_name: curator + systemd_group_name: curator + systemd_services: + - service_name: "curator" + execstarts: + - /opt/elasticsearch-curator/bin/curator + --config /var/lib/curator/curator.yml + /var/lib/curator/actions-age.yml + timer: + state: "started" + options: + OnBootSec: 30min + OnUnitActiveSec: 12h + Persistent: true + - service_name: "curator-size" + execstarts: + - /opt/elasticsearch-curator/bin/curator + --config /var/lib/curator/curator.yml + /var/lib/curator/actions-size.yml + timer: + state: "started" + options: + OnBootSec: 30min + OnUnitActiveSec: 1h + Persistent: true diff --git a/elk_metrics_7x/roles/elastic_curator/tasks/curator_upstart.yml b/elk_metrics_7x/roles/elastic_curator/tasks/curator_upstart.yml new file mode 100644 index 00000000..3a3d1d49 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_curator/tasks/curator_upstart.yml @@ -0,0 +1,32 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Create cron job for curator (age) + cron: + name: "Run curator" + minute: "0" + hour: "1" + user: "curator" + job: "/opt/elasticsearch-curator/bin/curator --config /var/lib/curator/curator.yml /var/lib/curator/actions-age.yml" + cron_file: "elasticsearch-curator" + +- name: Create cron job for curator (size) + cron: + name: "Run curator" + minute: "0" + hour: "*/5" + user: "curator" + job: "/opt/elasticsearch-curator/bin/curator --config /var/lib/curator/curator.yml /var/lib/curator/actions-size.yml" + cron_file: "elasticsearch-curator" diff --git a/elk_metrics_7x/roles/elastic_curator/tasks/main.yml b/elk_metrics_7x/roles/elastic_curator/tasks/main.yml new file mode 100644 index 00000000..1c014d3e --- /dev/null +++ b/elk_metrics_7x/roles/elastic_curator/tasks/main.yml @@ -0,0 +1,103 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Gather variables for each operating system + include_vars: "{{ item }}" + with_first_found: + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower }}.yml" + - "{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version | lower }}.yml" + - "{{ ansible_distribution | lower }}.yml" + - "{{ ansible_os_family | lower }}-{{ ansible_distribution_version.split('.')[0] }}.yml" + - "{{ ansible_os_family | lower }}.yml" + tags: + - always + +- name: Refresh local facts + setup: + filter: ansible_local + gather_subset: "!all" + tags: + - always + +- name: Ensure virtualenv is installed + package: + name: "{{ curator_distro_packages }}" + state: "{{ elk_package_state | default('present') }}" + update_cache: "{{ (ansible_pkg_mgr == 'apt') | ternary('yes', omit) }}" + tags: + - package_install + +- name: Create the virtualenv (if it does not exist) + command: "virtualenv --never-download --no-site-packages /opt/elasticsearch-curator" + args: + creates: "/opt/elasticsearch-curator/bin/activate" + +- name: Ensure curator is installed + pip: + name: "elasticsearch-curator<6" + state: "{{ elk_package_state | default('present') }}" + extra_args: --isolated + virtualenv: /opt/elasticsearch-curator + register: _pip_task + until: _pip_task is success + retries: 3 + delay: 2 + tags: + - package_install + +- name: create the system group + group: + name: "curator" + state: "present" + system: "yes" + +- name: Create the curator system user + user: + name: "curator" + group: "curator" + comment: "curator user" + shell: "/bin/false" + createhome: "yes" + home: "/var/lib/curator" + +- name: Create curator data path + file: + path: "{{ item }}" + state: directory + owner: "curator" + group: "curator" + mode: "0755" + recurse: true + with_items: + - "/var/lib/curator" + - "/var/log/curator" + - "/etc/curator" + +- name: Drop curator conf file(s) + 
template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + with_items: + - src: "curator.yml.j2" + dest: /var/lib/curator/curator.yml + - src: "curator-actions-age.yml.j2" + dest: /var/lib/curator/actions-age.yml + - src: "curator-actions-size.yml.j2" + dest: /var/lib/curator/actions-size.yml + notify: + - Enable and restart curator.timer + +- include_tasks: "curator_{{ ansible_service_mgr }}.yml" diff --git a/elk_metrics_7x/roles/elastic_curator/templates/curator-actions-age.yml.j2 b/elk_metrics_7x/roles/elastic_curator/templates/curator-actions-age.yml.j2 new file mode 100644 index 00000000..d3e5e95b --- /dev/null +++ b/elk_metrics_7x/roles/elastic_curator/templates/curator-actions-age.yml.j2 @@ -0,0 +1,65 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +{% set action_items = [] -%} +{# Delete index loop #} +{% for key in (ansible_local['elastic']['retention']['elastic_beat_retention_policy_keys'] | from_yaml) -%} +{% set delete_indices = {} -%} +{# Total retention size in days #} +{% set _index_retention = ansible_local['elastic']['retention']['elastic_' + key + '_retention'] -%} +{% set index_retention = ((_index_retention | int) > 0) | ternary(_index_retention, 1) | int %} +{% set _ = delete_indices.update( + { + 'action': 'delete_indices', + 'description': 'Prune indices for ' + key + ' after ' ~ index_retention ~ ' days', + 'options': { + 'ignore_empty_list': true, + 'disable_action': false + } + } + ) +-%} +{% set filters = [] -%} +{% set _ = filters.append( + { + 'filtertype': 'pattern', + 'kind': 'prefix', + 'value': key + } + ) +-%} +{% set _ = filters.append( + { + 'filtertype': 'age', + 'source': 'name', + 'direction': 'older', + 'timestring': '%Y.%m.%d', + 'unit': 'days', + 'unit_count': index_retention + } + ) +-%} +{% set _ = delete_indices.update({'filters': filters}) -%} +{% set _ = action_items.append(delete_indices) -%} +{% endfor -%} + +{% set actions = {} -%} +{% for action_item in action_items -%} +{% set _ = actions.update({loop.index: action_item}) -%} +{% endfor -%} + +{# Render all actions #} +{% set curator_actions = {'actions': actions} -%} +{{ curator_actions | to_nice_yaml(indent=2) }} diff --git a/elk_metrics_7x/roles/elastic_curator/templates/curator-actions-size.yml.j2 b/elk_metrics_7x/roles/elastic_curator/templates/curator-actions-size.yml.j2 new file mode 100644 index 00000000..86cb6527 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_curator/templates/curator-actions-size.yml.j2 @@ -0,0 +1,63 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{% set action_items = [] -%} +{# Delete index loop #} +{% for key in (ansible_local['elastic']['retention']['elastic_beat_retention_policy_keys'] | from_yaml) -%} +{% set delete_indices = {} -%} +{# Total retention size in gigabytes #} +{% set _index_size = ((ansible_local['elastic']['retention']['elastic_' + key + '_size'] | int) // 1024) -%} +{% set index_size = ((_index_size | int) > 0) | ternary(_index_size, 1) | int %} +{% set _ = delete_indices.update( + { + 'action': 'delete_indices', + 'description': 'Prune indices for ' + key + ' after index is > ' ~ index_size ~ 'gb', + 'options': { + 'ignore_empty_list': true, + 'disable_action': false + } + } + ) +-%} +{% set filters = [] -%} +{% set _ = filters.append( + { + 'filtertype': 'pattern', + 'kind': 'prefix', + 'value': key + } + ) +-%} +{% set _ = filters.append( + { + 'filtertype': 'space', + 'disk_space': index_size, + 'use_age': true, + 'source': 'creation_date' + } + ) +-%} +{% set _ = delete_indices.update({'filters': filters}) -%} +{% set _ = action_items.append(delete_indices) -%} +{% endfor -%} + +{% set actions = {} -%} +{% for action_item in action_items -%} +{% set _ = actions.update({loop.index: action_item}) -%} +{% endfor -%} + +{# Render all actions #} +{% set curator_actions = {'actions': actions} -%} +{{ curator_actions | to_nice_yaml(indent=2) }} diff --git a/elk_metrics_7x/roles/elastic_curator/templates/curator.yml.j2 b/elk_metrics_7x/roles/elastic_curator/templates/curator.yml.j2 new file mode 100644 index 00000000..ff0754ee --- /dev/null +++ 
b/elk_metrics_7x/roles/elastic_curator/templates/curator.yml.j2 @@ -0,0 +1,32 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +client: + hosts: + - {{ ansible_host }} + port: {{ elastic_port }} + url_prefix: "" + use_ssl: false + ssl_no_validate: true + http_auth: "" + timeout: 120 + master_only: true + +logging: + loglevel: INFO + logfile: /var/log/curator/curator + logformat: default + blacklist: + - elasticsearch + - urllib3 diff --git a/elk_metrics_7x/roles/elastic_curator/vars/redhat.yml b/elk_metrics_7x/roles/elastic_curator/vars/redhat.yml new file mode 100644 index 00000000..a3eb703e --- /dev/null +++ b/elk_metrics_7x/roles/elastic_curator/vars/redhat.yml @@ -0,0 +1,17 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +curator_distro_packages: + - python-virtualenv diff --git a/elk_metrics_7x/roles/elastic_curator/vars/suse.yml b/elk_metrics_7x/roles/elastic_curator/vars/suse.yml new file mode 100644 index 00000000..a3eb703e --- /dev/null +++ b/elk_metrics_7x/roles/elastic_curator/vars/suse.yml @@ -0,0 +1,17 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +curator_distro_packages: + - python-virtualenv diff --git a/elk_metrics_7x/roles/elastic_curator/vars/ubuntu-14.04.yml b/elk_metrics_7x/roles/elastic_curator/vars/ubuntu-14.04.yml new file mode 100644 index 00000000..a3eb703e --- /dev/null +++ b/elk_metrics_7x/roles/elastic_curator/vars/ubuntu-14.04.yml @@ -0,0 +1,17 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +curator_distro_packages: + - python-virtualenv diff --git a/elk_metrics_7x/roles/elastic_curator/vars/ubuntu.yml b/elk_metrics_7x/roles/elastic_curator/vars/ubuntu.yml new file mode 100644 index 00000000..363a2210 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_curator/vars/ubuntu.yml @@ -0,0 +1,18 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +curator_distro_packages: + - python-virtualenv + - virtualenv diff --git a/elk_metrics_7x/roles/elastic_data_hosts/defaults/main.yml b/elk_metrics_7x/roles/elastic_data_hosts/defaults/main.yml new file mode 100644 index 00000000..6e2353ee --- /dev/null +++ b/elk_metrics_7x/roles/elastic_data_hosts/defaults/main.yml @@ -0,0 +1,17 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This interface is used to determine cluster recovery speed. 
+elastic_data_interface: "{{ ansible_default_ipv4['alias'] }}"
diff --git a/elk_metrics_7x/roles/elastic_data_hosts/meta/main.yml b/elk_metrics_7x/roles/elastic_data_hosts/meta/main.yml
new file mode 100644
index 00000000..c7d684e6
--- /dev/null
+++ b/elk_metrics_7x/roles/elastic_data_hosts/meta/main.yml
@@ -0,0 +1,33 @@
+---
+# Copyright 2018, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+galaxy_info:
+  author: OpenStack
+  description: Elastic v7.x data hosts role
+  company: Rackspace
+  license: Apache2
+  min_ansible_version: 2.5
+  platforms:
+    - name: Ubuntu
+      versions:
+        - trusty
+        - xenial
+        - bionic
+  categories:
+    - cloud
+    - development
+    - elasticsearch
+    - elastic-stack
+dependencies: []
diff --git a/elk_metrics_7x/roles/elastic_data_hosts/tasks/main.yml b/elk_metrics_7x/roles/elastic_data_hosts/tasks/main.yml
new file mode 100644
index 00000000..5f08eb6c
--- /dev/null
+++ b/elk_metrics_7x/roles/elastic_data_hosts/tasks/main.yml
@@ -0,0 +1,41 @@
+---
+# Copyright 2018, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +- name: Refresh minimal facts + setup: + gather_subset: '!all,!any,network,virtual' + tags: + - always + +- name: Load data node variables + include_vars: "data-node-variables.yml" + tags: + - always + +- name: Ensure local facts directory exists + file: + dest: "/etc/ansible/facts.d" + state: directory + group: "root" + owner: "root" + mode: "0755" + recurse: no + +- name: Initialize local facts + ini_file: + dest: "/etc/ansible/facts.d/elastic.fact" + section: "setup" + option: cacheable + value: true diff --git a/elk_metrics_7x/roles/elastic_data_hosts/vars/data-node-variables.yml b/elk_metrics_7x/roles/elastic_data_hosts/vars/data-node-variables.yml new file mode 100644 index 00000000..2354daaa --- /dev/null +++ b/elk_metrics_7x/roles/elastic_data_hosts/vars/data-node-variables.yml @@ -0,0 +1,204 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+# storage node count is equal to the cluster size
+storage_node_count: "{{ groups['elastic-logstash'] | length }}"
+
+# the elasticsearch cluster elects one master from all those which are marked as master-eligible
+# 1 node cluster can only have one master
+# 2 node clusters have 1 master-eligible node to avoid split-brain
+# 3 node clusters have 3 master-eligible nodes
+# >3 node clusters have (nodes // 2) eligible masters rounded up to the next odd number
+elastic_master_node_count: |-
+  {% set masters = 0 %}
+  {% if (storage_node_count | int) < 3 %}
+    {% set masters = 1 %}
+  {% elif (storage_node_count | int) == 3 %}
+    {% set masters = 3 %}
+  {% else %}
+    {% set masters = (storage_node_count | int ) // 2 %}
+    {% if ((masters | int) % 2 == 0) %}
+      {% set masters = (masters | int) + 1 %}
+    {% endif %}
+  {% endif %}
+  {{ masters }}
+
+# Assign node roles
+# the first 'elastic_master_node_count' hosts in groups['elastic-logstash'] become master-eligible nodes
+# the first 'elastic_master_node_count' and subsequent alternate hosts in groups['elastic-logstash'] become data nodes
+## While the data node group is dynamically chosen the override
+## `elasticsearch_node_data` can be used to override the node type.
+## Dynamic node inclusion will still work for all other nodes in the group.
+_data_nodes: "{{ (groups['elastic-logstash'][:elastic_master_node_count | int] | union(groups['elastic-logstash'][elastic_master_node_count | int::2])) }}" +data_nodes: |- + {% set nodes = [] %} + {% for node in groups['elastic-logstash'] %} + {% if (hostvars[node]['elasticsearch_node_data'] is defined) and (hostvars[node]['elasticsearch_node_data'] | bool) %} + {% set _ = nodes.append(node) %} + {% endif %} + {% endfor %} + {% for node in groups['elastic-logstash'] %} + {% if (nodes | length) <= (_data_nodes | length) %} + {% if (node in _data_nodes) %} + {% set _ = nodes.append(node) %} + {% endif %} + {% endif %} + {% endfor %} + {{ nodes }} + +## While the logstash node group is dynamically chosen the override +## `elasticsearch_node_ingest` can be used to override the node type. +## Dynamic node inclusion will still work for all other nodes in the group. +_logstash_nodes: "{{ data_nodes }}" +logstash_nodes: |- + {% set nodes = [] %} + {% for node in groups['elastic-logstash'] %} + {% if (hostvars[node]['elasticsearch_node_ingest'] is defined) and (hostvars[node]['elasticsearch_node_ingest'] | bool) %} + {% set _ = nodes.append(node) %} + {% endif %} + {% endfor %} + {% for node in groups['elastic-logstash'] %} + {% if (nodes | length) <= (_logstash_nodes | length) %} + {% if (node in _logstash_nodes) %} + {% set _ = nodes.append(node) %} + {% endif %} + {% endif %} + {% endfor %} + {{ nodes }} + +## While the logstash node group is dynamically chosen the override +## `elasticsearch_node_ingest` can be used to override the node type. +## Dynamic node inclusion will still work for all other nodes in the group. 
+_ingest_nodes: "{{ data_nodes }}" +ingest_nodes: |- + {% set nodes = [] %} + {% for node in groups['elastic-logstash'] %} + {% if (hostvars[node]['elasticsearch_node_ingest'] is defined) and (hostvars[node]['elasticsearch_node_ingest'] | bool) %} + {% set _ = nodes.append(node) %} + {% endif %} + {% endfor %} + {% for node in groups['elastic-logstash'] %} + {% if (nodes | length) <= (_ingest_nodes | length) %} + {% if (node in _ingest_nodes) %} + {% set _ = nodes.append(node) %} + {% endif %} + {% endif %} + {% endfor %} + {{ nodes }} + +## While the master node group is dynamically chosen the override +## `elasticsearch_node_master` can be used to override the node type. +## Dynamic node inclusion will still work for all other nodes in the group. +_master_nodes: "{{ groups['elastic-logstash'][:elastic_master_node_count | int] }}" +master_nodes: |- + {% set nodes = [] %} + {% for node in groups['elastic-logstash'] %} + {% if (nodes | length) <= (elastic_master_node_count | int) %} + {% if (hostvars[node]['elasticsearch_node_master'] is defined) and (hostvars[node]['elasticsearch_node_master'] | bool) %} + {% set _ = nodes.append(node) %} + {% endif %} + {% endif %} + {% endfor %} + {% for node in groups['elastic-logstash'] %} + {% if (nodes | length) <= (elastic_master_node_count | int) %} + {% if (node in _master_nodes) %} + {% set _ = nodes.append(node) %} + {% endif %} + {% endif %} + {% endfor %} + {{ nodes }} +master_node_count: "{{ master_nodes | length }}" + +coordination_nodes: |- + {% if (groups['kibana'] | length) > 0 %} + {% set c_nodes = groups['kibana'] %} + {% else %} + {% set c_nodes = groups['elastic-logstash'] %} + {% endif %} + {{ + (c_nodes | map('extract', hostvars, 'ansible_host') | list) + | map('regex_replace', '(.*)' ,'\1:' ~ elastic_port) + | list + }} +zen_nodes: >- + {{ + (groups['elastic-logstash'] | union(groups['kibana'])) | map('extract', hostvars, 'ansible_host') | list | shuffle(seed=inventory_hostname) + }} 
+elasticserch_interface_speed: |- + {% set default_interface_fact = hostvars[inventory_hostname]['ansible_' + (elastic_data_interface | replace('-', '_'))] %} + {% set speeds = [] %} + {% if default_interface_fact['type'] == 'bridge' %} + {% for interface in default_interface_fact['interfaces'] %} + {% set interface_fact = hostvars[inventory_hostname]['ansible_' + (interface | replace('-', '_'))] %} + {% if 'speed' in interface_fact %} + {% set speed = (interface_fact['speed'] | default(1000)) | string %} + {% if speed == "-1" %} + {% set _ = speeds.append(1000) %} + {% else %} + {% set _ = speeds.append(speed | int) %} + {% endif %} + {% if 'module' in interface_fact %} + {% set _ = speeds.append((interface_fact['speed'] | default(1000)) | int) %} + {% else %} + {% set _ = speeds.append(1000) %} + {% endif %} + {% endif %} + {% endfor %} + {% else %} + {% if ('module' in default_interface_fact) or (default_interface_fact['type'] == 'bond') %} + {% set speed = (default_interface_fact['speed'] | default(1000)) | string %} + {% if speed == "-1" %} + {% set _ = speeds.append(1000) %} + {% else %} + {% set _ = speeds.append(speed | int) %} + {% endif %} + {% else %} + {% set _ = speeds.append(1000) %} + {% endif %} + {% endif %} + {% set interface_speed = ((speeds | min) * 0.20) | int %} + {{ ((interface_speed | int) > 750) | ternary(750, interface_speed) }} +elasticsearch_data_node_details: >- + {{ + (data_nodes | map('extract', hostvars, 'ansible_host') | list) | map('regex_replace', '(.*)' ,'\1:' ~ elastic_port) | list + }} +logstash_data_node_details: >- + {{ + (logstash_nodes | map('extract', hostvars, 'ansible_host') | list) | map('regex_replace', '(.*)' ,'\1:' ~ logstash_beat_input_port) | list + }} + +# based on the assignment of roles to hosts, set per host booleans +master_node: "{{ (inventory_hostname in master_nodes) | ternary(true, false) }}" +data_node: "{{ (inventory_hostname in data_nodes) | ternary(true, false) }}" + +elastic_processors_floor: "{{ 
((ansible_processor_count | int) - 1) }}" +elastic_processors_floor_set: "{{ ((elastic_processors_floor | int) > 0) | ternary(elastic_processors_floor, 1) }}" +elastic_thread_pool_size: "{{ ((ansible_processor_count | int) >= 24) | ternary(23, elastic_processors_floor_set) }}" + +# Set a data node facts. The data nodes, in the case of elasticsearch are also +# ingest nodes. +elasticsearch_number_of_replicas: "{{ ((data_nodes | length) > 2) | ternary('2', ((data_nodes | length) > 1) | ternary('1', '0')) }}" +elasticsearch_data_hosts: |- + {% set data_hosts = elasticsearch_data_node_details | shuffle(seed=inventory_hostname) %} + {% if inventory_hostname in data_nodes %} + {% set _ = data_hosts.insert(0, '127.0.0.1:' ~ elastic_port) %} + {% endif %} + {{ data_hosts }} +logstash_data_hosts: |- + {% set data_hosts = logstash_data_node_details | shuffle(seed=inventory_hostname) %} + {% if inventory_hostname in data_nodes %} + {% set _ = data_hosts.insert(0, '127.0.0.1:' ~ logstash_beat_input_port) %} + {% endif %} + {{ data_hosts }} diff --git a/elk_metrics_7x/roles/elastic_dependencies/defaults/main.yml b/elk_metrics_7x/roles/elastic_dependencies/defaults/main.yml new file mode 100644 index 00000000..1fa2c5c6 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_dependencies/defaults/main.yml @@ -0,0 +1,46 @@ +--- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +## Adds option to set the UID/GID of a given service user. 
+
+# service_group_gid: 5000
+# service_owner_uid: 5000
+
+#define this in host/group vars as needed to mount remote filesystems
+#set the client address as appropriate, eth1 assumes osa container mgmt network
+#mountpoints and server paths are just examples
+#elastic_shared_fs_repos:
+#  - fstype: nfs4
+#    src: ":/esbackup"
+#    opts: clientaddr="{{ ansible_eth1['ipv4']['address'] }}"
+#    path: "/elastic-backup"
+#    state: mounted
+
+# NOTE(cloudnull) - When the heap size for a given elastic node is greater than
+# 6GiB the G1 garbage collector can be enabled.
+elastic_g1gc_enabled: true
+
+elastic_lxc_template_config:
+  3:
+    aa_profile: lxc.apparmor.profile
+    mount: lxc.mount.entry
+  2:
+    aa_profile: lxc.aa_profile
+    mount: lxc.mount.entry
+
+# Set the elastic search heap size. If this option is undefined the value will
+# be derived automatically using 1/4 of the available RAM for logstash and 1/2
+# of the available RAM for elasticsearch. The value is expected to be in MiB.
+# elastic_heap_size_default: 10240 # type `int`
+
+# Set the friendly name of the version of java that will be used as the default.
+elastic_java_version: java-8
diff --git a/elk_metrics_7x/roles/elastic_dependencies/meta/main.yml b/elk_metrics_7x/roles/elastic_dependencies/meta/main.yml
new file mode 100644
index 00000000..1dbd7a67
--- /dev/null
+++ b/elk_metrics_7x/roles/elastic_dependencies/meta/main.yml
@@ -0,0 +1,34 @@
+---
+# Copyright 2018, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+allow_duplicates: true
+galaxy_info:
+  author: OpenStack
+  description: Elastic v7.x dependencies role
+  company: Rackspace
+  license: Apache2
+  min_ansible_version: 2.5
+  platforms:
+    - name: Ubuntu
+      versions:
+        - trusty
+        - xenial
+        - bionic
+  categories:
+    - cloud
+    - development
+    - elasticsearch
+    - elastic-stack
+dependencies: []
diff --git a/elk_metrics_7x/roles/elastic_dependencies/tasks/main.yml b/elk_metrics_7x/roles/elastic_dependencies/tasks/main.yml
new file mode 100644
index 00000000..27eef9a2
--- /dev/null
+++ b/elk_metrics_7x/roles/elastic_dependencies/tasks/main.yml
@@ -0,0 +1,238 @@
+---
+# Copyright 2018, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+- name: Check for service_name var
+  fail:
+    msg: >-
+      The required variable [ service_name ] is undefined.
+  when:
+    - service_name is undefined
+
+- name: Check for service_owner var
+  fail:
+    msg: >-
+      The required variable [ service_owner ] is undefined.
+  when:
+    - service_owner is undefined
+
+- name: Check for service_group var
+  fail:
+    msg: >-
+      The required variable [ service_group ] is undefined.
+ when: + - service_group is undefined + +- name: Load service variables + include_vars: "vars_{{ service_name }}.yml" + +- name: Gather variables for each operating system + include_vars: "{{ item }}" + with_first_found: + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower }}.yml" + - "{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version | lower }}.yml" + - "{{ ansible_distribution | lower }}.yml" + - "{{ ansible_os_family | lower }}-{{ ansible_distribution_version.split('.')[0] }}.yml" + - "{{ ansible_os_family | lower }}.yml" + tags: + - always + +- name: Set elastic log rotate path + set_fact: + elastic_log_rotate_path: "/var/log/{{ service_name }}" + +- name: Configure systcl vm.max_map_count=524288 on elastic hosts + sysctl: + name: "vm.max_map_count" + value: "524288" + state: "present" + reload: "yes" + sysctl_file: /etc/sysctl.d/99-elasticsearch.conf + delegate_to: "{{ physical_host }}" + tags: + - sysctl + +- name: Configure systcl fs.inotify.max_user_watches=1048576 on elastic hosts + sysctl: + name: "fs.inotify.max_user_watches" + value: "1048576" + state: "present" + reload: "yes" + sysctl_file: /etc/sysctl.d/99-elasticsearch.conf + delegate_to: "{{ physical_host }}" + tags: + - sysctl + +- name: Create the system group + group: + name: "{{ service_group }}" + gid: "{{ service_group_gid | default(omit) }}" + state: "present" + system: "yes" + +- name: Create the system user + block: + - name: Create the system user + user: + name: "{{ service_owner }}" + uid: "{{ service_owner_uid | default(omit) }}" + group: "{{ service_group }}" + shell: "/bin/false" + system: "yes" + createhome: "no" + home: "/var/lib/{{ service_name }}" + rescue: + - name: Check for system user + debug: + msg: >- + The general user creation task failed. 
This typically means that the + user already exists and something in the user configuration provided + is changing the system user in way that is simply not possible at this + time. The playbooks will now simply ensure the user exists and before + carrying on to the next task. While it's not required, it may be + benificial to schedule a maintenance where the elastic services are + stopped. + + - name: Ensure the system user exists + user: + name: "{{ service_owner }}" + group: "{{ service_group }}" + +- name: Physical host block + block: + - name: Check for directory + stat: + path: "/var/lib/{{ service_name }}" + register: service_dir + + - name: Check for data directory + debug: + msg: >- + The service data directory [ /var/lib/{{ service_name }} ] already + exists. To ensure no data is lost, the linked directory path to + [ /openstack/{{ inventory_hostname }}/{{ service_name }} ] will not be + created for this host. + when: + - service_dir.stat.isdir is defined and + service_dir.stat.isdir + + - name: Ensure service directories data-path exists + file: + path: "/openstack/{{ inventory_hostname }}/{{ service_name }}" + state: "directory" + owner: "{{ service_owner }}" + group: "{{ service_group }}" + when: + - not (service_dir.stat.exists | bool) + + - name: Ensure data link exists + file: + src: "/openstack/{{ inventory_hostname }}/{{ service_name }}" + dest: "/var/lib/{{ service_name }}" + owner: "{{ service_owner }}" + group: "{{ service_group }}" + state: link + when: + - not (service_dir.stat.exists | bool) + when: + - physical_host == inventory_hostname + +- name: Container block + block: + - name: Ensure service directories data-path exists + file: + path: "/openstack/{{ inventory_hostname }}/{{ service_name }}" + state: "directory" + delegate_to: "{{ physical_host }}" + + - name: Pull lxc version + command: "lxc-ls --version" + delegate_to: "{{ physical_host }}" + changed_when: false + register: lxc_version + tags: + - skip_ansible_lint + + - name: 
Enable or Disable lxc three syntax + set_fact: + lxc_major_version: "{{ lxc_version.stdout.split('.')[0] }}" + + - name: elasticsearch datapath bind mount + lxc_container: + name: "{{ inventory_hostname }}" + container_command: | + [[ ! -d "/var/lib/{{ service_name }}" ]] && mkdir -p "/var/lib/{{ service_name }}" + container_config: + - "{{ elastic_lxc_template_config[(lxc_major_version | int)]['mount'] }}=/openstack/{{ inventory_hostname }}/{{ service_name }} var/lib/{{ service_name }} none bind 0 0" + - "{{ elastic_lxc_template_config[(lxc_major_version | int)]['aa_profile'] }}=unconfined" + delegate_to: "{{ physical_host }}" + when: + - container_tech | default('lxc') == 'lxc' + - physical_host != inventory_hostname + +- name: Ensure Java is installed + package: + name: "{{ elastic_distro_packages }}" + state: "{{ elk_package_state | default('present') }}" + install_recommends: "{{ (ansible_pkg_mgr == 'apt') | ternary('yes', omit) }}" + update_cache: "{{ (ansible_pkg_mgr == 'apt') | ternary('yes', omit) }}" + register: _package_task + until: _package_task is success + retries: 3 + delay: 2 + tags: + - package_install + +- name: Set java alternatives + block: + - name: Get java version alternantive + shell: >- + update-alternatives --query java | awk -F':' '/{{ elastic_java_version }}/ && /Alternative/ {print $2}' + register: java_alternatives + changed_when: false + + - name: Set java version alternantive + alternatives: + name: java + path: "{{ java_alternatives.stdout.strip() }}" + when: + - (ansible_os_family | lower) == 'debian' + +- name: Ensure service directories exists + file: + path: "/etc/{{ service_name }}" + state: "directory" + owner: "{{ service_owner }}" + group: "{{ service_group }}" + +- name: Drop logrotate conf file(s) + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + with_items: + - src: "templates/logrotate.j2" + dest: "/etc/logrotate.d/{{ service_name }}" + +- name: Ensure host can resolve itself + lineinfile: + path: 
/etc/hosts + regexp: '^{{ item }}' + line: '{{ item }} {{ ansible_hostname }} {{ ansible_fqdn }}' + owner: root + group: root + mode: 0644 + with_items: + - "127.0.2.1" + - "{{ ansible_host }}" diff --git a/elk_metrics_7x/roles/elastic_dependencies/templates/logrotate.j2 b/elk_metrics_7x/roles/elastic_dependencies/templates/logrotate.j2 new file mode 100644 index 00000000..d868cf28 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_dependencies/templates/logrotate.j2 @@ -0,0 +1,12 @@ +{{ elastic_log_rotate_path }}/*.log +{ + copytruncate + daily + rotate 2 + delaycompress + compress + dateext + notifempty + missingok + maxage 5 +} diff --git a/elk_metrics_7x/roles/elastic_dependencies/vars/redhat.yml b/elk_metrics_7x/roles/elastic_dependencies/vars/redhat.yml new file mode 100644 index 00000000..b31d349c --- /dev/null +++ b/elk_metrics_7x/roles/elastic_dependencies/vars/redhat.yml @@ -0,0 +1,17 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +elastic_distro_packages: + - java-1.8.0-openjdk diff --git a/elk_metrics_7x/roles/elastic_dependencies/vars/suse.yml b/elk_metrics_7x/roles/elastic_dependencies/vars/suse.yml new file mode 100644 index 00000000..554bfe46 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_dependencies/vars/suse.yml @@ -0,0 +1,17 @@ +--- +# Copyright 2018, Rackspace US, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +elastic_distro_packages: + - java-1_8_0-openjdk diff --git a/elk_metrics_7x/roles/elastic_dependencies/vars/ubuntu.yml b/elk_metrics_7x/roles/elastic_dependencies/vars/ubuntu.yml new file mode 100644 index 00000000..3e016058 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_dependencies/vars/ubuntu.yml @@ -0,0 +1,17 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +elastic_distro_packages: + - openjdk-8-jre diff --git a/elk_metrics_7x/roles/elastic_dependencies/vars/vars_elasticsearch.yml b/elk_metrics_7x/roles/elastic_dependencies/vars/vars_elasticsearch.yml new file mode 100644 index 00000000..e82f639c --- /dev/null +++ b/elk_metrics_7x/roles/elastic_dependencies/vars/vars_elasticsearch.yml @@ -0,0 +1,17 @@ +--- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The heap size is set using half of the total memory available with
+# a cap of 32GiB. If the total available memory is less than 32GiB a buffer of
+# 10% will be used to ensure the underlying system is not starved of memory.
+_elastic_heap_size_default: "{{ ((elastic_memory_upper_limit | int) > 30720) | ternary(30720, ((elastic_memory_upper_limit | int) - ((elastic_memory_upper_limit | int) * 0.1))) }}"
diff --git a/elk_metrics_7x/roles/elastic_dependencies/vars/vars_logstash.yml b/elk_metrics_7x/roles/elastic_dependencies/vars/vars_logstash.yml
new file mode 100644
index 00000000..a5a0c4bd
--- /dev/null
+++ b/elk_metrics_7x/roles/elastic_dependencies/vars/vars_logstash.yml
@@ -0,0 +1,17 @@
+---
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The heap size is set using a quarter of the total memory available with
+# a cap of 32GiB. If the total available memory is less than 32GiB a buffer of
+# 10% will be used to ensure the underlying system is not starved of memory.
+_elastic_heap_size_default: "{{ ((elastic_memory_lower_limit | int) > 30720) | ternary(30720, ((elastic_memory_lower_limit | int) - ((elastic_memory_lower_limit | int) * 0.1))) }}" diff --git a/elk_metrics_7x/roles/elastic_filebeat/defaults/main.yml b/elk_metrics_7x/roles/elastic_filebeat/defaults/main.yml new file mode 100644 index 00000000..0a6dae1a --- /dev/null +++ b/elk_metrics_7x/roles/elastic_filebeat/defaults/main.yml @@ -0,0 +1,284 @@ +--- +# Copyright 2018, Vexxhost, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +filebeat_service_state: restarted +filebeat_oslo_log_multiline_config: + pattern: '^[0-9-]{10} +[0-9:\.]+ +[0-9]+ +[A-Z]+ +[A-Za-z0-9\._]+ \[|Traceback' + negate: true + match: after +filebeat_prospectors: + - type: log + enabled: "{{ filebeat_repo_enabled | default(true) }}" + paths: + - /openstack/log/*repo_container*/apt-cacher-ng/apt-cacher.* + - /openstack/log/*repo_container*/pypiserver/*.log + - /openstack/log/*repo_container*/rsyncd.log + tags: + - infrastructure + - repo-server + - type: log + enabled: "{{ filebeat_haproxy_enabled | default(true) }}" + paths: + - /var/log/haproxy/*.log + tags: + - infrastructure + - haproxy + - type: log + enabled: "{{ filebeat_rabbitmq_enabled | default(true) }}" + paths: + - /openstack/log/*rabbit*/rabbitmq/*.log + - /openstack/log/*rabbit*/rabbitmq/log/*.log + - /var/log/rabbitmq/*.log + - /var/log/rabbitmq/log/*.log + multiline: + pattern: '^=' + negate: true + match: after + tags: + - infrastructure + - rabbitmq + - type: log + enabled: "{{ filebeat_ceph_enabled | default(true) }}" + paths: + - /openstack/log/*ceph*/ceph/ceph-mon.*.log + - /var/log/ceph/ceph-mon.*.log + tags: + - infrastructure + - ceph + - ceph-mon + - type: log + enabled: "{{ filebeat_ceph_enabled | default(true) }}" + paths: + - /openstack/log/*ceph*/ceph/ceph-mgr.*.log + - /var/log/ceph/ceph-mgr.*.log + tags: + - infrastructure + - ceph + - ceph-mgr + - type: log + enabled: "{{ filebeat_ceph_enabled | default(true) }}" + paths: + - /openstack/log/*ceph*/ceph/ceph-osd.*.log + - /var/log/ceph-osd.*.log + tags: + - infrastructure + - ceph + - ceph-osd + - type: log + enabled: "{{ filebeat_keystone_enabled | default(true) }}" + paths: + - /openstack/log/*keystone*/keystone/keystone.log + - /var/log/keystone/keystone.log + multiline: "{{ filebeat_oslo_log_multiline_config }}" + tags: + - openstack + - keystone + # NOTE(mnaser): Barbican ships to Journal + - type: log + enabled: "{{ filebeat_glance_enabled | default(true) }}" + paths: + - 
/openstack/log/*glance*/glance/*.log + - /var/log/glance/*.log + multiline: "{{ filebeat_oslo_log_multiline_config }}" + tags: + - openstack + - glance + # NOTE(mnaser): Cinder ships to journal + - type: log + enabled: "{{ filebeat_nova_enabled | default(true) }}" + paths: + - /openstack/log/*nova*/nova/*.log + - /var/log/nova/*.log + multiline: "{{ filebeat_oslo_log_multiline_config }}" + tags: + - openstack + - nova + - type: log + enabled: "{{ filebeat_neutron_enabled | default(true) }}" + paths: + - /openstack/log/*neutron*/neutron/*.log + - /var/log/neutron/*.log + multiline: "{{ filebeat_oslo_log_multiline_config }}" + tags: + - openstack + - neutron + - type: log + enabled: "{{ filebeat_heat_enabled | default(true) }}" + paths: + - /openstack/log/*heat*/heat/*.log + - /var/log/heat/*.log + multiline: "{{ filebeat_oslo_log_multiline_config }}" + tags: + - openstack + - heat + - type: log + enabled: "{{ filebeat_designate_enabled | default(true) }}" + paths: + - /openstack/log/*designate*/designate/*.log + - /var/log/designate/*.log + multiline: "{{ filebeat_oslo_log_multiline_config }}" + tags: + - openstack + - designate + - type: log + enabled: "{{ filebeat_swift_enabled | default(true) }}" + paths: + - /openstack/log/*swift*/account*.log + - /var/log/swift/account*.log + multiline: + pattern: '^[A-Za-z]+[[:space:]]* +[0-9]{1,2} +[0-9:\.]+ +[A-Za-z0-9-]+ container-replicator: +[A-Za-z0-9-\ ]+' + negate: false + match: after + tags: + - openstack + - swift + - swift-account + - type: log + enabled: "{{ filebeat_swift_enabled | default(true) }}" + paths: + - /openstack/log/*swift*/container*.log + - /var/log/swift/container*.log + multiline: + pattern: '^[A-Za-z]+[[:space:]]* +[0-9]{1,2} +[0-9:\.]+ +[A-Za-z0-9-]+ account-replicator: +[A-Za-z0-9-\ ]+' + negate: false + match: after + tags: + - openstack + - swift + - swift-container + - type: log + enabled: "{{ filebeat_swift_enabled | default(true) }}" + paths: + - /openstack/log/*swift*/object*.log + - 
/var/log/swift/object*.log + multiline: + pattern: '^[A-Za-z]+[[:space:]]* +[0-9]{1,2} +[0-9:\.]+ +[A-Za-z0-9-]+ object-replicator: +[A-Za-z0-9-\ ]+' + negate: false + match: after + tags: + - openstack + - swift + - swift-object + - type: log + enabled: "{{ filebeat_swift_enabled | default(true) }}" + paths: + - /openstack/log/*swift*/proxy*.log + - /var/log/swift/proxy*.log + tags: + - openstack + - swift + - swift-proxy + - type: log + enabled: "{{ filebeat_gnocchi_enabled | default(true) }}" + paths: + - /openstack/log/*gnocchi*/gnocchi/*.log + - /var/log/gnocchi/*.log + multiline: "{{ filebeat_oslo_log_multiline_config }}" + tags: + - openstack + - gnocchi + - type: log + enabled: "{{ filebeat_ceilometer_enabled | default(true) }}" + paths: + - /openstack/log/*ceilometer*/ceilometer/*.log + - /var/log/ceilometer/*.log + multiline: "{{ filebeat_oslo_log_multiline_config }}" + tags: + - openstack + - ceilometer + - type: log + enabled: "{{ filebeat_aodh_enabled | default(true) }}" + paths: + - /openstack/log/*aodh*/aodh/*.log + - /var/log/aodh/*.log + multiline: "{{ filebeat_oslo_log_multiline_config }}" + tags: + - openstack + - aodh + - type: log + enabled: "{{ filebeat_ironic_enabled | default(true) }}" + paths: + - /openstack/log/*ironic*/ironic/*.log + - /var/log/ironic/*.log + multiline: "{{ filebeat_oslo_log_multiline_config }}" + tags: + - openstack + - ironic + - type: log + enabled: "{{ filebeat_magnum_enabled | default(true) }}" + paths: + - /openstack/log/*magnum*/magnum/*.log + - /var/log/magnum/*.log + multiline: "{{ filebeat_oslo_log_multiline_config }}" + tags: + - openstack + - magnum + - type: log + enabled: "{{ filebeat_trove_enabled | default(true) }}" + paths: + - /openstack/log/*trove*/trove/*.log + - /var/log/trove/*.log + multiline: "{{ filebeat_oslo_log_multiline_config }}" + tags: + - openstack + - trove + - type: log + enabled: "{{ filebeat_sahara_enabled | default(true) }}" + paths: + - /openstack/log/*sahara*/sahara/*.log + - 
/var/log/sahara/*.log + multiline: "{{ filebeat_oslo_log_multiline_config }}" + tags: + - openstack + - sahara + - type: log + enabled: "{{ filebeat_octavia_enabled | default(true) }}" + paths: + - /openstack/log/*octavia*/octavia/*.log + - /var/log/octavia/*.log + multiline: "{{ filebeat_oslo_log_multiline_config }}" + tags: + - openstack + - octavia + - type: log + enabled: "{{ filebeat_tacker_enabled | default(true) }}" + paths: + - /openstack/log/*tacker*/tacker/*.log + - /var/log/tacker/*.log + multiline: "{{ filebeat_oslo_log_multiline_config }}" + tags: + - openstack + - tacker + - type: log + enabled: "{{ filebeat_system_enabled | default(true) }}" + paths: + - /openstack/log/ansible-logging/*.log + - /var/log/*.log + - /var/log/libvirt/*.log + - /var/log/libvirt/*/*.log + - /var/log/lxc/*.log + tags: + - system + - type: log + enabled: "{{ filebeat_logging_enabled | default(true) }}" + paths: + - /openstack/log/*/beats/*.log + - /openstack/log/*/curator/curator + - /openstack/log/*/elasticsearch/*.log + - /var/log/beats/*.log + - /var/log/curator/curator + - /var/log/elasticsearch/*.log + tags: + - beats diff --git a/elk_metrics_7x/roles/elastic_filebeat/handlers/main.yml b/elk_metrics_7x/roles/elastic_filebeat/handlers/main.yml new file mode 100644 index 00000000..76c5a128 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_filebeat/handlers/main.yml @@ -0,0 +1,33 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Enable and restart filebeat (systemd) + systemd: + name: "filebeat" + enabled: true + state: "{{ filebeat_service_state }}" + daemon_reload: true + when: + - ansible_service_mgr == 'systemd' + listen: Enable and restart filebeat + +- name: Enable and restart filebeat (upstart) + service: + name: "filebeat" + state: "{{ filebeat_service_state }}" + enabled: yes + when: + - ansible_service_mgr == 'upstart' + listen: Enable and restart filebeat diff --git a/elk_metrics_7x/roles/elastic_filebeat/meta/main.yml b/elk_metrics_7x/roles/elastic_filebeat/meta/main.yml new file mode 100644 index 00000000..c3076d91 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_filebeat/meta/main.yml @@ -0,0 +1,35 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +galaxy_info: + author: OpenStack + description: Elastic v7.x filebeat role + company: Rackspace + license: Apache2 + min_ansible_version: 2.5 + platforms: + - name: Ubuntu + versions: + - trusty + - xenial + - bionic + categories: + - cloud + - development + - elasticsearch + - elastic-stack +dependencies: + - role: elastic_data_hosts + - role: elastic_repositories diff --git a/elk_metrics_7x/roles/elastic_filebeat/tasks/main.yml b/elk_metrics_7x/roles/elastic_filebeat/tasks/main.yml new file mode 100644 index 00000000..5fd827e3 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_filebeat/tasks/main.yml @@ -0,0 +1,112 @@ +--- +# Copyright 2018, Rackspace US, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +- name: Gather variables for each operating system + include_vars: "{{ item }}" + with_first_found: + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower }}.yml" + - "{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version | lower }}.yml" + - "{{ ansible_distribution | lower }}.yml" + - "{{ ansible_os_family | lower }}-{{ ansible_distribution_version.split('.')[0] }}.yml" + - "{{ ansible_os_family | lower }}.yml" + tags: + - always + +- name: Ensure beat is installed + package: + name: "{{ filebeat_distro_packages }}" + state: "{{ elk_package_state | default('present') }}" + update_cache: "{{ (ansible_pkg_mgr == 'apt') | ternary('yes', omit) }}" + register: _package_task + until: _package_task is success + retries: 3 + delay: 2 + when: + - ansible_architecture == 'x86_64' + notify: + - Enable and restart filebeat + tags: + - package_install + +- name: Ensure beat is installed (aarch64) + apt: + deb: 'https://object-storage-ca-ymq-1.vexxhost.net/swift/v1/8709ca2640344a4ba85cba0a1d6eea69/aarch64/filebeat-6.5.0-arm64.deb' + when: + - ansible_pkg_mgr == 'apt' + - ansible_architecture == 'aarch64' + notify: + - Enable and restart filebeat + tags: + - package_install + +- name: Create filebeat systemd service config dir + file: + path: "/etc/systemd/system/filebeat.service.d" + state: 
"directory" + group: "root" + owner: "root" + mode: "0755" + when: + - ansible_service_mgr == 'systemd' + +- name: Apply systemd options + template: + src: "{{ item.src }}" + dest: "/etc/systemd/system/filebeat.service.d/{{ item.dest }}" + mode: "0644" + when: + - ansible_service_mgr == 'systemd' + with_items: + - src: "systemd.general-overrides.conf.j2" + dest: "filebeat-overrides.conf" + notify: + - Enable and restart filebeat + +- name: Drop Filebeat conf file + template: + src: "filebeat.yml.j2" + dest: "/etc/filebeat/filebeat.yml" + notify: + - Enable and restart filebeat + +- name: Run the beat setup role + include_role: + name: elastic_beat_setup + when: + - (groups['kibana'] | length) > 0 + vars: + elastic_beat_name: "filebeat" + +- name: Force beat handlers + meta: flush_handlers + +- name: set filebeat service state (upstart) + service: + name: "filebeat" + state: "{{ filebeat_service_state }}" + enabled: "{{ filebeat_service_state in ['running', 'started', 'restarted'] }}" + when: + - ansible_service_mgr == 'upstart' + - filebeat_service_state in ['started', 'stopped'] + +- name: set filebeat service state (systemd) + systemd: + name: "filebeat" + state: "{{ filebeat_service_state }}" + enabled: "{{ filebeat_service_state in ['running', 'started', 'restarted'] }}" + when: + - ansible_service_mgr == 'systemd' + - filebeat_service_state in ['started', 'stopped'] diff --git a/elk_metrics_7x/roles/elastic_filebeat/tasks/systemd.general-overrides.conf.j2 b/elk_metrics_7x/roles/elastic_filebeat/tasks/systemd.general-overrides.conf.j2 new file mode 120000 index 00000000..9ddff7cc --- /dev/null +++ b/elk_metrics_7x/roles/elastic_filebeat/tasks/systemd.general-overrides.conf.j2 @@ -0,0 +1 @@ +../../../templates/systemd.general-overrides.conf.j2 \ No newline at end of file diff --git a/elk_metrics_7x/roles/elastic_filebeat/templates/filebeat.yml.j2 b/elk_metrics_7x/roles/elastic_filebeat/templates/filebeat.yml.j2 new file mode 100644 index 00000000..49ab5525 --- 
/dev/null +++ b/elk_metrics_7x/roles/elastic_filebeat/templates/filebeat.yml.j2 @@ -0,0 +1,938 @@ +{% import 'templates/_macros.j2' as elk_macros %} +######################## Filebeat Configuration ############################ + +# This file is a full configuration example documenting all non-deprecated +# options in comments. For a shorter configuration example, that contains only +# the most common options, please see filebeat.yml in the same directory. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/filebeat/index.html + + +#========================== Modules configuration ============================ +filebeat.modules: + +#------------------------------- System Module ------------------------------- +- module: system + # Syslog + syslog: + enabled: "{{ filebeat_syslog_enabled | default(true) }}" + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. + var.convert_timezone: false + + # Prospector configuration (advanced). Any prospector configuration option + # can be added under this section. + #prospector: + + # Authorization logs + auth: + enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. + var.convert_timezone: false + + # Prospector configuration (advanced). Any prospector configuration option + # can be added under this section. 
+ #prospector: + +#------------------------------- Apache2 Module ------------------------------ +- module: apache2 + access: + enabled: "{{ filebeat_httpd_enabled | default(true) }}" + var.paths: + - /openstack/log/*horizon*/horizon/*access.log + error: + enabled: "{{ filebeat_httpd_enabled | default(true) }}" + var.paths: + - /openstack/log/*horizon*/horizon/horizon-error.log + +#------------------------------- Auditd Module ------------------------------- +- module: auditd + log: + enabled: "{{ filebeat_auditd_enabled | default(true) }}" + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Prospector configuration (advanced). Any prospector configuration option + # can be added under this section. + #prospector: + +#------------------------------- Icinga Module ------------------------------- +#- module: icinga + # Main logs + #main: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Prospector configuration (advanced). Any prospector configuration option + # can be added under this section. + #prospector: + + # Debug logs + #debug: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Prospector configuration (advanced). Any prospector configuration option + # can be added under this section. + #prospector: + + # Startup logs + #startup: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Prospector configuration (advanced). Any prospector configuration option + # can be added under this section. + #prospector: + +#--------------------------------- IIS Module -------------------------------- +#- module: iis + # Access logs + #access: + #enabled: true + + # Set custom paths for the log files. 
If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Prospector configuration (advanced). Any prospector configuration option + # can be added under this section. + #prospector: + + # Error logs + #error: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Prospector configuration (advanced). Any prospector configuration option + # can be added under this section. + #prospector: + +#-------------------------------- Kafka Module ------------------------------- +#- module: kafka + # All logs + #log: + #enabled: true + + # Set custom paths for Kafka. If left empty, + # Filebeat will look under /opt. + #var.kafka_home: + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + +#------------------------------ logstash Module ------------------------------ +- module: logstash + # logs + log: + enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + # var.paths: + + # Slow logs + slowlog: + enabled: true + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + +#------------------------------- mongodb Module ------------------------------ +#- module: mongodb + # Logs + #log: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. 
+ #input: + +#-------------------------------- MySQL Module ------------------------------- +- module: mysql + error: + enabled: "{{ filebeat_galera_enabled | default(true) }}" + var.paths: + - /openstack/log/*galera*/mysql_logs/galera_server_error.log + - /var/log/mysql_logs/galera_server_error.log + slowlog: + enabled: false + +#-------------------------------- Nginx Module ------------------------------- +- module: nginx + access: + enabled: "{{ filebeat_nginx_enabled | default(true) }}" + var.paths: + - /openstack/log/*repo_container*/nginx/*access.log + - /openstack/log/*keystone*/nginx/*access.log + error: + enabled: "{{ filebeat_nginx_enabled | default(true) }}" + var.paths: + - /openstack/log/*repo_container*/nginx/*error.log + - /openstack/log/*keystone*/nginx/*error.log + +#------------------------------- Osquery Module ------------------------------ +- module: osquery + result: + enabled: "{{ filebeat_osquery_enabled | default(true) }}" + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # If true, all fields created by this module are prefixed with + # `osquery.result`. Set to false to copy the fields in the root + # of the document. The default is true. + var.use_namespace: true + +#----------------------------- PostgreSQL Module ----------------------------- +#- module: postgresql + # Logs + #log: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Prospector configuration (advanced). Any prospector configuration option + # can be added under this section. + #prospector: + +#-------------------------------- Redis Module ------------------------------- +#- module: redis + # Main logs + #log: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. 
+ #var.paths: ["/var/log/redis/redis-server.log*"] + + # Slow logs, retrieved via the Redis API (SLOWLOG) + #slowlog: + #enabled: true + + # The Redis hosts to connect to. + #var.hosts: ["localhost:6379"] + + # Optional, the password to use when connecting to Redis. + #var.password: + +#------------------------------- Traefik Module ------------------------------ +#- module: traefik + # Access logs + #access: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Prospector configuration (advanced). Any prospector configuration option + # can be added under this section. + #prospector: + + +#=========================== Filebeat prospectors ============================= + +# List of prospectors to fetch data. +filebeat.prospectors: +# Each - is a prospector. Most options can be set at the prospector level, so +# you can use different prospectors for various configurations. +# Below are the prospector specific configurations. + +# Type of the files. Based on this the way the file is read is decided. 
+# The different types cannot be mixed in one prospector +# +# Possible options are: +# * log: Reads every line of the log file (default) +# * stdin: Reads the standard in + +#------------------------------ Log prospector -------------------------------- +{% for p in filebeat_prospectors %} +- type: {{ p['type'] }} + enabled: {{ p['enabled'] }} + paths: +{% for path in p['paths'] %} + - {{ path }} +{% endfor %} +{% if 'multiline' in p %} + multiline.pattern: '{{ p['multiline']['pattern'] }}' + multiline.negate: {{ p['multiline']['negate'] }} + multiline.match: {{ p['multiline']['match'] }} +{% endif %} + tags: +{% for tag in p['tags'] %} + - {{ tag }} +{% endfor %} +{% endfor %} + +#----------------------------- Stdin prospector ------------------------------- +# Configuration to use stdin input +#- type: stdin + +#------------------------- Redis slowlog prospector --------------------------- +# Experimental: Config options for the redis slow log prospector +#- type: redis + #hosts: ["localhost:6379"] + #username: + #password: + #enabled: false + #scan_frequency: 10s + + # Timeout after which time the prospector should return an error + #timeout: 1s + + # Network type to be used for redis connection. Default: tcp + #network: tcp + + # Max number of concurrent connections. Default: 10 + #maxconn: 10 + + # Redis AUTH password. Empty by default. + #password: foobared + +#------------------------------ Udp prospector -------------------------------- +# Experimental: Config options for the udp prospector +#- type: udp + + # Maximum size of the message received over UDP + #max_message_size: 10240 + +#========================== Filebeat autodiscover ============================== + +# Autodiscover allows you to detect changes in the system and spawn new modules +# or prospectors as they happen. 
+ +#filebeat.autodiscover: + # List of enabled autodiscover providers +# providers: +# - type: docker +# templates: +# - condition: +# equals.docker.container.image: busybox +# config: +# - type: log +# paths: +# - /var/lib/docker/containers/${data.docker.container.id}/*.log + +#========================= Filebeat global options ============================ + +# Name of the registry file. If a relative path is used, it is considered relative to the +# data path. +#filebeat.registry_file: ${path.data}/registry + +# These config files must have the full filebeat config part inside, but only +# the prospector part is processed. All global options like spool_size are ignored. +# The config_dir MUST point to a different directory then where the main filebeat config file is in. +#filebeat.config_dir: + +# How long filebeat waits on shutdown for the publisher to finish. +# Default is 0, not waiting. +#filebeat.shutdown_timeout: 0 + +# Enable filebeat config reloading +#filebeat.config: + #prospectors: + #enabled: false + #path: prospectors.d/*.yml + #reload.enabled: true + #reload.period: 10s + #modules: + #enabled: false + #path: modules.d/*.yml + #reload.enabled: true + #reload.period: 10s + +#================================ General ====================================== + +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. +# If this options is not defined, the hostname is used. +#name: + +# The tags of the shipper are included in their own field with each +# transaction published. Tags make it easy to group servers by different +# logical properties. +tags: + - filebeat + +# Optional fields that you can specify to add additional information to the +# output. Fields can be scalar values, arrays, dictionaries, or any nested +# combination of these. 
+#fields: +# env: staging + +# If this option is set to true, the custom fields are stored as top-level +# fields in the output document instead of being grouped under a fields +# sub-dictionary. Default is false. +#fields_under_root: false + +# Internal queue configuration for buffering events to be published. +#queue: + # Queue type by name (default 'mem') + # The memory queue will present all available events (up to the outputs + # bulk_max_size) to the output, the moment the output is ready to server + # another batch of events. + #mem: + # Max number of events the queue can buffer. + #events: 4096 + + # Hints the minimum number of events stored in the queue, + # before providing a batch of events to the outputs. + # A value of 0 (the default) ensures events are immediately available + # to be sent to the outputs. + #flush.min_events: 2048 + + # Maximum duration after which events are available to the outputs, + # if the number of events stored in the queue is < min_flush_events. + #flush.timeout: 1s + +# Sets the maximum number of CPUs that can be executing simultaneously. The +# default is the number of logical CPUs available in the system. +#max_procs: + +#================================ Processors =================================== + +# Processors are used to reduce the number of fields in the exported event or to +# enhance the event with external metadata. This section defines a list of +# processors that are applied one by one and the first one receives the initial +# event: +# +# event -> filter1 -> event1 -> filter2 ->event2 ... +# +# The supported processors are drop_fields, drop_event, include_fields, and +# add_cloud_metadata. 
+# +# For example, you can use the following processors to keep the fields that +# contain CPU load percentages, but remove the fields that contain CPU ticks +# values: +# +#processors: +#- include_fields: +# fields: ["cpu"] +#- drop_fields: +# fields: ["cpu.user", "cpu.system"] +# +# The following example drops the events that have the HTTP response code 200: +# +#processors: +#- drop_event: +# when: +# equals: +# http.code: 200 +# +# The following example enriches each event with metadata from the cloud +# provider about the host machine. It works on EC2, GCE, DigitalOcean, +# Tencent Cloud, and Alibaba Cloud. +# +#processors: +#- add_cloud_metadata: ~ +# +# The following example enriches each event with the machine's local time zone +# offset from UTC. +# +#processors: +#- add_locale: +# format: offset +# +# The following example enriches each event with docker metadata, it matches +# given fields to an existing container id and adds info from that container: +# +#processors: +#- add_docker_metadata: +# host: "unix:///var/run/docker.sock" +# match_fields: ["system.process.cgroup.id"] +# match_pids: ["process.pid", "process.ppid"] +# match_source: true +# match_source_index: 4 +# cleanup_timeout: 60 +# # To connect to Docker over TLS you must specify a client and CA certificate. +# #ssl: +# # certificate_authority: "/etc/pki/root/ca.pem" +# # certificate: "/etc/pki/client/cert.pem" +# # key: "/etc/pki/client/cert.key" +# +# The following example enriches each event with docker metadata, it matches +# container id from log path available in `source` field (by default it expects +# it to be /var/lib/docker/containers/*/*.log). +# +#processors: +#- add_docker_metadata: ~ +processors: +- add_host_metadata: ~ + +#============================= Elastic Cloud ================================== + +# These settings simplify using filebeat with the Elastic Cloud (https://cloud.elastic.co/). 
+ +# The cloud.id setting overwrites the `output.elasticsearch.hosts` and +# `setup.kibana.host` options. +# You can find the `cloud.id` in the Elastic Cloud web UI. +#cloud.id: + +# The cloud.auth setting overwrites the `output.elasticsearch.username` and +# `output.elasticsearch.password` settings. The format is `:`. +#cloud.auth: + +#================================ Outputs ====================================== + +# Configure what output to use when sending the data collected by the beat. + +#-------------------------- Elasticsearch output ------------------------------- +#output.elasticsearch: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Array of hosts to connect to. + # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + #hosts: ["localhost:9200"] + + # Set gzip compression level. + #compression_level: 0 + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "elastic" + #password: "changeme" + + # Dictionary of HTTP parameters to pass within the url with index operations. + #parameters: + #param1: value1 + #param2: value2 + + # Number of workers per Elasticsearch host. + #worker: 1 + + # Optional index name. The default is "filebeat" plus date + # and generates [filebeat-]YYYY.MM.DD keys. + # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. + #index: "filebeat-%{[beat.version]}-%{+yyyy.MM.dd}" + + # Optional ingest node pipeline. By default no pipeline will be used. 
+ #pipeline: "" + + # Optional HTTP Path + #path: "/elasticsearch" + + # Custom HTTP headers to add to each request + #headers: + # X-My-Header: Contents of the header + + # Proxy server url + #proxy_url: http://proxy:3128 + + # The number of times a particular Elasticsearch index operation is attempted. If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Elasticsearch bulk API index request. + # The default is 50. + #bulk_max_size: 50 + + # Configure http request timeout before failing an request to Elasticsearch. + #timeout: 90 + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. By default is off. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. 
+ #ssl.renegotiation: never + + +#----------------------------- Logstash output --------------------------------- +{{ elk_macros.output_logstash(inventory_hostname, logstash_data_hosts, ansible_processor_count) }} + +#------------------------------- Kafka output ---------------------------------- +#output.kafka: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The list of Kafka broker addresses from where to fetch the cluster metadata. + # The cluster metadata contain the actual Kafka brokers events are published + # to. + #hosts: ["localhost:9092"] + + # The Kafka topic used for produced events. The setting can be a format string + # using any event field. To set the topic from document type use `%{[type]}`. + #topic: beats + + # The Kafka event key setting. Use format string to create unique event key. + # By default no event key will be generated. + #key: '' + + # The Kafka event partitioning strategy. Default hashing strategy is `hash` + # using the `output.kafka.key` setting or randomly distributes events if + # `output.kafka.key` is not configured. + #partition.hash: + # If enabled, events will only be published to partitions with reachable + # leaders. Default is false. + #reachable_only: false + + # Configure alternative event field names used to compute the hash value. + # If empty `output.kafka.key` setting will be used. + # Default value is empty list. + #hash: [] + + # Authentication details. Password is required if username is set. + #username: '' + #password: '' + + # Kafka version filebeat is assumed to run against. Defaults to the oldest + # supported stable version (currently version 0.8.2.0) + #version: 0.8.2 + + # Metadata update configuration. Metadata do contain leader information + # deciding which broker to use when publishing. + #metadata: + # Max metadata request retry attempts when cluster is in middle of leader + # election. Defaults to 3 retries. 
+ #retry.max: 3 + + # Waiting time between retries during leader elections. Default is 250ms. + #retry.backoff: 250ms + + # Refresh metadata interval. Defaults to every 10 minutes. + #refresh_frequency: 10m + + # The number of concurrent load-balanced Kafka output workers. + #worker: 1 + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Kafka request. The default + # is 2048. + #bulk_max_size: 2048 + + # The number of seconds to wait for responses from the Kafka brokers before + # timing out. The default is 30s. + #timeout: 30s + + # The maximum duration a broker will wait for number of required ACKs. The + # default is 10s. + #broker_timeout: 10s + + # The number of messages buffered for each Kafka broker. The default is 256. + #channel_buffer_size: 256 + + # The keep-alive period for an active network connection. If 0s, keep-alives + # are disabled. The default is 0 seconds. + #keep_alive: 0 + + # Sets the output compression codec. Must be one of none, snappy and gzip. The + # default is gzip. + #compression: gzip + + # The maximum permitted size of JSON-encoded messages. Bigger messages will be + # dropped. The default value is 1000000 (bytes). This value should be equal to + # or less than the broker's message.max.bytes. + #max_message_bytes: 1000000 + + # The ACK reliability level required from broker. 0=no response, 1=wait for + # local commit, -1=wait for all replicas to commit. The default is 1. Note: + # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently + # on error. 
+ #required_acks: 1 + + # The configurable ClientID used for logging, debugging, and auditing + # purposes. The default is "beats". + #client_id: beats + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- Redis output ---------------------------------- +#output.redis: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The list of Redis servers to connect to. If load balancing is enabled, the + # events are distributed to the servers in the list. If one server becomes + # unreachable, the events are distributed to the reachable servers only. + #hosts: ["localhost:6379"] + + # The Redis port to use if hosts does not contain a port number. The default + # is 6379. 
+ #port: 6379 + + # The name of the Redis list or channel the events are published to. The + # default is filebeat. + #key: filebeat + + # The password to authenticate with. The default is no authentication. + #password: + + # The Redis database number where the events are published. The default is 0. + #db: 0 + + # The Redis data type to use for publishing events. If the data type is list, + # the Redis RPUSH command is used. If the data type is channel, the Redis + # PUBLISH command is used. The default value is list. + #datatype: list + + # The number of workers to use for each host configured to publish events to + # Redis. Use this setting along with the loadbalance option. For example, if + # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each + # host). + #worker: 1 + + # If set to true and multiple hosts or workers are configured, the output + # plugin load balances published events onto all Redis hosts. If set to false, + # the output plugin sends all events to only one host (determined at random) + # and will switch to another host if the currently selected one becomes + # unreachable. The default value is true. + #loadbalance: true + + # The Redis connection timeout in seconds. The default is 5 seconds. + #timeout: 5s + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Redis request or pipeline. + # The default is 2048. + #bulk_max_size: 2048 + + # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The + # value must be a URL with a scheme of socks5://. 
+ #proxy_url: + + # This option determines whether Redis hostnames are resolved locally when + # using a proxy. The default value is false, which means that name resolution + # occurs on the proxy server. + #proxy_use_local_resolver: false + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- File output ----------------------------------- +#output.file: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Path to the directory where to save the generated files. The option is + # mandatory. + #path: "/tmp/filebeat" + + # Name of the generated files. The default is `filebeat` and it generates + # files: `filebeat`, `filebeat.1`, `filebeat.2`, etc. 
+ #filename: filebeat + + # Maximum size in kilobytes of each file. When this size is reached, and on + # every filebeat restart, the files are rotated. The default value is 10240 + # kB. + #rotate_every_kb: 10000 + + # Maximum number of files under path. When this number of files is reached, + # the oldest file is deleted and the rest are shifted from last to first. The + # default is 7 files. + #number_of_files: 7 + + # Permissions to use for file creation. The default is 0600. + #permissions: 0600 + + +#----------------------------- Console output --------------------------------- +#output.console: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Pretty print json event + #pretty: false + +#================================= Paths ====================================== + +# The home path for the filebeat installation. This is the default base path +# for all other path settings and for miscellaneous files that come with the +# distribution (for example, the sample dashboards). +# If not set by a CLI flag or in the configuration file, the default for the +# home path is the location of the binary. +#path.home: + +# The configuration path for the filebeat installation. This is the default +# base path for configuration files, including the main YAML configuration file +# and the Elasticsearch template file. If not set by a CLI flag or in the +# configuration file, the default for the configuration path is the home path. +#path.config: ${path.home} + +# The data path for the filebeat installation. This is the default base path +# for all the files in which filebeat needs to store its data. If not set by a +# CLI flag or in the configuration file, the default for the data path is a data +# subdirectory inside the home path. +#path.data: ${path.home}/data + +# The logs path for a filebeat installation. This is the default location for +# the Beat's log files. 
If not set by a CLI flag or in the configuration file, +# the default for the logs path is a logs subdirectory inside the home path. +#path.logs: ${path.home}/logs + +#============================== Dashboards ===================================== +{{ elk_macros.setup_dashboards('filebeat') }} + +#=============================== Template ====================================== +{{ elk_macros.setup_template('filebeat', inventory_hostname, data_nodes, elasticsearch_number_of_replicas) }} + +#============================== Kibana ===================================== +{% if (groups['kibana'] | length) > 0 %} +{{ elk_macros.setup_kibana(hostvars[groups['kibana'][0]]['ansible_host'] ~ ':' ~ kibana_port) }} +{% endif %} + +#================================ Logging ====================================== +{{ elk_macros.beat_logging('filebeat') }} + +#============================== Xpack Monitoring ===================================== +{{ elk_macros.xpack_monitoring_elasticsearch(inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }} + +#================================ HTTP Endpoint ====================================== +# Each beat can expose internal metrics through a HTTP endpoint. For security +# reasons the endpoint is disabled by default. This feature is currently experimental. +# Stats can be access through http://localhost:5066/stats . For pretty JSON output +# append ?pretty to the URL. + +# Defines if the HTTP endpoint is enabled. +#http.enabled: false + +# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost. +#http.host: localhost + +# Port on which the HTTP endpoint will bind. Default is 5066. 
+#http.port: 5066 diff --git a/elk_metrics_7x/roles/elastic_filebeat/vars/redhat.yml b/elk_metrics_7x/roles/elastic_filebeat/vars/redhat.yml new file mode 100644 index 00000000..2f055205 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_filebeat/vars/redhat.yml @@ -0,0 +1,17 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +filebeat_distro_packages: + - filebeat diff --git a/elk_metrics_7x/roles/elastic_filebeat/vars/suse.yml b/elk_metrics_7x/roles/elastic_filebeat/vars/suse.yml new file mode 100644 index 00000000..2f055205 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_filebeat/vars/suse.yml @@ -0,0 +1,17 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +filebeat_distro_packages: + - filebeat diff --git a/elk_metrics_7x/roles/elastic_filebeat/vars/ubuntu.yml b/elk_metrics_7x/roles/elastic_filebeat/vars/ubuntu.yml new file mode 100644 index 00000000..2f055205 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_filebeat/vars/ubuntu.yml @@ -0,0 +1,17 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +filebeat_distro_packages: + - filebeat diff --git a/elk_metrics_7x/roles/elastic_heartbeat/defaults/main.yml b/elk_metrics_7x/roles/elastic_heartbeat/defaults/main.yml new file mode 100644 index 00000000..d6bbf645 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_heartbeat/defaults/main.yml @@ -0,0 +1,16 @@ +--- +# Copyright 2018, Vexxhost, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +heartbeat_service_state: restarted diff --git a/elk_metrics_7x/roles/elastic_heartbeat/handlers/main.yml b/elk_metrics_7x/roles/elastic_heartbeat/handlers/main.yml new file mode 100644 index 00000000..6eee1cc2 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_heartbeat/handlers/main.yml @@ -0,0 +1,33 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +- name: Enable and restart heartbeat (systemd) + systemd: + name: "heartbeat-elastic" + enabled: true + state: "{{ heartbeat_service_state }}" + daemon_reload: true + when: + - ansible_service_mgr == 'systemd' + listen: Enable and restart heartbeat + +- name: Enable and restart heartbeat (upstart) + service: + name: "heartbeat-elastic" + state: "{{ heartbeat_service_state }}" + enabled: yes + when: + - ansible_service_mgr == 'upstart' + listen: Enable and restart heartbeat diff --git a/elk_metrics_7x/roles/elastic_heartbeat/meta/main.yml b/elk_metrics_7x/roles/elastic_heartbeat/meta/main.yml new file mode 100644 index 00000000..ad5a729a --- /dev/null +++ b/elk_metrics_7x/roles/elastic_heartbeat/meta/main.yml @@ -0,0 +1,35 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +galaxy_info: + author: OpenStack + description: Elastic v6.x heartbeat role + company: Rackspace + license: Apache2 + min_ansible_version: 2.5 + platforms: + - name: Ubuntu + versions: + - trusty + - xenial + - bionic + categories: + - cloud + - development + - elasticsearch + - elastic-stack +dependencies: + - role: elastic_data_hosts + - role: elastic_repositories diff --git a/elk_metrics_7x/roles/elastic_heartbeat/tasks/main.yml b/elk_metrics_7x/roles/elastic_heartbeat/tasks/main.yml new file mode 100644 index 00000000..f6f7ddf8 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_heartbeat/tasks/main.yml @@ -0,0 +1,118 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Gather variables for each operating system + include_vars: "{{ item }}" + with_first_found: + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower }}.yml" + - "{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version | lower }}.yml" + - "{{ ansible_distribution | lower }}.yml" + - "{{ ansible_os_family | lower }}-{{ ansible_distribution_version.split('.')[0] }}.yml" + - "{{ ansible_os_family | lower }}.yml" + tags: + - always + +- name: Ensure beat is installed + package: + name: "{{ heartbeat_distro_packages }}" + state: "{{ elk_package_state | default('present') }}" + update_cache: "{{ (ansible_pkg_mgr == 'apt') | ternary('yes', omit) }}" + register: _package_task + until: _package_task is success + retries: 3 + delay: 2 + when: + - ansible_architecture == 'x86_64' + notify: + - Enable and restart heartbeat + tags: + - package_install + +- name: Ensure beat is installed (aarch64) + apt: + deb: 'https://object-storage-ca-ymq-1.vexxhost.net/swift/v1/8709ca2640344a4ba85cba0a1d6eea69/aarch64/heartbeat-6.5.0-arm64.deb' + when: + - ansible_pkg_mgr == 'apt' + - ansible_architecture == 'aarch64' + notify: + - Enable and restart heartbeat + tags: + - package_install + +- name: Create heartbeat systemd service config dir + file: + path: "/etc/systemd/system/heartbeat.service.d" + state: "directory" + group: "root" + owner: "root" + mode: "0755" + when: + - ansible_service_mgr == 'systemd' + +- name: Apply systemd options + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + mode: "0644" + when: + - ansible_service_mgr == 'systemd' + with_items: + - src: "systemd.general-overrides.conf.j2" + dest: "/etc/systemd/system/heartbeat.service.d/heartbeat-overrides.conf" + notify: + - Enable and restart heartbeat + +- name: Create heartbeat configs + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + mode: "0644" + when: 
+ - ansible_service_mgr == 'systemd' + with_items: + - src: "heartbeat.yml.j2" + dest: "/etc/heartbeat/heartbeat.yml" + notify: + - Enable and restart heartbeat + +- name: Run the beat setup role + include_role: + name: elastic_beat_setup + when: + - (groups['kibana'] | length) > 0 + vars: + elastic_beat_name: "heartbeat" + +- name: Force beat handlers + meta: flush_handlers + +- name: set heartbeat service state (upstart) + service: + name: "heartbeat-elastic" + state: "{{ heartbeat_service_state }}" + enabled: "{{ heartbeat_service_state in ['running', 'started', 'restarted'] }}" + when: + - ansible_service_mgr == 'upstart' + - heartbeat_service_state in ['started', 'stopped'] + +- name: set heartbeat service state (systemd) + systemd: + name: "heartbeat-elastic" + state: "{{ heartbeat_service_state }}" + enabled: "{{ heartbeat_service_state in ['running', 'started', 'restarted'] }}" + when: + - ansible_service_mgr == 'systemd' + - heartbeat_service_state in ['started', 'stopped'] diff --git a/elk_metrics_7x/roles/elastic_heartbeat/tasks/systemd.general-overrides.conf.j2 b/elk_metrics_7x/roles/elastic_heartbeat/tasks/systemd.general-overrides.conf.j2 new file mode 120000 index 00000000..9ddff7cc --- /dev/null +++ b/elk_metrics_7x/roles/elastic_heartbeat/tasks/systemd.general-overrides.conf.j2 @@ -0,0 +1 @@ +../../../templates/systemd.general-overrides.conf.j2 \ No newline at end of file diff --git a/elk_metrics_7x/roles/elastic_heartbeat/templates/heartbeat.yml.j2 b/elk_metrics_7x/roles/elastic_heartbeat/templates/heartbeat.yml.j2 new file mode 100644 index 00000000..f2def5b8 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_heartbeat/templates/heartbeat.yml.j2 @@ -0,0 +1,951 @@ +{% import 'templates/_macros.j2' as elk_macros %} +################### Heartbeat Configuration Example ######################### + +# This file is a full configuration example documenting all non-deprecated +# options in comments. 
For a shorter configuration example, that contains +# only some common options, please see heartbeat.yml in the same directory. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/heartbeat/index.html + +############################# Heartbeat ###################################### +{% set icmp_hosts = [] %} +{% for host_item in groups['all'] %} +{% if hostvars[host_item]['ansible_host'] is defined %} +{% set _ = icmp_hosts.extend([hostvars[host_item]['ansible_host']]) %} +{% endif %} +{% endfor %} +# Configure monitors +heartbeat.monitors: +- type: icmp # monitor type `icmp` (requires root) uses ICMP Echo Request to ping + # configured hosts + + # Monitor name used for job name and document type. + name: icmp + + # Enable/Disable monitor + enabled: true + + # Configure task schedule using cron-like syntax + schedule: '@every 30s' # every 30 seconds from start of beat + + # List of hosts to ping + hosts: {{ (icmp_hosts | default([])) | to_json }} + # Configure IP protocol types to ping on if hostnames are configured. + # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`. + ipv4: true + ipv6: true + mode: any + + # Configure file json file to be watched for changes to the monitor: + #watch.poll_file: + # Path to check for updates. + #path: + + # Interval between file file changed checks. + #interval: 5s + + # Total running time per ping test. + timeout: {{ icmp_hosts | length }}s + + # Waiting duration until another ICMP Echo Request is emitted. + wait: 1s + + # The tags of the monitors are included in their own field with each + # transaction published. Tags make it easy to group servers by different + # logical properties. + #tags: ["service-X", "web-tier"] + + # Optional fields that you can specify to add additional information to the + # monitor output. Fields can be scalar values, arrays, dictionaries, or any nested + # combination of these. 
+ #fields: + # env: staging + + # If this option is set to true, the custom fields are stored as top-level + # fields in the output document instead of being grouped under a fields + # sub-dictionary. Default is false. + #fields_under_root: false + + # NOTE: THIS FEATURE IS DEPRECATED AND WILL BE REMOVED IN A FUTURE RELEASE + # Configure file json file to be watched for changes to the monitor: + #watch.poll_file: + # Path to check for updates. + #path: + + # Interval between file file changed checks. + #interval: 5s + +# Define a directory to load monitor definitions from. Definitions take the form +# of individual yaml files. +# heartbeat.config.monitors: + # Directory + glob pattern to search for configuration files + #path: /path/to/my/monitors.d/*.yml + # If enabled, heartbeat will periodically check the config.monitors path for changes + #reload.enabled: true + # How often to check for changes + #reload.period: 1s + +{% for item in heartbeat_services %} +{% if item.type == 'tcp' %} +{% set hosts = [] %} +{% for port in item.ports | default([]) %} +{% for backend in item.group | default([]) %} +{% set backend_host = hostvars[backend]['ansible_host'] %} +{% set _ = hosts.extend([backend_host + ":" + (port | string)]) %} +{% endfor %} +{% endfor %} +{% if hosts | length > 0 %} +- type: tcp # monitor type `tcp`. Connect via TCP and optionally verify endpoint + # by sending/receiving a custom payload + + # Monitor name used for job name and document type + name: "{{ item.name }}" + + # Enable/Disable monitor + enabled: true + + # Configure task schedule + schedule: '@every 45s' # every 30 seconds from start of beat + + # configure hosts to ping. + # Entries can be: + # - plain host name or IP like `localhost`: + # Requires ports configs to be checked. If ssl is configured, + # a SSL/TLS based connection will be established. Otherwise plain tcp connection + # will be established + # - hostname + port like `localhost:12345`: + # Connect to port on given host. 
If ssl is configured, + # a SSL/TLS based connection will be established. Otherwise plain tcp connection + # will be established + # - full url syntax. `scheme://:[port]`. The `` can be one of + # `tcp`, `plain`, `ssl` and `tls`. If `tcp`, `plain` is configured, a plain + # tcp connection will be established, even if ssl is configured. + # Using `tls`/`ssl`, an SSL connection is established. If no ssl is configured, + # system defaults will be used (not supported on windows). + # If `port` is missing in url, the ports setting is required. + hosts: {{ (hosts | default([])) | to_json }} + + # Configure IP protocol types to ping on if hostnames are configured. + # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`. + ipv4: true + ipv6: true + mode: any + + # Configure file json file to be watched for changes to the monitor: + #watch.poll_file: + # Path to check for updates. + #path: + + # Interval between file file changed checks. + #interval: 5s + + # List of ports to ping if host does not contain a port number + # ports: [80, 9200, 5044] + + # Total test connection and data exchange timeout + #timeout: 16s + + # Optional payload string to send to remote and expected answer. If none is + # configured, the endpoint is expected to be up if connection attempt was + # successful. If only `send_string` is configured, any response will be + # accepted as ok. If only `receive_string` is configured, no payload will be + # send, but client expects to receive expected payload on connect. 
+ #check: + #send: '' + #receive: '' + + # SOCKS5 proxy url + # proxy_url: '' + + # Resolve hostnames locally instead on SOCKS5 server: + #proxy_use_local_resolver: false + + # TLS/SSL connection settings: + #ssl: + # Certificate Authorities + #certificate_authorities: [''] + + # Required TLS protocols + #supported_protocols: ["TLSv1.0", "TLSv1.1", "TLSv1.2"] +{% endif %} +{% elif item.type == 'http' %} +{% set hosts = [] %} +{% for port in item.ports | default([]) %} +{% for backend in item.group | default([]) %} +{% set backend_host = hostvars[backend]['ansible_host'] %} +{% set _ = hosts.extend(["http://" + backend_host + ":" + (port | string) + item.path]) %} +{% endfor %} +{% endfor %} +{% if hosts | length > 0 %} +- type: http # monitor type `http`. Connect via HTTP an optionally verify response + + # Monitor name used for job name and document type + name: "{{ item.name }}" + + # Enable/Disable monitor + enabled: true + + # Configure task schedule + schedule: '@every 60s' # every 30 seconds from start of beat + + # Configure URLs to ping + urls: {{ (hosts | default([])) | to_json }} + + # Configure IP protocol types to ping on if hostnames are configured. + # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`. + ipv4: true + ipv6: true + mode: "any" + + # Configure file json file to be watched for changes to the monitor: + #watch.poll_file: + # Path to check for updates. + #path: + + # Interval between file file changed checks. + #interval: 5s + + # Optional HTTP proxy url. If not set HTTP_PROXY environment variable will be used. + #proxy_url: '' + + # Total test connection and data exchange timeout + #timeout: 16s + + # Optional Authentication Credentials + #username: '' + #password: '' + + # TLS/SSL connection settings for use with HTTPS endpoint. If not configured + # system defaults will be used. 
+ #ssl: + # Certificate Authorities + #certificate_authorities: [''] + + # Required TLS protocols + #supported_protocols: ["TLSv1.0", "TLSv1.1", "TLSv1.2"] + + # Request settings: + check.request: + # Configure HTTP method to use. Only 'HEAD', 'GET' and 'POST' methods are allowed. + method: "{{ item.method }}" + + # Dictionary of additional HTTP headers to send: + headers: + User-agent: osa-heartbeat-healthcheck + + # Optional request body content + #body: + + # Expected response settings +{% if item.check_response is defined %} + check.response: {{ item.check_response }} + #check.response: + # Expected status code. If not configured or set to 0 any status code not + # being 404 is accepted. + #status: 0 + + # Required response headers. + #headers: + + # Required response contents. + #body: +{% endif %} +{% endif %} +{% endif %} +{% endfor %} + +heartbeat.scheduler: + # Limit number of concurrent tasks executed by heartbeat. The task limit if + # disabled if set to 0. The default is 0. + limit: {{ icmp_hosts | length // 4 }} + + # Set the scheduler it's timezone + #location: '' + +#================================ General ====================================== + +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. +# If this options is not defined, the hostname is used. +#name: + +# The tags of the shipper are included in their own field with each +# transaction published. Tags make it easy to group servers by different +# logical properties. +#tags: ["service-X", "web-tier"] + +# Optional fields that you can specify to add additional information to the +# output. Fields can be scalar values, arrays, dictionaries, or any nested +# combination of these. +#fields: +# env: staging + +# If this option is set to true, the custom fields are stored as top-level +# fields in the output document instead of being grouped under a fields +# sub-dictionary. Default is false. 
+#fields_under_root: false + +# Internal queue configuration for buffering events to be published. +#queue: + # Queue type by name (default 'mem') + # The memory queue will present all available events (up to the outputs + # bulk_max_size) to the output, the moment the output is ready to server + # another batch of events. + #mem: + # Max number of events the queue can buffer. + #events: 4096 + + # Hints the minimum number of events stored in the queue, + # before providing a batch of events to the outputs. + # The default value is set to 2048. + # A value of 0 ensures events are immediately available + # to be sent to the outputs. + #flush.min_events: 2048 + + # Maximum duration after which events are available to the outputs, + # if the number of events stored in the queue is < min_flush_events. + #flush.timeout: 1s + + # The spool queue will store events in a local spool file, before + # forwarding the events to the outputs. + # + # Beta: spooling to disk is currently a beta feature. Use with care. + # + # The spool file is a circular buffer, which blocks once the file/buffer is full. + # Events are put into a write buffer and flushed once the write buffer + # is full or the flush_timeout is triggered. + # Once ACKed by the output, events are removed immediately from the queue, + # making space for new events to be persisted. + #spool: + # The file namespace configures the file path and the file creation settings. + # Once the file exists, the `size`, `page_size` and `prealloc` settings + # will have no more effect. + #file: + # Location of spool file. The default value is ${path.data}/spool.dat. + #path: "${path.data}/spool.dat" + + # Configure file permissions if file is created. The default value is 0600. + #permissions: 0600 + + # File size hint. The spool blocks, once this limit is reached. The default value is 100 MiB. + #size: 100MiB + + # The files page size. A file is split into multiple pages of the same size. The default value is 4KiB. 
+ #page_size: 4KiB + + # If prealloc is set, the required space for the file is reserved using + # truncate. The default value is true. + #prealloc: true + + # Spool writer settings + # Events are serialized into a write buffer. The write buffer is flushed if: + # - The buffer limit has been reached. + # - The configured limit of buffered events is reached. + # - The flush timeout is triggered. + #write: + # Sets the write buffer size. + #buffer_size: 1MiB + + # Maximum duration after which events are flushed, if the write buffer + # is not full yet. The default value is 1s. + #flush.timeout: 1s + + # Number of maximum buffered events. The write buffer is flushed once the + # limit is reached. + #flush.events: 16384 + + # Configure the on-disk event encoding. The encoding can be changed + # between restarts. + # Valid encodings are: json, ubjson, and cbor. + #codec: cbor + #read: + # Reader flush timeout, waiting for more events to become available, so + # to fill a complete batch, as required by the outputs. + # If flush_timeout is 0, all available events are forwarded to the + # outputs immediately. + # The default value is 0s. + #flush.timeout: 0s + +# Sets the maximum number of CPUs that can be executing simultaneously. The +# default is the number of logical CPUs available in the system. +#max_procs: + +#================================ Processors =================================== + +# Processors are used to reduce the number of fields in the exported event or to +# enhance the event with external metadata. This section defines a list of +# processors that are applied one by one and the first one receives the initial +# event: +# +# event -> filter1 -> event1 -> filter2 ->event2 ... +# +# The supported processors are drop_fields, drop_event, include_fields, +# decode_json_fields, and add_cloud_metadata. 
+# +# For example, you can use the following processors to keep the fields that +# contain CPU load percentages, but remove the fields that contain CPU ticks +# values: +# +#processors: +#- include_fields: +# fields: ["cpu"] +#- drop_fields: +# fields: ["cpu.user", "cpu.system"] +# +# The following example drops the events that have the HTTP response code 200: +# +#processors: +#- drop_event: +# when: +# equals: +# http.code: 200 +# +# The following example renames the field a to b: +# +#processors: +#- rename: +# fields: +# - from: "a" +# to: "b" +# +# The following example tokenizes the string into fields: +# +#processors: +#- dissect: +# tokenizer: "%{key1} - %{key2}" +# field: "message" +# target_prefix: "dissect" +# +# The following example enriches each event with metadata from the cloud +# provider about the host machine. It works on EC2, GCE, DigitalOcean, +# Tencent Cloud, and Alibaba Cloud. +# +#processors: +#- add_cloud_metadata: ~ +# +# The following example enriches each event with the machine's local time zone +# offset from UTC. +# +#processors: +#- add_locale: +# format: offset +# +# The following example enriches each event with docker metadata, it matches +# given fields to an existing container id and adds info from that container: +# +#processors: +#- add_docker_metadata: +# host: "unix:///var/run/docker.sock" +# match_fields: ["system.process.cgroup.id"] +# match_pids: ["process.pid", "process.ppid"] +# match_source: true +# match_source_index: 4 +# match_short_id: false +# cleanup_timeout: 60 +# labels.dedot: false +# # To connect to Docker over TLS you must specify a client and CA certificate. +# #ssl: +# # certificate_authority: "/etc/pki/root/ca.pem" +# # certificate: "/etc/pki/client/cert.pem" +# # key: "/etc/pki/client/cert.key" +# +# The following example enriches each event with docker metadata, it matches +# container id from log path available in `source` field (by default it expects +# it to be /var/lib/docker/containers/*/*.log). 
+# +#processors: +#- add_docker_metadata: ~ +# +# The following example enriches each event with host metadata. +# +#processors: +#- add_host_metadata: +# netinfo.enabled: false +# +# The following example enriches each event with process metadata using +# process IDs included in the event. +# +#processors: +#- add_process_metadata: +# match_pids: ["system.process.ppid"] +# target: system.process.parent +# +# The following example decodes fields containing JSON strings +# and replaces the strings with valid JSON objects. +# +#processors: +#- decode_json_fields: +# fields: ["field1", "field2", ...] +# process_array: false +# max_depth: 1 +# target: "" +# overwrite_keys: false +processors: +- add_host_metadata: ~ + +#============================= Elastic Cloud ================================== + +# These settings simplify using heartbeat with the Elastic Cloud (https://cloud.elastic.co/). + +# The cloud.id setting overwrites the `output.elasticsearch.hosts` and +# `setup.kibana.host` options. +# You can find the `cloud.id` in the Elastic Cloud web UI. +#cloud.id: + +# The cloud.auth setting overwrites the `output.elasticsearch.username` and +# `output.elasticsearch.password` settings. The format is `:`. +#cloud.auth: + +#================================ Outputs ====================================== + +# Configure what output to use when sending the data collected by the beat. + +#-------------------------- Elasticsearch output ------------------------------- +#output.elasticsearch: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Array of hosts to connect to. + # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + #hosts: ["localhost:9200"] + + # Set gzip compression level. 
+ #compression_level: 0 + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "elastic" + #password: "changeme" + + # Dictionary of HTTP parameters to pass within the url with index operations. + #parameters: + #param1: value1 + #param2: value2 + + # Number of workers per Elasticsearch host. + #worker: 1 + + # Optional index name. The default is "heartbeat" plus date + # and generates [heartbeat-]YYYY.MM.DD keys. + # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. + #index: "heartbeat-%{[beat.version]}-%{+yyyy.MM.dd}" + + # Optional ingest node pipeline. By default no pipeline will be used. + #pipeline: "" + + # Optional HTTP Path + #path: "/elasticsearch" + + # Custom HTTP headers to add to each request + #headers: + # X-My-Header: Contents of the header + + # Proxy server url + #proxy_url: http://proxy:3128 + + # The number of times a particular Elasticsearch index operation is attempted. If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Elasticsearch bulk API index request. + # The default is 50. + #bulk_max_size: 50 + + # Configure http request timeout before failing an request to Elasticsearch. + #timeout: 90 + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. By default is off. 
+ # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + +#----------------------------- Logstash output --------------------------------- +{{ elk_macros.output_logstash(inventory_hostname, logstash_data_hosts, ansible_processor_count) }} + +#------------------------------- Kafka output ---------------------------------- +#output.kafka: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The list of Kafka broker addresses from where to fetch the cluster metadata. + # The cluster metadata contain the actual Kafka brokers events are published + # to. + #hosts: ["localhost:9092"] + + # The Kafka topic used for produced events. The setting can be a format string + # using any event field. To set the topic from document type use `%{[type]}`. + #topic: beats + + # The Kafka event key setting. Use format string to create unique event key. + # By default no event key will be generated. + #key: '' + + # The Kafka event partitioning strategy. Default hashing strategy is `hash` + # using the `output.kafka.key` setting or randomly distributes events if + # `output.kafka.key` is not configured. + #partition.hash: + # If enabled, events will only be published to partitions with reachable + # leaders. Default is false. + #reachable_only: false + + # Configure alternative event field names used to compute the hash value. 
+ # If empty `output.kafka.key` setting will be used. + # Default value is empty list. + #hash: [] + + # Authentication details. Password is required if username is set. + #username: '' + #password: '' + + # Kafka version heartbeat is assumed to run against. Defaults to the oldest + # supported stable version (currently version 0.8.2.0) + #version: 0.8.2 + + # Metadata update configuration. Metadata do contain leader information + # deciding which broker to use when publishing. + #metadata: + # Max metadata request retry attempts when cluster is in middle of leader + # election. Defaults to 3 retries. + #retry.max: 3 + + # Waiting time between retries during leader elections. Default is 250ms. + #retry.backoff: 250ms + + # Refresh metadata interval. Defaults to every 10 minutes. + #refresh_frequency: 10m + + # The number of concurrent load-balanced Kafka output workers. + #worker: 1 + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Kafka request. The default + # is 2048. + #bulk_max_size: 2048 + + # The number of seconds to wait for responses from the Kafka brokers before + # timing out. The default is 30s. + #timeout: 30s + + # The maximum duration a broker will wait for number of required ACKs. The + # default is 10s. + #broker_timeout: 10s + + # The number of messages buffered for each Kafka broker. The default is 256. + #channel_buffer_size: 256 + + # The keep-alive period for an active network connection. If 0s, keep-alives + # are disabled. The default is 0 seconds. + #keep_alive: 0 + + # Sets the output compression codec. Must be one of none, snappy and gzip. 
The + # default is gzip. + #compression: gzip + + # The maximum permitted size of JSON-encoded messages. Bigger messages will be + # dropped. The default value is 1000000 (bytes). This value should be equal to + # or less than the broker's message.max.bytes. + #max_message_bytes: 1000000 + + # The ACK reliability level required from broker. 0=no response, 1=wait for + # local commit, -1=wait for all replicas to commit. The default is 1. Note: + # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently + # on error. + #required_acks: 1 + + # The configurable ClientID used for logging, debugging, and auditing + # purposes. The default is "beats". + #client_id: beats + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. 
+ #ssl.renegotiation: never + +#------------------------------- Redis output ---------------------------------- +#output.redis: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The list of Redis servers to connect to. If load balancing is enabled, the + # events are distributed to the servers in the list. If one server becomes + # unreachable, the events are distributed to the reachable servers only. + #hosts: ["localhost:6379"] + + # The Redis port to use if hosts does not contain a port number. The default + # is 6379. + #port: 6379 + + # The name of the Redis list or channel the events are published to. The + # default is heartbeat. + #key: heartbeat + + # The password to authenticate with. The default is no authentication. + #password: + + # The Redis database number where the events are published. The default is 0. + #db: 0 + + # The Redis data type to use for publishing events. If the data type is list, + # the Redis RPUSH command is used. If the data type is channel, the Redis + # PUBLISH command is used. The default value is list. + #datatype: list + + # The number of workers to use for each host configured to publish events to + # Redis. Use this setting along with the loadbalance option. For example, if + # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each + # host). + #worker: 1 + + # If set to true and multiple hosts or workers are configured, the output + # plugin load balances published events onto all Redis hosts. If set to false, + # the output plugin sends all events to only one host (determined at random) + # and will switch to another host if the currently selected one becomes + # unreachable. The default value is true. + #loadbalance: true + + # The Redis connection timeout in seconds. The default is 5 seconds. + #timeout: 5s + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. 
+ # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Redis request or pipeline. + # The default is 2048. + #bulk_max_size: 2048 + + # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The + # value must be a URL with a scheme of socks5://. + #proxy_url: + + # This option determines whether Redis hostnames are resolved locally when + # using a proxy. The default value is false, which means that name resolution + # occurs on the proxy server. + #proxy_use_local_resolver: false + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. 
Default is never. + #ssl.renegotiation: never + +#------------------------------- File output ----------------------------------- +#output.file: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping html symbols in strings. + #escape_html: true + + # Path to the directory where to save the generated files. The option is + # mandatory. + #path: "/tmp/heartbeat" + + # Name of the generated files. The default is `heartbeat` and it generates + # files: `heartbeat`, `heartbeat.1`, `heartbeat.2`, etc. + #filename: heartbeat + + # Maximum size in kilobytes of each file. When this size is reached, and on + # every heartbeat restart, the files are rotated. The default value is 10240 + # kB. + #rotate_every_kb: 10000 + + # Maximum number of files under path. When this number of files is reached, + # the oldest file is deleted and the rest are shifted from last to first. The + # default is 7 files. + #number_of_files: 7 + + # Permissions to use for file creation. The default is 0600. + #permissions: 0600 + + +#----------------------------- Console output --------------------------------- +#output.console: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping html symbols in strings. + #escape_html: true + +#================================= Paths ====================================== + +# The home path for the heartbeat installation. This is the default base path +# for all other path settings and for miscellaneous files that come with the +# distribution (for example, the sample dashboards). +# If not set by a CLI flag or in the configuration file, the default for the +# home path is the location of the binary. +#path.home: + +# The configuration path for the heartbeat installation. 
This is the default +# base path for configuration files, including the main YAML configuration file +# and the Elasticsearch template file. If not set by a CLI flag or in the +# configuration file, the default for the configuration path is the home path. +#path.config: ${path.home} + +# The data path for the heartbeat installation. This is the default base path +# for all the files in which heartbeat needs to store its data. If not set by a +# CLI flag or in the configuration file, the default for the data path is a data +# subdirectory inside the home path. +#path.data: ${path.home}/data + +# The logs path for a heartbeat installation. This is the default location for +# the Beat's log files. If not set by a CLI flag or in the configuration file, +# the default for the logs path is a logs subdirectory inside the home path. +#path.logs: ${path.home}/logs + +#================================ Keystore ========================================== +# Location of the Keystore containing the keys and their sensitive values. 
+#keystore.path: "${path.config}/beats.keystore" + +#============================== Dashboards ===================================== +{{ elk_macros.setup_dashboards('heartbeat') }} + +#=============================== Template ====================================== +{{ elk_macros.setup_template('heartbeat', inventory_hostname, data_nodes, elasticsearch_number_of_replicas) }} + +#============================== Kibana ===================================== +{% if (groups['kibana'] | length) > 0 %} +{{ elk_macros.setup_kibana(hostvars[groups['kibana'][0]]['ansible_host'] ~ ':' ~ kibana_port) }} +{% endif %} + +#================================ Logging ====================================== +{{ elk_macros.beat_logging('heartbeat') }} + +#============================== Xpack Monitoring ===================================== +{{ elk_macros.xpack_monitoring_elasticsearch(inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }} + +#================================ HTTP Endpoint ====================================== +# Each beat can expose internal metrics through a HTTP endpoint. For security +# reasons the endpoint is disabled by default. This feature is currently experimental. +# Stats can be access through http://localhost:5066/stats . For pretty JSON output +# append ?pretty to the URL. + +# Defines if the HTTP endpoint is enabled. +#http.enabled: false + +# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost. +#http.host: localhost + +# Port on which the HTTP endpoint will bind. Default is 5066. +#http.port: 5066 + +#============================= Process Security ================================ + +# Enable or disable seccomp system call filtering on Linux. Default is enabled. 
+#seccomp.enabled: true diff --git a/elk_metrics_7x/roles/elastic_heartbeat/vars/redhat.yml b/elk_metrics_7x/roles/elastic_heartbeat/vars/redhat.yml new file mode 100644 index 00000000..4a32d6b7 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_heartbeat/vars/redhat.yml @@ -0,0 +1,17 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +heartbeat_distro_packages: + - heartbeat-elastic diff --git a/elk_metrics_7x/roles/elastic_heartbeat/vars/suse.yml b/elk_metrics_7x/roles/elastic_heartbeat/vars/suse.yml new file mode 100644 index 00000000..4a32d6b7 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_heartbeat/vars/suse.yml @@ -0,0 +1,17 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +heartbeat_distro_packages: + - heartbeat-elastic diff --git a/elk_metrics_7x/roles/elastic_heartbeat/vars/ubuntu.yml b/elk_metrics_7x/roles/elastic_heartbeat/vars/ubuntu.yml new file mode 100644 index 00000000..4a32d6b7 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_heartbeat/vars/ubuntu.yml @@ -0,0 +1,17 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +heartbeat_distro_packages: + - heartbeat-elastic diff --git a/elk_metrics_7x/roles/elastic_journalbeat/defaults/main.yml b/elk_metrics_7x/roles/elastic_journalbeat/defaults/main.yml new file mode 100644 index 00000000..6dbf010e --- /dev/null +++ b/elk_metrics_7x/roles/elastic_journalbeat/defaults/main.yml @@ -0,0 +1,16 @@ +--- +# Copyright 2018, Vexxhost, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +journalbeat_service_state: restarted diff --git a/elk_metrics_7x/roles/elastic_journalbeat/handlers/main.yml b/elk_metrics_7x/roles/elastic_journalbeat/handlers/main.yml new file mode 100644 index 00000000..48f3802b --- /dev/null +++ b/elk_metrics_7x/roles/elastic_journalbeat/handlers/main.yml @@ -0,0 +1,25 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +- name: Enable and restart journalbeat + systemd: + name: "journalbeat" + enabled: true + state: "{{ journalbeat_service_state }}" + daemon_reload: yes + when: + - (elk_package_state | default('present')) != 'absent' + tags: + - config diff --git a/elk_metrics_7x/roles/elastic_journalbeat/meta/main.yml b/elk_metrics_7x/roles/elastic_journalbeat/meta/main.yml new file mode 100644 index 00000000..00b05be8 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_journalbeat/meta/main.yml @@ -0,0 +1,35 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+galaxy_info:
+  author: OpenStack
+  description: Elastic v7.x journalbeat role
+  company: Rackspace
+  license: Apache2
+  min_ansible_version: 2.5
+  platforms:
+    - name: Ubuntu
+      versions:
+        - trusty
+        - xenial
+        - bionic
+  categories:
+    - cloud
+    - development
+    - elasticsearch
+    - elastic-stack
+dependencies:
+  - role: elastic_data_hosts
+  - role: elastic_repositories
diff --git a/elk_metrics_7x/roles/elastic_journalbeat/tasks/main.yml b/elk_metrics_7x/roles/elastic_journalbeat/tasks/main.yml
new file mode 100644
index 00000000..e6fee44f
--- /dev/null
+++ b/elk_metrics_7x/roles/elastic_journalbeat/tasks/main.yml
@@ -0,0 +1,118 @@
+---
+# Copyright 2018, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+- name: Gather variables for each operating system
+  include_vars: "{{ item }}"
+  with_first_found:
+    - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower }}.yml"
+    - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower }}.yml"
+    - "{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version | lower }}.yml"
+    - "{{ ansible_distribution | lower }}.yml"
+    - "{{ ansible_os_family | lower }}-{{ ansible_distribution_version.split('.')[0] }}.yml"
+    - "{{ ansible_os_family | lower }}.yml"
+  tags:
+    - always
+
+- name: Uninstall legacy journalbeat
+  file:
+    path: "{{ item }}"
+    state: absent
+  with_items:
+    - /etc/systemd/system/journalbeat.service
+    - /usr/local/bin/journalbeat
+
+- name: Ensure beat is installed
+  package:
+    name: "{{ journalbeat_distro_packages }}"
+    state: "{{ elk_package_state | default('present') }}"
+    update_cache: "{{ (ansible_pkg_mgr == 'apt') | ternary('yes', omit) }}"
+  register: _package_task
+  until: _package_task is success
+  retries: 3
+  delay: 2
+  notify:
+    - Enable and restart journalbeat
+  tags:
+    - package_install
+
+- name: Ensure beat is installed (aarch64)
+  apt:
+    deb: 'https://object-storage-ca-ymq-1.vexxhost.net/swift/v1/8709ca2640344a4ba85cba0a1d6eea69/aarch64/journalbeat-6.5.0-arm64.deb'
+  when:
+    - ansible_pkg_mgr == 'apt'
+    - ansible_architecture == 'aarch64'
+  notify:
+    - Enable and restart journalbeat
+  tags:
+    - package_install
+
+- name: Create journalbeat systemd service config dir
+  file:
+    path: "/etc/systemd/system/journalbeat.service.d"
+    state: "directory"
+    group: "root"
+    owner: "root"
+    mode: "0755"
+
+- name: Apply systemd options
+  template:
+    src: "{{ item.src }}"
+    dest: "{{ item.dest }}"
+    mode: "0644"
+  with_items:
+    - src: "systemd.general-overrides.conf.j2"
+      dest: "/etc/systemd/system/journalbeat.service.d/journalbeat-overrides.conf"
+  notify:
+    - Enable and restart journalbeat
+
+- name: Drop journalbeat configs
+  template:
+    src: "{{ item.src }}"
+    dest: "{{ item.dest }}"
+    mode: "0644"
+  with_items:
+    - src: "journalbeat.yml.j2"
+      dest: "/etc/journalbeat/journalbeat.yml"
+  notify:
+    - Enable and restart journalbeat
+
+- name: Run the beat setup role
+  include_role:
+    name: elastic_beat_setup
+  when:
+    - (groups['kibana'] | length) > 0
+  vars:
+    elastic_beat_name: "journalbeat"
+
+- name: Force beat handlers
+  meta: flush_handlers
+
+- name: set journalbeat service state (upstart)
+  service:
+    name: "journalbeat"
+    state: "{{ journalbeat_service_state }}"
+    enabled: "{{ journalbeat_service_state in ['running', 'started', 'restarted'] }}"
+  when:
+    - ansible_service_mgr == 'upstart'
+    - journalbeat_service_state in ['started', 'stopped']
+
+- name: set journalbeat service state (systemd)
+  systemd:
+    name: "journalbeat"
+    state: "{{ journalbeat_service_state }}"
+    enabled: "{{ journalbeat_service_state in ['running', 'started', 'restarted'] }}"
+  when:
+    - ansible_service_mgr == 'systemd'
+    - journalbeat_service_state in ['started', 'stopped']
diff --git a/elk_metrics_7x/roles/elastic_journalbeat/templates/journalbeat.yml.j2 b/elk_metrics_7x/roles/elastic_journalbeat/templates/journalbeat.yml.j2
new file mode 100644
index 00000000..c00ac6fc
--- /dev/null
+++ b/elk_metrics_7x/roles/elastic_journalbeat/templates/journalbeat.yml.j2
@@ -0,0 +1,796 @@
+{% import 'templates/_macros.j2' as elk_macros %}
+###################### Journalbeat Configuration Example #########################
+
+# This file is an example configuration file highlighting only the most common
+# options. The journalbeat.reference.yml file from the same directory contains all the
+# supported options with more comments. You can use it as a reference.
+#
+# You can find the full configuration reference here:
+# https://www.elastic.co/guide/en/beats/journalbeat/index.html
+
+# For more available modules and options, please see the journalbeat.reference.yml sample
+# configuration file.
+ +#=========================== Journalbeat inputs ============================= + +journalbeat.inputs: + # Paths that should be crawled and fetched. Possible values files and directories. + # When setting a directory, all journals under it are merged. + # When empty starts to read from local journal. +- paths: ["/var/log/journal"] + + # The number of seconds to wait before trying to read again from journals. + #backoff: 1s + # The maximum number of seconds to wait before attempting to read again from journals. + #max_backoff: 60s + + # Position to start reading from journal. Valid values: head, tail, cursor + seek: cursor + # Fallback position if no cursor data is available. + #cursor_seek_fallback: head + + # Exact matching for field values of events. + # Matching for nginx entries: "systemd.unit=nginx" + #include_matches: [] + + # Optional fields that you can specify to add additional information to the + # output. Fields can be scalar values, arrays, dictionaries, or any nested + # combination of these. + #fields: + # env: staging + + +#========================= Journalbeat global options ============================ +journalbeat: + # Name of the registry file. If a relative path is used, it is considered relative to the + # data path. + registry_file: registry + + # The number of seconds to wait before trying to read again from journals. + backoff: 10s + # The maximum number of seconds to wait before attempting to read again from journals. + max_backoff: 60s + + # Position to start reading from all journal. Possible values: head, tail, cursor + seek: head + + # Exact matching for field values of events. + # Matching for nginx entries: "systemd.unit=nginx" + #matches: [] + +#================================ General ====================================== + +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. 
+# If this options is not defined, the hostname is used. +name: journalbeat + +# The tags of the shipper are included in their own field with each +# transaction published. Tags make it easy to group servers by different +# logical properties. +tags: + - journald + +# Optional fields that you can specify to add additional information to the +# output. Fields can be scalar values, arrays, dictionaries, or any nested +# combination of these. +#fields: +# env: staging + +# If this option is set to true, the custom fields are stored as top-level +# fields in the output document instead of being grouped under a fields +# sub-dictionary. Default is false. +#fields_under_root: false + +# Internal queue configuration for buffering events to be published. +#queue: + # Queue type by name (default 'mem') + # The memory queue will present all available events (up to the outputs + # bulk_max_size) to the output, the moment the output is ready to server + # another batch of events. + #mem: + # Max number of events the queue can buffer. + #events: 4096 + + # Hints the minimum number of events stored in the queue, + # before providing a batch of events to the outputs. + # The default value is set to 2048. + # A value of 0 ensures events are immediately available + # to be sent to the outputs. + #flush.min_events: 2048 + + # Maximum duration after which events are available to the outputs, + # if the number of events stored in the queue is < min_flush_events. + #flush.timeout: 1s + + # The spool queue will store events in a local spool file, before + # forwarding the events to the outputs. + # + # Beta: spooling to disk is currently a beta feature. Use with care. + # + # The spool file is a circular buffer, which blocks once the file/buffer is full. + # Events are put into a write buffer and flushed once the write buffer + # is full or the flush_timeout is triggered. 
+ # Once ACKed by the output, events are removed immediately from the queue, + # making space for new events to be persisted. + #spool: + # The file namespace configures the file path and the file creation settings. + # Once the file exists, the `size`, `page_size` and `prealloc` settings + # will have no more effect. + #file: + # Location of spool file. The default value is ${path.data}/spool.dat. + #path: "${path.data}/spool.dat" + + # Configure file permissions if file is created. The default value is 0600. + #permissions: 0600 + + # File size hint. The spool blocks, once this limit is reached. The default value is 100 MiB. + #size: 100MiB + + # The files page size. A file is split into multiple pages of the same size. The default value is 4KiB. + #page_size: 4KiB + + # If prealloc is set, the required space for the file is reserved using + # truncate. The default value is true. + #prealloc: true + + # Spool writer settings + # Events are serialized into a write buffer. The write buffer is flushed if: + # - The buffer limit has been reached. + # - The configured limit of buffered events is reached. + # - The flush timeout is triggered. + #write: + # Sets the write buffer size. + #buffer_size: 1MiB + + # Maximum duration after which events are flushed, if the write buffer + # is not full yet. The default value is 1s. + #flush.timeout: 1s + + # Number of maximum buffered events. The write buffer is flushed once the + # limit is reached. + #flush.events: 16384 + + # Configure the on-disk event encoding. The encoding can be changed + # between restarts. + # Valid encodings are: json, ubjson, and cbor. + #codec: cbor + #read: + # Reader flush timeout, waiting for more events to become available, so + # to fill a complete batch, as required by the outputs. + # If flush_timeout is 0, all available events are forwarded to the + # outputs immediately. + # The default value is 0s. 
+ #flush.timeout: 0s + +# Sets the maximum number of CPUs that can be executing simultaneously. The +# default is the number of logical CPUs available in the system. +#max_procs: + +#================================ Processors =================================== + +# Processors are used to reduce the number of fields in the exported event or to +# enhance the event with external metadata. This section defines a list of +# processors that are applied one by one and the first one receives the initial +# event: +# +# event -> filter1 -> event1 -> filter2 ->event2 ... +# +# The supported processors are drop_fields, drop_event, include_fields, +# decode_json_fields, and add_cloud_metadata. +# +# For example, you can use the following processors to keep the fields that +# contain CPU load percentages, but remove the fields that contain CPU ticks +# values: +# +#processors: +#- include_fields: +# fields: ["cpu"] +#- drop_fields: +# fields: ["cpu.user", "cpu.system"] +# +# The following example drops the events that have the HTTP response code 200: +# +#processors: +#- drop_event: +# when: +# equals: +# http.code: 200 +# +# The following example renames the field a to b: +# +#processors: +#- rename: +# fields: +# - from: "a" +# to: "b" +# +# The following example tokenizes the string into fields: +# +#processors: +#- dissect: +# tokenizer: "%{key1} - %{key2}" +# field: "message" +# target_prefix: "dissect" +# +# The following example enriches each event with metadata from the cloud +# provider about the host machine. It works on EC2, GCE, DigitalOcean, +# Tencent Cloud, and Alibaba Cloud. +# +#processors: +#- add_cloud_metadata: ~ +# +# The following example enriches each event with the machine's local time zone +# offset from UTC. 
+# +#processors: +#- add_locale: +# format: offset +# +# The following example enriches each event with docker metadata, it matches +# given fields to an existing container id and adds info from that container: +# +#processors: +#- add_docker_metadata: +# host: "unix:///var/run/docker.sock" +# match_fields: ["system.process.cgroup.id"] +# match_pids: ["process.pid", "process.ppid"] +# match_source: true +# match_source_index: 4 +# match_short_id: false +# cleanup_timeout: 60 +# labels.dedot: false +# # To connect to Docker over TLS you must specify a client and CA certificate. +# #ssl: +# # certificate_authority: "/etc/pki/root/ca.pem" +# # certificate: "/etc/pki/client/cert.pem" +# # key: "/etc/pki/client/cert.key" +# +# The following example enriches each event with docker metadata, it matches +# container id from log path available in `source` field (by default it expects +# it to be /var/lib/docker/containers/*/*.log). +# +#processors: +#- add_docker_metadata: ~ +# +# The following example enriches each event with host metadata. +# +#processors: +#- add_host_metadata: +# netinfo.enabled: false +# +# The following example enriches each event with process metadata using +# process IDs included in the event. +# +#processors: +#- add_process_metadata: +# match_pids: ["system.process.ppid"] +# target: system.process.parent +# +# The following example decodes fields containing JSON strings +# and replaces the strings with valid JSON objects. +# +#processors: +#- decode_json_fields: +# fields: ["field1", "field2", ...] +# process_array: false +# max_depth: 1 +# target: "" +# overwrite_keys: false +processors: +- add_host_metadata: ~ + +#============================= Elastic Cloud ================================== + +# These settings simplify using journalbeat with the Elastic Cloud (https://cloud.elastic.co/). + +# The cloud.id setting overwrites the `output.elasticsearch.hosts` and +# `setup.kibana.host` options. 
+# You can find the `cloud.id` in the Elastic Cloud web UI. +#cloud.id: + +# The cloud.auth setting overwrites the `output.elasticsearch.username` and +# `output.elasticsearch.password` settings. The format is `:`. +#cloud.auth: + +#================================ Outputs ====================================== + +# Configure what outputs to use when sending the data collected by the beat. +# Multiple outputs may be used. + +#-------------------------- Elasticsearch output ------------------------------- +#output.elasticsearch: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Array of hosts to connect to. + # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + #hosts: ["localhost:9200"] + + # Enabled ilm (beta) to use index lifecycle management instead daily indices. + #ilm.enabled: false + #ilm.rollover_alias: "journalbeat" + #ilm.pattern: "{now/d}-000001" + + # Set gzip compression level. + #compression_level: 0 + + # Configure escaping html symbols in strings. + #escape_html: true + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "elastic" + #password: "changeme" + + # Dictionary of HTTP parameters to pass within the url with index operations. + #parameters: + #param1: value1 + #param2: value2 + + # Number of workers per Elasticsearch host. + #worker: 1 + + # Optional index name. The default is "journalbeat" plus date + # and generates [journalbeat-]YYYY.MM.DD keys. + # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. + #index: "journalbeat-%{[beat.version]}-%{+yyyy.MM.dd}" + + # Optional ingest node pipeline. By default no pipeline will be used. 
+ #pipeline: "" + + # Optional HTTP Path + #path: "/elasticsearch" + + # Custom HTTP headers to add to each request + #headers: + # X-My-Header: Contents of the header + + # Proxy server url + #proxy_url: http://proxy:3128 + + # The number of times a particular Elasticsearch index operation is attempted. If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Elasticsearch bulk API index request. + # The default is 50. + #bulk_max_size: 50 + + # The number of seconds to wait before trying to reconnect to Elasticsearch + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Elasticsearch after a network error. The default is 60s. + #backoff.max: 60s + + # Configure http request timeout before failing a request to Elasticsearch. + #timeout: 90 + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. By default is off. 
+ # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#----------------------------- Logstash output --------------------------------- +{{ elk_macros.output_logstash(inventory_hostname, logstash_data_hosts, ansible_processor_count, 'journalbeat') }} + +#------------------------------- Kafka output ---------------------------------- +#output.kafka: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The list of Kafka broker addresses from where to fetch the cluster metadata. + # The cluster metadata contain the actual Kafka brokers events are published + # to. + #hosts: ["localhost:9092"] + + # The Kafka topic used for produced events. The setting can be a format string + # using any event field. To set the topic from document type use `%{[type]}`. + #topic: beats + + # The Kafka event key setting. Use format string to create unique event key. + # By default no event key will be generated. + #key: '' + + # The Kafka event partitioning strategy. Default hashing strategy is `hash` + # using the `output.kafka.key` setting or randomly distributes events if + # `output.kafka.key` is not configured. + #partition.hash: + # If enabled, events will only be published to partitions with reachable + # leaders. Default is false. 
+ #reachable_only: false + + # Configure alternative event field names used to compute the hash value. + # If empty `output.kafka.key` setting will be used. + # Default value is empty list. + #hash: [] + + # Authentication details. Password is required if username is set. + #username: '' + #password: '' + + # Kafka version journalbeat is assumed to run against. Defaults to the "1.0.0". + #version: '1.0.0' + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping html symbols in strings. + #escape_html: true + + # Metadata update configuration. Metadata do contain leader information + # deciding which broker to use when publishing. + #metadata: + # Max metadata request retry attempts when cluster is in middle of leader + # election. Defaults to 3 retries. + #retry.max: 3 + + # Waiting time between retries during leader elections. Default is 250ms. + #retry.backoff: 250ms + + # Refresh metadata interval. Defaults to every 10 minutes. + #refresh_frequency: 10m + + # The number of concurrent load-balanced Kafka output workers. + #worker: 1 + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Kafka request. The default + # is 2048. + #bulk_max_size: 2048 + + # The number of seconds to wait for responses from the Kafka brokers before + # timing out. The default is 30s. + #timeout: 30s + + # The maximum duration a broker will wait for number of required ACKs. The + # default is 10s. + #broker_timeout: 10s + + # The number of messages buffered for each Kafka broker. The default is 256. 
+ #channel_buffer_size: 256 + + # The keep-alive period for an active network connection. If 0s, keep-alives + # are disabled. The default is 0 seconds. + #keep_alive: 0 + + # Sets the output compression codec. Must be one of none, snappy and gzip. The + # default is gzip. + #compression: gzip + + # Set the compression level. Currently only gzip provides a compression level + # between 0 and 9. The default value is chosen by the compression algorithm. + #compression_level: 4 + + # The maximum permitted size of JSON-encoded messages. Bigger messages will be + # dropped. The default value is 1000000 (bytes). This value should be equal to + # or less than the broker's message.max.bytes. + #max_message_bytes: 1000000 + + # The ACK reliability level required from broker. 0=no response, 1=wait for + # local commit, -1=wait for all replicas to commit. The default is 1. Note: + # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently + # on error. + #required_acks: 1 + + # The configurable ClientID used for logging, debugging, and auditing + # purposes. The default is "beats". + #client_id: beats + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. 
+ #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- Redis output ---------------------------------- +#output.redis: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping html symbols in strings. + #escape_html: true + + # The list of Redis servers to connect to. If load balancing is enabled, the + # events are distributed to the servers in the list. If one server becomes + # unreachable, the events are distributed to the reachable servers only. + #hosts: ["localhost:6379"] + + # The Redis port to use if hosts does not contain a port number. The default + # is 6379. + #port: 6379 + + # The name of the Redis list or channel the events are published to. The + # default is journalbeat. + #key: journalbeat + + # The password to authenticate with. The default is no authentication. + #password: + + # The Redis database number where the events are published. The default is 0. + #db: 0 + + # The Redis data type to use for publishing events. If the data type is list, + # the Redis RPUSH command is used. If the data type is channel, the Redis + # PUBLISH command is used. The default value is list. + #datatype: list + + # The number of workers to use for each host configured to publish events to + # Redis. 
Use this setting along with the loadbalance option. For example, if + # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each + # host). + #worker: 1 + + # If set to true and multiple hosts or workers are configured, the output + # plugin load balances published events onto all Redis hosts. If set to false, + # the output plugin sends all events to only one host (determined at random) + # and will switch to another host if the currently selected one becomes + # unreachable. The default value is true. + #loadbalance: true + + # The Redis connection timeout in seconds. The default is 5 seconds. + #timeout: 5s + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The number of seconds to wait before trying to reconnect to Redis + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Redis after a network error. The default is 60s. + #backoff.max: 60s + + # The maximum number of events to bulk in a single Redis request or pipeline. + # The default is 2048. + #bulk_max_size: 2048 + + # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The + # value must be a URL with a scheme of socks5://. + #proxy_url: + + # This option determines whether Redis hostnames are resolved locally when + # using a proxy. 
The default value is false, which means that name resolution + # occurs on the proxy server. + #proxy_use_local_resolver: false + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- File output ----------------------------------- +#output.file: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping html symbols in strings. + #escape_html: true + + # Path to the directory where to save the generated files. The option is + # mandatory. + #path: "/tmp/journalbeat" + + # Name of the generated files. 
The default is `journalbeat` and it generates + # files: `journalbeat`, `journalbeat.1`, `journalbeat.2`, etc. + #filename: journalbeat + + # Maximum size in kilobytes of each file. When this size is reached, and on + # every journalbeat restart, the files are rotated. The default value is 10240 + # kB. + #rotate_every_kb: 10000 + + # Maximum number of files under path. When this number of files is reached, + # the oldest file is deleted and the rest are shifted from last to first. The + # default is 7 files. + #number_of_files: 7 + + # Permissions to use for file creation. The default is 0600. + #permissions: 0600 + + +#----------------------------- Console output --------------------------------- +#output.console: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping html symbols in strings. + #escape_html: true + +#================================= Paths ====================================== + +# The home path for the journalbeat installation. This is the default base path +# for all other path settings and for miscellaneous files that come with the +# distribution (for example, the sample dashboards). +# If not set by a CLI flag or in the configuration file, the default for the +# home path is the location of the binary. +#path.home: + +# The configuration path for the journalbeat installation. This is the default +# base path for configuration files, including the main YAML configuration file +# and the Elasticsearch template file. If not set by a CLI flag or in the +# configuration file, the default for the configuration path is the home path. +#path.config: ${path.home} + +# The data path for the journalbeat installation. This is the default base path +# for all the files in which journalbeat needs to store its data. 
If not set by a +# CLI flag or in the configuration file, the default for the data path is a data +# subdirectory inside the home path. +#path.data: ${path.home}/data + +# The logs path for a journalbeat installation. This is the default location for +# the Beat's log files. If not set by a CLI flag or in the configuration file, +# the default for the logs path is a logs subdirectory inside the home path. +#path.logs: ${path.home}/logs + +#================================ Keystore ========================================== +# Location of the Keystore containing the keys and their sensitive values. +#keystore.path: "${path.config}/beats.keystore" + +#============================== Dashboards ===================================== +{{ elk_macros.setup_dashboards('journalbeat') }} + +#=============================== Template ====================================== +{{ elk_macros.setup_template('journalbeat', inventory_hostname, data_nodes, elasticsearch_number_of_replicas) }} + +#============================== Kibana ===================================== +{% if (groups['kibana'] | length) > 0 %} +{{ elk_macros.setup_kibana(hostvars[groups['kibana'][0]]['ansible_host'] ~ ':' ~ kibana_port) }} +{% endif %} + +#================================ Logging ====================================== +{{ elk_macros.beat_logging('journalbeat') }} + +#============================== Xpack Monitoring =============================== +{{ elk_macros.xpack_monitoring_elasticsearch(inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }} + +#================================ HTTP Endpoint ====================================== +# Each beat can expose internal metrics through a HTTP endpoint. For security +# reasons the endpoint is disabled by default. This feature is currently experimental. +# Stats can be access through http://localhost:5066/stats . For pretty JSON output +# append ?pretty to the URL. + +# Defines if the HTTP endpoint is enabled. 
+#http.enabled: false + +# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost. +#http.host: localhost + +# Port on which the HTTP endpoint will bind. Default is 5066. +#http.port: 5066 + +#============================= Process Security ================================ + +# Enable or disable seccomp system call filtering on Linux. Default is enabled. +#seccomp.enabled: true diff --git a/elk_metrics_7x/roles/elastic_journalbeat/vars/redhat.yml b/elk_metrics_7x/roles/elastic_journalbeat/vars/redhat.yml new file mode 100644 index 00000000..ac55767d --- /dev/null +++ b/elk_metrics_7x/roles/elastic_journalbeat/vars/redhat.yml @@ -0,0 +1,17 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +journalbeat_distro_packages: + - journalbeat diff --git a/elk_metrics_7x/roles/elastic_journalbeat/vars/suse.yml b/elk_metrics_7x/roles/elastic_journalbeat/vars/suse.yml new file mode 100644 index 00000000..ac55767d --- /dev/null +++ b/elk_metrics_7x/roles/elastic_journalbeat/vars/suse.yml @@ -0,0 +1,17 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +journalbeat_distro_packages: + - journalbeat diff --git a/elk_metrics_7x/roles/elastic_journalbeat/vars/ubuntu.yml b/elk_metrics_7x/roles/elastic_journalbeat/vars/ubuntu.yml new file mode 100644 index 00000000..4b271059 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_journalbeat/vars/ubuntu.yml @@ -0,0 +1,17 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +journalbeat_distro_packages: + - journalbeat \ No newline at end of file diff --git a/elk_metrics_7x/roles/elastic_kibana/defaults/main.yml b/elk_metrics_7x/roles/elastic_kibana/defaults/main.yml new file mode 100644 index 00000000..8a910861 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_kibana/defaults/main.yml @@ -0,0 +1,26 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kibana_enable_basic_auth: false + +# kibana vars +kibana_interface: 0.0.0.0 +kibana_port: 5601 +kibana_username: admin +kibana_password: admin +kibana_nginx_port: 81 +kibana_server_name: "{{ ansible_hostname }}" +kibana_index_on_elasticsearch: "http://{{ hostvars[groups['elastic-logstash'][0]]['ansible_host'] }}:{{ elastic_port}}/.kibana" +kibana_elastic_request_timeout: 1800000 diff --git a/elk_metrics_7x/roles/elastic_kibana/handlers/main.yml b/elk_metrics_7x/roles/elastic_kibana/handlers/main.yml new file mode 100644 index 00000000..619855db --- /dev/null +++ b/elk_metrics_7x/roles/elastic_kibana/handlers/main.yml @@ -0,0 +1,39 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Enable and restart services (systemd) + systemd: + name: "{{ item }}" + enabled: true + state: restarted + daemon_reload: true + when: + - ansible_service_mgr == 'systemd' + with_items: + - nginx + - kibana + listen: Enable and restart services + +- name: Enable and restart services (upstart) + service: + name: "{{ item }}" + state: restarted + enabled: yes + when: + - ansible_service_mgr == 'upstart' + with_items: + - nginx + - kibana + listen: Enable and restart services diff --git a/elk_metrics_7x/roles/elastic_kibana/meta/main.yml b/elk_metrics_7x/roles/elastic_kibana/meta/main.yml new file mode 100644 index 00000000..fe889e75 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_kibana/meta/main.yml @@ -0,0 +1,34 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +galaxy_info: + author: OpenStack + description: Elastic v6.x kibana role + company: Rackspace + license: Apache2 + min_ansible_version: 2.5 + platforms: + - name: Ubuntu + versions: + - trusty + - xenial + - bionic + categories: + - cloud + - development + - elasticsearch + - elastic-stack +dependencies: + - role: elastic_repositories diff --git a/elk_metrics_7x/roles/elastic_kibana/tasks/main.yml b/elk_metrics_7x/roles/elastic_kibana/tasks/main.yml new file mode 100644 index 00000000..c94a77d8 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_kibana/tasks/main.yml @@ -0,0 +1,88 @@ +--- +# Copyright 2018, Rackspace US, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +- name: Gather variables for each operating system + include_vars: "{{ item }}" + with_first_found: + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower }}.yml" + - "{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version | lower }}.yml" + - "{{ ansible_distribution | lower }}.yml" + - "{{ ansible_os_family | lower }}-{{ ansible_distribution_version.split('.')[0] }}.yml" + - "{{ ansible_os_family | lower }}.yml" + tags: + - always + +- name: Ensure distro packages are installed + package: + name: "{{ kibana_distro_packages }}" + state: "{{ elk_package_state | default('present') }}" + update_cache: "{{ (ansible_pkg_mgr == 'apt') | ternary('yes', omit) }}" + register: _package_task + until: _package_task is success + retries: 3 + delay: 2 + notify: + - Enable and restart services + tags: + - package_install + +- name: create kibana user to access web interface + htpasswd: + path: "/etc/nginx/htpasswd.users" + name: "{{ kibana_username }}" + password: "{{ kibana_password }}" + owner: root + mode: 0644 + when: + - kibana_enable_basic_auth + +- name: Drop Nginx default conf file + template: + src: "nginx_default.j2" + dest: "{{ kibana_nginx_vhost_path }}/default" + notify: + - Enable and restart services + +- name: Create kibana systemd service config dir + file: + path: 
"/etc/systemd/system/kibana.service.d" + state: "directory" + group: "root" + owner: "root" + mode: "0755" + when: + - ansible_service_mgr == 'systemd' + +- name: Apply systemd options + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + mode: "0644" + when: + - ansible_service_mgr == 'systemd' + with_items: + - src: "systemd.general-overrides.conf.j2" + dest: "/etc/systemd/system/kibana.service.d/kibana-overrides.conf" + notify: + - Enable and restart services + +- name: Drop kibana conf file + template: + src: "kibana.yml.j2" + dest: "/etc/kibana/kibana.yml" + mode: "0666" + notify: + - Enable and restart services diff --git a/elk_metrics_7x/roles/elastic_kibana/tasks/systemd.general-overrides.conf.j2 b/elk_metrics_7x/roles/elastic_kibana/tasks/systemd.general-overrides.conf.j2 new file mode 120000 index 00000000..9ddff7cc --- /dev/null +++ b/elk_metrics_7x/roles/elastic_kibana/tasks/systemd.general-overrides.conf.j2 @@ -0,0 +1 @@ +../../../templates/systemd.general-overrides.conf.j2 \ No newline at end of file diff --git a/elk_metrics_7x/roles/elastic_kibana/templates/kibana.yml.j2 b/elk_metrics_7x/roles/elastic_kibana/templates/kibana.yml.j2 new file mode 100644 index 00000000..e6e09ca1 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_kibana/templates/kibana.yml.j2 @@ -0,0 +1,92 @@ +# Kibana is served by a back end server. This setting specifies the port to use. + server.port: {{ kibana_port }} + +# This setting specifies the IP address of the back end server. + server.host: {{ kibana_interface }} + +# Enables you to specify a path to mount Kibana at if you are running behind a proxy. This setting +# cannot end in a slash. +# server.basePath: "" + +# The maximum payload size in bytes for incoming server requests. +# server.maxPayloadBytes: 1048576 + +# The URL of the Elasticsearch instance to use for all your queries. 
+ elasticsearch.url: "http://127.0.0.1:{{ elastic_port }}" + +# When this setting's value is true Kibana uses the hostname specified in the server.host +# setting. When the value of this setting is false, Kibana uses the hostname of the host +# that connects to this Kibana instance. +# elasticsearch.preserveHost: true + +# Kibana uses an index in Elasticsearch to store saved searches, visualizations and +# dashboards. Kibana creates a new index if the index doesn't already exist. +# kibana.index: ".kibana" + +# The default application to load. +# kibana.defaultAppId: "discover" + +# If your Elasticsearch is protected with basic authentication, these settings provide +# the username and password that the Kibana server uses to perform maintenance on the Kibana +# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which +# is proxied through the Kibana server. +# elasticsearch.username: "user" +# elasticsearch.password: "pass" + +# Paths to the PEM-format SSL certificate and SSL key files, respectively. These +# files enable SSL for outgoing requests from the Kibana server to the browser. +# server.ssl.cert: /path/to/your/server.crt +# server.ssl.key: /path/to/your/server.key + +# Optional settings that provide the paths to the PEM-format SSL certificate and key files. +# These files validate that your Elasticsearch backend uses the same key files. +# elasticsearch.ssl.cert: /path/to/your/client.crt +# elasticsearch.ssl.key: /path/to/your/client.key + +# Optional setting that enables you to specify a path to the PEM file for the certificate +# authority for your Elasticsearch instance. +# elasticsearch.ssl.ca: /path/to/your/CA.pem + +# To disregard the validity of SSL certificates, change this setting's value to false. +# elasticsearch.ssl.verify: true + +# Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of +# the elasticsearch.requestTimeout setting. 
+# elasticsearch.pingTimeout: 1500 + +# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value +# must be a positive integer. + elasticsearch.requestTimeout: {{ kibana_elastic_request_timeout }} + +# Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable. +# elasticsearch.shardTimeout: 0 + +# Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying. +# elasticsearch.startupTimeout: 5000 + +# Specifies the path where Kibana creates the process ID file. +# pid.file: /var/run/kibana.pid + +# Enables you specify a file where Kibana stores log output. + logging.dest: stdout + +# Set the value of this setting to true to suppress all logging output. +# logging.silent: false + +# Set the value of this setting to true to suppress all logging output other than error messages. +# logging.quiet: false + +# Set the value of this setting to true to log all events, including system usage information +# and all requests. 
+# logging.verbose: false + +# ---------------------------------- X-Pack ------------------------------------ +# X-Pack Monitoring +# https://www.elastic.co/guide/en/kibana/6.3/monitoring-settings-kb.html + xpack.monitoring.enabled: true + xpack.xpack_main.telemetry.enabled: false + xpack.monitoring.kibana.collection.enabled: true + xpack.monitoring.kibana.collection.interval: 30000 + xpack.monitoring.min_interval_seconds: 30 + xpack.monitoring.ui.enabled: true + xpack.monitoring.ui.container.elasticsearch.enabled: true diff --git a/elk_metrics_7x/roles/elastic_kibana/templates/nginx_default.j2 b/elk_metrics_7x/roles/elastic_kibana/templates/nginx_default.j2 new file mode 100644 index 00000000..e295e4dc --- /dev/null +++ b/elk_metrics_7x/roles/elastic_kibana/templates/nginx_default.j2 @@ -0,0 +1,19 @@ +server { + listen {{ kibana_nginx_port }}; + + server_name {{ kibana_server_name }}; + +{% if kibana_enable_basic_auth | bool %} + auth_basic "Restricted Access"; + auth_basic_user_file /etc/nginx/htpasswd.users; +{% endif %} + + location / { + proxy_pass http://127.0.0.1:{{ kibana_port }}; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection 'upgrade'; + proxy_set_header Host $host; + proxy_cache_bypass $http_upgrade; + } +} diff --git a/elk_metrics_7x/roles/elastic_kibana/vars/redhat.yml b/elk_metrics_7x/roles/elastic_kibana/vars/redhat.yml new file mode 100644 index 00000000..6d5b7c26 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_kibana/vars/redhat.yml @@ -0,0 +1,20 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kibana_nginx_vhost_path: /etc/nginx/conf.d/ +kibana_distro_packages: + - kibana + - nginx + - python-passlib diff --git a/elk_metrics_7x/roles/elastic_kibana/vars/suse.yml b/elk_metrics_7x/roles/elastic_kibana/vars/suse.yml new file mode 100644 index 00000000..761a02bc --- /dev/null +++ b/elk_metrics_7x/roles/elastic_kibana/vars/suse.yml @@ -0,0 +1,21 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kibana_nginx_vhost_path: /etc/nginx/vhosts.d +kibana_distro_packages: + - apache2-utils + - kibana + - nginx + - python-passlib diff --git a/elk_metrics_7x/roles/elastic_kibana/vars/ubuntu.yml b/elk_metrics_7x/roles/elastic_kibana/vars/ubuntu.yml new file mode 100644 index 00000000..193b6e8a --- /dev/null +++ b/elk_metrics_7x/roles/elastic_kibana/vars/ubuntu.yml @@ -0,0 +1,21 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +kibana_nginx_vhost_path: /etc/nginx/sites-available +kibana_distro_packages: + - apache2-utils + - kibana + - nginx + - python-passlib diff --git a/elk_metrics_7x/roles/elastic_logstash/defaults/main.yml b/elk_metrics_7x/roles/elastic_logstash/defaults/main.yml new file mode 100644 index 00000000..cbd9ca49 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_logstash/defaults/main.yml @@ -0,0 +1,102 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +temp_dir: /var/lib/logstash/tmp +logstash_pipelines: "{{lookup('template', 'logstash-pipelines.yml.j2') }}" + +# Set processor cores fact +q_storage: 1 + +# Set logstash facts +logstash_queue_size: "{{ ((((q_storage | int) >= 2) | ternary(q_storage, 2) | int) * 1024) // ((logstash_pipelines | from_yaml) | length) }}" +elastic_log_rotate_path: "/var/log/logstash" + +# Enable direct syslog input into logstash. When this is enabled syslog messages +# can be sent directly to logstash via TCP or UDP. 
+logstash_syslog_input_enabled: false
+
+# The typical syslog port is 514 however that is not available to logstash
+# because it's a "privileged" port. For this reason 5140 is used as the default.
+# Changing this port to 514 will require overrides to the service files making
+# logstash run as root (not recommended).
+logstash_syslog_input_port: 5140
+
+# Protocol used when the syslog input is enabled. Modes are "tcp" or "udp".
+logstash_syslog_input_mode: udp
+
+logstash_beat_input_port: 5044
+logstash_deploy_filters: true
+
+## Logstash config showing a complete kafka setup using SSL for authentication.
+# logstash_kafka_options:
+#   codec: json
+#   topic_id: "elk_kafka"
+#   ssl_key_password: "{{ logstash_kafka_ssl_key_password }}"
+#   ssl_keystore_password: "{{ logstash_kafka_ssl_keystore_password }}"
+#   ssl_keystore_location: "/var/lib/logstash/{{ logstash_kafka_ssl_keystore_location | basename }}"
+#   ssl_truststore_location: "/var/lib/logstash/{{ logstash_kafka_ssl_truststore_location | basename }}"
+#   ssl_truststore_password: "{{ logstash_kafka_ssl_truststore_password }}"
+#   bootstrap_servers:
+#     - server1.local:9092
+#     - server2.local:9092
+#     - server3.local:9092
+#   client_id: "elk_metrics_7x"
+#   compression_type: "gzip"
+#   security_protocol: "SSL"
+
+## The following variables are options that correspond to the
+## `logstash_kafka_options` variable.
+# logstash_kafka_ssl_key_password: "secrete"
+# logstash_kafka_ssl_keystore_password: "secrete"
+# logstash_kafka_ssl_truststore_password: "secrete"
+# logstash_kafka_ssl_keystore_location: "/root/kafka/keystore.jks"
+# logstash_kafka_ssl_truststore_location: "/root/kafka/truststore.jks"
+
+## Setup servers that read events from the Smart Connector directly. This
+## supports multiple entries in list format using the "host" and "port" for the
+## smart connector.
+# logstash_arcsight_smart_connectors:
+#   - host: 127.0.0.1
+#     port: 5000
+logstash_arcsight_smart_connectors: []
+
+## Setup servers to read events from the Event Broker Stream. This supports
+## multiple entries in list format using the "host" and "port" for the
+## event brokers.
+# logstash_arcsight_event_brokers:
+#   - host: 127.0.0.1
+#     port: 5000
+logstash_arcsight_event_brokers: []
+
+## The logstash queue type can be set to "memory" or "persisted". If the queue
+## type is set to memory a ramdisk will be created limiting the in memory queue
+## to 50% of the JVM heap size. When this option is undefined the playbook will
+## detect the media type where the queue will exist. If the media type is
+## "rotational" in memory queues will be used.
+# logstash_queue_type:
+
+# Set the logstash search heap size. If this option is undefined the value will
+# be derived automatically using 1/4 of the available RAM for logstash and 1/2
+# of the available RAM for elasticsearch. The value is expected to be in MiB.
+# logstash_heap_size: 10240 # type `int`
+
+# Collectd ingestion options
+logstash_collectd_input_enabled: false
+logstash_collectd_port: 25826
+logstash_collectd_buffer_size: 1452
+# Security level can be ["Sign", "Encrypt"].
+logstash_collectd_security_level: Sign
+# To enable security the auth file is required.
+#logstash_collectd_authfile: /etc/collectd/passwd
diff --git a/elk_metrics_7x/roles/elastic_logstash/handlers/main.yml b/elk_metrics_7x/roles/elastic_logstash/handlers/main.yml
new file mode 100644
index 00000000..3a1c3e52
--- /dev/null
+++ b/elk_metrics_7x/roles/elastic_logstash/handlers/main.yml
@@ -0,0 +1,33 @@
+---
+# Copyright 2018, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +- name: Enable and restart logstash (systemd) + systemd: + name: "logstash" + enabled: true + state: "{{ (inventory_hostname in logstash_nodes) | ternary('restarted', 'stopped') }}" + daemon_reload: true + when: + - ansible_service_mgr == 'systemd' + listen: Enable and restart logstash + +- name: Enable and restart logstash (upstart) + service: + name: "logstash" + state: "{{ (inventory_hostname in logstash_nodes) | ternary('restarted', 'stopped') }}" + enabled: yes + when: + - ansible_service_mgr == 'upstart' + listen: Enable and restart logstash diff --git a/elk_metrics_7x/roles/elastic_logstash/meta/main.yml b/elk_metrics_7x/roles/elastic_logstash/meta/main.yml new file mode 100644 index 00000000..d40186fc --- /dev/null +++ b/elk_metrics_7x/roles/elastic_logstash/meta/main.yml @@ -0,0 +1,39 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+galaxy_info:
+  author: OpenStack
+  description: Elastic v7.x logstash role
+  company: Rackspace
+  license: Apache2
+  min_ansible_version: 2.5
+  platforms:
+    - name: Ubuntu
+      versions:
+        - trusty
+        - xenial
+        - bionic
+  categories:
+    - cloud
+    - development
+    - elasticsearch
+    - elastic-stack
+dependencies:
+  - role: elastic_data_hosts
+  - role: elastic_repositories
+  - role: elastic_dependencies
+    service_name: logstash
+    service_owner: logstash
+    service_group: logstash
diff --git a/elk_metrics_7x/roles/elastic_logstash/tasks/logstash_arcsight.yml b/elk_metrics_7x/roles/elastic_logstash/tasks/logstash_arcsight.yml
new file mode 100644
index 00000000..f9588ca8
--- /dev/null
+++ b/elk_metrics_7x/roles/elastic_logstash/tasks/logstash_arcsight.yml
@@ -0,0 +1,62 @@
+---
+# Copyright 2018, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +- name: Initialise arcsight local facts + ini_file: + dest: "/etc/ansible/facts.d/logstash.fact" + section: arcsight + option: "initialise" + value: true + +- name: refresh local facts + setup: + filter: ansible_local + gather_subset: "!all" + tags: + - nova-config + +- name: Setup arcsight smart connector + shell: >- + /usr/local/bin/logstash --modules arcsight {{ (ansible_local['arcsight'][item.host] is defined) | ternary('', '--setup') }} + -M "arcsight.var.input.smartconnector.bootstrap_servers={{ item.host }}:{{ item.port }}" + -M "arcsight.var.elasticsearch.hosts=localhost:{{ elastic_port }}" + -M "arcsight.var.kibana.host={{ hostvars[groups['kibana'][0]]['ansible_host'] }}:{{ kibana_port }}" + with_items: "{{ logstash_arcsight_smart_connectors }}" + run_once: true + register: smart_connector + until: smart_connector is success + retries: 5 + delay: 5 + +- name: Setup arcsight event broker + shell: >- + /usr/local/bin/logstash --modules arcsight {{ (ansible_local['arcsight'][item.host] is defined) | ternary('', '--setup') }} + -M "arcsight.var.input.eventbroker.bootstrap_servers={{ item.host }}:{{ item.port }}" + -M "arcsight.var.elasticsearch.hosts=localhost:{{ elastic_port }}" + -M "arcsight.var.kibana.host={{ hostvars[groups['kibana'][0]]['ansible_host'] }}:{{ kibana_port }}" + with_items: "{{ logstash_arcsight_event_brokers }}" + run_once: true + register: event_broker + until: event_broker is success + retries: 5 + delay: 5 + +- name: Set arcsight local facts + ini_file: + dest: "/etc/ansible/facts.d/logstash.fact" + section: arcsight + option: "{{ item.host }}" + value: "{{ item.port }}" + with_items: "{{ logstash_arcsight_smart_connectors | union(logstash_arcsight_event_brokers) }}" diff --git a/elk_metrics_7x/roles/elastic_logstash/tasks/logstash_kafka_ssl.yml b/elk_metrics_7x/roles/elastic_logstash/tasks/logstash_kafka_ssl.yml new file mode 100644 index 00000000..fdcad549 --- /dev/null +++ 
b/elk_metrics_7x/roles/elastic_logstash/tasks/logstash_kafka_ssl.yml @@ -0,0 +1,24 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +- name: Copy kafka keystore into place + copy: + src: "{{ logstash_kafka_ssl_keystore_location }}" + dest: "/var/lib/logstash/{{ logstash_kafka_ssl_keystore_location | basename }}" + +- name: Copy kafka truststore into place + copy: + src: "{{ logstash_kafka_ssl_truststore_location }}" + dest: "/var/lib/logstash/{{ logstash_kafka_ssl_truststore_location | basename }}" diff --git a/elk_metrics_7x/roles/elastic_logstash/tasks/main.yml b/elk_metrics_7x/roles/elastic_logstash/tasks/main.yml new file mode 100644 index 00000000..2e733149 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_logstash/tasks/main.yml @@ -0,0 +1,250 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Gather variables for each operating system + include_vars: "{{ item }}" + with_first_found: + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower }}.yml" + - "{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version | lower }}.yml" + - "{{ ansible_distribution | lower }}.yml" + - "{{ ansible_os_family | lower }}-{{ ansible_distribution_version.split('.')[0] }}.yml" + - "{{ ansible_os_family | lower }}.yml" + tags: + - always + +- name: Check collectd plugin + block: + - name: Check for collectd authentication file + stat: + path: "{{ logstash_collectd_authfile }}" + register: _logstash_collectd + when: + - logstash_collectd_authfile is defined + + - name: Notify collectd plugin failure + fail: + msg: >- + The logstash collectd plugin was enabled with security + enabled however the authentication file was not found. + when: + - not (_logstash_collectd.stat.exists | bool) + when: + - logstash_collectd_input_enabled | bool + +- name: Ensure Logstash is installed + package: + name: "{{ logstash_distro_packages }}" + state: "{{ elk_package_state | default('present') }}" + update_cache: "{{ (ansible_pkg_mgr == 'apt') | ternary('yes', omit) }}" + register: _package_task + until: _package_task is success + retries: 3 + delay: 2 + notify: + - Enable and restart logstash + tags: + - package_install + +- name: Create logstash systemd service config dir + file: + path: "/etc/systemd/system/logstash.service.d" + state: "directory" + group: "root" + owner: "root" + mode: "0755" + when: + - ansible_service_mgr == 'systemd' + +- name: Apply systemd options + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + mode: "0644" + when: + - ansible_service_mgr == 'systemd' + with_items: + - src: "systemd.general-overrides.conf.j2" + dest: "/etc/systemd/system/logstash.service.d/logstash-overrides.conf" + notify: + - Enable and restart 
logstash + +- name: Set sysconfig service defaults + lineinfile: + path: "{{ logstash_sysconfig_path }}" + regexp: '^{{ item.key }}=' + line: '{{ item.key }}={{ item.value }}' + with_items: + - key: LS_OPEN_FILES + value: 32768 + +- name: Set service specific haap size + set_fact: + _service_heap_size: "{{ logstash_heap_size }}" + when: + - logstash_heap_size is defined + +- name: Drop jvm conf file(s) + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + with_items: + - src: "jvm.options.j2" + dest: "/etc/logstash/jvm.options" + notify: + - Enable and restart logstash + +- name: Check queue type + block: + - name: Get block device for logstash + command: findmnt -vno SOURCE --target=/var/lib/logstash + changed_when: false + register: _logstash_block_device + + - name: Set device fact + set_fact: + _logstash_device: "{{ _logstash_block_device.stdout.split('/')[-1] | regex_replace('[0-9]$','') }}" + + - name: Set device info fact + set_fact: + _logstash_device_info: "{{ ansible_devices[_logstash_device] }}" + + - name: Set persisted queue fact + set_fact: + logstash_queue_type: "{{ ((_logstash_device_info['rotational'] | int) != 1) | ternary('persisted', 'memory') }}" + rescue: + - name: Set persisted queue fact (fallback) + set_fact: + logstash_queue_type: memory + when: + - logstash_queue_type is undefined + +- name: Systemd memory backed queue block + block: + - name: Get logstash UID + command: id -u logstash + register: logstash_uid + changed_when: false + when: + - ansible_service_mgr == 'systemd' + + - name: Get logstash GID + command: id -g logstash + register: logstash_gid + changed_when: false + when: + - ansible_service_mgr == 'systemd' + + - name: Read logstash queue path + command: "readlink -f /var/lib/logstash/queue" + register: logstash_queue_path + changed_when: false + + - name: Run the systemd mount role + include_role: + name: systemd_mount + vars: + systemd_mounts: + - what: "tmpfs" + where: "{{ logstash_queue_path.stdout.strip() }}" + 
type: "tmpfs" + options: "size={{ (elastic_memory_lower_limit | int) // 2 }}m,uid={{ logstash_uid.stdout }},gid={{ logstash_gid.stdout }},nodev,nodiratime,noatime" + unit: + Before: + - logstash.service + state: 'started' + enabled: true + when: + - ansible_service_mgr == 'systemd' + + - name: Apply fstab options for memory queues + mount: + path: "{{ logstash_queue_path.stdout.strip() }}" + src: tmpfs + fstype: tmpfs + opts: size={{ (elastic_memory_lower_limit | int) // 2 }}m + state: mounted + when: + - ansible_service_mgr != 'systemd' + when: + - logstash_queue_type == 'memory' + +- name: Create patterns directory + file: + name: "/opt/logstash/patterns" + owner: "logstash" + group: "logstash" + state: directory + tags: + - logstash-patterns + +- name: Logstash Extra Patterns + template: + src: "extras" + dest: "/opt/logstash/patterns/extras" + owner: "logstash" + group: "logstash" + when: + - logstash_deploy_filters + notify: + - Enable and restart logstash + tags: + - logstash-filters + - config + +- name: Run kafka ssl deployment + include_tasks: logstash_kafka_ssl.yml + when: + - logstash_kafka_options is defined + - logstash_kafka_ssl_keystore_location is defined + +- name: Drop logstash conf file(s) + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + with_items: + - src: "logstash.yml.j2" + dest: "/etc/logstash/logstash.yml" + - src: "logstash-pipelines.yml.j2" + dest: "/etc/logstash/pipelines.yml" + notify: + - Enable and restart logstash + tags: + - config + +- name: Ensure logstash ownership + file: + path: "/var/lib/logstash/" + owner: logstash + group: logstash + recurse: true + register: l_perms + until: l_perms is success + retries: 3 + delay: 1 + +- name: Ensure logstash tmp dir + file: + path: "/var/lib/logstash/tmp" + state: directory + owner: "logstash" + group: "logstash" + mode: "0750" + +- name: Deploy arcsight collector + include_tasks: logstash_arcsight.yml + when: + - logstash_arcsight_smart_connectors or + 
logstash_arcsight_event_brokers diff --git a/elk_metrics_7x/roles/elastic_logstash/templates/extras b/elk_metrics_7x/roles/elastic_logstash/templates/extras new file mode 100644 index 00000000..8d9454de --- /dev/null +++ b/elk_metrics_7x/roles/elastic_logstash/templates/extras @@ -0,0 +1,10 @@ +APACHE_ERROR_TIMESTAMP %{DAY} %{MONTH} %{MONTHDAY} %{TIME} %{YEAR} +NGINX_ERROR_TIMESTAMP %{YEAR}/%{MONTHNUM}/%{MONTHDAY} %{TIME} +NGINX_TIMESTAMP %{YEAR}/%{MONTHNUM}/%{MONTHDAY}:%{TIME} + +SWIFTPROXY_DATE %{MONTHDAY}/%{MONTH}/%{YEAR}/%{HOUR}/%{MINUTE}/%{SECOND} + +SWIFTPROXY_ACCESS %{DATA:clientip} %{DATA:serverip} %{SWIFTPROXY_DATE:timestamp} %{WORD:verb} %{NOTSPACE:request} HTTP/%{NUMBER:httpversion} %{NUMBER:response} %{DATA:referrer} %{DATA:agent} %{DATA:swift_auth_token} %{DATA:swift_request_bytes} %{DATA:swift_response_bytes} %{DATA:swift_etag} %{DATA:swift_txn} %{DATA:swift_logged_headers} %{BASE10NUM:swift_trans_time} + +KEYSTONE_SUBSECOND_TIMESTAMP %{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{TIME} +STANDARD_TIMESTAMP %{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{TIME} diff --git a/elk_metrics_7x/roles/elastic_logstash/templates/logstash.yml.j2 b/elk_metrics_7x/roles/elastic_logstash/templates/logstash.yml.j2 new file mode 100644 index 00000000..f4d0559e --- /dev/null +++ b/elk_metrics_7x/roles/elastic_logstash/templates/logstash.yml.j2 @@ -0,0 +1,259 @@ +# Settings file in YAML +# +# Settings can be specified either in hierarchical form, e.g.: +# +# pipeline: +# batch: +# size: 125 +# delay: 5 +# +# Or as flat keys: +# +# pipeline.batch.size: 125 +# pipeline.batch.delay: 5 +# +# ------------ Node identity ------------ +# +# Use a descriptive name for the node: +# +node.name: {{ inventory_hostname }} +# +# If omitted the node name will default to the machine's host name +# +# ------------ Data path ------------------ +# +# Which directory should be used by logstash and its plugins +# for any persistent needs. 
Defaults to LOGSTASH_HOME/data +# +path.data: /var/lib/logstash +# +# ------------ Pipeline Settings -------------- +# +# The ID of the pipeline. +# +# pipeline.id: main +# +# Set the number of workers that will, in parallel, execute the filters+outputs +# stage of the pipeline. +# +# This defaults to the number of the host's CPU cores. +# + +{% set _d_processors = ((ansible_processor_count | int) * 3) %} +{% set _processors = ((_d_processors | int) > 0) | ternary(_d_processors, 2) %} +{% set _t_processors = (_processors | int) + (ansible_processor_count | int) %} +{% set processors = ((_t_processors | int) > 64) | ternary(64, _t_processors) %} +pipeline.workers: {{ processors | int }} +# +# How many events to retrieve from inputs before sending to filters+workers +# +pipeline.batch.size: 256 +# +# How long to wait in milliseconds while polling for the next event +# before dispatching an undersized batch to filters+outputs +# +pipeline.batch.delay: 64 +# +# Force Logstash to exit during shutdown even if there are still inflight +# events in memory. By default, logstash will refuse to quit until all +# received events have been pushed to the outputs. 
+# +# WARNING: enabling this can lead to data loss during shutdown +# +# pipeline.unsafe_shutdown: false +# +# ------------ Pipeline Configuration Settings -------------- +# +# Where to fetch the pipeline configuration for the main pipeline +# +# path.config: +# +# Pipeline configuration string for the main pipeline +# +# config.string: +# +# At startup, test if the configuration is valid and exit (dry run) +# +# config.test_and_exit: false +# +# Periodically check if the configuration has changed and reload the pipeline +# This can also be triggered manually through the SIGHUP signal +# +# config.reload.automatic: false +# +# How often to check if the pipeline configuration has changed (in seconds) +# +# config.reload.interval: 3s +# +# Show fully compiled configuration as debug log message +# NOTE: --log.level must be 'debug' +# +# config.debug: false +# +# When enabled, process escaped characters such as \n and \" in strings in the +# pipeline configuration files. +# +# config.support_escapes: false +# +# ------------ Module Settings --------------- +# Define modules here. Modules definitions must be defined as an array. +# The simple way to see this is to prepend each `name` with a `-`, and keep +# all associated variables under the `name` they are associated with, and +# above the next, like this: +# +# modules: +# - name: MODULE_NAME +# var.PLUGINTYPE1.PLUGINNAME1.KEY1: VALUE +# var.PLUGINTYPE1.PLUGINNAME1.KEY2: VALUE +# var.PLUGINTYPE2.PLUGINNAME1.KEY1: VALUE +# var.PLUGINTYPE3.PLUGINNAME3.KEY1: VALUE +# +# Module variable names must be in the format of +# +# var.PLUGIN_TYPE.PLUGIN_NAME.KEY +# +# modules: +# +# ------------ Cloud Settings --------------- +# Define Elastic Cloud settings here. +# Format of cloud.id is a base64 value e.g. dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyRub3RhcmVhbCRpZGVudGlmaWVy +# and it may have an label prefix e.g. staging:dXMtZ... 
+# This will overwrite 'var.elasticsearch.hosts' and 'var.kibana.host' +# cloud.id: +# +# Format of cloud.auth is: : +# This is optional +# If supplied this will overwrite 'var.elasticsearch.username' and 'var.elasticsearch.password' +# If supplied this will overwrite 'var.kibana.username' and 'var.kibana.password' +# cloud.auth: elastic: +# +# ------------ Queuing Settings -------------- +# +# Internal queuing model, "memory" for legacy in-memory based queuing and +# "persisted" for disk-based acked queueing. Defaults is memory +# +queue.type: persisted +# +# If using queue.type: persisted, the directory path where the data files will be stored. +# Default is path.data/queue +# +# path.queue: +# +# If using queue.type: persisted, the page data files size. The queue data consists of +# append-only data files separated into pages. Default is 64mb +# +# queue.page_capacity: 64mb +# +# If using queue.type: persisted, the maximum number of unread events in the queue. +# Default is 0 (unlimited) +# +# queue.max_events: 0 +# +# If using queue.type: persisted, the total capacity of the queue in number of bytes. +# If you would like more unacked events to be buffered in Logstash, you can increase the +# capacity using this setting. Please make sure your disk drive has capacity greater than +# the size specified here. If both max_bytes and max_events are specified, Logstash will pick +# whichever criteria is reached first +# Default is 1024mb or 1gb +# +{% if logstash_queue_type == 'memory' %} +# An in memory queue is being used. The actual size of the queue is 90% of the +# total memory limit, which is set using 50% of the heap size. 
+{% set _memory_queue_size = ((elastic_memory_lower_limit | int) // 2) %} +{% set _memory_queue_size_buffer = (((_memory_queue_size | int) * 0.1) | int) %} +queue.max_bytes: {{ (_memory_queue_size | int) - (_memory_queue_size_buffer | int) }}mb +{% else %} +queue.max_bytes: {{ logstash_queue_size }}mb +{% endif %} +# +# If using queue.type: persisted, the maximum number of acked events before forcing a checkpoint +# Default is 1024, 0 for unlimited +# +# queue.checkpoint.acks: 1024 +# +# If using queue.type: persisted, the maximum number of written events before forcing a checkpoint +# Default is 1024, 0 for unlimited +# +# queue.checkpoint.writes: 1024 +# +# If using queue.type: persisted, the interval in milliseconds when a checkpoint is forced on the head page +# Default is 1000, 0 for no periodic checkpoint. +# +# queue.checkpoint.interval: 1000 +# +# ------------ Dead-Letter Queue Settings -------------- +# Flag to turn on dead-letter queue. +# +# dead_letter_queue.enable: false + +# If using dead_letter_queue.enable: true, the maximum size of each dead letter queue. Entries +# will be dropped if they would increase the size of the dead letter queue beyond this setting. +# Default is 1024mb +# dead_letter_queue.max_bytes: 1024mb + +# If using dead_letter_queue.enable: true, the directory path where the data files will be stored. +# Default is path.data/dead_letter_queue +# +# path.dead_letter_queue: +# +# ------------ Metrics Settings -------------- +# +# Bind address for the metrics REST endpoint +# +# http.host: "127.0.0.1" +# +# Bind port for the metrics REST endpoint, this option also accept a range +# (9600-9700) and logstash will pick up the first available ports. 
+# +# http.port: 9600-9700 +# +# ------------ Debugging Settings -------------- +# +# Options for log.level: +# * fatal +# * error +# * warn +# * info (default) +# * debug +# * trace +# +# log.level: info +path.logs: /var/log/logstash +# +# ------------ Other Settings -------------- +# +# Where to find custom plugins +# path.plugins: [] +# +# ---------------------------------- X-Pack ------------------------------------ +# X-Pack Monitoring +# https://www.elastic.co/guide/en/logstash/current/monitoring-logstash.html +xpack.monitoring.enabled: true +#xpack.monitoring.elasticsearch.username: logstash_system +#xpack.monitoring.elasticsearch.password: password +xpack.monitoring.elasticsearch.hosts: ["127.0.0.1:9200"] +#xpack.monitoring.elasticsearch.ssl.ca: [ "/path/to/ca.crt" ] +#xpack.monitoring.elasticsearch.ssl.truststore.path: path/to/file +#xpack.monitoring.elasticsearch.ssl.truststore.password: password +#xpack.monitoring.elasticsearch.ssl.keystore.path: /path/to/file +#xpack.monitoring.elasticsearch.ssl.keystore.password: password +#xpack.monitoring.elasticsearch.ssl.verification_mode: certificate +xpack.monitoring.elasticsearch.sniffing: {{ elastic_sniffing_enabled | default(false) }} +xpack.monitoring.collection.interval: 30s +xpack.monitoring.collection.pipeline.details.enabled: true +# +# ------------ X-Pack Settings (not applicable for OSS build)-------------- +# X-Pack Management +# https://www.elastic.co/guide/en/logstash/current/logstash-centralized-pipeline-management.html +#xpack.management.enabled: false +#xpack.management.pipeline.id: ["main", "apache_logs"] +#xpack.management.elasticsearch.username: logstash_admin_user +#xpack.management.elasticsearch.password: password +#xpack.management.elasticsearch.url: ["https://es1:9200", "https://es2:9200"] +#xpack.management.elasticsearch.ssl.ca: [ "/path/to/ca.crt" ] +#xpack.management.elasticsearch.ssl.truststore.path: /path/to/file +#xpack.management.elasticsearch.ssl.truststore.password: password 
+#xpack.management.elasticsearch.ssl.keystore.path: /path/to/file +#xpack.management.elasticsearch.ssl.keystore.password: password +#xpack.management.elasticsearch.sniffing: {{ elastic_sniffing_enabled | default(false) }} +#xpack.management.logstash.poll_interval: 5s diff --git a/elk_metrics_7x/roles/elastic_logstash/templates/systemd.logstash-mem-queue.conf.j2 b/elk_metrics_7x/roles/elastic_logstash/templates/systemd.logstash-mem-queue.conf.j2 new file mode 100644 index 00000000..a90cadcd --- /dev/null +++ b/elk_metrics_7x/roles/elastic_logstash/templates/systemd.logstash-mem-queue.conf.j2 @@ -0,0 +1,2 @@ +[Unit] +Requires = logstash-mem-queue.service diff --git a/elk_metrics_7x/roles/elastic_logstash/vars/redhat.yml b/elk_metrics_7x/roles/elastic_logstash/vars/redhat.yml new file mode 100644 index 00000000..0d0e66ee --- /dev/null +++ b/elk_metrics_7x/roles/elastic_logstash/vars/redhat.yml @@ -0,0 +1,20 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +logstash_distro_packages: + - logrotate + - logstash + +logstash_sysconfig_path: /etc/default/logstash diff --git a/elk_metrics_7x/roles/elastic_logstash/vars/suse.yml b/elk_metrics_7x/roles/elastic_logstash/vars/suse.yml new file mode 100644 index 00000000..0d0e66ee --- /dev/null +++ b/elk_metrics_7x/roles/elastic_logstash/vars/suse.yml @@ -0,0 +1,20 @@ +--- +# Copyright 2018, Rackspace US, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +logstash_distro_packages: + - logrotate + - logstash + +logstash_sysconfig_path: /etc/default/logstash diff --git a/elk_metrics_7x/roles/elastic_logstash/vars/ubuntu.yml b/elk_metrics_7x/roles/elastic_logstash/vars/ubuntu.yml new file mode 100644 index 00000000..0d0e66ee --- /dev/null +++ b/elk_metrics_7x/roles/elastic_logstash/vars/ubuntu.yml @@ -0,0 +1,20 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +logstash_distro_packages: + - logrotate + - logstash + +logstash_sysconfig_path: /etc/default/logstash diff --git a/elk_metrics_7x/roles/elastic_metricbeat/defaults/main.yml b/elk_metrics_7x/roles/elastic_metricbeat/defaults/main.yml new file mode 100644 index 00000000..1d5b039d --- /dev/null +++ b/elk_metrics_7x/roles/elastic_metricbeat/defaults/main.yml @@ -0,0 +1,24 @@ +--- +# Copyright 2018, Rackspace US, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#metricbeats monitoring endpoints +elastic_metricbeat_rabbitmq_monitoring_hosts: '"localhost:15672"' +elastic_metricbeat_haproxy_monitoring_hosts: '"unix:///var/run/haproxy.stat"' + +metricbeat_service_state: restarted + +# Inventory group to configure metricbeat ceph monitoring +# via either ceph-restapi or mgr prometheus module +ceph_metricbeat_group: cinder_volume diff --git a/elk_metrics_7x/roles/elastic_metricbeat/handlers/main.yml b/elk_metrics_7x/roles/elastic_metricbeat/handlers/main.yml new file mode 100644 index 00000000..08425049 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_metricbeat/handlers/main.yml @@ -0,0 +1,33 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Enable and restart metricbeat (systemd) + systemd: + name: "metricbeat" + enabled: true + state: "{{ metricbeat_service_state }}" + daemon_reload: true + when: + - ansible_service_mgr == 'systemd' + listen: Enable and restart metricbeat + +- name: Enable and restart metricbeat (upstart) + service: + name: "metricbeat" + state: "{{ metricbeat_service_state }}" + enabled: yes + when: + - ansible_service_mgr == 'upstart' + listen: Enable and restart metricbeat diff --git a/elk_metrics_7x/roles/elastic_metricbeat/meta/main.yml b/elk_metrics_7x/roles/elastic_metricbeat/meta/main.yml new file mode 100644 index 00000000..c26b3847 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_metricbeat/meta/main.yml @@ -0,0 +1,35 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +galaxy_info: + author: OpenStack + description: Elastic v7.x metricbeat role + company: Rackspace + license: Apache2 + min_ansible_version: 2.5 + platforms: + - name: Ubuntu + versions: + - trusty + - xenial + - bionic + categories: + - cloud + - development + - elasticsearch + - elastic-stack +dependencies: + - role: elastic_data_hosts + - role: elastic_repositories diff --git a/elk_metrics_7x/roles/elastic_metricbeat/tasks/main.yml b/elk_metrics_7x/roles/elastic_metricbeat/tasks/main.yml new file mode 100644 index 00000000..6ca09fb6 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_metricbeat/tasks/main.yml @@ -0,0 +1,335 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Gather variables for each operating system + include_vars: "{{ item }}" + with_first_found: + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower }}.yml" + - "{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version | lower }}.yml" + - "{{ ansible_distribution | lower }}.yml" + - "{{ ansible_os_family | lower }}-{{ ansible_distribution_version.split('.')[0] }}.yml" + - "{{ ansible_os_family | lower }}.yml" + tags: + - always + +- name: Refresh physical host facts + setup: {} + delegate_to: "{{ physical_host }}" + delegate_facts: true + when: + - physical_host is defined and physical_host != inventory_hostname + tags: + - always + +- name: Ensure beat is installed + package: + name: "{{ metricbeat_distro_packages }}" + state: "{{ elk_package_state | default('present') }}" + update_cache: "{{ (ansible_pkg_mgr == 'apt') | ternary('yes', omit) }}" + register: _package_task + until: _package_task is success + retries: 3 + delay: 2 + when: + - ansible_architecture == 'x86_64' + notify: + - Enable and restart metricbeat + tags: + - package_install + +- name: Ensure beat is installed (aarch64) + apt: + deb: 'https://object-storage-ca-ymq-1.vexxhost.net/swift/v1/8709ca2640344a4ba85cba0a1d6eea69/aarch64/metricbeat-6.5.0-arm64.deb' + when: + - ansible_pkg_mgr == 'apt' + - ansible_architecture == 'aarch64' + notify: + - Enable and restart metricbeat + tags: + - package_install + +- name: Check for apache + stat: + path: /etc/apache2/sites-available + register: apache2 + +- name: Check for ceph + stat: + path: /etc/ceph + register: ceph + +# gather ceph stats from localhost +# except when a list of mons is provided +- name: Set ceph stats hosts + set_fact: + ceph_stats_hosts: |- + {% set ceph_stats = [] %} + {% if (ceph_mons is defined) and (ceph_mons | length > 0) %} + {% for mon in ceph_mons %} + {% set _ = 
ceph_stats.insert(loop.index, (mon + ":5000")) %} + {% endfor %} + {% else %} + {% set ceph_stats = [ ansible_hostname + ":5000" ] %} + {% endif %} + {{ ceph_stats }} + +- name: Check for Ceph restapi metric port + wait_for: + port: "{{ item | regex_replace('^.+:', '') | int }}" + delay: 2 + timeout: 5 + host: "{{ item | regex_replace(':\\d+$', '') }}" + with_items: "{{ ceph_stats_hosts }}" + when: ceph.stat.exists + register: ceph_restapi_port_check + ignore_errors: yes + +- name: Set ceph_restapi_listening + set_fact: + ceph_restapi_listening: true + when: not (item.failed | default(true)) + with_items: "{{ ceph_restapi_port_check.results }}" + +- name: Check for Ceph prometheus metric port + wait_for: + port: 9283 + delay: 2 + timeout: 5 + host: "{{ item | regex_replace(':\\d+$', '') }}" + with_items: "{{ ceph_stats_hosts }}" + when: ceph.stat.exists + register: ceph_prometheus_port_check + ignore_errors: yes + +- name: Set ceph_prometheus_listening + set_fact: + ceph_prometheus_listening: true + when: not (item.failed | default(true)) + with_items: "{{ ceph_prometheus_port_check.results }}" + +- name: Check for etcd + stat: + path: /etc/etcd + register: etcd + +- name: Check for docker + stat: + path: /var/run/docker.sock + register: docker + +- name: Check for haproxy + stat: + path: /etc/haproxy + register: haproxy + +- name: Check for httpd + stat: + path: /etc/httpd + register: httpd + +- name: Check for kvm + stat: + path: /var/run/libvirt/libvirt-sock + register: kvm + +- name: Check for memcached + stat: + path: /etc/memcached.conf + register: memcached + +- name: Check for mysql + stat: + path: /var/lib/mysql + register: mysql + +- name: Check for nginx + stat: + path: /etc/nginx/nginx.conf + register: nginx + +- name: Check for rabbitmq + stat: + path: /var/lib/rabbitmq + register: rabbitmq + +- name: Check for uwsgi + stat: + path: /etc/uwsgi + register: uwsgi + +- name: Check for uwsgi stats sockets + find: + paths: /tmp + file_type: any + patterns: 
'*uwsgi-stats.sock' + register: uwsgi_find_sockets + +- name: Set discovery facts + set_fact: + apache_enabled: "{{ (apache2.stat.exists | bool) or (httpd.stat.exists | bool) }}" + + # Only enable ceph if something is listening on the ceph-rest-api port + # enable ceph on: cinder volume hosts when we have a list of ceph mons + # otherwise: all hosts which have /etc/ceph + ceph_restapi_enabled: |- + {% set ceph_detect = false %} + {% if ceph_restapi_listening is defined %} + {% if (ceph_mons is defined) and (ceph_mons | length > 0) and (inventory_hostname in groups[ceph_metricbeat_group]) %} + {% set ceph_detect = true %} + {% else %} + {% set ceph_detect = ceph.stat.exists | bool %} + {% endif %} + {% endif %} + {{ ceph_detect }} + + ceph_prometheus_enabled: |- + {% set ceph_detect = false %} + {% if ceph_prometheus_listening is defined and (inventory_hostname in groups[ceph_metricbeat_group]) %} + {% set ceph_detect = true %} + {% endif %} + {{ ceph_detect }} + + + docker_enabled: "{{ docker.stat.exists | bool }}" + etcd_enabled: "{{ etcd.stat.exists | bool }}" + haproxy_enabled: "{{ haproxy.stat.exists | bool }}" + kvm_enabled: "{{ kvm.stat.exists | bool }}" + memcached_enabled: "{{ memcached.stat.exists | bool }}" + mysql_enabled: "{{ mysql.stat.exists | bool }}" + nginx_enabled: "{{ nginx.stat.exists | bool }}" + rabbitmq_enabled: "{{ rabbitmq.stat.exists | bool }}" + uwsgi_enabled: "{{ uwsgi.stat.exists | bool }}" + uwsgi_sockets: "{{ uwsgi_find_sockets }}" + +# Apache 2 stats enablement +- name: Enable apache2 + block: + - name: Drop apache2 stats site config + template: + src: apache-status.conf.j2 + dest: /etc/apache2/sites-available/apache-status.conf + + - name: Enable apache2 stats site + file: + src: /etc/apache2/sites-available/apache-status.conf + dest: /etc/apache2/sites-enabled/apache-status.conf + state: link + + - name: Ensure apache2 stats mode is enabled + apache2_module: + name: status + state: present + register: apache_status_mod + + - name: 
Reload apache2 + service: + name: apache2 + state: reloaded + when: + - apache_status_mod is changed + rescue: + - name: Apache2 monitoring not enabled + debug: + msg: >- + The apache2 module was not enabled because of an error within the + enablement process. Check the host to ensure apache2 is really + available and resolve the noted errors before continuing. + + - name: Disable apache2 check + set_fact: + apache_enabled: false + when: + - apache_enabled | bool + + +# NGINX stats enablement +- name: Drop nginx stats site config + template: + src: nginx-status.conf.j2 + dest: "{{ metricbeat_nginx_vhost_path }}/nginx-status.conf" + register: nginx_status + when: nginx_enabled + +- name: Reload nginx + service: + name: nginx + state: reloaded + when: + - nginx_enabled + - nginx_status is changed + +- name: Create metricbeat systemd service config dir + file: + path: "/etc/systemd/system/metricbeat.service.d" + state: "directory" + group: "root" + owner: "root" + mode: "0755" + when: + - ansible_service_mgr == 'systemd' + +- name: Apply systemd options + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + mode: "0644" + when: + - ansible_service_mgr == 'systemd' + with_items: + - src: "systemd.general-overrides.conf.j2" + dest: "/etc/systemd/system/metricbeat.service.d/metricbeat-overrides.conf" + notify: + - Enable and restart metricbeat + +- name: Drop metricbeat conf files + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + mode: "0644" + with_items: + - src: "metricbeat.yml.j2" + dest: "/etc/metricbeat/metricbeat.yml" + notify: + - Enable and restart metricbeat + +- name: Run the beat setup role + include_role: + name: elastic_beat_setup + when: + - (groups['kibana'] | length) > 0 + vars: + elastic_beat_name: "metricbeat" + +- name: Force beat handlers + meta: flush_handlers + +- name: set metricbeat service state (upstart) + service: + name: "metricbeat" + state: "{{ metricbeat_service_state }}" + enabled: "{{ metricbeat_service_state in 
['running', 'started', 'restarted'] }}" + when: + - ansible_service_mgr == 'upstart' + - metricbeat_service_state in ['started', 'stopped'] + +- name: set metricbeat service state (systemd) + systemd: + name: "metricbeat" + state: "{{ metricbeat_service_state }}" + enabled: "{{ metricbeat_service_state in ['running', 'started', 'restarted'] }}" + when: + - ansible_service_mgr == 'systemd' + - metricbeat_service_state in ['started', 'stopped'] diff --git a/elk_metrics_7x/roles/elastic_metricbeat/tasks/systemd.general-overrides.conf.j2 b/elk_metrics_7x/roles/elastic_metricbeat/tasks/systemd.general-overrides.conf.j2 new file mode 120000 index 00000000..9ddff7cc --- /dev/null +++ b/elk_metrics_7x/roles/elastic_metricbeat/tasks/systemd.general-overrides.conf.j2 @@ -0,0 +1 @@ +../../../templates/systemd.general-overrides.conf.j2 \ No newline at end of file diff --git a/elk_metrics_7x/roles/elastic_metricbeat/templates/apache-status.conf.j2 b/elk_metrics_7x/roles/elastic_metricbeat/templates/apache-status.conf.j2 new file mode 100644 index 00000000..afedf443 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_metricbeat/templates/apache-status.conf.j2 @@ -0,0 +1,7 @@ +Listen 127.0.1.1:18181 + + + + SetHandler server-status + + diff --git a/elk_metrics_7x/roles/elastic_metricbeat/templates/metricbeat.yml.j2 b/elk_metrics_7x/roles/elastic_metricbeat/templates/metricbeat.yml.j2 new file mode 100644 index 00000000..1f9d9eff --- /dev/null +++ b/elk_metrics_7x/roles/elastic_metricbeat/templates/metricbeat.yml.j2 @@ -0,0 +1,1297 @@ +{% import 'templates/_macros.j2' as elk_macros %} +########################## Metricbeat Configuration ########################### + +# This file is a full configuration example documenting all non-deprecated +# options in comments. For a shorter configuration example, that contains only +# the most common options, please see metricbeat.yml in the same directory. 
+# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/metricbeat/index.html + +#============================ Config Reloading =============================== + +# Config reloading allows to dynamically load modules. Each file which is +# monitored must contain one or multiple modules as a list. +metricbeat.config.modules: + + # Glob pattern for configuration reloading + path: ${path.config}/conf.d/*.yml + + # Period on which files under path should be checked for changes + reload.period: 30s + + # Set to true to enable config reloading + reload.enabled: false + +# Maximum amount of time to randomly delay the start of a metricset. Use 0 to +# disable startup delay. +metricbeat.max_start_delay: 10s + +#============================== Autodiscover =================================== + +# Autodiscover allows you to detect changes in the system and spawn new modules +# as they happen. + +#metricbeat.autodiscover: + # List of enabled autodiscover providers +# providers: +# - type: docker +# templates: +# - condition: +# equals.docker.container.image: etcd +# config: +# - module: etcd +# metricsets: ["leader", "self", "store"] +# period: 30s +# hosts: ["${host}:2379"] + +#========================== Modules configuration ============================ + +{% set metric_sets = ['network', 'process', 'process_summary', 'uptime'] %} +{% if physical_host is defined and physical_host != inventory_hostname %} +{% set host_mount_devices = (hostvars[physical_host]['ansible_mounts'] | map(attribute='device') | list) %} +{% set container_mount_devices = (ansible_mounts | map(attribute='device') | list) %} +{% if (((container_mount_devices | difference(host_mount_devices)) | length) > 0) | bool %} +{% set _ = metric_sets.extend(['filesystem', 'fsstat']) %} +{% endif %} +{% else %} +{% set _ = metric_sets.extend(['cpu', 'load', 'memory', 'core', 'diskio', 'raid', 'socket', 'filesystem', 'fsstat']) %} +{% endif %} +metricbeat.modules: 
+#------------------------------- System Module ------------------------------- +# metricsets: +# - cpu # CPU usage +# - filesystem # File system usage for each mountpoint +# - fsstat # File system summary metrics +# - load # CPU load averages +# - memory # Memory usage +# - network # Network IO +# - process # Per process metrics +# - process_summary # Process summary +# - uptime # System Uptime +# - core # Per CPU core usage +# - diskio # Disk IO +# - raid # Raid +# - socket # Sockets and connection info (linux only) +- module: system + metricsets: {{ metric_sets }} + enabled: true + period: 60s + processes: ['.*'] + + # Configure the metric types that are included by these metricsets. + cpu.metrics: ["percentages"] # The other available options are normalized_percentages and ticks. + core.metrics: ["percentages"] # The other available option is ticks. + + # A list of filesystem types to ignore. The filesystem metricset will not + # collect data from filesystems matching any of the specified types, and + # fsstats will not include data from these filesystems in its summary stats. + #filesystem.ignore_types: [] + + # These options allow you to filter out all processes that are not + # in the top N by CPU or memory, in order to reduce the number of documents created. + # If both the `by_cpu` and `by_memory` options are used, the union of the two sets + # is included. + process.include_top_n: + # + # Set to false to disable this feature and include all processes + enabled: true + + # How many processes to include from the top by CPU. The processes are sorted + # by the `system.process.cpu.total.pct` field. + by_cpu: 20 + + # How many processes to include from the top by memory. The processes are sorted + # by the `system.process.memory.rss.bytes` field. + by_memory: 20 + + # If false, cmdline of a process is not cached. + process.cmdline.cache.enabled: true + + # Enable collection of cgroup metrics from processes on Linux. 
+ process.cgroups.enabled: {{ ansible_service_mgr == 'systemd' }} + + # A list of regular expressions used to whitelist environment variables + # reported with the process metricset's events. Defaults to empty. + #process.env.whitelist: [] + + # Include the cumulative CPU tick values with the process metrics. Defaults + # to false. + #process.include_cpu_ticks: false + + # Configure reverse DNS lookup on remote IP addresses in the socket metricset. + socket.reverse_lookup.enabled: true + socket.reverse_lookup.success_ttl: 60s + socket.reverse_lookup.failure_ttl: 60s + +##------------------------------ Aerospike Module ----------------------------- +#- module: aerospike +# metricsets: ["namespace"] +# enabled: false +# period: 30s +# hosts: ["localhost:3000"] +# +##------------------------------- Apache Module ------------------------------- +{% if apache_enabled | default(false) | bool %} +- module: apache + metricsets: ["status"] + enabled: true + period: 30s +# +# # Apache hosts + hosts: ["http://127.0.1.1:18181"] +# +# # Path to server status. Default server-status +# #server_status_path: "server-status" +# +# # Username of hosts. Empty by default +# #username: username +# +# # Password of hosts. 
Empty by default +# #password: password +{% endif %} +# +#-------------------------------- Ceph Module -------------------------------- +{% if ceph_restapi_enabled | default(false) | bool %} +- module: ceph + metricsets: ["cluster_disk", "cluster_health", "monitor_health", "pool_disk", "osd_tree"] + enabled: true + period: 30s + hosts: {{ ceph_stats_hosts | to_json }} +{% endif %} +# +##------------------------------ Couchbase Module ----------------------------- +#- module: couchbase +# metricsets: ["bucket", "cluster", "node"] +# period: 30s +# hosts: ["localhost:8091"] +# +##------------------------------- Docker Module ------------------------------- +{% if docker_enabled | default(false) | bool %} +- module: docker + metricsets: ["container", "cpu", "diskio", "healthcheck", "info", "memory", "network"] + enabled: true + hosts: ["unix:///var/run/docker.sock"] + period: 30s + + # To connect to Docker over TLS you must specify a client and CA certificate. + #ssl: + #certificate_authority: "/etc/pki/root/ca.pem" + #certificate: "/etc/pki/client/cert.pem" + #key: "/etc/pki/client/cert.key" +{% endif %} +# +##----------------------------- Dropwizard Module ----------------------------- +#- module: dropwizard +# metricsets: ["collector"] +# period: 30s +# hosts: ["localhost:8080"] +# metrics_path: /metrics/metrics +# namespace: example +# +##---------------------------- Elasticsearch Module --------------------------- +{% if inventory_hostname in (groups['elastic-logstash'] | union(groups['kibana'])) %} +- module: elasticsearch + metricsets: + - node + - node_stats + - index + - index_recovery + - index_summary + - shard + - ml_job + enabled: true + period: 30s + hosts: ["localhost:{{ elastic_port }}"] +{% endif %} + +##----------------------------- envoyproxy Module ----------------------------- +- module: envoyproxy + metricsets: ["server"] + period: 10s + hosts: ["localhost:9901"] + +##-------------------------------- Etcd Module -------------------------------- 
+{% if etcd_enabled | default(false) | bool %} +- module: etcd + metricsets: ["leader", "self", "store"] + enabled: true + period: 30s + hosts: ["localhost:2379"] +{% endif %} +# +##------------------------------- Golang Module ------------------------------- +#- module: golang +# metricsets: ["expvar","heap"] +# period: 30s +# hosts: ["localhost:6060"] +# heap.path: "/debug/vars" +# expvar: +# namespace: "example" +# path: "/debug/vars" +# +##------------------------------ Graphite Module ------------------------------ +#- module: graphite +# metricsets: ["server"] +# enabled: true +## protocol: "udp" +## templates: +## - filter: "test.*.bash.*" # This would match metrics like test.localhost.bash.stats +## namespace: "test" +## template: ".host.shell.metric*" # test.localhost.bash.stats would become metric=stats and tags host=localhost,shell=bash +## delimiter: "_" +# +# +##------------------------------- HAProxy Module ------------------------------ +{% if haproxy_enabled | default(false) | bool %} +- module: haproxy + metricsets: ["info", "stat"] + enabled: true + period: 30s + hosts: [ {{ elastic_metricbeat_haproxy_monitoring_hosts }} ] +{% endif %} +# +##-------------------------------- HTTP Module -------------------------------- +#- module: http +# metricsets: ["json"] +# period: 30s +# hosts: ["localhost:80"] +# namespace: "json_namespace" +# path: "/" +# #body: "" +# #method: "GET" +# #request.enabled: false +# #response.enabled: false +# #dedot.enabled: false +# + +##------------------------------- Jolokia Module ------------------------------ +#- module: jolokia +# metricsets: ["jmx"] +# period: 30s +# hosts: ["localhost"] +# namespace: "metrics" +# path: "/jolokia/?ignoreErrors=true&canonicalNaming=false" +# jmx.mapping: +# jmx.application: +# jmx.instance: +# +##-------------------------------- Kafka Module ------------------------------- +#- module: kafka +# metricsets: ["partition"] +# period: 30s +# hosts: ["localhost:9092"] +# +# #client_id: 
metricbeat +# #retries: 3 +# #backoff: 250ms +# +# # List of Topics to query metadata for. If empty, all topics will be queried. +# #topics: [] +# +# # Optional SSL. By default is off. +# # List of root certificates for HTTPS server verifications +# #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] +# +# # Certificate for SSL client authentication +# #ssl.certificate: "/etc/pki/client/cert.pem" +# +# # Client Certificate Key +# #ssl.key: "/etc/pki/client/cert.key" +# +# # SASL authentication +# #username: "" +# #password: "" +# +##------------------------------- Kibana Module ------------------------------- +{% if inventory_hostname in groups['kibana'] | default([]) %} +- module: kibana + metricsets: ["status"] + enabled: true + period: 30s + hosts: ["localhost:{{ kibana_port }}"] +{% endif %} +# +##----------------------------- Kubernetes Module ----------------------------- +# Node metrics, from kubelet: +#- module: kubernetes +# metricsets: +# - container +# - node +# - pod +# - system +# - volume +# period: 10s +# hosts: ["localhost:10255"] +# enabled: true +# #bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token +# #ssl.certificate_authorities: +# # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt +# #ssl.certificate: "/etc/pki/client/cert.pem" +# #ssl.key: "/etc/pki/client/cert.key" +# +# # Enriching parameters: +# add_metadata: true +# in_cluster: true +# # When used outside the cluster: +# #host: node_name +# #kube_config: ~/.kube/config +# +## State metrics from kube-state-metrics service: +#- module: kubernetes +# enabled: true +# metricsets: +# - state_node +# - state_deployment +# - state_replicaset +# - state_statefulset +# - state_pod +# - state_container +# period: 10s +# hosts: ["kube-state-metrics:8080"] +# +# # Enriching parameters: +# add_metadata: true +# in_cluster: true +# # When used outside the cluster: +# #host: node_name +# #kube_config: ~/.kube/config +# +## Kubernetes events +#- module: kubernetes +# 
enabled: true +# metricsets: +# - event +# +## Kubernetes API server +#- module: kubernetes +# enabled: true +# metricsets: +# - apiserver +# hosts: ["https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}"] + +##--------------------------------- kvm Module -------------------------------- +{% if kvm_enabled | default(false) | bool %} +- module: kvm + metricsets: ["dommemstat"] + enabled: true + period: 10s + hosts: ["unix:///var/run/libvirt/libvirt-sock"] + # For remote hosts, setup network access in libvirtd.conf + # and use the tcp scheme: + # hosts: [ "tcp://:16509" ] + + # Timeout to connect to Libvirt server + #timeout: 1s +{% endif %} + +##------------------------------ Logstash Module ------------------------------ +{% if inventory_hostname in groups['elastic-logstash'] | default([]) %} +- module: logstash + metricsets: ["node", "node_stats"] + enabled: true + period: 30s + hosts: ["localhost:9600"] +{% endif %} +# +##------------------------------ Memcached Module ----------------------------- +{% if memcached_enabled | default(false) | bool %} +- module: memcached + metricsets: ["stats"] + enabled: true + period: 30s + hosts: ["{{ ansible_hostname }}:11211"] +{% endif %} +# +##------------------------------- MongoDB Module ------------------------------ +#- module: mongodb +# metricsets: ["dbstats", "status"] +# period: 30s +# +# # The hosts must be passed as MongoDB URLs in the format: +# # [mongodb://][user:pass@]host[:port]. +# # The username and password can also be set using the respective configuration +# # options. The credentials in the URL take precedence over the username and +# # password configuration options. +# hosts: ["localhost:27017"] +# +# # Username to use when connecting to MongoDB. Empty by default. +# #username: user +# +# # Password to use when connecting to MongoDB. Empty by default. 
+# #password: pass +# +##-------------------------------- Munin Module ------------------------------- +#- module: munin +# metricsets: ["node"] +# enabled: true +# period: 10s +# hosts: ["localhost:4949"] +# node.namespace: node +# +##-------------------------------- MySQL Module ------------------------------- +{% if (mysql_enabled | default(false) | bool) and galera_root_user is defined and galera_root_password is defined %} +- module: mysql + metricsets: ["status"] + enabled: true + period: 30s +# +# # Host DSN should be defined as "user:pass@tcp(127.0.0.1:3306)/" +# # The username and password can either be set in the DSN or using the username +# # and password config options. Those specified in the DSN take precedence. + hosts: ["{{ galera_root_user }}:{{ galera_root_password }}@tcp({{ ansible_hostname }}:3306)/"] +# +# # Username of hosts. Empty by default. + username: {{ galera_root_user }} +# +# # Password of hosts. Empty by default. + password: {{ galera_root_password }} +# +# # By setting raw to true, all raw fields from the status metricset will be added to the event. +# #raw: false +# +{% endif %} +# +##-------------------------------- Nginx Module ------------------------------- +{% if nginx_enabled | default(false) | bool %} +- module: nginx + metricsets: ["stubstatus"] + enabled: true + period: 30s + + # Nginx hosts + hosts: ["http://127.0.1.1:18182"] + + # Path to server status. 
Default server-status + server_status_path: "server-status" +{% endif %} +# +##------------------------------- PHP_FPM Module ------------------------------ +#- module: php_fpm +# metricsets: ["pool"] +# period: 30s +# status_path: "/status" +# hosts: ["localhost:8080"] +# +##----------------------------- PostgreSQL Module ----------------------------- +#- module: postgresql +# metricsets: +# # Stats about every PostgreSQL database +# - database +# +# # Stats about the background writer process's activity +# - bgwriter +# +# # Stats about every PostgreSQL process +# - activity +# +# period: 30s +# +# # The host must be passed as PostgreSQL URL. Example: +# # postgres://localhost:5432?sslmode=disable +# # The available parameters are documented here: +# # https://godoc.org/github.com/lib/pq#hdr-Connection_String_Parameters +# hosts: ["postgres://localhost:5432"] +# +# # Username to use when connecting to PostgreSQL. Empty by default. +# #username: user +# +# # Password to use when connecting to PostgreSQL. Empty by default. 
+# #password: pass +# +##----------------------------- Prometheus Module ----------------------------- +{% if (prometheus_enabled | default(false) | bool) and (prometheus_config is defined) %} +{% for prometheus in prometheus_config %} +- module: prometheus + metricsets: [{% for mset in prometheus.metricsets|default(["collector"]) %}"{{ mset }}"{% if not loop.last %},{% endif %}{% endfor %}] + enabled: {{ prometheus.enabled | default('true') }} + period: {{ prometheus.period | default("10s") }} + hosts: [{% for phost in prometheus.hosts %}"{{ phost }}"{% if not loop.last %},{% endif %}{% endfor %}] + metrics_path: {{ prometheus.metrics_path | default("/metrics") }} + namespace: {{ prometheus.namespace }} +# +{% endfor %} +{% endif %} +{% if (ceph_prometheus_enabled | default(false) | bool) %} +- module: prometheus + metricsets: ["collector"] + enabled: 'true' + period: "10s" + hosts: [{% for phost in ceph_stats_hosts %}"{{ phost | regex_replace(':\\d+$', '') }}:9283"{% if not loop.last %},{% endif %}{% endfor %}] + metrics_path: "/metrics" + namespace: ceph +{% endif %} +# +##------------------------------ RabbitMQ Module ------------------------------ +{% if (rabbitmq_enabled | default(false) | bool) and (rabbitmq_monitoring_password is defined) %} +- module: rabbitmq + metricsets: ["node", "queue"] + enabled: true + period: 30s + hosts: [ {{ elastic_metricbeat_rabbitmq_monitoring_hosts }} ] + username: {{ rabbitmq_monitoring_userid | default('monitoring') }} + password: {{ rabbitmq_monitoring_password }} +{% endif %} +# +##-------------------------------- Redis Module ------------------------------- +#- module: redis +# metricsets: ["info", "keyspace"] +# period: 30s +# +# # Redis hosts +# hosts: ["127.0.0.1:6379"] +# +# # Timeout after which time a metricset should return an error +# # Timeout is by default defined as period, as a fetch of a metricset +# # should never take longer then period, as otherwise calls can pile up. 
+# #timeout: 1s +# +# # Optional fields to be added to each event +# #fields: +# # datacenter: west +# +# # Network type to be used for redis connection. Default: tcp +# #network: tcp +# +# # Max number of concurrent connections. Default: 10 +# #maxconn: 10 +# +# # Filters can be used to reduce the number of fields sent. +# #processors: +# # - include_fields: +# # fields: ["beat", "metricset", "redis.info.stats"] +# +# # Redis AUTH password. Empty by default. +# #password: foobared +# + +##------------------------------- traefik Module ------------------------------ +- module: traefik + metricsets: ["health"] + period: 10s + hosts: ["localhost:8080"] + +##-------------------------------- uwsgi Module ------------------------------- +{% if uwsgi_enabled | default(false) | bool %} +- module: uwsgi + metricsets: ["status"] + enabled: true + period: 30s +{% if uwsgi_sockets.matched | int > 0 %} + hosts: [{% for file in uwsgi_sockets.files %}"unix://{{ file.path }}"{% if not loop.last %},{% endif %}{% endfor %}] +{% else %} + hosts: ["tcp://127.0.0.1:9191"] +{% endif %} +{% endif %} +# +##------------------------------- vSphere Module ------------------------------ +#- module: vsphere +# metricsets: ["datastore", "host", "virtualmachine"] +# period: 30s +# hosts: ["https://localhost/sdk"] +# +# username: "user" +# password: "password" +# # If insecure is true, don't verify the server's certificate chain +# insecure: false +# # Get custom fields when using virtualmachine metric set. Default false. 
+# # get_custom_fields: false +# +# +##------------------------------- Windows Module ------------------------------ +#- module: windows +# metricsets: ["perfmon"] +# period: 30s +# perfmon.counters: +# +#- module: windows +# metricsets: ["service"] +# period: 60s +# +##------------------------------ ZooKeeper Module ----------------------------- +#- module: zookeeper +# metricsets: ["mntr"] +# period: 30s +# hosts: ["localhost:2181"] +# +# +# + +#================================ General ====================================== + +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. +# If this options is not defined, the hostname is used. +#name: + +# The tags of the shipper are included in their own field with each +# transaction published. Tags make it easy to group servers by different +# logical properties. +#tags: ["service-X", "web-tier"] + +# Optional fields that you can specify to add additional information to the +# output. Fields can be scalar values, arrays, dictionaries, or any nested +# combination of these. +#fields: +# env: staging + +# If this option is set to true, the custom fields are stored as top-level +# fields in the output document instead of being grouped under a fields +# sub-dictionary. Default is false. +#fields_under_root: false + +# Internal queue configuration for buffering events to be published. +#queue: + # Queue type by name (default 'mem') + # The memory queue will present all available events (up to the outputs + # bulk_max_size) to the output, the moment the output is ready to server + # another batch of events. + #mem: + # Max number of events the queue can buffer. + #events: 4096 + + # Hints the minimum number of events stored in the queue, + # before providing a batch of events to the outputs. + # The default value is set to 2048. + # A value of 0 ensures events are immediately available + # to be sent to the outputs. 
+ #flush.min_events: 2048 + + # Maximum duration after which events are available to the outputs, + # if the number of events stored in the queue is < min_flush_events. + #flush.timeout: 1s + + # The spool queue will store events in a local spool file, before + # forwarding the events to the outputs. + # + # Beta: spooling to disk is currently a beta feature. Use with care. + # + # The spool file is a circular buffer, which blocks once the file/buffer is full. + # Events are put into a write buffer and flushed once the write buffer + # is full or the flush_timeout is triggered. + # Once ACKed by the output, events are removed immediately from the queue, + # making space for new events to be persisted. + #spool: + # The file namespace configures the file path and the file creation settings. + # Once the file exists, the `size`, `page_size` and `prealloc` settings + # will have no more effect. + #file: + # Location of spool file. The default value is ${path.data}/spool.dat. + #path: "${path.data}/spool.dat" + + # Configure file permissions if file is created. The default value is 0600. + #permissions: 0600 + + # File size hint. The spool blocks, once this limit is reached. The default value is 100 MiB. + #size: 100MiB + + # The files page size. A file is split into multiple pages of the same size. The default value is 4KiB. + #page_size: 4KiB + + # If prealloc is set, the required space for the file is reserved using + # truncate. The default value is true. + #prealloc: true + + # Spool writer settings + # Events are serialized into a write buffer. The write buffer is flushed if: + # - The buffer limit has been reached. + # - The configured limit of buffered events is reached. + # - The flush timeout is triggered. + #write: + # Sets the write buffer size. + #buffer_size: 1MiB + + # Maximum duration after which events are flushed, if the write buffer + # is not full yet. The default value is 1s. + #flush.timeout: 1s + + # Number of maximum buffered events. 
The write buffer is flushed once the + # limit is reached. + #flush.events: 16384 + + # Configure the on-disk event encoding. The encoding can be changed + # between restarts. + # Valid encodings are: json, ubjson, and cbor. + #codec: cbor + #read: + # Reader flush timeout, waiting for more events to become available, so + # to fill a complete batch, as required by the outputs. + # If flush_timeout is 0, all available events are forwarded to the + # outputs immediately. + # The default value is 0s. + #flush.timeout: 0s + +# Sets the maximum number of CPUs that can be executing simultaneously. The +# default is the number of logical CPUs available in the system. +#max_procs: + +#================================ Processors =================================== + +# Processors are used to reduce the number of fields in the exported event or to +# enhance the event with external metadata. This section defines a list of +# processors that are applied one by one and the first one receives the initial +# event: +# +# event -> filter1 -> event1 -> filter2 ->event2 ... +# +# The supported processors are drop_fields, drop_event, include_fields, +# decode_json_fields, and add_cloud_metadata. 
+# +# For example, you can use the following processors to keep the fields that +# contain CPU load percentages, but remove the fields that contain CPU ticks +# values: +# +#processors: +#- include_fields: +# fields: ["cpu"] +#- drop_fields: +# fields: ["cpu.user", "cpu.system"] +# +# The following example drops the events that have the HTTP response code 200: +# +#processors: +#- drop_event: +# when: +# equals: +# http.code: 200 +# +# The following example renames the field a to b: +# +#processors: +#- rename: +# fields: +# - from: "a" +# to: "b" +# +# The following example tokenizes the string into fields: +# +#processors: +#- dissect: +# tokenizer: "%{key1} - %{key2}" +# field: "message" +# target_prefix: "dissect" +# +# The following example enriches each event with metadata from the cloud +# provider about the host machine. It works on EC2, GCE, DigitalOcean, +# Tencent Cloud, and Alibaba Cloud. +# +#processors: +#- add_cloud_metadata: ~ +# +# The following example enriches each event with the machine's local time zone +# offset from UTC. +# +#processors: +#- add_locale: +# format: offset +# +# The following example enriches each event with docker metadata, it matches +# given fields to an existing container id and adds info from that container: +# +#processors: +#- add_docker_metadata: +# host: "unix:///var/run/docker.sock" +# match_fields: ["system.process.cgroup.id"] +# match_pids: ["process.pid", "process.ppid"] +# match_source: true +# match_source_index: 4 +# match_short_id: false +# cleanup_timeout: 60 +# labels.dedot: false +# # To connect to Docker over TLS you must specify a client and CA certificate. +# #ssl: +# # certificate_authority: "/etc/pki/root/ca.pem" +# # certificate: "/etc/pki/client/cert.pem" +# # key: "/etc/pki/client/cert.key" +# +# The following example enriches each event with docker metadata, it matches +# container id from log path available in `source` field (by default it expects +# it to be /var/lib/docker/containers/*/*.log). 
+# +#processors: +#- add_docker_metadata: ~ +# +# The following example enriches each event with host metadata. +# +#processors: +#- add_host_metadata: +# netinfo.enabled: false +# +# The following example enriches each event with process metadata using +# process IDs included in the event. +# +#processors: +#- add_process_metadata: +# match_pids: ["system.process.ppid"] +# target: system.process.parent +# +# The following example decodes fields containing JSON strings +# and replaces the strings with valid JSON objects. +# +#processors: +#- decode_json_fields: +# fields: ["field1", "field2", ...] +# process_array: false +# max_depth: 1 +# target: "" +# overwrite_keys: false +processors: +- add_host_metadata: ~ + +##============================= Elastic Cloud ================================== +# +## These settings simplify using metricbeat with the Elastic Cloud (https://cloud.elastic.co/). +# +## The cloud.id setting overwrites the `output.elasticsearch.hosts` and +## `setup.kibana.host` options. +## You can find the `cloud.id` in the Elastic Cloud web UI. +##cloud.id: +# +## The cloud.auth setting overwrites the `output.elasticsearch.username` and +## `output.elasticsearch.password` settings. The format is `:`. +##cloud.auth: +# +#================================ Outputs ====================================== + +# Configure what output to use when sending the data collected by the beat. + +#-------------------------- Elasticsearch output ------------------------------- +#output.elasticsearch: +# # Boolean flag to enable or disable the output module. +# #enabled: true +# +# # Array of hosts to connect to. +# # Scheme and port can be left out and will be set to the default (http and 9200) +# # In case you specify and additional path, the scheme is required: http://localhost:9200/path +# # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 +# hosts: ["localhost:9200"] +# +# # Set gzip compression level. 
+# #compression_level: 0 +# +# # Optional protocol and basic auth credentials. +# #protocol: "https" +# #username: "elastic" +# #password: "changeme" +# +# # Dictionary of HTTP parameters to pass within the url with index operations. +# #parameters: +# #param1: value1 +# #param2: value2 +# +# # Number of workers per Elasticsearch host. +# #worker: 1 +# +# # Optional index name. The default is "metricbeat" plus date +# # and generates [metricbeat-]YYYY.MM.DD keys. +# # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. +# #index: "metricbeat-%{[beat.version]}-%{+yyyy.MM.dd}" +# +# # Optional ingest node pipeline. By default no pipeline will be used. +# #pipeline: "" +# +# # Optional HTTP Path +# #path: "/elasticsearch" +# +# # Custom HTTP headers to add to each request +# #headers: +# # X-My-Header: Contents of the header +# +# # Proxy server url +# #proxy_url: http://proxy:3128 +# +# # The number of times a particular Elasticsearch index operation is attempted. If +# # the indexing operation doesn't succeed after this many retries, the events are +# # dropped. The default is 3. +# #max_retries: 3 +# +# # The maximum number of events to bulk in a single Elasticsearch bulk API index request. +# # The default is 50. +# #bulk_max_size: 50 +# +# # Configure http request timeout before failing an request to Elasticsearch. +# #timeout: 90 +# +# # Use SSL settings for HTTPS. +# #ssl.enabled: true +# +# # Configure SSL verification mode. If `none` is configured, all server hosts +# # and certificates will be accepted. In this mode, SSL based connections are +# # susceptible to man-in-the-middle attacks. Use only for testing. Default is +# # `full`. +# #ssl.verification_mode: full +# +# # List of supported/valid TLS versions. By default all TLS versions 1.0 up to +# # 1.2 are enabled. +# #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] +# +# # SSL configuration. By default is off. 
+# # List of root certificates for HTTPS server verifications +# #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] +# +# # Certificate for SSL client authentication +# #ssl.certificate: "/etc/pki/client/cert.pem" +# +# # Client Certificate Key +# #ssl.key: "/etc/pki/client/cert.key" +# +# # Optional passphrase for decrypting the Certificate Key. +# #ssl.key_passphrase: '' +# +# # Configure cipher suites to be used for SSL connections +# #ssl.cipher_suites: [] +# +# # Configure curve types for ECDHE based cipher suites +# #ssl.curve_types: [] +# +# # Configure what types of renegotiation are supported. Valid options are +# # never, once, and freely. Default is never. +# #ssl.renegotiation: never +# +# +#----------------------------- Logstash output --------------------------------- +{{ elk_macros.output_logstash(inventory_hostname, logstash_data_hosts, ansible_processor_count) }} + +#------------------------------- Kafka output ---------------------------------- +#output.kafka: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The list of Kafka broker addresses from where to fetch the cluster metadata. + # The cluster metadata contain the actual Kafka brokers events are published + # to. + #hosts: ["localhost:9092"] + + # The Kafka topic used for produced events. The setting can be a format string + # using any event field. To set the topic from document type use `%{[type]}`. + #topic: beats + + # The Kafka event key setting. Use format string to create unique event key. + # By default no event key will be generated. + #key: '' + + # The Kafka event partitioning strategy. Default hashing strategy is `hash` + # using the `output.kafka.key` setting or randomly distributes events if + # `output.kafka.key` is not configured. + #partition.hash: + # If enabled, events will only be published to partitions with reachable + # leaders. Default is false. 
+ #reachable_only: false + + # Configure alternative event field names used to compute the hash value. + # If empty `output.kafka.key` setting will be used. + # Default value is empty list. + #hash: [] + + # Authentication details. Password is required if username is set. + #username: '' + #password: '' + + # Kafka version metricbeat is assumed to run against. Defaults to the oldest + # supported stable version (currently version 0.8.2.0) + #version: 0.8.2 + + # Metadata update configuration. Metadata do contain leader information + # deciding which broker to use when publishing. + #metadata: + # Max metadata request retry attempts when cluster is in middle of leader + # election. Defaults to 3 retries. + #retry.max: 3 + + # Waiting time between retries during leader elections. Default is 250ms. + #retry.backoff: 250ms + + # Refresh metadata interval. Defaults to every 10 minutes. + #refresh_frequency: 10m + + # The number of concurrent load-balanced Kafka output workers. + #worker: 1 + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Kafka request. The default + # is 2048. + #bulk_max_size: 2048 + + # The number of seconds to wait for responses from the Kafka brokers before + # timing out. The default is 30s. + #timeout: 30s + + # The maximum duration a broker will wait for number of required ACKs. The + # default is 10s. + #broker_timeout: 10s + + # The number of messages buffered for each Kafka broker. The default is 256. + #channel_buffer_size: 256 + + # The keep-alive period for an active network connection. If 0s, keep-alives + # are disabled. The default is 0 seconds. 
+ #keep_alive: 0 + + # Sets the output compression codec. Must be one of none, snappy and gzip. The + # default is gzip. + #compression: gzip + + # The maximum permitted size of JSON-encoded messages. Bigger messages will be + # dropped. The default value is 1000000 (bytes). This value should be equal to + # or less than the broker's message.max.bytes. + #max_message_bytes: 1000000 + + # The ACK reliability level required from broker. 0=no response, 1=wait for + # local commit, -1=wait for all replicas to commit. The default is 1. Note: + # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently + # on error. + #required_acks: 1 + + # The configurable ClientID used for logging, debugging, and auditing + # purposes. The default is "beats". + #client_id: beats + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. 
Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- Redis output ---------------------------------- +#output.redis: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The list of Redis servers to connect to. If load balancing is enabled, the + # events are distributed to the servers in the list. If one server becomes + # unreachable, the events are distributed to the reachable servers only. + #hosts: ["localhost:6379"] + + # The Redis port to use if hosts does not contain a port number. The default + # is 6379. + #port: 6379 + + # The name of the Redis list or channel the events are published to. The + # default is metricbeat. + #key: metricbeat + + # The password to authenticate with. The default is no authentication. + #password: + + # The Redis database number where the events are published. The default is 0. + #db: 0 + + # The Redis data type to use for publishing events. If the data type is list, + # the Redis RPUSH command is used. If the data type is channel, the Redis + # PUBLISH command is used. The default value is list. + #datatype: list + + # The number of workers to use for each host configured to publish events to + # Redis. Use this setting along with the loadbalance option. For example, if + # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each + # host). + #worker: 1 + + # If set to true and multiple hosts or workers are configured, the output + # plugin load balances published events onto all Redis hosts. If set to false, + # the output plugin sends all events to only one host (determined at random) + # and will switch to another host if the currently selected one becomes + # unreachable. The default value is true. + #loadbalance: true + + # The Redis connection timeout in seconds. The default is 5 seconds. + #timeout: 5s + + # The number of times to retry publishing an event after a publishing failure. 
+ # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Redis request or pipeline. + # The default is 2048. + #bulk_max_size: 2048 + + # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The + # value must be a URL with a scheme of socks5://. + #proxy_url: + + # This option determines whether Redis hostnames are resolved locally when + # using a proxy. The default value is false, which means that name resolution + # occurs on the proxy server. + #proxy_use_local_resolver: false + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. 
+ #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- File output ----------------------------------- +#output.file: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Path to the directory where to save the generated files. The option is + # mandatory. + #path: "/tmp/metricbeat" + + # Name of the generated files. The default is `metricbeat` and it generates + # files: `metricbeat`, `metricbeat.1`, `metricbeat.2`, etc. + #filename: metricbeat + + # Maximum size in kilobytes of each file. When this size is reached, and on + # every metricbeat restart, the files are rotated. The default value is 10240 + # kB. + #rotate_every_kb: 10000 + + # Maximum number of files under path. When this number of files is reached, + # the oldest file is deleted and the rest are shifted from last to first. The + # default is 7 files. + #number_of_files: 7 + + # Permissions to use for file creation. The default is 0600. + #permissions: 0600 + + +#----------------------------- Console output --------------------------------- +#output.console: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping html symbols in strings. + #escape_html: true + +#================================= Paths ====================================== + +# The home path for the metricbeat installation. This is the default base path +# for all other path settings and for miscellaneous files that come with the +# distribution (for example, the sample dashboards). 
+# If not set by a CLI flag or in the configuration file, the default for the +# home path is the location of the binary. +#path.home: + +# The configuration path for the metricbeat installation. This is the default +# base path for configuration files, including the main YAML configuration file +# and the Elasticsearch template file. If not set by a CLI flag or in the +# configuration file, the default for the configuration path is the home path. +#path.config: ${path.home} + +# The data path for the metricbeat installation. This is the default base path +# for all the files in which metricbeat needs to store its data. If not set by a +# CLI flag or in the configuration file, the default for the data path is a data +# subdirectory inside the home path. +#path.data: ${path.home}/data + +# The logs path for a metricbeat installation. This is the default location for +# the Beat's log files. If not set by a CLI flag or in the configuration file, +# the default for the logs path is a logs subdirectory inside the home path. +#path.logs: ${path.home}/logs + +#================================ Keystore ========================================== +# Location of the Keystore containing the keys and their sensitive values. 
+#keystore.path: "${path.config}/beats.keystore" + +#============================== Dashboards ===================================== +{{ elk_macros.setup_dashboards('metricbeat') }} + +#=============================== Template ====================================== +{{ elk_macros.setup_template('metricbeat', inventory_hostname, data_nodes, elasticsearch_number_of_replicas) }} + +#================================ Kibana ======================================= +{% if (groups['kibana'] | length) > 0 %} +{{ elk_macros.setup_kibana(hostvars[groups['kibana'][0]]['ansible_host'] ~ ':' ~ kibana_port) }} +{% endif %} + +#================================ Logging ====================================== +{{ elk_macros.beat_logging('metricbeat') }} + +#============================== Xpack Monitoring =============================== +{{ elk_macros.xpack_monitoring_elasticsearch(inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }} + +#================================ HTTP Endpoint ================================ +# Each beat can expose internal metrics through a HTTP endpoint. For security +# reasons the endpoint is disabled by default. This feature is currently experimental. +# Stats can be access through http://localhost:5066/stats . For pretty JSON output +# append ?pretty to the URL. + +# Defines if the HTTP endpoint is enabled. +#http.enabled: false + +# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost. +#http.host: localhost + +# Port on which the HTTP endpoint will bind. Default is 5066. +#http.port: 5066 + +#============================= Process Security ================================ + +# Enable or disable seccomp system call filtering on Linux. Default is enabled. 
+#seccomp.enabled: true diff --git a/elk_metrics_7x/roles/elastic_metricbeat/templates/nginx-status.conf.j2 b/elk_metrics_7x/roles/elastic_metricbeat/templates/nginx-status.conf.j2 new file mode 100644 index 00000000..47291c4a --- /dev/null +++ b/elk_metrics_7x/roles/elastic_metricbeat/templates/nginx-status.conf.j2 @@ -0,0 +1,9 @@ +server { + listen 127.0.1.1:18182; + location /server-status { + stub_status on; + access_log off; + allow 127.0.0.0/8; + deny all; + } +} diff --git a/elk_metrics_7x/roles/elastic_metricbeat/vars/redhat.yml b/elk_metrics_7x/roles/elastic_metricbeat/vars/redhat.yml new file mode 100644 index 00000000..f24dbcf3 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_metricbeat/vars/redhat.yml @@ -0,0 +1,18 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +metricbeat_nginx_vhost_path: /etc/nginx/conf.d/ +metricbeat_distro_packages: + - metricbeat diff --git a/elk_metrics_7x/roles/elastic_metricbeat/vars/suse.yml b/elk_metrics_7x/roles/elastic_metricbeat/vars/suse.yml new file mode 100644 index 00000000..8450bcfb --- /dev/null +++ b/elk_metrics_7x/roles/elastic_metricbeat/vars/suse.yml @@ -0,0 +1,18 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +metricbeat_nginx_vhost_path: /etc/nginx/vhosts.d +metricbeat_distro_packages: + - metricbeat diff --git a/elk_metrics_7x/roles/elastic_metricbeat/vars/ubuntu.yml b/elk_metrics_7x/roles/elastic_metricbeat/vars/ubuntu.yml new file mode 100644 index 00000000..98f323b5 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_metricbeat/vars/ubuntu.yml @@ -0,0 +1,18 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +metricbeat_nginx_vhost_path: /etc/nginx/sites-enabled +metricbeat_distro_packages: + - metricbeat diff --git a/elk_metrics_7x/roles/elastic_packetbeat/defaults/main.yml b/elk_metrics_7x/roles/elastic_packetbeat/defaults/main.yml new file mode 100644 index 00000000..85f01f0d --- /dev/null +++ b/elk_metrics_7x/roles/elastic_packetbeat/defaults/main.yml @@ -0,0 +1,16 @@ +--- +# Copyright 2018, Vexxhost, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +packetbeat_service_state: restarted diff --git a/elk_metrics_7x/roles/elastic_packetbeat/handlers/main.yml b/elk_metrics_7x/roles/elastic_packetbeat/handlers/main.yml new file mode 100644 index 00000000..1c4bbed8 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_packetbeat/handlers/main.yml @@ -0,0 +1,33 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Enable and restart packetbeat (systemd) + systemd: + name: "packetbeat" + enabled: true + state: "{{ packetbeat_service_state }}" + daemon_reload: true + when: + - ansible_service_mgr == 'systemd' + listen: Enable and restart packetbeat + +- name: Enable and restart packetbeat (upstart) + service: + name: "packetbeat" + state: "{{ packetbeat_service_state }}" + enabled: yes + when: + - ansible_service_mgr == 'upstart' + listen: Enable and restart packetbeat diff --git a/elk_metrics_7x/roles/elastic_packetbeat/meta/main.yml b/elk_metrics_7x/roles/elastic_packetbeat/meta/main.yml new file mode 100644 index 00000000..fd64a943 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_packetbeat/meta/main.yml @@ -0,0 +1,35 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +galaxy_info: + author: OpenStack + description: Elastic v7.x packetbeat role + company: Rackspace + license: Apache2 + min_ansible_version: 2.5 + platforms: + - name: Ubuntu + versions: + - trusty + - xenial + - bionic + categories: + - cloud + - development + - elasticsearch + - elastic-stack +dependencies: + - role: elastic_data_hosts + - role: elastic_repositories diff --git a/elk_metrics_7x/roles/elastic_packetbeat/tasks/main.yml b/elk_metrics_7x/roles/elastic_packetbeat/tasks/main.yml new file mode 100644 index 00000000..2b5337a0 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_packetbeat/tasks/main.yml @@ -0,0 +1,103 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Gather variables for each operating system + include_vars: "{{ item }}" + with_first_found: + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower }}.yml" + - "{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version | lower }}.yml" + - "{{ ansible_distribution | lower }}.yml" + - "{{ ansible_os_family | lower }}-{{ ansible_distribution_version.split('.')[0] }}.yml" + - "{{ ansible_os_family | lower }}.yml" + tags: + - always + +- name: Ensure beat is installed + package: + name: "{{ packetbeat_distro_packages }}" + state: "{{ elk_package_state | default('present') }}" + update_cache: "{{ (ansible_pkg_mgr == 'apt') | ternary('yes', omit) }}" + register: _package_task + until: _package_task is success + retries: 3 + delay: 2 + notify: + - Enable and restart packetbeat + tags: + - package_install + +- name: Create packetbeat systemd service config dir + file: + path: "/etc/systemd/system/packetbeat.service.d" + state: "directory" + group: "root" + owner: "root" + mode: "0755" + when: + - ansible_service_mgr == 'systemd' + +- name: Apply systemd options + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + mode: "0644" + when: + - ansible_service_mgr == 'systemd' + with_items: + - src: "systemd.general-overrides.conf.j2" + dest: "/etc/systemd/system/packetbeat.service.d/packetbeat-overrides.conf" + notify: + - Enable and restart packetbeat + +- name: Drop packetbeat conf files + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + mode: "0644" + with_items: + - src: "packetbeat.yml.j2" + dest: "/etc/packetbeat/packetbeat.yml" + notify: + - Enable and restart packetbeat + +- name: Run the beat setup role + include_role: + name: elastic_beat_setup + when: + - (groups['kibana'] | length) > 0 + vars: + elastic_beat_name: "packetbeat" + +- name: Force beat handlers + meta: flush_handlers + +- name: set packetbeat 
service state (upstart) + service: + name: "packetbeat" + state: "{{ packetbeat_service_state }}" + enabled: "{{ packetbeat_service_state in ['running', 'started', 'restarted'] }}" + when: + - ansible_service_mgr == 'upstart' + - packetbeat_service_state in ['started', 'stopped'] + +- name: set packetbeat service state (systemd) + systemd: + name: "packetbeat" + state: "{{ packetbeat_service_state }}" + enabled: "{{ packetbeat_service_state in ['running', 'started', 'restarted'] }}" + when: + - ansible_service_mgr == 'systemd' + - packetbeat_service_state in ['started', 'stopped'] diff --git a/elk_metrics_7x/roles/elastic_packetbeat/tasks/systemd.general-overrides.conf.j2 b/elk_metrics_7x/roles/elastic_packetbeat/tasks/systemd.general-overrides.conf.j2 new file mode 120000 index 00000000..9ddff7cc --- /dev/null +++ b/elk_metrics_7x/roles/elastic_packetbeat/tasks/systemd.general-overrides.conf.j2 @@ -0,0 +1 @@ +../../../templates/systemd.general-overrides.conf.j2 \ No newline at end of file diff --git a/elk_metrics_7x/roles/elastic_packetbeat/templates/packetbeat.yml.j2 b/elk_metrics_7x/roles/elastic_packetbeat/templates/packetbeat.yml.j2 new file mode 100644 index 00000000..aa67a6f4 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_packetbeat/templates/packetbeat.yml.j2 @@ -0,0 +1,1202 @@ +{% import 'templates/_macros.j2' as elk_macros %} +###################### Packetbeat Configuration Example ####################### + +# This file is a full configuration example documenting all non-deprecated +# options in comments. For a shorter configuration example, that contains only +# the most common options, please see packetbeat.yml in the same directory. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/packetbeat/index.html + +#============================== Network device ================================ + +# Select the network interface to sniff the data. 
You can use the "any" +# keyword to sniff on all connected interfaces. +packetbeat.interfaces.device: any + +# Packetbeat supports three sniffer types: +# * pcap, which uses the libpcap library and works on most platforms, but it's +# not the fastest option. +# * af_packet, which uses memory-mapped sniffing. This option is faster than +# libpcap and doesn't require a kernel module, but it's Linux-specific. +packetbeat.interfaces.type: af_packet + +# The maximum size of the packets to capture. The default is 65535, which is +# large enough for almost all networks and interface types. If you sniff on a +# physical network interface, the optimal setting is the MTU size. On virtual +# interfaces, however, it's safer to accept the default value. +packetbeat.interfaces.snaplen: 65535 + +# The maximum size of the shared memory buffer to use between the kernel and +# user space. A bigger buffer usually results in lower CPU usage, but consumes +# more memory. This setting is only available for the af_packet sniffer type. +# The default is 30 MB. +packetbeat.interfaces.buffer_size_mb: 30 + +# Packetbeat automatically generates a BPF for capturing only the traffic on +# ports where it expects to find known protocols. Use this settings to tell +# Packetbeat to generate a BPF filter that accepts VLAN tags. +packetbeat.interfaces.with_vlans: true + +# Use this setting to override the automatically generated BPF filter. +#packetbeat.interfaces.bpf_filter: + +#================================== Flows ===================================== + +packetbeat.flows: + # Enable Network flows. Default: true + enabled: true + + # Set network flow timeout. Flow is killed if no packet is received before being + # timed out. + timeout: 90s + + # Configure reporting period. If set to -1, only killed flows will be reported + period: 30s + +#========================== Transaction protocols ============================= + +packetbeat.protocols: +- type: icmp + # Enable ICMPv4 and ICMPv6 monitoring. 
Default: true + enabled: true + +- type: amqp + # Enable AMQP monitoring. Default: true + enabled: true + + # Configure the ports where to listen for AMQP traffic. You can disable + # the AMQP protocol by commenting out the list of ports. + ports: [5672] + # Truncate messages that are published and avoid huge messages being + # indexed. + # Default: 1000 + #max_body_length: 1000 + + # Hide the header fields in header frames. + # Default: false + parse_headers: true + + # Hide the additional arguments of method frames. + # Default: false + parse_arguments: true + + # Hide all methods relative to connection negotiation between server and + # client. + # Default: true + hide_connection_information: false + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. + #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + +- type: cassandra + # Enable cassandra monitoring. Default: false + enabled: false + + #Cassandra port for traffic monitoring. + ports: [9042] + + # If this option is enabled, the raw message of the request (`cassandra_request` field) + # is included in published events. The default is true. + #send_request: true + + # If this option is enabled, the raw message of the response (`cassandra_request.request_headers` field) + # is included in published events. The default is true. enable `send_request` first before enable this option. + #send_request_header: true + + # If this option is enabled, the raw message of the response (`cassandra_response` field) + # is included in published events. The default is true. 
+ #send_response: true + + # If this option is enabled, the raw message of the response (`cassandra_response.response_headers` field) + # is included in published events. The default is true. enable `send_response` first before enable this option. + #send_response_header: true + + # Configures the default compression algorithm being used to uncompress compressed frames by name. Currently only `snappy` can be configured. + # By default no compressor is configured. + #compressor: "snappy" + + # This option indicates which Operator/Operators will be ignored. + #ignored_ops: ["SUPPORTED","OPTIONS"] + +- type: dhcpv4 + # Configure the DHCP for IPv4 ports. + ports: [67, 68] + +- type: dns + # Enable DNS monitoring. Default: true + enabled: true + + # Configure the ports where to listen for DNS traffic. You can disable + # the DNS protocol by commenting out the list of ports. + ports: [53] + + # include_authorities controls whether or not the dns.authorities field + # (authority resource records) is added to messages. + # Default: false + include_authorities: true + # include_additionals controls whether or not the dns.additionals field + # (additional resource records) is added to messages. + # Default: false + include_additionals: true + + # send_request and send_response control whether or not the stringified DNS + # request and response message are added to the result. + # Nearly all data about the request/response is available in the dns.* + # fields, but this can be useful if you need visibility specifically + # into the request or the response. + # Default: false + # send_request: true + # send_response: true + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + +- type: http + # Enable HTTP monitoring. 
Default: true +{% set used_ports = [53, 443, 2049, 3306, 5432, 5672, 6379, 9042, 9090, 11211, 27017] %} +{% set ports = [] %} +{% for item in heartbeat_services %} +{% for port in item.ports %} +{% if (item.type == 'http') and (not port in used_ports) %} +{% set _ = ports.extend([port]) %} +{% endif %} +{% endfor %} +{% endfor %} + enabled: true + + # Configure the ports where to listen for HTTP traffic. You can disable + # the HTTP protocol by commenting out the list of ports. + ports: {{ ports | unique }} + + # Uncomment the following to hide certain parameters in URL or forms attached + # to HTTP requests. The names of the parameters are case insensitive. + # The value of the parameters will be replaced with the 'xxxxx' string. + # This is generally useful for avoiding storing user passwords or other + # sensitive information. + # Only query parameters and top level form parameters are replaced. + # hide_keywords: ['pass', 'password', 'passwd'] + + # A list of header names to capture and send to Elasticsearch. These headers + # are placed under the `headers` dictionary in the resulting JSON. + send_headers: true + + # Instead of sending a white list of headers to Elasticsearch, you can send + # all headers by setting this option to true. The default is false. + send_all_headers: true + + # The list of content types for which Packetbeat includes the full HTTP + # payload in the response field. + #include_body_for: [] + + # If the Cookie or Set-Cookie headers are sent, this option controls whether + # they are split into individual values. + #split_cookie: false + + # The header field to extract the real IP from. This setting is useful when + # you want to capture traffic behind a reverse proxy, but you want to get the + # geo-location information. + #real_ip_header: + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. 
+ #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + + # Maximum message size. If an HTTP message is larger than this, it will + # be trimmed to this size. Default is 10 MB. + #max_message_size: 10485760 + +- type: memcache + # Enable memcache monitoring. Default: true + enabled: true + + # Configure the ports where to listen for memcache traffic. You can disable + # the Memcache protocol by commenting out the list of ports. + ports: [11211] + + # Uncomment the parseunknown option to force the memcache text protocol parser + # to accept unknown commands. + # Note: All unknown commands MUST not contain any data parts! + # Default: false + # parseunknown: true + + # Update the maxvalue option to store the values - base64 encoded - in the + # json output. + # possible values: + # maxvalue: -1 # store all values (text based protocol multi-get) + # maxvalue: 0 # store no values at all + # maxvalue: N # store up to N values + # Default: 0 + # maxvalues: -1 + + # Use maxbytespervalue to limit the number of bytes to be copied per value element. + # Note: Values will be base64 encoded, so actual size in json document + # will be 4 times maxbytespervalue. + # Default: unlimited + # maxbytespervalue: 100 + + # UDP transaction timeout in milliseconds. + # Note: Quiet messages in UDP binary protocol will get response only in error case. + # The memcached analyzer will wait for udptransactiontimeout milliseconds + # before publishing quiet messages. Non quiet messages or quiet requests with + # error response will not have to wait for the timeout. 
+ # Default: 200 + # udptransactiontimeout: 1000 + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. + #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + +- type: mysql + # Enable mysql monitoring. Default: true + enabled: true + + # Configure the ports where to listen for MySQL traffic. You can disable + # the MySQL protocol by commenting out the list of ports. + ports: [3306] + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. + #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + +- type: pgsql + # Enable pgsql monitoring. Default: true + enabled: false + + # Configure the ports where to listen for Pgsql traffic. You can disable + # the Pgsql protocol by commenting out the list of ports. + ports: [5432] + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. + #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. 
+ #transaction_timeout: 10s + +- type: redis + # Enable redis monitoring. Default: true + enabled: false + + # Configure the ports where to listen for Redis traffic. You can disable + # the Redis protocol by commenting out the list of ports. + ports: [6379] + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. + #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + +- type: thrift + # Enable thrift monitoring. Default: true + enabled: false + + # Configure the ports where to listen for Thrift-RPC traffic. You can disable + # the Thrift-RPC protocol by commenting out the list of ports. + ports: [9090] + + # The Thrift transport type. Currently this option accepts the values socket + # for TSocket, which is the default Thrift transport, and framed for the + # TFramed Thrift transport. The default is socket. + #transport_type: socket + + # The Thrift protocol type. Currently the only accepted value is binary for + # the TBinary protocol, which is the default Thrift protocol. + #protocol_type: binary + + # The Thrift interface description language (IDL) files for the service that + # Packetbeat is monitoring. Providing the IDL enables Packetbeat to include + # parameter and exception names. + #idl_files: [] + + # The maximum length for strings in parameters or return values. If a string + # is longer than this value, the string is automatically truncated to this + # length. + #string_max_size: 200 + + # The maximum number of elements in a Thrift list, set, map, or structure. 
+ #collection_max_size: 15 + + # If this option is set to false, Packetbeat decodes the method name from the + # reply and simply skips the rest of the response message. + #capture_reply: true + + # If this option is set to true, Packetbeat replaces all strings found in + # method parameters, return codes, or exception structures with the "*" + # string. + #obfuscate_strings: false + + # The maximum number of fields that a structure can have before Packetbeat + # ignores the whole transaction. + #drop_after_n_struct_fields: 500 + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. + #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + +- type: mongodb + # Enable mongodb monitoring. Default: true + enabled: false + + # Configure the ports where to listen for MongoDB traffic. You can disable + # the MongoDB protocol by commenting out the list of ports. + ports: [27017] + + + # The maximum number of documents from the response to index in the `response` + # field. The default is 10. + #max_docs: 10 + + # The maximum number of characters in a single document indexed in the + # `response` field. The default is 5000. You can set this to 0 to index an + # unlimited number of characters per document. + #max_doc_length: 5000 + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. + #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Transaction timeout. 
Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + +- type: nfs + # Enable NFS monitoring. Default: true + enabled: true + + # Configure the ports where to listen for NFS traffic. You can disable + # the NFS protocol by commenting out the list of ports. + ports: [2049] + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. + #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + +- type: tls + # Enable TLS monitoring. Default: true + enabled: true + + # Configure the ports where to listen for TLS traffic. You can disable + # the TLS protocol by commenting out the list of ports. + ports: [443] + + # If this option is enabled, the client and server certificates and + # certificate chains are sent to Elasticsearch. The default is true. + send_certificates: true + + # If this option is enabled, the raw certificates will be stored + # in PEM format under the `raw` key. The default is false. + #include_raw_certificates: false + +#=========================== Monitored processes ============================== + +# Configure the processes to be monitored and how to find them. If a process is +# monitored then Packetbeat attempts to use it's name to fill in the `proc` and +# `client_proc` fields. +# The processes can be found by searching their command line by a given string. +# +# Process matching is optional and can be enabled by uncommenting the following +# lines. 
+# +#packetbeat.procs: +# enabled: false +# monitored: +# - process: mysqld +# cmdline_grep: mysqld +# +# - process: pgsql +# cmdline_grep: postgres +# +# - process: nginx +# cmdline_grep: nginx +# +# - process: app +# cmdline_grep: gunicorn + +# Uncomment the following if you want to ignore transactions created +# by the server on which the shipper is installed. This option is useful +# to remove duplicates if shippers are installed on multiple servers. +#packetbeat.ignore_outgoing: true + +#================================ General ====================================== + +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. +# If this options is not defined, the hostname is used. +#name: + +# The tags of the shipper are included in their own field with each +# transaction published. Tags make it easy to group servers by different +# logical properties. +#tags: ["service-X", "web-tier"] + +# Optional fields that you can specify to add additional information to the +# output. Fields can be scalar values, arrays, dictionaries, or any nested +# combination of these. +#fields: +# env: staging + +# If this option is set to true, the custom fields are stored as top-level +# fields in the output document instead of being grouped under a fields +# sub-dictionary. Default is false. +#fields_under_root: false + +# Internal queue configuration for buffering events to be published. +#queue: + # Queue type by name (default 'mem') + # The memory queue will present all available events (up to the outputs + # bulk_max_size) to the output, the moment the output is ready to server + # another batch of events. + #mem: + # Max number of events the queue can buffer. + #events: 4096 + + # Hints the minimum number of events stored in the queue, + # before providing a batch of events to the outputs. + # The default value is set to 2048. 
+ # A value of 0 ensures events are immediately available + # to be sent to the outputs. + #flush.min_events: 2048 + + # Maximum duration after which events are available to the outputs, + # if the number of events stored in the queue is < min_flush_events. + #flush.timeout: 1s + + # The spool queue will store events in a local spool file, before + # forwarding the events to the outputs. + # + # Beta: spooling to disk is currently a beta feature. Use with care. + # + # The spool file is a circular buffer, which blocks once the file/buffer is full. + # Events are put into a write buffer and flushed once the write buffer + # is full or the flush_timeout is triggered. + # Once ACKed by the output, events are removed immediately from the queue, + # making space for new events to be persisted. + #spool: + # The file namespace configures the file path and the file creation settings. + # Once the file exists, the `size`, `page_size` and `prealloc` settings + # will have no more effect. + #file: + # Location of spool file. The default value is ${path.data}/spool.dat. + #path: "${path.data}/spool.dat" + + # Configure file permissions if file is created. The default value is 0600. + #permissions: 0600 + + # File size hint. The spool blocks, once this limit is reached. The default value is 100 MiB. + #size: 100MiB + + # The files page size. A file is split into multiple pages of the same size. The default value is 4KiB. + #page_size: 4KiB + + # If prealloc is set, the required space for the file is reserved using + # truncate. The default value is true. + #prealloc: true + + # Spool writer settings + # Events are serialized into a write buffer. The write buffer is flushed if: + # - The buffer limit has been reached. + # - The configured limit of buffered events is reached. + # - The flush timeout is triggered. + #write: + # Sets the write buffer size. + #buffer_size: 1MiB + + # Maximum duration after which events are flushed, if the write buffer + # is not full yet. 
The default value is 1s. + #flush.timeout: 1s + + # Number of maximum buffered events. The write buffer is flushed once the + # limit is reached. + #flush.events: 16384 + + # Configure the on-disk event encoding. The encoding can be changed + # between restarts. + # Valid encodings are: json, ubjson, and cbor. + #codec: cbor + #read: + # Reader flush timeout, waiting for more events to become available, so + # to fill a complete batch, as required by the outputs. + # If flush_timeout is 0, all available events are forwarded to the + # outputs immediately. + # The default value is 0s. + #flush.timeout: 0s + +# Sets the maximum number of CPUs that can be executing simultaneously. The +# default is the number of logical CPUs available in the system. +#max_procs: + +#================================ Processors =================================== + +# Processors are used to reduce the number of fields in the exported event or to +# enhance the event with external metadata. This section defines a list of +# processors that are applied one by one and the first one receives the initial +# event: +# +# event -> filter1 -> event1 -> filter2 ->event2 ... +# +# The supported processors are drop_fields, drop_event, include_fields, +# decode_json_fields, and add_cloud_metadata. 
+# +# For example, you can use the following processors to keep the fields that +# contain CPU load percentages, but remove the fields that contain CPU ticks +# values: +# +#processors: +#- include_fields: +# fields: ["cpu"] +#- drop_fields: +# fields: ["cpu.user", "cpu.system"] +# +# The following example drops the events that have the HTTP response code 200: +# +#processors: +#- drop_event: +# when: +# equals: +# http.code: 200 +# +# The following example renames the field a to b: +# +#processors: +#- rename: +# fields: +# - from: "a" +# to: "b" +# +# The following example tokenizes the string into fields: +# +#processors: +#- dissect: +# tokenizer: "%{key1} - %{key2}" +# field: "message" +# target_prefix: "dissect" +# +# The following example enriches each event with metadata from the cloud +# provider about the host machine. It works on EC2, GCE, DigitalOcean, +# Tencent Cloud, and Alibaba Cloud. +# +#processors: +#- add_cloud_metadata: ~ +# +# The following example enriches each event with the machine's local time zone +# offset from UTC. +# +#processors: +#- add_locale: +# format: offset +# +# The following example enriches each event with docker metadata, it matches +# given fields to an existing container id and adds info from that container: +# +#processors: +#- add_docker_metadata: +# host: "unix:///var/run/docker.sock" +# match_fields: ["system.process.cgroup.id"] +# match_pids: ["process.pid", "process.ppid"] +# match_source: true +# match_source_index: 4 +# match_short_id: false +# cleanup_timeout: 60 +# labels.dedot: false +# # To connect to Docker over TLS you must specify a client and CA certificate. +# #ssl: +# # certificate_authority: "/etc/pki/root/ca.pem" +# # certificate: "/etc/pki/client/cert.pem" +# # key: "/etc/pki/client/cert.key" +# +# The following example enriches each event with docker metadata, it matches +# container id from log path available in `source` field (by default it expects +# it to be /var/lib/docker/containers/*/*.log). 
+# +#processors: +#- add_docker_metadata: ~ +# +# The following example enriches each event with host metadata. +# +#processors: +#- add_host_metadata: +# netinfo.enabled: false +# +# The following example enriches each event with process metadata using +# process IDs included in the event. +# +#processors: +#- add_process_metadata: +# match_pids: ["system.process.ppid"] +# target: system.process.parent +# +# The following example decodes fields containing JSON strings +# and replaces the strings with valid JSON objects. +# +#processors: +#- decode_json_fields: +# fields: ["field1", "field2", ...] +# process_array: false +# max_depth: 1 +# target: "" +# overwrite_keys: false +processors: +- add_host_metadata: ~ + +#============================= Elastic Cloud ================================== + +# These settings simplify using packetbeat with the Elastic Cloud (https://cloud.elastic.co/). + +# The cloud.id setting overwrites the `output.elasticsearch.hosts` and +# `setup.kibana.host` options. +# You can find the `cloud.id` in the Elastic Cloud web UI. +#cloud.id: + +# The cloud.auth setting overwrites the `output.elasticsearch.username` and +# `output.elasticsearch.password` settings. The format is `:`. +#cloud.auth: + +#================================ Outputs ====================================== + +# Configure what output to use when sending the data collected by the beat. + +#-------------------------- Elasticsearch output ------------------------------- +#output.elasticsearch: +# # Boolean flag to enable or disable the output module. +# #enabled: true +# +# # Array of hosts to connect to. +# # Scheme and port can be left out and will be set to the default (http and 9200) +# # In case you specify and additional path, the scheme is required: http://localhost:9200/path +# # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 +# hosts: ["localhost:9200"] +# +# # Set gzip compression level. 
+# #compression_level: 0 +# +# # Optional protocol and basic auth credentials. +# #protocol: "https" +# #username: "elastic" +# #password: "changeme" +# +# # Dictionary of HTTP parameters to pass within the url with index operations. +# #parameters: +# #param1: value1 +# #param2: value2 +# +# # Number of workers per Elasticsearch host. +# #worker: 1 +# +# # Optional index name. The default is "packetbeat" plus date +# # and generates [packetbeat-]YYYY.MM.DD keys. +# # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. +# #index: "packetbeat-%{[beat.version]}-%{+yyyy.MM.dd}" +# +# # Optional ingest node pipeline. By default no pipeline will be used. +# #pipeline: "" +# +# # Optional HTTP Path +# #path: "/elasticsearch" +# +# # Custom HTTP headers to add to each request +# #headers: +# # X-My-Header: Contents of the header +# +# # Proxy server url +# #proxy_url: http://proxy:3128 +# +# # The number of times a particular Elasticsearch index operation is attempted. If +# # the indexing operation doesn't succeed after this many retries, the events are +# # dropped. The default is 3. +# #max_retries: 3 +# +# # The maximum number of events to bulk in a single Elasticsearch bulk API index request. +# # The default is 50. +# #bulk_max_size: 50 +# +# # Configure http request timeout before failing an request to Elasticsearch. +# #timeout: 90 +# +# # Use SSL settings for HTTPS. +# #ssl.enabled: true +# +# # Configure SSL verification mode. If `none` is configured, all server hosts +# # and certificates will be accepted. In this mode, SSL based connections are +# # susceptible to man-in-the-middle attacks. Use only for testing. Default is +# # `full`. +# #ssl.verification_mode: full +# +# # List of supported/valid TLS versions. By default all TLS versions 1.0 up to +# # 1.2 are enabled. +# #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] +# +# # SSL configuration. By default is off. 
+# # List of root certificates for HTTPS server verifications +# #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] +# +# # Certificate for SSL client authentication +# #ssl.certificate: "/etc/pki/client/cert.pem" +# +# # Client Certificate Key +# #ssl.key: "/etc/pki/client/cert.key" +# +# # Optional passphrase for decrypting the Certificate Key. +# #ssl.key_passphrase: '' +# +# # Configure cipher suites to be used for SSL connections +# #ssl.cipher_suites: [] +# +# # Configure curve types for ECDHE based cipher suites +# #ssl.curve_types: [] +# +# # Configure what types of renegotiation are supported. Valid options are +# # never, once, and freely. Default is never. +# #ssl.renegotiation: never + + +#----------------------------- Logstash output --------------------------------- +{{ elk_macros.output_logstash(inventory_hostname, logstash_data_hosts, ansible_processor_count) }} + +#------------------------------- Kafka output ---------------------------------- +#output.kafka: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The list of Kafka broker addresses from where to fetch the cluster metadata. + # The cluster metadata contain the actual Kafka brokers events are published + # to. + #hosts: ["localhost:9092"] + + # The Kafka topic used for produced events. The setting can be a format string + # using any event field. To set the topic from document type use `%{[type]}`. + #topic: beats + + # The Kafka event key setting. Use format string to create unique event key. + # By default no event key will be generated. + #key: '' + + # The Kafka event partitioning strategy. Default hashing strategy is `hash` + # using the `output.kafka.key` setting or randomly distributes events if + # `output.kafka.key` is not configured. + #partition.hash: + # If enabled, events will only be published to partitions with reachable + # leaders. Default is false. 
+ #reachable_only: false + + # Configure alternative event field names used to compute the hash value. + # If empty `output.kafka.key` setting will be used. + # Default value is empty list. + #hash: [] + + # Authentication details. Password is required if username is set. + #username: '' + #password: '' + + # Kafka version packetbeat is assumed to run against. Defaults to the "1.0.0". + #version: '1.0.0' + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping html symbols in strings. + #escape_html: true + + # Metadata update configuration. Metadata do contain leader information + # deciding which broker to use when publishing. + #metadata: + # Max metadata request retry attempts when cluster is in middle of leader + # election. Defaults to 3 retries. + #retry.max: 3 + + # Waiting time between retries during leader elections. Default is 250ms. + #retry.backoff: 250ms + + # Refresh metadata interval. Defaults to every 10 minutes. + #refresh_frequency: 10m + + # The number of concurrent load-balanced Kafka output workers. + #worker: 1 + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Kafka request. The default + # is 2048. + #bulk_max_size: 2048 + + # The number of seconds to wait for responses from the Kafka brokers before + # timing out. The default is 30s. + #timeout: 30s + + # The maximum duration a broker will wait for number of required ACKs. The + # default is 10s. + #broker_timeout: 10s + + # The number of messages buffered for each Kafka broker. The default is 256. 
+ #channel_buffer_size: 256 + + # The keep-alive period for an active network connection. If 0s, keep-alives + # are disabled. The default is 0 seconds. + #keep_alive: 0 + + # Sets the output compression codec. Must be one of none, snappy and gzip. The + # default is gzip. + #compression: gzip + + # Set the compression level. Currently only gzip provides a compression level + # between 0 and 9. The default value is chosen by the compression algorithm. + #compression_level: 4 + + # The maximum permitted size of JSON-encoded messages. Bigger messages will be + # dropped. The default value is 1000000 (bytes). This value should be equal to + # or less than the broker's message.max.bytes. + #max_message_bytes: 1000000 + + # The ACK reliability level required from broker. 0=no response, 1=wait for + # local commit, -1=wait for all replicas to commit. The default is 1. Note: + # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently + # on error. + #required_acks: 1 + + # The configurable ClientID used for logging, debugging, and auditing + # purposes. The default is "beats". + #client_id: beats + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. 
+ #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- Redis output ---------------------------------- +#output.redis: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping html symbols in strings. + #escape_html: true + + # The list of Redis servers to connect to. If load balancing is enabled, the + # events are distributed to the servers in the list. If one server becomes + # unreachable, the events are distributed to the reachable servers only. + #hosts: ["localhost:6379"] + + # The Redis port to use if hosts does not contain a port number. The default + # is 6379. + #port: 6379 + + # The name of the Redis list or channel the events are published to. The + # default is packetbeat. + #key: packetbeat + + # The password to authenticate with. The default is no authentication. + #password: + + # The Redis database number where the events are published. The default is 0. + #db: 0 + + # The Redis data type to use for publishing events. If the data type is list, + # the Redis RPUSH command is used. If the data type is channel, the Redis + # PUBLISH command is used. The default value is list. + #datatype: list + + # The number of workers to use for each host configured to publish events to + # Redis. 
Use this setting along with the loadbalance option. For example, if + # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each + # host). + #worker: 1 + + # If set to true and multiple hosts or workers are configured, the output + # plugin load balances published events onto all Redis hosts. If set to false, + # the output plugin sends all events to only one host (determined at random) + # and will switch to another host if the currently selected one becomes + # unreachable. The default value is true. + #loadbalance: true + + # The Redis connection timeout in seconds. The default is 5 seconds. + #timeout: 5s + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The number of seconds to wait before trying to reconnect to Redis + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Redis after a network error. The default is 60s. + #backoff.max: 60s + + # The maximum number of events to bulk in a single Redis request or pipeline. + # The default is 2048. + #bulk_max_size: 2048 + + # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The + # value must be a URL with a scheme of socks5://. + #proxy_url: + + # This option determines whether Redis hostnames are resolved locally when + # using a proxy. 
The default value is false, which means that name resolution + # occurs on the proxy server. + #proxy_use_local_resolver: false + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- File output ----------------------------------- +#output.file: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping html symbols in strings. + #escape_html: true + + # Path to the directory where to save the generated files. The option is + # mandatory. + #path: "/tmp/packetbeat" + + # Name of the generated files. 
The default is `packetbeat` and it generates + # files: `packetbeat`, `packetbeat.1`, `packetbeat.2`, etc. + #filename: packetbeat + + # Maximum size in kilobytes of each file. When this size is reached, and on + # every packetbeat restart, the files are rotated. The default value is 10240 + # kB. + #rotate_every_kb: 10000 + + # Maximum number of files under path. When this number of files is reached, + # the oldest file is deleted and the rest are shifted from last to first. The + # default is 7 files. + #number_of_files: 7 + + # Permissions to use for file creation. The default is 0600. + #permissions: 0600 + + +#----------------------------- Console output --------------------------------- +#output.console: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping html symbols in strings. + #escape_html: true + +#================================= Paths ====================================== + +# The home path for the packetbeat installation. This is the default base path +# for all other path settings and for miscellaneous files that come with the +# distribution (for example, the sample dashboards). +# If not set by a CLI flag or in the configuration file, the default for the +# home path is the location of the binary. +#path.home: + +# The configuration path for the packetbeat installation. This is the default +# base path for configuration files, including the main YAML configuration file +# and the Elasticsearch template file. If not set by a CLI flag or in the +# configuration file, the default for the configuration path is the home path. +#path.config: ${path.home} + +# The data path for the packetbeat installation. This is the default base path +# for all the files in which packetbeat needs to store its data. 
If not set by a +# CLI flag or in the configuration file, the default for the data path is a data +# subdirectory inside the home path. +#path.data: ${path.home}/data + +# The logs path for a packetbeat installation. This is the default location for +# the Beat's log files. If not set by a CLI flag or in the configuration file, +# the default for the logs path is a logs subdirectory inside the home path. +#path.logs: ${path.home}/logs + +#================================ Keystore ========================================== +# Location of the Keystore containing the keys and their sensitive values. +#keystore.path: "${path.config}/beats.keystore" + +#============================== Dashboards ===================================== +{{ elk_macros.setup_dashboards('packetbeat') }} + +#=============================== Template ====================================== +{{ elk_macros.setup_template('packetbeat', inventory_hostname, data_nodes, elasticsearch_number_of_replicas) }} + +#================================ Kibana ======================================= +{% if (groups['kibana'] | length) > 0 %} +{{ elk_macros.setup_kibana(hostvars[groups['kibana'][0]]['ansible_host'] ~ ':' ~ kibana_port) }} +{% endif %} + +#================================ Logging ====================================== +{{ elk_macros.beat_logging('packetbeat') }} + +#============================== Xpack Monitoring =============================== +{{ elk_macros.xpack_monitoring_elasticsearch(inventory_hostname, elasticsearch_data_hosts, ansible_processor_count) }} + +#================================ HTTP Endpoint ====================================== +# Each beat can expose internal metrics through a HTTP endpoint. For security +# reasons the endpoint is disabled by default. This feature is currently experimental. +# Stats can be access through http://localhost:5066/stats . For pretty JSON output +# append ?pretty to the URL. + +# Defines if the HTTP endpoint is enabled. 
+#http.enabled: false + +# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost. +#http.host: localhost + +# Port on which the HTTP endpoint will bind. Default is 5066. +#http.port: 5066 + +#============================= Process Security ================================ + +# Enable or disable seccomp system call filtering on Linux. Default is enabled. +#seccomp.enabled: true diff --git a/elk_metrics_7x/roles/elastic_packetbeat/vars/redhat.yml b/elk_metrics_7x/roles/elastic_packetbeat/vars/redhat.yml new file mode 100644 index 00000000..e949478a --- /dev/null +++ b/elk_metrics_7x/roles/elastic_packetbeat/vars/redhat.yml @@ -0,0 +1,18 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +packetbeat_distro_packages: + - tcpdump + - packetbeat diff --git a/elk_metrics_7x/roles/elastic_packetbeat/vars/suse.yml b/elk_metrics_7x/roles/elastic_packetbeat/vars/suse.yml new file mode 100644 index 00000000..e949478a --- /dev/null +++ b/elk_metrics_7x/roles/elastic_packetbeat/vars/suse.yml @@ -0,0 +1,18 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +packetbeat_distro_packages: + - tcpdump + - packetbeat diff --git a/elk_metrics_7x/roles/elastic_packetbeat/vars/ubuntu.yml b/elk_metrics_7x/roles/elastic_packetbeat/vars/ubuntu.yml new file mode 100644 index 00000000..e949478a --- /dev/null +++ b/elk_metrics_7x/roles/elastic_packetbeat/vars/ubuntu.yml @@ -0,0 +1,18 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +packetbeat_distro_packages: + - tcpdump + - packetbeat diff --git a/elk_metrics_7x/roles/elastic_repositories/defaults/main.yml b/elk_metrics_7x/roles/elastic_repositories/defaults/main.yml new file mode 100644 index 00000000..8448bf56 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_repositories/defaults/main.yml @@ -0,0 +1,20 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# List of PPA repositories used on ubuntu based systems +elastic_repo_ppas: [] + +# List of packages to install +elastic_repo_distro_packages: [] diff --git a/elk_metrics_7x/roles/elastic_repositories/meta/main.yml b/elk_metrics_7x/roles/elastic_repositories/meta/main.yml new file mode 100644 index 00000000..5df9b15c --- /dev/null +++ b/elk_metrics_7x/roles/elastic_repositories/meta/main.yml @@ -0,0 +1,33 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +galaxy_info: + author: OpenStack + description: Elastic v6.x repositories role + company: Rackspace + license: Apache2 + min_ansible_version: 2.5 + platforms: + - name: Ubuntu + versions: + - trusty + - xenial + - bionic + categories: + - cloud + - development + - elasticsearch + - elastic-stack +dependencies: [] diff --git a/elk_metrics_7x/roles/elastic_repositories/tasks/elastic_apt_repos.yml b/elk_metrics_7x/roles/elastic_repositories/tasks/elastic_apt_repos.yml new file mode 100644 index 00000000..b46376bb --- /dev/null +++ b/elk_metrics_7x/roles/elastic_repositories/tasks/elastic_apt_repos.yml @@ -0,0 +1,42 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: add Elastic search public GPG key + apt_key: + url: "{{ elastic_repo.key_url }}" + state: "present" + register: _apt_task + until: _apt_task is success + retries: 3 + delay: 2 + tags: + - package_install + +- name: add elk repo to apt sources list + apt_repository: + repo: "{{ elastic_repo.repo }}" + state: "{{ elastic_repo.state }}" + filename: "{{ elastic_repo.filename | default(omit) }}" + register: _apt_task + until: _apt_task is success + retries: 3 + delay: 2 + tags: + - package_install + +- name: Add PPA (legacy) + apt_repository: + repo: "{{ item }}" + with_items: "{{ elastic_repo_ppas }}" diff --git a/elk_metrics_7x/roles/elastic_repositories/tasks/elastic_yum_repos.yml b/elk_metrics_7x/roles/elastic_repositories/tasks/elastic_yum_repos.yml new file mode 100644 index 00000000..0a11e7e6 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_repositories/tasks/elastic_yum_repos.yml @@ -0,0 +1,58 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +- name: Add Elastic search public GPG key + rpm_key: + state: "{{ elastic_repo.state }}" + key: "{{ elastic_repo.key_url }}" + register: _zypp_task + until: _zypp_task is success + retries: 3 + delay: 2 + tags: + - package_install + +# Force refresh of a repository +- name: Add elk repo to yum sources list + yum_repository: + name: "elastic" + file: "elastic" + description: "Elastic repositories, you know, for ELK." 
+ baseurl: "{{ elastic_repo.repo }}" + state: "{{ elastic_repo.state }}" + enabled: yes + register: _yum_task + until: _yum_task is success + retries: 3 + delay: 2 + tags: + - package_install + +# Force refresh of a repository +- name: Add nginx repo to yum sources list + yum_repository: + name: "nginx" + file: "nginx" + description: "NGINX repo" + baseurl: "{{ elastic_nginx_repo.repo }}" + state: "{{ elastic_nginx_repo.state }}" + enabled: yes + gpgcheck: no + register: _yum_task + until: _yum_task is success + retries: 3 + delay: 2 + tags: + - package_install diff --git a/elk_metrics_7x/roles/elastic_repositories/tasks/elastic_zypper_repos.yml b/elk_metrics_7x/roles/elastic_repositories/tasks/elastic_zypper_repos.yml new file mode 100644 index 00000000..c2c85ff1 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_repositories/tasks/elastic_zypper_repos.yml @@ -0,0 +1,39 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: add Elastic search public GPG key + rpm_key: + state: "{{ elastic_repo.state }}" + key: "{{ elastic_repo.key_url }}" + register: _zypp_task + until: _zypp_task is success + retries: 3 + delay: 2 + tags: + - package_install + +# Force refresh of a repository +- name: add elk repo to zypper sources list + zypper_repository: + name: "elastic" + repo: "{{ elastic_repo.repo }}" + state: "{{ elastic_repo.state }}" + runrefresh: yes + register: _zypp_task + until: _zypp_task is success + retries: 3 + delay: 2 + tags: + - package_install diff --git a/elk_metrics_7x/roles/elastic_repositories/tasks/main.yml b/elk_metrics_7x/roles/elastic_repositories/tasks/main.yml new file mode 100644 index 00000000..b5eee5d5 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_repositories/tasks/main.yml @@ -0,0 +1,40 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+- name: Gather variables for each operating system
+  include_vars: "{{ item }}"
+  with_first_found:
+    - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower }}.yml"
+    - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower }}.yml"
+    - "{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version | lower }}.yml"
+    - "{{ ansible_distribution | lower }}.yml"
+    - "{{ ansible_os_family | lower }}-{{ ansible_distribution_version.split('.')[0] }}.yml"
+    - "{{ ansible_os_family | lower }}.yml"
+  tags:
+    - always
+
+- name: Install repo distro packages
+  package:
+    name: "{{ elastic_repo_distro_packages }}"
+    state: present
+    update_cache: "{{ (ansible_pkg_mgr == 'apt') | ternary('yes', omit) }}"
+  register: _apt_task
+  until: _apt_task is success
+  retries: 3
+  delay: 2
+  tags:
+    - package_install
+
+- include_tasks: "elastic_{{ ansible_pkg_mgr }}_repos.yml"
diff --git a/elk_metrics_7x/roles/elastic_repositories/vars/redhat.yml b/elk_metrics_7x/roles/elastic_repositories/vars/redhat.yml
new file mode 100644
index 00000000..888a7428
--- /dev/null
+++ b/elk_metrics_7x/roles/elastic_repositories/vars/redhat.yml
@@ -0,0 +1,24 @@
+---
+# Copyright 2018, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# elk apt repo
+elastic_repo:
+  repo: 'https://artifacts.elastic.co/packages/7.x/yum'
+  state: "{{ ((elk_package_state | default('present')) == 'absent') | ternary('absent', 'present') }}"
+  key_url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
+
+elastic_nginx_repo:
+  repo: 'http://nginx.org/packages/centos/$releasever/$basearch/'
+  state: "{{ ((elk_package_state | default('present')) == 'absent') | ternary('absent', 'present') }}"
diff --git a/elk_metrics_7x/roles/elastic_repositories/vars/suse.yml b/elk_metrics_7x/roles/elastic_repositories/vars/suse.yml
new file mode 100644
index 00000000..9280dca7
--- /dev/null
+++ b/elk_metrics_7x/roles/elastic_repositories/vars/suse.yml
@@ -0,0 +1,20 @@
+---
+# Copyright 2018, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# elk apt repo
+elastic_repo:
+  repo: 'https://artifacts.elastic.co/packages/7.x/yum'
+  state: "{{ ((elk_package_state | default('present')) == 'absent') | ternary('absent', 'present') }}"
+  key_url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
diff --git a/elk_metrics_7x/roles/elastic_repositories/vars/ubuntu-14.04.yml b/elk_metrics_7x/roles/elastic_repositories/vars/ubuntu-14.04.yml
new file mode 100644
index 00000000..a93425ae
--- /dev/null
+++ b/elk_metrics_7x/roles/elastic_repositories/vars/ubuntu-14.04.yml
@@ -0,0 +1,26 @@
+---
+# Copyright 2018, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+elastic_repo_distro_packages:
+  - apt-transport-https
+
+elastic_repo_ppas:
+  - "ppa:openjdk-r/ppa"
+
+# elk apt repo
+elastic_repo:
+  repo: 'deb https://artifacts.elastic.co/packages/7.x/apt stable main'
+  state: "{{ ((elk_package_state | default('present')) == 'absent') | ternary('absent', 'present') }}"
+  key_url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
diff --git a/elk_metrics_7x/roles/elastic_repositories/vars/ubuntu.yml b/elk_metrics_7x/roles/elastic_repositories/vars/ubuntu.yml
new file mode 100644
index 00000000..90019c53
--- /dev/null
+++ b/elk_metrics_7x/roles/elastic_repositories/vars/ubuntu.yml
@@ -0,0 +1,23 @@
+---
+# Copyright 2018, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+elastic_repo_distro_packages:
+  - apt-transport-https
+
+# elk apt repo
+elastic_repo:
+  repo: 'deb https://artifacts.elastic.co/packages/7.x/apt stable main'
+  state: "{{ ((elk_package_state | default('present')) == 'absent') | ternary('absent', 'present') }}"
+  key_url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
diff --git a/elk_metrics_7x/roles/elastic_retention/defaults/main.yml b/elk_metrics_7x/roles/elastic_retention/defaults/main.yml
new file mode 100644
index 00000000..8b8e1da0
--- /dev/null
+++ b/elk_metrics_7x/roles/elastic_retention/defaults/main.yml
@@ -0,0 +1,118 @@
+---
+# Copyright 2018, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+elastic_index_retention_algorithm: default
+
+### Elastic curator variables
+## If any of these retention policy options are undefined a dynamic fact will be
+## generated.
+## These options are all in days.
+# elastic_logstash_retention: 1
+# elastic_apm_retention: 1
+# elastic_auditbeat_retention: 1
+# elastic_filebeat_retention: 1
+# elastic_heartbeat_retention: 1
+# elastic_journalbeat_retention: 1
+# elastic_metricbeat_retention: 1
+# elastic_packetbeat_retention: 1
+# elastic_skydive_retention: 1
+
+## These options are all in megabytes.
+# elastic_logstash_size: 1024 +# elastic_apm_size: 1024 +# elastic_auditbeat_size: 1024 +# elastic_filebeat_size: 1024 +# elastic_heartbeat_size: 1024 +# elastic_journalbeat_size: 1024 +# elastic_metricbeat_size: 1024 +# elastic_packetbeat_size: 1024 +# elastic_skydive_size: 1024 + +## WHen a static retention policy option is not defined these options will be +## used for dynamic fact generation. +## +## Facts will be generated for the general retention using the total available +## storage from the ES data nodes, subtracting 25%. Using the weights, each +## index will be given a percentage of the total available storage. Indexes with +## higher weights are expected to use more storage. The list of hosts in a given +## index will be used to determine the number of days data can exist within an +## index before it's pruned. + +## Example: +# es cluster has 4TiB of storage +# filebeat is deployed to 100 hosts +# filebeat has a weight of 10 +# metricbeat is deployed to 125 hosts +# metricbeat has a weight of 2 +# +# es storage in MiB: 4194304 +# hosts and weighting total: (100 + 125) x (10 + 2) = 2700 +# filebeat pct: (100 x 10) / 2700 = 0.37 +# filebeat storage allowed: 0.37 * 4194304 = 1551892.48 MiB +# filebeat days allowed: 1551892.48 / (100 * 1024) = 15.1552 Days +# filebeat result: 15 days of retention or 1.5TiB of storage, whatever comes first +# metricbeat pct: (125 x 2) / 2700 = 0.09 +# metricbeat storage allowed: 0.09 * 4194304 = 377487.36 MiB +# metricbeat days allowed: 377487.36 / (125 * 1024) = 2.94912 Days +# metricbeat result: 2 days of retention or 38GiB of storage, whatever comes first + +elastic_beat_retention_policy_hosts: + logstash: + make_index: true + weight: 1 + hosts: "{{ groups['elastic-logstash'] | default([]) }}" + apm: + make_index: true + timeFieldName: '@timestamp' + weight: 1 + hosts: "{{ groups['apm-server'] | default([]) }}" + auditbeat: + timeFieldName: '@timestamp' + weight: 10 + hosts: "{{ groups['hosts'] | default([]) }}" + 
filebeat: + timeFieldName: '@timestamp' + weight: 10 + hosts: "{{ groups['hosts'] | default([]) }}" + syslog: + make_index: true + weight: 1 + hosts: "{{ groups['hosts'] | default([]) }}" + heartbeat: + timeFieldName: '@timestamp' + weight: 1 + hosts: "{{ groups['kibana'][:3] | default([]) }}" + journalbeat: + timeFieldName: '@timestamp' + weight: 3 + hosts: "{{ groups['hosts'] | default([]) }}" + metricbeat: + timeFieldName: '@timestamp' + weight: 2 + hosts: "{{ groups['all'] | default([]) }}" + packetbeat: + timeFieldName: '@timestamp' + weight: 1 + hosts: "{{ groups['hosts'] | default([]) }}" + monitorstack: + timeFieldName: '@timestamp' + weight: 1 + hosts: "{{ (groups['nova_compute'] | default([])) | union((groups['utility_all'] | default([]))) | union((groups['memcached_all'] | default([]))) }}" + skydive: + weight: 1 + hosts: "{{ (((groups['skydive_analyzers'] | default([])) | length) > 0) | ternary((groups['hosts'] | default([])), []) }}" + +# Refresh the elasticsearch retention policy local facts. +elastic_retention_refresh: false diff --git a/elk_metrics_7x/roles/elastic_retention/meta/main.yml b/elk_metrics_7x/roles/elastic_retention/meta/main.yml new file mode 100644 index 00000000..ab69fa29 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_retention/meta/main.yml @@ -0,0 +1,34 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+galaxy_info:
+  author: OpenStack
+  description: Elastic v7.x retention role
+  company: Rackspace
+  license: Apache2
+  min_ansible_version: 2.5
+  platforms:
+    - name: Ubuntu
+      versions:
+        - trusty
+        - xenial
+        - bionic
+  categories:
+    - cloud
+    - development
+    - elasticsearch
+    - elastic-stack
+dependencies:
+  - role: elastic_data_hosts
diff --git a/elk_metrics_7x/roles/elastic_retention/tasks/main.yml b/elk_metrics_7x/roles/elastic_retention/tasks/main.yml
new file mode 100644
index 00000000..e80e8db3
--- /dev/null
+++ b/elk_metrics_7x/roles/elastic_retention/tasks/main.yml
@@ -0,0 +1,104 @@
+---
+# Copyright 2018, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +- name: Ensure local facts directory exists + file: + dest: "/etc/ansible/facts.d" + state: directory + group: "root" + owner: "root" + mode: "0755" + recurse: no + +- name: Initialize local facts + ini_file: + dest: "/etc/ansible/facts.d/elastic.fact" + section: "retention" + option: cacheable + value: true + +- name: Refresh local facts + setup: + filter: ansible_local + gather_subset: "!all" + tags: + - always + +- name: Retention storage block + block: + - name: Query es storage + uri: + url: "http://{{ coordination_nodes[0] }}/_nodes/{{ (data_nodes | map('extract', hostvars, 'ansible_host') | list) | join(',') }}/stats/fs" + method: GET + register: elk_data + environment: + no_proxy: "{{ coordination_nodes[0].split(':')[0] }}" + until: + - elk_data is success and elk_data['json'] is defined + retries: 5 + delay: 30 + run_once: true + + - name: Set retention keys fact + set_fact: + es_storage_json: "{{ elk_data['json'] }}" + + - name: Load retention algo variables + include_vars: "calculate_index_retention_{{ elastic_index_retention_algorithm }}.yml" + tags: + - always + + - name: Set storage fact + ini_file: + dest: "/etc/ansible/facts.d/elastic.fact" + section: "retention" + option: "cluster_nodes" + value: "{{ groups['elastic-logstash'] | length }}" + + - name: Set retention policy keys fact + ini_file: + dest: "/etc/ansible/facts.d/elastic.fact" + section: "retention" + option: "elastic_beat_retention_policy_keys" + value: "{{ elastic_beat_retention_policy_hosts.keys() | list | sort }}" + + - name: Set size fact + ini_file: + dest: "/etc/ansible/facts.d/elastic.fact" + section: "retention" + option: "elastic_{{ item.key }}_size" + value: "{{ item.value }}" + with_dict: "{{ es_storage_per_index }}" + + - name: Set retention fact + ini_file: + dest: "/etc/ansible/facts.d/elastic.fact" + section: "retention" + option: "elastic_{{ item.key }}_retention" + value: "{{ item.value }}" + with_dict: "{{ es_days_per_index }}" + + - name: Refresh local facts + 
setup: + filter: ansible_local + gather_subset: "!all" + tags: + - always + when: + - (ansible_local['elastic']['retention']['cluster_nodes'] is undefined) or + ((groups['elastic-logstash'] | length) != (ansible_local['elastic']['retention']['cluster_nodes'] | int)) or + ((ansible_local['elastic']['retention']['elastic_beat_retention_policy_keys'] is defined) and + ((ansible_local['elastic']['retention']['elastic_beat_retention_policy_keys'] | from_yaml) != (elastic_beat_retention_policy_hosts.keys() | list | sort))) or + (elastic_retention_refresh | bool) diff --git a/elk_metrics_7x/roles/elastic_retention/vars/calculate_index_retention_default.yml b/elk_metrics_7x/roles/elastic_retention/vars/calculate_index_retention_default.yml new file mode 100644 index 00000000..886cf5df --- /dev/null +++ b/elk_metrics_7x/roles/elastic_retention/vars/calculate_index_retention_default.yml @@ -0,0 +1,58 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Set available storage fact. This tasks the total amount of storage found +# within the data nodes of the elasticsearch cluster and converts bytes to +# megabytes. +es_total_available_storage: "{{ ((es_storage_json['nodes'].values() | list) | map(attribute='fs.total.total_in_bytes') | list | sum) // 1024 // 1024 }}" + +# Set assumed buffer storage fact. This will result in 25% of the total +# available storage. 
+es_assumed_buffer_storage: "{{ ((es_total_available_storage | int) * 0.25) | round | int }}"
+
+# Set usable buffer storage fact(s). This is the total storage minus the buffer.
+es_usable_buffer_storage: "{{ (es_total_available_storage | int) - (es_assumed_buffer_storage | int) }}"
+
+# This function will take the sum total of all hosts in the retention policy
+# after weighting. Once the policy is set the sum total will be carved up into
+# individual percentages of the total amount of usable storage after the buffer
+# is calculated.
+es_storage_per_index: |-
+  {%- set es_hash = {} %}
+  {%- set total_weight = (elastic_beat_retention_policy_hosts.values() | list | map(attribute='weight') | list | sum) %}
+  {%- set host_count = (elastic_beat_retention_policy_hosts.values() | list | map(attribute='hosts') | list | map('flatten') | list | length) %}
+  {%- set total_values = (total_weight | int) * (host_count | int) %}
+  {%- for key, value in elastic_beat_retention_policy_hosts.items() %}
+  {%- set value_pct = (((value.weight | int) * (value.hosts | length)) / (total_values | int)) %}
+  {%- set value_total = ((value_pct | float) * (es_usable_buffer_storage | int)) %}
+  {%- set _ = es_hash.__setitem__(key, value_total | int) %}
+  {%- endfor %}
+  {{ es_hash }}
+
+# The assumed number of days an index will be retained is based on the size of
+# the given index. With the sizes all figured out in the function above this
+# function will divide each retention size by a constant of 1024 and the number
+# of hosts within a given collector segment.
+es_days_per_index: |- + {%- set es_hash = {} %} + {%- for key, value in elastic_beat_retention_policy_hosts.items() %} + {%- if (es_storage_per_index[key] | int) > 0 %} + {%- set value_days = ((es_storage_per_index[key] | int) // ((value.hosts | length) * 1024)) %} + {%- set _ = es_hash.__setitem__(key, ((value_days | int) > 0) | ternary(value_days, 1) ) %} + {%- else %} + {%- set _ = es_hash.__setitem__(key, 1) %} + {%- endif %} + {%- endfor %} + {{ es_hash }} diff --git a/elk_metrics_7x/roles/elastic_rollup/defaults/main.yml b/elk_metrics_7x/roles/elastic_rollup/defaults/main.yml new file mode 100644 index 00000000..f72162fd --- /dev/null +++ b/elk_metrics_7x/roles/elastic_rollup/defaults/main.yml @@ -0,0 +1,16 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +elastic_allow_rollup_purge: false diff --git a/elk_metrics_7x/roles/elastic_rollup/meta/main.yml b/elk_metrics_7x/roles/elastic_rollup/meta/main.yml new file mode 100644 index 00000000..1c53bb47 --- /dev/null +++ b/elk_metrics_7x/roles/elastic_rollup/meta/main.yml @@ -0,0 +1,34 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+galaxy_info:
+  author: OpenStack
+  description: Elastic v7.x rollup role
+  company: Rackspace
+  license: Apache2
+  min_ansible_version: 2.5
+  platforms:
+    - name: Ubuntu
+      versions:
+        - trusty
+        - xenial
+        - bionic
+  categories:
+    - cloud
+    - development
+    - elasticsearch
+    - elastic-stack
+dependencies:
+  - role: elastic_retention
diff --git a/elk_metrics_7x/roles/elastic_rollup/tasks/main.yml b/elk_metrics_7x/roles/elastic_rollup/tasks/main.yml
new file mode 100644
index 00000000..96caefdd
--- /dev/null
+++ b/elk_metrics_7x/roles/elastic_rollup/tasks/main.yml
@@ -0,0 +1,106 @@
+---
+# Copyright 2018, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +- name: Get rollup job + uri: + url: "http://{{ coordination_nodes[0] }}/_xpack/rollup/job/rollup_{{ index_name }}" + method: GET + register: check_rollup + environment: + no_proxy: "{{ coordination_nodes[0].split(':')[0] }}" + until: check_rollup is success + retries: 3 + delay: 5 + run_once: true + +- name: Check for existing rollup job + debug: + msg: >- + An existing rollup job was found for {{ index_name }}. In order to + re-create this rollup job the old job will need to be purged. If you're + OK with the old rollup job being purged, add the following option + `elastic_allow_rollup_purge=yes` to the command line and rerun the + playbook. + run_once: true + when: + - check_rollup['json']['jobs'] | length > 0 + - not elastic_allow_rollup_purge | bool + +- name: Create rollup block + block: + - name: Set min retention days fact + set_fact: + min_days_until_rollup: |- + {% set index_retention = [] %} + {% for item in ansible_play_hosts %} + {% set _ = index_retention.append(ansible_local['elastic']['retention']['elastic_' + index_name + '_retention'] | int) %} + {% endfor %} + {{ index_retention | min }} + run_once: true + + - name: Set retention days fact + set_fact: + days_until_rollup: "{{ ((min_days_until_rollup | int) > 1) | ternary(((min_days_until_rollup | int) - 1), min_days_until_rollup) }}" + run_once: true + + - name: Create rollup job + uri: + url: "{{ item.url }}" + method: "{{ item.method }}" + body: "{{ item.index_options | to_json }}" + status_code: "{{ item.status_code }}" + body_format: json + register: elk_indexes + environment: + no_proxy: "{{ coordination_nodes[0].split(':')[0] }}" + until: elk_indexes is success + retries: 5 + delay: 5 + when: + - (days_until_rollup | int) > 0 + with_items: + - url: "http://{{ coordination_nodes[0] }}/_xpack/rollup/job/rollup_{{ index_name }}/_stop" + method: POST + status_code: 200,404 + index_options: {} + - url: "http://{{ coordination_nodes[0] }}/_xpack/rollup/job/rollup_{{ index_name }}" + method: 
DELETE + status_code: 200,404 + index_options: {} + - url: "http://{{ coordination_nodes[0] }}/rollup_{{ index_name }}" + method: DELETE + status_code: 200,404 + index_options: {} + - url: "http://{{ coordination_nodes[0] }}/_xpack/rollup/job/rollup_{{ index_name }}" + method: PUT + status_code: 200,400 + index_options: + index_pattern: "{{ index_name }}-*" + rollup_index: "rollup_{{ index_name }}" + cron: "*/30 * * * * ?" + page_size: 1000 + groups: + date_histogram: + field: "@timestamp" + interval: "1h" + delay: "{{ days_until_rollup }}d" + - url: "http://{{ coordination_nodes[0] }}/_xpack/rollup/job/rollup_{{ index_name }}/_start" + method: POST + status_code: 200,404 + index_options: {} + run_once: true + when: + - check_rollup['json']['jobs'] | length < 1 or + elastic_allow_rollup_purge | bool diff --git a/elk_metrics_7x/roles/elasticsearch/defaults/main.yml b/elk_metrics_7x/roles/elasticsearch/defaults/main.yml new file mode 100644 index 00000000..d65bc778 --- /dev/null +++ b/elk_metrics_7x/roles/elasticsearch/defaults/main.yml @@ -0,0 +1,33 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cluster_name: openstack_elk +elastic_log_rotate_path: "/var/log/elasticsearch" + +temp_dir: /var/lib/elasticsearch/tmp +nfs_query: "[?fstype=='nfs' || fstype=='nfs4']" + +# Enable or Disable memory locking. +elastic_memory_lock: true + +# Elasticsearch plugin list. 
These plugins will be re-installed whenever the +# playbooks are executed, which ensures the plugins are always upgraded. +elastic_plugins: + - ingest-attachment + +# Set the logstash search heap size. If this option is undefined the value will +# be derived automatically using 1/4 of the available RAM for logstash and 1/2 +# of the available RAM for elasticsearch. The value is expected to be in MiB. +# elastic_heap_size: 10240 # type `int` diff --git a/elk_metrics_7x/roles/elasticsearch/handlers/main.yml b/elk_metrics_7x/roles/elasticsearch/handlers/main.yml new file mode 100644 index 00000000..3dacb437 --- /dev/null +++ b/elk_metrics_7x/roles/elasticsearch/handlers/main.yml @@ -0,0 +1,33 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Enable and restart elasticsearch (systemd) + systemd: + name: "elasticsearch" + enabled: true + state: restarted + daemon_reload: true + when: + - ansible_service_mgr == 'systemd' + listen: Enable and restart elastic + +- name: Enable and restart elasticsearch (upstart) + service: + name: "elasticsearch" + state: restarted + enabled: yes + when: + - ansible_service_mgr == 'upstart' + listen: Enable and restart elastic diff --git a/elk_metrics_7x/roles/elasticsearch/meta/main.yml b/elk_metrics_7x/roles/elasticsearch/meta/main.yml new file mode 100644 index 00000000..496db7f1 --- /dev/null +++ b/elk_metrics_7x/roles/elasticsearch/meta/main.yml @@ -0,0 +1,39 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+galaxy_info:
+  author: OpenStack
+  description: Elastic v7.x elasticsearch role
+  company: Rackspace
+  license: Apache2
+  min_ansible_version: 2.5
+  platforms:
+    - name: Ubuntu
+      versions:
+        - trusty
+        - xenial
+        - bionic
+  categories:
+    - cloud
+    - development
+    - elasticsearch
+    - elastic-stack
+dependencies:
+  - role: elastic_data_hosts
+  - role: elastic_repositories
+  - role: elastic_dependencies
+    service_name: elasticsearch
+    service_owner: elasticsearch
+    service_group: elasticsearch
diff --git a/elk_metrics_7x/roles/elasticsearch/tasks/elasticsearch_nfs_setup.yml b/elk_metrics_7x/roles/elasticsearch/tasks/elasticsearch_nfs_setup.yml
new file mode 100644
index 00000000..2234f482
--- /dev/null
+++ b/elk_metrics_7x/roles/elasticsearch/tasks/elasticsearch_nfs_setup.yml
@@ -0,0 +1,29 @@
+---
+# Copyright 2018, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +- name: Ensure nfs client is available if necessary + package: + name: nfs-common + state: present + +- name: Ensure backup filesystems are mounted + mount: + fstype: "{{ item.fstype }}" + src: "{{ item.src }}" + opts: "{{ item.opts }}" + path: "{{ item.path }}" + state: "{{ item.state }}" + with_items: + - "{{ elastic_shared_fs_repos }}" diff --git a/elk_metrics_7x/roles/elasticsearch/tasks/elasticsearch_plugins.yml b/elk_metrics_7x/roles/elasticsearch/tasks/elasticsearch_plugins.yml new file mode 100644 index 00000000..5d4bd2df --- /dev/null +++ b/elk_metrics_7x/roles/elasticsearch/tasks/elasticsearch_plugins.yml @@ -0,0 +1,26 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +- name: Remove plugins + command: "/usr/share/elasticsearch/bin/elasticsearch-plugin remove --verbose {{ item }}" + failed_when: false + changed_when: + - remove_plugin.rc == 0 + register: remove_plugin + with_items: "{{ elastic_plugins }}" + +- name: Install plugins + command: "/usr/share/elasticsearch/bin/elasticsearch-plugin install --batch --verbose {{ item }}" + with_items: "{{ elastic_plugins }}" diff --git a/elk_metrics_7x/roles/elasticsearch/tasks/main.yml b/elk_metrics_7x/roles/elasticsearch/tasks/main.yml new file mode 100644 index 00000000..0c08ede8 --- /dev/null +++ b/elk_metrics_7x/roles/elasticsearch/tasks/main.yml @@ -0,0 +1,140 @@ +--- +# Copyright 2018, Rackspace US, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +- name: Gather variables for each operating system + include_vars: "{{ item }}" + with_first_found: + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower }}.yml" + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower }}.yml" + - "{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version | lower }}.yml" + - "{{ ansible_distribution | lower }}.yml" + - "{{ ansible_os_family | lower }}-{{ ansible_distribution_version.split('.')[0] }}.yml" + - "{{ ansible_os_family | lower }}.yml" + tags: + - always + +- name: Set elasticsearch variables + include_vars: "vars_{{ ((inventory_hostname in (groups['kibana'] | default([])) and not inventory_hostname in (groups['elastic-logstash']) | default([]))) | ternary('kibana', 'default') }}.yml" + tags: + - always + +- name: Ensure elasticsearch is installed + package: + name: "{{ elasticsearch_distro_packages }}" + state: "{{ elk_package_state | default('present') }}" + update_cache: "{{ (ansible_pkg_mgr == 'apt') | ternary('yes', omit) }}" + register: _package_task + until: _package_task is success + retries: 3 + delay: 2 + notify: + - Enable and restart elastic + tags: + - package_install + +- name: Create elasticsearch systemd service config dir + file: + path: "/etc/systemd/system/elasticsearch.service.d" + state: "directory" + group: "root" + owner: "root" + mode: "0755" + when: + - ansible_service_mgr == 'systemd' 
+ +- name: Apply systemd options + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + when: + - ansible_service_mgr == 'systemd' + with_items: + - src: "systemd.elasticsearch-overrides.conf.j2" + dest: "/etc/systemd/system/elasticsearch.service.d/elasticsearch-overrides.conf" + notify: + - Enable and restart elastic + tags: + - config + +- name: Set sysconfig service defaults + lineinfile: + path: "{{ elasticsearch_sysconfig_path }}" + regexp: '^{{ item.key }}=' + line: '{{ item.key }}={{ item.value }}' + with_items: + - key: MAX_OPEN_FILES + value: 65536 + - key: MAX_LOCKED_MEMORY + value: unlimited + - key: MAX_MAP_COUNT + value: 524288 + +- name: Set service specific heap size + set_fact: + _service_heap_size: "{{ elastic_heap_size }}" + when: + - elastic_heap_size is defined + +- name: Drop jvm conf file(s) + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + with_items: + - src: "jvm.options.j2" + dest: "/etc/elasticsearch/jvm.options" + notify: + - Enable and restart elastic + +- name: Drop elasticsearch conf file + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + with_items: + - src: "elasticsearch.yml.j2" + dest: "/etc/elasticsearch/elasticsearch.yml" + - src: "es-log4j2.properties.j2" + dest: "/etc/elasticsearch/log4j2.properties" + notify: + - Enable and restart elastic + tags: + - config + +- name: Ensure elasticsearch ownership + file: + path: "/var/lib/elasticsearch/" + owner: elasticsearch + group: elasticsearch + recurse: true + register: e_perms + until: e_perms is success + retries: 3 + delay: 1 + tags: + - config + +- name: Ensure elasticsearch tmp dir + file: + path: "/var/lib/elasticsearch/tmp" + state: directory + owner: "elasticsearch" + group: "elasticsearch" + mode: "0750" + +- include_tasks: "elasticsearch_nfs_setup.yml" + when: + - elastic_shared_fs_repos is defined + - (elastic_shared_fs_repos | json_query(nfs_query)) | length > 0 + +- include_tasks: "elasticsearch_plugins.yml" diff --git 
a/elk_metrics_7x/roles/elasticsearch/templates/elasticsearch.yml.j2 b/elk_metrics_7x/roles/elasticsearch/templates/elasticsearch.yml.j2 new file mode 100644 index 00000000..3abdee83 --- /dev/null +++ b/elk_metrics_7x/roles/elasticsearch/templates/elasticsearch.yml.j2 @@ -0,0 +1,146 @@ +# ---------------------------------- Cluster ----------------------------------- +cluster.name: {{ cluster_name }} +# ------------------------------------ Node ------------------------------------ +node.name: {{ ansible_nodename }} +# node.rack: r1 +# Set to true to enable machine learning on the node. +node.ml: false +# ----------------------------------- Paths ------------------------------------ +# Path to directory where to store the data (separate multiple locations by comma): +# +# path.data: /path/to/data +path.data: /var/lib/elasticsearch +# +# Path to log files: +# +# +# Path to log files: +# +# path.logs: /path/to/logs +#path.logs: /var/lib/elasticsearch/logs/ +path.logs: /var/log/elasticsearch/ +# +# Path to shared filesystem repos +# +# path.repo: ["/mount/backups", "/mount/longterm_backups"] +# + +{% if elastic_shared_fs_repos is defined and elastic_shared_fs_repos|length > 0 %} +path.repo: {{ elastic_shared_fs_repos | json_query("[*].path") | to_json }} +{% endif %} + +# Set the global default index store. More information on these settings can be +# found here: +# +index.store.type: niofs + +# ----------------------------------- Memory ----------------------------------- +# +# Lock the memory on startup: +# +bootstrap.memory_lock: {{ elastic_memory_lock }} +# +# Make sure that the `ES_HEAP_SIZE` environment variable is set to about half the memory +# available on the system and that the owner of the process is allowed to use this limit. +# +# Elasticsearch performs poorly when the system is swapping the memory. 
+# +# ---------------------------------- Network ----------------------------------- +# +# Set the bind address to a specific IP (IPv4 or IPv6): +network.host: ["127.0.0.1", "{{ ansible_host }}", "{{ ansible_hostname }}"] +{% if elasticsearch_publish_host is defined %} +network.publish_host: "{{ elasticsearch_publish_host }}" +{% endif %} +# Set a custom port for HTTP: + +http.port: {{ elastic_port }} +# --------------------------------- Discovery ---------------------------------- +# +# Pass an initial list of hosts to perform discovery when new node is started: +# The default list of hosts is ["127.0.0.1", "[::1]"] +# +# Node definitions can be seen here: +# +discovery.zen.ping.unicast.hosts: {{ zen_nodes | to_json }} +# Prevent the "split brain" by configuring the majority of nodes (total number of nodes / 2 + 1): +discovery.zen.minimum_master_nodes: {{ elasticsearch_master_node_count | default(((master_node_count | int) // 2) + 1) }} +# The first set of nodes in the master_node_count are marked as such +node.master: {{ elasticsearch_node_master | default(master_node) }} +# Every node in the master list and every other node after will be a data node +node.data: {{ elasticsearch_node_data | default(data_node) }} +# Ingest nodes can execute pre-processing pipelines. To override automatic +# determination, the option `elasticsearch_node_ingest` can be defined as a +# Boolean which will enable or disable ingest nodes. When using automatic +# determination, ingest nodes will follow data nodes. +# +# NOTE(cloudnull): The use of "search remote connect" will follow the enablement +# of an ingest nodes. 
+{% if elasticsearch_node_ingest is defined %} +node.ingest: {{ elasticsearch_node_ingest }} +search.remote.connect: {{ elasticsearch_node_ingest }} +{% else %} +node.ingest: {{ data_node }} +search.remote.connect: {{ data_node }} +{% endif %} + +# For more information, see the documentation at: +# +# +# ---------------------------------- Gateway ----------------------------------- +# +# Block initial recovery after a full cluster restart until N nodes are started: +# +gateway.recover_after_nodes: {{ elasticsearch_master_node_count | default(((master_node_count | int) // 2) + 1) }} +# +# For more information, see the documentation at: +# +# +# ---------------------------------- Various ----------------------------------- +# +# Disable starting multiple nodes on a single system: +# +# node.max_local_storage_nodes: 1 +# +# Require explicit names when deleting indices: +# +action.destructive_requires_name: true + +{% set processors = ((elastic_thread_pool_size | int) > 0) | ternary(elastic_thread_pool_size, 1) %} +{% if not (elastic_coordination_node | default(false)) | bool %} +# Thread pool settings. For more on this see the documentation at: +# +thread_pool: + index: + queue_size: {{ (processors | int) * 256 }} + get: + queue_size: {{ (processors | int) * 256 }} + write: + queue_size: {{ (processors | int) * 512 }} +{% else %} +# The number of processors is automatically detected, and the thread pool +# settings are automatically set based on it. In some cases it can be useful to +# override the number of detected processors. This can be done by explicitly +# setting the processors setting. On Kibana hosts where elasticsearch is running +# as a coordination node, the processor count is limited. +processors: {{ processors }} +{% endif %} + + +# Accepts either a percentage or a byte size value. Set to 20%, meaning that 20% +# of the total heap allocated to a node will be used as the indexing buffer size +# shared across all shards. 
+indices.memory.index_buffer_size: 20% + +# Connection throttling on recovery is limited to 20% of the detected interface +# speed with a cap of 750mb. This will improve search speeds and reduce general +# cluster pressure. +indices.recovery.max_bytes_per_sec: {{ elasticserch_interface_speed }}mb + +# ---------------------------------- X-Pack ------------------------------------ +# X-Pack Monitoring +# https://www.elastic.co/guide/en/elasticsearch/reference/6.3/monitoring-settings.html +xpack.monitoring.collection.enabled: true +xpack.monitoring.collection.interval: 30s +# Set to true to enable machine learning on the node. +xpack.ml.enabled: false diff --git a/elk_metrics_7x/roles/elasticsearch/templates/es-log4j2.properties.j2 b/elk_metrics_7x/roles/elasticsearch/templates/es-log4j2.properties.j2 new file mode 100644 index 00000000..8a33fe3b --- /dev/null +++ b/elk_metrics_7x/roles/elasticsearch/templates/es-log4j2.properties.j2 @@ -0,0 +1,85 @@ +status = error + +# log action execution errors for easier debugging +logger.action.name = org.elasticsearch.action +logger.action.level = debug + +appender.console.type = Console +appender.console.name = console +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n + +appender.rolling.type = RollingFile +appender.rolling.name = rolling +appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log +appender.rolling.layout.type = PatternLayout +appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n +appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz +appender.rolling.policies.type = Policies +appender.rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.rolling.policies.time.interval = 1 +appender.rolling.policies.time.modulate = true +appender.rolling.policies.size.type = 
SizeBasedTriggeringPolicy +appender.rolling.policies.size.size = 128MB +appender.rolling.strategy.type = DefaultRolloverStrategy +appender.rolling.strategy.fileIndex = nomax +appender.rolling.strategy.action.type = Delete +appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path} +appender.rolling.strategy.action.condition.type = IfFileName +appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* +appender.rolling.strategy.action.condition.nested_condition.type = IfLastModified +appender.rolling.strategy.action.condition.nested_condition.age = {{ elastic_logstash_retention | default(1) }}D + + +rootLogger.level = info +rootLogger.appenderRef.console.ref = console +rootLogger.appenderRef.rolling.ref = rolling + +appender.deprecation_rolling.type = RollingFile +appender.deprecation_rolling.name = deprecation_rolling +appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log +appender.deprecation_rolling.layout.type = PatternLayout +appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n +appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.log.gz +appender.deprecation_rolling.policies.type = Policies +appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.deprecation_rolling.policies.size.size = 128MB +appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy +appender.deprecation_rolling.strategy.max = 2 + +logger.deprecation.name = org.elasticsearch.deprecation +logger.deprecation.level = warn +logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling +logger.deprecation.additivity = false + +appender.index_search_slowlog_rolling.type = RollingFile +appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling +appender.index_search_slowlog_rolling.fileName = 
${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog.log +appender.index_search_slowlog_rolling.layout.type = PatternLayout +appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n +appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog-%d{yyyy-MM-dd}.log +appender.index_search_slowlog_rolling.policies.type = Policies +appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.index_search_slowlog_rolling.policies.time.interval = 1 +appender.index_search_slowlog_rolling.policies.time.modulate = true + +logger.index_search_slowlog_rolling.name = index.search.slowlog +logger.index_search_slowlog_rolling.level = trace +logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling +logger.index_search_slowlog_rolling.additivity = false + +appender.index_indexing_slowlog_rolling.type = RollingFile +appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling +appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog.log +appender.index_indexing_slowlog_rolling.layout.type = PatternLayout +appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n +appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%d{yyyy-MM-dd}.log +appender.index_indexing_slowlog_rolling.policies.type = Policies +appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.index_indexing_slowlog_rolling.policies.time.interval = 1 +appender.index_indexing_slowlog_rolling.policies.time.modulate = true + +logger.index_indexing_slowlog.name = 
index.indexing.slowlog.index +logger.index_indexing_slowlog.level = trace +logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling +logger.index_indexing_slowlog.additivity = false diff --git a/elk_metrics_7x/roles/elasticsearch/templates/systemd.elasticsearch-overrides.conf.j2 b/elk_metrics_7x/roles/elasticsearch/templates/systemd.elasticsearch-overrides.conf.j2 new file mode 100644 index 00000000..64dcf40a --- /dev/null +++ b/elk_metrics_7x/roles/elasticsearch/templates/systemd.elasticsearch-overrides.conf.j2 @@ -0,0 +1,14 @@ +{% include 'templates/systemd.general-overrides.conf.j2' %} + +# This first line clears the source ExecStart +ExecStart= +# This runs our ExecStart as an override. +ExecStart=/usr/share/elasticsearch/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid + +{% if elastic_memory_lock | bool %} +# Limit memory usage +LimitMEMLOCK=infinity +{% endif %} + +# Number of File Descriptors +LimitNOFILE=65536 diff --git a/elk_metrics_7x/roles/elasticsearch/templates/systemd.general-overrides.conf.j2 b/elk_metrics_7x/roles/elasticsearch/templates/systemd.general-overrides.conf.j2 new file mode 120000 index 00000000..9ddff7cc --- /dev/null +++ b/elk_metrics_7x/roles/elasticsearch/templates/systemd.general-overrides.conf.j2 @@ -0,0 +1 @@ +../../../templates/systemd.general-overrides.conf.j2 \ No newline at end of file diff --git a/elk_metrics_7x/roles/elasticsearch/vars/redhat.yml b/elk_metrics_7x/roles/elasticsearch/vars/redhat.yml new file mode 100644 index 00000000..d80edade --- /dev/null +++ b/elk_metrics_7x/roles/elasticsearch/vars/redhat.yml @@ -0,0 +1,20 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +elasticsearch_distro_packages: + - logrotate + - elasticsearch + +elasticsearch_sysconfig_path: /etc/sysconfig/elasticsearch diff --git a/elk_metrics_7x/roles/elasticsearch/vars/suse.yml b/elk_metrics_7x/roles/elasticsearch/vars/suse.yml new file mode 100644 index 00000000..d80edade --- /dev/null +++ b/elk_metrics_7x/roles/elasticsearch/vars/suse.yml @@ -0,0 +1,20 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +elasticsearch_distro_packages: + - logrotate + - elasticsearch + +elasticsearch_sysconfig_path: /etc/sysconfig/elasticsearch diff --git a/elk_metrics_7x/roles/elasticsearch/vars/ubuntu.yml b/elk_metrics_7x/roles/elasticsearch/vars/ubuntu.yml new file mode 100644 index 00000000..0fc5886a --- /dev/null +++ b/elk_metrics_7x/roles/elasticsearch/vars/ubuntu.yml @@ -0,0 +1,20 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +elasticsearch_distro_packages: + - logrotate + - elasticsearch + +elasticsearch_sysconfig_path: /etc/default/elasticsearch diff --git a/elk_metrics_7x/roles/elasticsearch/vars/vars_default.yml b/elk_metrics_7x/roles/elasticsearch/vars/vars_default.yml new file mode 100644 index 00000000..727a28d3 --- /dev/null +++ b/elk_metrics_7x/roles/elasticsearch/vars/vars_default.yml @@ -0,0 +1,16 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +place_holder: true diff --git a/elk_metrics_7x/roles/elasticsearch/vars/vars_kibana.yml b/elk_metrics_7x/roles/elasticsearch/vars/vars_kibana.yml new file mode 100644 index 00000000..f76d67cc --- /dev/null +++ b/elk_metrics_7x/roles/elasticsearch/vars/vars_kibana.yml @@ -0,0 +1,26 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Set kibana as elasticsearch coordinators +elasticsearch_node_master: false +elasticsearch_node_data: false +elasticsearch_node_ingest: false +elastic_coordination_node: true + +# This variable is redefined because kibana runs elasticsearch but only in a +# load balancer capacity. +elastic_processors_half: "{{ ((ansible_processor_count | int) // 2) }}" +elastic_processors_half_set: "{{ ((elastic_processors_half | int) > 0) | ternary(elastic_processors_half, 1) }}" +elastic_thread_pool_size: "{{ ((elastic_processors_half_set | int) > 4) | ternary(4, elastic_processors_half_set) }}" diff --git a/elk_metrics_7x/setupKibanaDashboard.yml b/elk_metrics_7x/setupKibanaDashboard.yml new file mode 100644 index 00000000..a9b86dfc --- /dev/null +++ b/elk_metrics_7x/setupKibanaDashboard.yml @@ -0,0 +1,86 @@ +--- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Install Kibana Dashboards + hosts: "elastic-logstash" + become: true + vars_files: + - vars/variables.yml + + environment: "{{ deployment_environment_variables | default({}) }}" + + vars: + distro_packages: + zypper: + - nodejs6 + apt: + - nodejs + + tasks: + - name: Add nodejs 6.x source + shell: "curl -sL https://deb.nodesource.com/setup_6.x | sudo -E bash -" + when: + - ansible_pkg_mgr == 'apt' + - ossec_nodejs_repo is not defined + + - name: Manual nodejs installation + block: + - name: Add nodejs apt key + apt_key: + url: "{{ ossec_nodejs_repo.key_url }}" + state: "present" + register: _apt_task + until: _apt_task is success + retries: 3 + delay: 2 + + - name: add nodejs repo to apt sources list + apt_repository: + repo: "{{ ossec_nodejs_repo.repo }}" + state: "{{ ossec_nodejs_repo.state }}" + filename: "{{ ossec_nodejs_repo.filename | default(omit) }}" + register: _apt_task + until: _apt_task is success + retries: 3 + delay: 2 + when: + - ansible_pkg_mgr == 'apt' + - ossec_nodejs_repo is defined + + - name: Install nodejs + package: + name: "{{ distro_packages[ansible_pkg_mgr] }}" + state: "{{ elk_package_state | default('present') }}" + update_cache: "{{ (ansible_pkg_mgr == 'apt') | ternary('yes', omit) }}" + register: _pkg_task + until: _pkg_task is success + retries: 3 + delay: 2 + + - name: Install elasticdump + npm: + name: elasticdump + path: /opt/elasticdump + state: "present" + + - name: Copy openstack dashboard json template to /tmp dir + template: + src: templates/openstack-log-dashboard.json + dest: /tmp/openstack-log-dashboard.json + run_once: yes + + - name: Upload Custom Openstack Log Dashboard + shell: "/opt/elasticdump/node_modules/elasticdump/bin/elasticdump --input=/tmp/openstack-log-dashboard.json --output={{ kibana_index_on_elasticsearch }} --type=data" + environment: + no_proxy: "{{ hostvars[groups['elastic-logstash'][0]]['ansible_host'] }}" + run_once: yes diff --git a/elk_metrics_7x/showElasticCluster.yml 
b/elk_metrics_7x/showElasticCluster.yml new file mode 100644 index 00000000..5bf38ca6 --- /dev/null +++ b/elk_metrics_7x/showElasticCluster.yml @@ -0,0 +1,64 @@ +--- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +- name: Dump cluster node info + hosts: elastic-logstash:kibana + become: true + + vars_files: + - vars/variables.yml + + environment: "{{ deployment_environment_variables | default({}) }}" + + roles: + - role: elastic_data_hosts + + # display overall cluster setup + post_tasks: + - block: + - name: "Report number of storage nodes" + debug: + msg: "Storage node count: {{ storage_node_count }}" + + - name: "Report number of master nodes" + debug: + msg: "Master node count: {{ master_node_count }}" + + - name: "Report data nodes" + debug: + msg: "Data nodes: {{ data_nodes }}" + + - name: "Report coordination nodes" + debug: + msg: "Coordination nodes: {{ coordination_nodes }}" + + - name: "Report Zen nodes" + debug: + msg: "Zen nodes: {{ zen_nodes }}" + + delegate_to: "{{ groups['elastic-logstash'][0] }}" + run_once: true + + # show per node cluster setup + + - name: "Dump elasticsearch node roles" + debug: + msg: "Elastic Node config: master: {{ master_node }} data: {{ data_node }}" + + - name: "Dump per-host elasticsearch data hosts" + debug: + msg: "Elastic data hosts: {{ elasticsearch_data_hosts }}" + + - name: "Dump per-host logstash hosts" + debug: + msg: "Logstash_data_hosts: {{ logstash_data_hosts }}" diff --git a/elk_metrics_7x/site-beats-core.yml 
b/elk_metrics_7x/site-beats-core.yml new file mode 100644 index 00000000..e91f2294 --- /dev/null +++ b/elk_metrics_7x/site-beats-core.yml @@ -0,0 +1,24 @@ +--- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +- import_playbook: installMetricbeat.yml + +- import_playbook: installPacketbeat.yml + +- import_playbook: installAuditbeat.yml + +- import_playbook: installHeartbeat.yml + +- import_playbook: installFilebeat.yml + +- import_playbook: installJournalbeat.yml diff --git a/elk_metrics_7x/site-elka.yml b/elk_metrics_7x/site-elka.yml new file mode 100644 index 00000000..bc02fce9 --- /dev/null +++ b/elk_metrics_7x/site-elka.yml @@ -0,0 +1,18 @@ +--- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- import_playbook: installElastic.yml +- import_playbook: installLogstash.yml +- import_playbook: installCurator.yml +- import_playbook: installKibana.yml +- import_playbook: installAPMserver.yml diff --git a/elk_metrics_7x/site.yml b/elk_metrics_7x/site.yml new file mode 100644 index 00000000..77ea0fe0 --- /dev/null +++ b/elk_metrics_7x/site.yml @@ -0,0 +1,17 @@ +--- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +- import_playbook: site-elka.yml +- import_playbook: site-beats-core.yml +# - import_playbook: installMonitorStack.yml +- import_playbook: createElasticIndexes.yml diff --git a/elk_metrics_7x/templates/_macros.j2 b/elk_metrics_7x/templates/_macros.j2 new file mode 100644 index 00000000..8e280588 --- /dev/null +++ b/elk_metrics_7x/templates/_macros.j2 @@ -0,0 +1,484 @@ +{% macro output_elasticsearch(host, data_hosts) -%} +#-------------------------- Elasticsearch output ------------------------------- +output.elasticsearch: + # Boolean flag to enable or disable the output module. + enabled: true + + # Array of hosts to connect to. + # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + hosts: {{ (data_hosts | default([])) | to_json }} + + # Set gzip compression level. + compression_level: 3 + + # Optional protocol and basic auth credentials. 
+ #protocol: "https" + #username: "elastic" + #password: "changeme" + + # Dictionary of HTTP parameters to pass within the url with index operations. + #parameters: + #param1: value1 + #param2: value2 + + # Number of workers per Elasticsearch host. + worker: 1 + + # Optional index name. The default is "apm" plus date + # and generates [apm-]YYYY.MM.DD keys. + # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. + #index: "apm-%{[beat.version]}-%{+yyyy.MM.dd}" + + # Optional ingest node pipeline. By default no pipeline will be used. + #pipeline: "" + + # Optional HTTP Path + #path: "/elasticsearch" + + # Custom HTTP headers to add to each request + #headers: + # X-My-Header: Contents of the header + + # Proxy server url + #proxy_url: http://proxy:3128 + + # The number of times a particular Elasticsearch index operation is attempted. If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Elasticsearch bulk API index request. + # The default is 50. + #bulk_max_size: 50 + + # Configure http request timeout before failing an request to Elasticsearch. + #timeout: 90 + + # Use SSL settings for HTTPS. Default is true. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. By default is off. 
+ # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never +{%- endmacro %} + +{% macro output_logstash(host, data_hosts, processors, named_index) -%} +output.logstash: + # Boolean flag to enable or disable the output module. + enabled: true + + # The Logstash hosts + hosts: {{ (data_hosts | default([])) | to_json }} + + # Number of workers per Logstash host. + worker: 1 + + # Set gzip compression level. + compression_level: 3 + + # Optional maximum time to live for a connection to Logstash, after which the + # connection will be re-established. A value of `0s` (the default) will + # disable this feature. + # + # Not yet supported for async connections (i.e. with the "pipelining" option set) + #ttl: 30s + + # Optional load balance the events between the Logstash hosts. Default is false. + loadbalance: true + + # Number of batches to be sent asynchronously to logstash while processing + # new batches. + pipelining: 2 + + # If enabled only a subset of events in a batch of events is transferred per + # transaction. The number of events to be sent increases up to `bulk_max_size` + # if no error is encountered. + slow_start: true + + # The maximum number of events to bulk in a single Logstash request. 
The + # default is the number of cores multiplied by the number of threads, + # the resultant is then multiplied again by 128 which results in a the defined + # bulk max size. If the Beat sends single events, the events are collected + # into batches. If the Beat publishes a large batch of events (larger than + # the value specified by bulk_max_size), the batch is split. Specifying a + # larger batch size can improve performance by lowering the overhead of + # sending events. However big batch sizes can also increase processing times, + # which might result in API errors, killed connections, timed-out publishing + # requests, and, ultimately, lower throughput. Setting bulk_max_size to values + # less than or equal to 0 disables the splitting of batches. When splitting + # is disabled, the queue decides on the number of events to be contained in a + # batch. + bulk_max_size: {{ (processors | int) * 128 }} + +{% if named_index is defined %} + # Optional index name. The default index name is set to {{ named_index }} + # in all lowercase. + index: '{{ named_index }}' +{% endif %} + # SOCKS5 proxy server URL + #proxy_url: socks5://user:password@socks5-server:2233 + + # Resolve names locally when using a proxy server. Defaults to false. + #proxy_use_local_resolver: false + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Optional SSL configuration options. SSL is off by default. 
+ # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never +{%- endmacro %} + +{% macro setup_dashboards(beat_name) -%} +# These settings control loading the sample dashboards to the Kibana index. Loading +# the dashboards are disabled by default and can be enabled either by setting the +# options here, or by using the `-setup` CLI flag or the `setup` command. +setup.dashboards.enabled: false + +# The directory from where to read the dashboards. The default is the `kibana` +# folder in the home path. +#setup.dashboards.directory: ${path.home}/kibana + +# The URL from where to download the dashboards archive. It is used instead of +# the directory if it has a value. +#setup.dashboards.url: + +# The file archive (zip file) from where to read the dashboards. It is used instead +# of the directory when it has a value. +#setup.dashboards.file: + +# In case the archive contains the dashboards from multiple Beats, this lets you +# select which one to load. You can load all the dashboards in the archive by +# setting this to the empty string. +#setup.dashboards.beat: {{ beat_name }} + +# The name of the Kibana index to use for setting the configuration. Default is ".kibana" +#setup.dashboards.kibana_index: .kibana + +# The Elasticsearch index name. This overwrites the index name defined in the +# dashboards and index pattern. 
Example: testbeat-* +#setup.dashboards.index: + +# Always use the Kibana API for loading the dashboards instead of autodetecting +# how to install the dashboards by first querying Elasticsearch. +#setup.dashboards.always_kibana: false + +# If true and Kibana is not reachable at the time when dashboards are loaded, +# it will retry to reconnect to Kibana instead of exiting with an error. +#setup.dashboards.retry.enabled: false + +# Duration interval between Kibana connection retries. +#setup.dashboards.retry.interval: 1s + +# Maximum number of retries before exiting with an error, 0 for unlimited retrying. +#setup.dashboards.retry.maximum: 0 +{%- endmacro %} + +{% macro setup_template(beat_name, host, data_nodes, elasticsearch_replicas) -%} +# A template is used to set the mapping in Elasticsearch +# By default template loading is enabled and the template is loaded. +# These settings can be adjusted to load your own template or overwrite existing ones. + +# Set to false to disable template loading. +setup.template.enabled: {{ host == data_nodes[0] }} + +# Template name. By default the template name is "{{ beat_name }}-%{[beat.version]}" +# The template name and pattern has to be set in case the elasticsearch index pattern is modified. +setup.template.name: "{{ beat_name }}-%{[beat.version]}" + +# Template pattern. By default the template pattern is "-%{[beat.version]}-*" to apply to the default index settings. +# The first part is the version of the beat and then -* is used to match all daily indices. +# The template name and pattern has to be set in case the elasticsearch index pattern is modified. 
+setup.template.pattern: "{{ beat_name }}-%{[beat.version]}-*" + +# Path to fields.yml file to generate the template +setup.template.fields: "${path.config}/fields.yml" + +# Overwrite existing template +setup.template.overwrite: {{ host == data_nodes[0] }} + +{% set shards = ((data_nodes | length) * 3) | int %} + +# Elasticsearch template settings +setup.template.settings: + + # A dictionary of settings to place into the settings.index dictionary + # of the Elasticsearch template. For more details, please check + # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html + index: + number_of_shards: {{ shards }} + codec: best_compression + # This provides for an index split of up to 2 times the number of available shards + number_of_routing_shards: {{ (shards | int) * 2 }} + # The default number of replicas will be based on the number of data nodes + # within the environment with a limit of 2 replicas. + number_of_replicas: {{ elasticsearch_replicas | int }} + + # A dictionary of settings for the _source field. For more details, please check + # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html + _source: + enabled: true +{%- endmacro %} + +{% macro setup_kibana(host) -%} +# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API. +# This requires a Kibana endpoint configuration. +setup.kibana: + + # Kibana Host + # Scheme and port can be left out and will be set to the default (http and 5601) + # In case you specify and additional path, the scheme is required: http://localhost:5601/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601 + host: "{{ host }}" + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "elastic" + #password: "changeme" + + # Optional HTTP Path + #path: "" + + # Use SSL settings for HTTPS. Default is true. + #ssl.enabled: true + + # Configure SSL verification mode. 
If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. By default is off. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] +{%- endmacro %} + +{% macro beat_logging(beat_name) -%} +# There are four options for the log output: file, stderr, syslog, eventlog +# The file output is the default. + +# Sets log level. The default log level is info. +# Available log levels are: error, warning, info, debug +#logging.level: info + +# Enable debug output for selected components. To enable all selectors use ["*"] +# Other available selectors are "beat", "publish", "service" +# Multiple selectors can be chained. +#logging.selectors: [ ] + +# Send all logging output to syslog. The default is false. +#logging.to_syslog: false + +# Send all logging output to Windows Event Logs. The default is false. +#logging.to_eventlog: false + +# If enabled, packetbeat periodically logs its internal metrics that have changed +# in the last period. For each metric that changed, the delta from the value at +# the beginning of the period is logged. Also, the total values for +# all non-zero internal metrics are logged on shutdown. The default is true. 
+#logging.metrics.enabled: true + +# The period after which to log the internal metrics. The default is 30s. +#logging.metrics.period: 30s + +# Logging to rotating files. Set logging.to_files to false to disable logging to +# files. +logging.to_files: true +logging.files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + path: /var/log/beats + + # The name of the files where the logs are written to. + name: {{ beat_name }}.log + + # Configure log file size limit. If limit is reached, log file will be + # automatically rotated + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. Oldest files will be deleted first. + keepfiles: 2 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # unix epoch. Defaults to disabled. + #interval: 0 + +# Set to true to log messages in json format. +#logging.json: false +{%- endmacro %} + +{% macro xpack_monitoring_elasticsearch(host, data_hosts, processors) -%} +# metricbeat can export internal metrics to a central Elasticsearch monitoring cluster. +# This requires xpack monitoring to be enabled in Elasticsearch. +# The reporting is disabled by default. + +# Set to true to enable the monitoring reporter. +xpack.monitoring.enabled: true + +# Uncomment to send the metrics to Elasticsearch. Most settings from the +# Elasticsearch output are accepted here as well. 
Any setting that is not set is +# automatically inherited from the Elasticsearch output configuration, so if you +# have the Elasticsearch output configured, you can simply uncomment the +# following line, and leave the rest commented out. +xpack.monitoring.elasticsearch: + + # Array of hosts to connect to. + # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + hosts: {{ (data_hosts | default([])) | to_json }} + + # Set gzip compression level. + compression_level: 9 + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "beats_system" + #password: "changeme" + + # Dictionary of HTTP parameters to pass within the url with index operations. + #parameters: + #param1: value1 + #param2: value2 + + # Custom HTTP headers to add to each request + headers: + X-Node-Name: {{ host }} + + # Proxy server url + #proxy_url: http://proxy:3128 + + # The number of times a particular Elasticsearch index operation is attempted. If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. The default is 3. + max_retries: 5 + + # The maximum number of events to bulk in a single Elasticsearch bulk API index request. + # The default is 50. + bulk_max_size: {{ (processors | int) * 64 }} + + # Configure http request timeout before failing an request to Elasticsearch. + timeout: 120 + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. 
+ #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. By default is off. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never +{%- endmacro %} diff --git a/elk_metrics_7x/templates/jvm.options.j2 b/elk_metrics_7x/templates/jvm.options.j2 new file mode 100644 index 00000000..f2e76559 --- /dev/null +++ b/elk_metrics_7x/templates/jvm.options.j2 @@ -0,0 +1,95 @@ +## JVM configuration +{% if _service_heap_size is defined %} +{% set heap_size = _service_heap_size %} +{% else %} +{% if (not (elasticsearch_node_master | default(master_node)) | bool) and (not (elasticsearch_node_data | default(data_node)) | bool) %} +{% set heap_size = elastic_heap_size_default | default((_elastic_heap_size_default | int) // 2) %} +{% else %} +{% set heap_size = elastic_heap_size_default | default(_elastic_heap_size_default | int) %} +{% endif %} +{% endif %} +# Xms represents the initial size of total heap space +-Xms{{ heap_size }}m +# Xmx represents the maximum size of total heap space +-Xmx{{ heap_size }}m +# Sets the thread stack size +-Xss1m + +################################################################ +## Expert settings +################################################################ +## +## All settings below this section are considered +## expert settings. 
Don't tamper with them unless +## you understand what you are doing +## +################################################################ + +## GC Configuration +{% if ((heap_size | int) > 6144) and (elastic_g1gc_enabled | bool) %} +-XX:+UseG1GC +-XX:MaxGCPauseMillis=400 +-XX:InitiatingHeapOccupancyPercent=75 +{% else %} +# NOTE(review): ParNew is implied by CMS on JDK 8; -XX:+UseParNewGC was removed in JDK 10+ and breaks JVM startup there +-XX:+UseConcMarkSweepGC +-XX:CMSInitiatingOccupancyFraction=75 +-XX:+UseCMSInitiatingOccupancyOnly +{% endif %} + +## optimizations + +# disable calls to System#gc +-XX:+DisableExplicitGC + +## locale +# Set the locale language +#-Duser.language=en + +# Set the locale country +#-Duser.country=US + +# Set the locale variant, if any +#-Duser.variant= + +## basic + +# set the I/O temp directory +-Djava.io.tmpdir={{ temp_dir }} + +# set to headless, just in case +-Djava.awt.headless=true + +# ensure UTF-8 encoding by default (e.g. filenames) +-Dfile.encoding=UTF-8 + +# use our provided JNA always versus the system one +#-Djna.nosys=true + +## heap dumps + +# generate a heap dump when an allocation from the Java heap fails +# heap dumps are created in the working directory of the JVM +-XX:+HeapDumpOnOutOfMemoryError + +# specify an alternative path for heap dumps +# ensure the directory exists and has sufficient space +#-XX:HeapDumpPath=${LOGSTASH_HOME}/heapdump.hprof + +## GC logging +#-XX:+PrintGCDetails +#-XX:+PrintGCTimeStamps +#-XX:+PrintGCDateStamps +#-XX:+PrintClassHistogram +#-XX:+PrintTenuringDistribution +#-XX:+PrintGCApplicationStoppedTime + +# log GC status to a file with time stamps +# ensure the directory exists +#-Xloggc:${LS_GC_LOG_FILE} + +#listen on IPv4 addresses +-Djava.net.preferIPv4Stack=true + +# Disable log4j because its not supported by elastic +-Dlog4j2.disable.jmx=true diff --git a/elk_metrics_7x/templates/logstash-pipelines.yml.j2 b/elk_metrics_7x/templates/logstash-pipelines.yml.j2 new file mode 100644 index 00000000..000627c5 --- /dev/null +++ b/elk_metrics_7x/templates/logstash-pipelines.yml.j2 @@ -0,0 
+1,597 @@ +# This file is where you define your pipelines. You can define multiple. +# For more information on multiple pipelines, see the documentation: +# https://www.elastic.co/guide/en/logstash/current/multiple-pipelines.html + +- pipeline.id: "elk_metrics_7x" + queue.type: "persisted" + config.string: | + input { + beats { + id => "inputBeats" + client_inactivity_timeout => 300 + port => {{ logstash_beat_input_port }} + add_field => { + "[@metadata][source_type]" => "beats" + } + } + } +{% if logstash_syslog_input_enabled | bool %} + input { +{% if logstash_syslog_input_mode == 'tcp' %} + tcp { + id => "inputSyslogTcp" + port => {{ logstash_syslog_input_port }} + type => syslog + add_field => { + "[@metadata][source_type]" => "syslog" + } + } +{% elif logstash_syslog_input_mode == 'udp' %} + udp { + id => "inputSyslogUdp" + port => {{ logstash_syslog_input_port }} + type => syslog + add_field => { + "[@metadata][source_type]" => "syslog" + } + } +{% endif %} + } +{% endif %} +{% if logstash_collectd_input_enabled | bool %} + input { + udp { + port => {{ logstash_collectd_port }} + buffer_size => {{ logstash_collectd_buffer_size }} + codec => collectd { } +{% if (logstash_collectd_security_level is defined) and (logstash_collectd_authfile is defined) %} + security_level => {{ logstash_collectd_security_level }} + authfile => {{ logstash_collectd_authfile }} +{% endif %} + add_field => { + "[@metadata][source_type]" => "collectd" + } + } + } +{% endif %} + filter { + if [@metadata][source_type] == "syslog" { + mutate { + add_tag => ["syslog"] + } + } + + if [@metadata][source_type] == "collectd" { + mutate { + add_tag => ["collectd"] + } + } + + # NOTE(mnaser): Filebeat doesn't support shipping to different outputs + # which means we need to parse `auditd` fileset here rather + # than rely on ingest. 
+ if [fileset][module] == "auditd" { + grok { + break_on_match => false + match => { + message => [ + "type=%{WORD:[auditd][log][record_type]}", + "msg=audit\(%{NUMBER:timestamp}:%{NUMBER:[auditd][log][sequence]}\)", + "a0=\"%{DATA:[auditd][log][a0]}\"", + "acct=\"%{DATA:[auditd][log][acct]}\"", + "addr=%{IPORHOST:[auditd][log][addr]}" + ] + } + } + + date { + match => [ "timestamp", "UNIX" ] + remove_field => "timestamp" + } + + if [auditd][log][addr] { + geoip { + source => "[auditd][log][addr]" + target => "[auditd][geoip]" + } + } + + # NOTE(mnaser): We don't match all fields so `grok` thinks that we + # failed. + mutate { + remove_tag => ["_grokparsefailure"] + } + } + + if [@metadata][source_type] == "beats" or [@metadata][source_type] == "syslog" { + if [systemd_slice] { + mutate { + copy => { "systemd_slice" => "systemd_slice_tag" } + } + mutate { + gsub => [ "systemd_slice_tag", ".slice", "" ] + } + if [systemd_slice_tag] != "-" { + mutate { + add_tag => [ + "%{systemd_slice_tag}" + ] + } + mutate { + add_tag => [ + "filebeat" + ] + } + } + mutate { + remove_field => [ "%{systemd_slice_tag}" ] + } + } + if "filebeat" in [tags] { + if "Traceback" in [message] { + mutate { + add_tag => ["traceback"] + remove_tag => ["_grokparsefailure"] + } + } + + if "auth" in [tags] { + grok { + match => { "message" => "%{SYSLOGTIMESTAMP:timestamp} (?:%{SYSLOGFACILITY} )?%{NOTSPACE:logsource} %{SYSLOGPROG}: (?:%{SPACE})?%{GREEDYDATA:logmessage}" } + } + mutate { + add_field => { "module" => "auth" } + } + } else if "elasticsearch" in [tags] { + grok { + match => { "message" => "\[%{TIMESTAMP_ISO8601:timestamp}\]\[%{LOGLEVEL:loglevel}\s*\]\[%{NOTSPACE:module}\s*\] %{GREEDYDATA:logmessage}" } + } + mutate { + replace => { "module" => "elasticsearch.%{module}" } + } + } else if "ceph" in [tags] { + grok { + match => { "message" => "%{TIMESTAMP_ISO8601:date} %{NOTSPACE:osd_epoch} ?%{SPACE}?%{NOTSPACE:error_bool} %{GREEDYDATA:logmessage}" } + } + if "ceph-osd" in [tags] { + 
grok { + match => { "message" => "-- (?(%{IPORHOST}\:%{POSINT}/%{POSINT})) (?:[<|>]){1,2} (?(%{IPORHOST}\:%{POSINT}/%{POSINT}))" } + } + } + } else if "libvirt" in [tags] { + grok { + match => { "message" => "(?m)^%{TIMESTAMP_ISO8601:logdate}:%{SPACE}%{NUMBER:code}:?%{SPACE}\[?\b%{NOTSPACE:loglevel}\b\]?%{SPACE}?:?%{SPACE}\[?\b%{NOTSPACE:module}\b\]?%{SPACE}?%{GREEDYDATA:logmessage}?" } + add_field => { "received_at" => "%{@timestamp}"} + } + mutate { + uppercase => [ "loglevel" ] + } + } else if "logstash" in [tags] { + grok { + match => { + "message" => "\{\:timestamp=>\"%{TIMESTAMP_ISO8601:timestamp}\", \:message=>\"%{DATA:logmessage}\"(;|)(, \:address=>\"%{URIHOST:address}\", \:exception=>#<\"%{DATA:exception}\">, \:backtrace=>\[%{DATA:backtrace}\]|)(, \:level=>:\"%{LOGLEVEL:loglevel}\"|)\}" + } + } + mutate { + add_field => { "module" => "logstash" } + uppercase => [ "loglevel" ] + } + if [loglevel] == "WARN" { + mutate { + replace => { "loglevel" => "WARNING" } + } + } else if ![loglevel] { + mutate { + add_field => { "loglevel" => "ERROR" } + } + } + } else if "mysql" in [tags] { + grok { + match => { "message" => "# User@Host: %{WORD:user}\[%{WORD}\] @ (%{HOSTNAME:client_hostname}|) \[(%{IP:client_ip}|)\]" } + } + grok { + match => { "message" => "# Thread_id: %{NUMBER:thread_id:int} \s*Schema: (%{WORD:schema}| ) \s*QC_hit: %{WORD:qc_hit}" } + } + grok { + match => { "message" => "# Query_time: %{NUMBER:query_time:float} \s*Lock_time: %{NUMBER:lock_time:float} \s*Rows_sent: %{NUMBER:rows_sent:int} \s*Rows_examined: %{NUMBER:rows_examined:int}" } + } + grok { + match => { "message" => "(?m)SET timestamp=%{NUMBER:timestamp};%{GREEDYDATA:logmessage}" } + } + geoip { + source => "clientip" + } + date { + match => [ "timestamp", "UNIX" ] + } + mutate { + remove_field => "timestamp" + } + mutate { + gsub => [ "logmessage", "^\n", "" ] + add_field => { "module" => "mysql" } + add_field => { "loglevel" => "WARNING" } + } + } else if "nginx" in [tags] { + if 
"nginx-access" in [tags] { + grok { + patterns_dir => ["/opt/logstash/patterns"] + match => { + "message" => "%{IP:client_ip} - %{USER:client_user} \[%{NGINX_TIMESTAMP:timestamp}\] \"%{WORD:verb} %{NOTSPACE:request} HTTP/%{NUMBER:http_version}\" %{INT:response_code} %{INT:bytes} %{QUOTEDSTRING:referer} %{QUOTEDSTRING:user_agent} %{QUOTEDSTRING:gzip_ratio}" + } + } + geoip { + source => "client_ip" + } + } + if "nginx-error" in [tags] { + grok { + patterns_dir => ["/opt/logstash/patterns"] + match => { + "message" => "%{NGINX_ERROR_TIMESTAMP:timestamp} \[%{LOGLEVEL:loglevel}\] %{GREEDYDATA:error_msg}" + } + } + } + } else if "openstack" in [tags] { + if "Can not find policy directory: policy.d" in [message] { + drop { } + } + grok { + match => { + "message" => [ + "^%{TIMESTAMP_ISO8601:logdate}%{SPACE}%{NUMBER:pid}?%{SPACE}?(?<loglevel>AUDIT|CRITICAL|DEBUG|INFO|TRACE|WARNING|ERROR) \[?\b%{NOTSPACE:module}\b\]?%{SPACE}?%{GREEDYDATA:logmessage}?", + "^%{CISCOTIMESTAMP:journalddate}%{SPACE}%{SYSLOGHOST:host}%{SPACE}%{SYSLOGPROG:prog}%{SPACE}%{TIMESTAMP_ISO8601:logdate}%{SPACE}%{NUMBER:pid}%{SPACE}%{NOTSPACE:loglevel}%{SPACE}%{NOTSPACE:module}%{SPACE}%{GREEDYDATA:logmessage}" + ] + } + } + grok { + match => { + "logmessage" => ["\[(%{NOTSPACE:request_id} %{NOTSPACE:user} %{NOTSPACE:tenant} %{NOTSPACE:domain} %{NOTSPACE:user_domain} %{NOTSPACE:project_domain}|\-)\] %{GREEDYDATA:logmessage}?"] + } + overwrite => [ "logmessage" ] + } + date { + match => [ "logdate", "ISO8601" ] + remove_field => [ "logdate" ] + } + if "nova" in [tags] { + # Instance ID from logs (i.e. 
"[instance: 5ee83c6e-3604-467a-be54-e48429086e3f]") + grok { + match => { + "logmessage" => ["(\[instance\: %{NOTSPACE:instance_id}\] )?%{GREEDYDATA:logmessage}?"] + } + overwrite => [ "logmessage" ] + } + + if [module] == "nova.api.openstack.requestlog" { + grok { + match => { "logmessage" => "%{IPORHOST:client_ip} \"%{WORD:verb} %{NOTSPACE:request}\" status\: %{NUMBER:response} len\: %{NUMBER:bytes} microversion\: (%{NUMBER:microversion}|\-) time\: %{NUMBER:duration:float}" } + add_tag => ["api"] + remove_field => [ "logmessage", "message" ] + } + } else if [module] == "nova.api.openstack.placement.requestlog" { + grok { + match => { "logmessage" => "%{IPORHOST:client_ip} \"%{WORD:verb} %{NOTSPACE:request}\" status\: %{NUMBER:response} len\: %{NUMBER:bytes} microversion\: (%{NUMBER:microversion}|\-)" } + add_tag => ["api"] + remove_field => [ "logmessage", "message" ] + } + } + } else if "neutron" in [tags] { + if [module] == "neutron.wsgi" { + grok { + match => { "logmessage" => "%{IPORHOST:client_ip} \"%{WORD:verb} %{NOTSPACE:request} HTTP/%{NUMBER:http_version}\" status\: %{NUMBER:response} len\: %{NUMBER:bytes} time\: %{NUMBER:duration:float}" } + add_tag => ["api"] + remove_field => [ "logmessage", "message" ] + } + } else if "neutron-ha-tool" in [source] { + mutate { + add_tag => ["neutron-ha-tool"] + remove_tag => ["_grokparsefailure"] + } + } + if "starting" in [message] and "_grokparsefailure" in [tags] { + grok { + match => { "logmessage" => "(%{SPACE}\(%{NUMBER:pid}\)) %{GREEDYDATA:servicemessage}" } + } + mutate { + remove_tag => ["_grokparsefailure"] + } + } + } else if "glance" in [tags] { + if [module] == "eventlet.wsgi.server" { + mutate { + gsub => ["logmessage","\"",""] + } + grok { + match => { "logmessage" => "%{NOTSPACE:requesterip} \- \- \[%{NOTSPACE:req_date} %{NOTSPACE:req_time}\] %{NOTSPACE:verb} %{NOTSPACE:url_path} %{NOTSPACE:http_ver} %{NUMBER:response} %{NUMBER:bytes:int} %{BASE10NUM:httptime:float}" } + add_tag => ["api"] + } + 
mutate { + replace => { "module" => "glance.%{module}" } + } + } + } else if "cinder" in [tags] { + if [module] == "cinder.eventlet.wsgi.server" { + if "accepted" not in [logmessage] { + mutate { + gsub => ["logmessage","\"",""] + } + grok { + match => { "logmessage" => "%{NOTSPACE:requesterip} \- \- \[%{NOTSPACE:req_date} %{NOTSPACE:req_time}\] %{NOTSPACE:verb} %{NOTSPACE:url_path} %{NOTSPACE:http_ver} %{NUMBER:response} %{NUMBER:bytes:int} %{BASE10NUM:httptime:float}" } + add_tag => ["api"] + } + } + mutate { + replace => { "module" => "cinder.%{module}" } + } + } + } else if "horizon" in [tags] { + grok { + patterns_dir => ["/opt/logstash/patterns"] + match => { + "message" => [ + "%{COMMONAPACHELOG}", + "\[%{APACHE_ERROR_TIMESTAMP:timestamp}\] \[%{DATA:module}:%{DATA:loglevel}\] \[pid %{POSINT:apache_pid}\:tid %{POSINT:apache_tid}\] ?(?:\[client %{IP:clientip}:%{POSINT:clientport}\] )?%{GREEDYDATA:logmessage}", + "%{SYSLOGTIMESTAMP:timestamp}%{SPACE}%{SYSLOGHOST:host}%{SPACE}%{PROG:prog}%{SPACE}%{IP:clientip}%{SPACE}%{NOTSPACE}%{SPACE}%{NOTSPACE}%{SPACE}%{SYSLOG5424SD}%{SPACE}%{QS}%{SPACE}%{NUMBER}%{SPACE}%{NUMBER}%{SPACE}%{QS}%{SPACE}%{QS}" + ] + } + } + geoip { + source => "clientip" + } + if ![loglevel] { + mutate { + add_field => { "logmessage" => "%{request}" } + add_field => { "module" => "horizon.access" } + add_field => { "loglevel" => "INFO" } + add_tag => [ "apache-access" ] + } + } else { + mutate { + replace => { "module" => "horizon.error.%{module}" } + add_tag => [ "apache-error" ] + uppercase => [ "loglevel" ] + } + } + } else if "heat" in [tags] { + if [module] == "eventlet.wsgi.server" { + if "accepted" not in [logmessage] { + mutate { + gsub => ["logmessage","\"",""] + } + grok { + match => { "logmessage" => "%{NOTSPACE:requesterip} %{NOTSPACE} %{NOTSPACE} \[%{NOTSPACE:req_date} %{NOTSPACE:req_time}\] %{NOTSPACE:verb} %{NOTSPACE:url_path} %{NOTSPACE:http_ver} %{NUMBER:response} %{NUMBER:bytes} %{BASE10NUM:httptime}" } + add_tag => ["api"] + } 
+ } + mutate { + replace => { "module" => "heat.%{module}" } + } + } else if [module] == "heat.engine.service" { + grok { + match => { "logmessage" => "%{GREEDYDATA:servicemessage}" } + add_tag => ["api"] + } + } + } else if "swift-account" in [tags] { + grok { + match => { + "message" => "%{SYSLOGTIMESTAMP}%{SPACE}%{HOSTNAME}%{SPACE}%{PROG}%{SPACE}%{SYSLOGTIMESTAMP}%{SPACE}%{S3_REQUEST_LINE}%{SPACE}%{IP}%{SPACE}%{NOTSPACE}%{SPACE}%{NOTSPACE}%{SPACE}%{SYSLOG5424SD}%{SPACE}%{QS}%{SPACE}%{POSINT}%{SPACE}%{NOTSPACE}%{SPACE}%{QS}%{SPACE}%{QS}%{SPACE}%{QS}%{SPACE}%{SECOND}%{SPACE}%{QS}%{SPACE}%{NUMBER}%{SPACE}%{NOTSPACE}" + } + } + } else if "swift" in [tags] { + grok { + match => { + "message" => "%{SYSLOGTIMESTAMP:timestamp} (?:%{SYSLOGFACILITY} )?%{NOTSPACE:logsource} %{SYSLOGPROG:module}: (?:%{SPACE})?%{GREEDYDATA:logmessage}" + } + } + grok { + patterns_dir => ["/opt/logstash/patterns"] + match => { + "logmessage" => [ + "%{COMBINEDAPACHELOG}", + "%{SWIFTPROXY_ACCESS}", + "%{GREEDYDATA:logmessage} \(txn\: %{DATA:swift_txn}\)" + ] + } + tag_on_failure => [] + overwrite => [ "logmessage" ] + } + + if [request] { + mutate { + replace => { "logmessage" => "%{request}" } + } + } + + mutate { + replace => { "module" => "swift.%{module}" } + } + + if [file] =~ "error.log$" { + mutate { + add_field => { "loglevel" => "NOTICE" } + } + } else { + mutate { + add_field => { "loglevel" => "INFO" } + } + } + } else if "keystone" in [tags] { + if [loglevel] == "INFO" and [module] == "keystone.common.wsgi" { + grok { + match => { "logmessage" => "%{WORD:verb} %{NOTSPACE:request}" } + remove_field => [ "logmessage", "message" ] + } + } + } else if "magnum" in [tags] { + if [module] == "eventlet.wsgi.server" { + mutate { + gsub => ["logmessage","\"",""] + } + grok { + match => { "logmessage" => "%{NOTSPACE:requesterip} \- \- \[%{NOTSPACE:req_date} %{NOTSPACE:req_time}\] %{NOTSPACE:verb} %{NOTSPACE:url_path} %{NOTSPACE:http_ver} %{NUMBER:response} %{NUMBER:bytes:int} 
%{BASE10NUM:httptime:float}" } + add_tag => ["api"] + } + mutate { + replace => { "module" => "magnum.%{module}" } + } + } + } else if "octavia" in [tags] { + if [module] == "eventlet.wsgi.server" { + mutate { + gsub => ["logmessage","\"",""] + } + grok { + match => { "logmessage" => "%{NOTSPACE:requesterip} \- \- \[%{NOTSPACE:req_date} %{NOTSPACE:req_time}\] %{NOTSPACE:verb} %{NOTSPACE:url_path} %{NOTSPACE:http_ver} %{NUMBER:response} %{NUMBER:bytes:int} %{BASE10NUM:httptime:float}" } + add_tag => ["api"] + } + mutate { + replace => { "module" => "octavia.%{module}" } + } + } + } + } else if "rabbitmq" in [tags] { + if [message] == "" { + drop { } + } + grok { + match => { "message" => "^\=%{LOGLEVEL:loglevel} REPORT\=\=\=\= %{MONTHDAY:event_day}\-%{MONTH:event_month}\-%{YEAR:event_year}\:\:%{TIME:event_time} \=\=\=\n%{GREEDYDATA:logmessage}" } + } + mutate { + replace => { "module" => "rabbitmq" } + add_field => { "timestamp" => "%{event_day} %{event_month} %{event_year} %{event_time}" } + } + date { + match => [ "timestamp", "dd MMM YYYY HH:mm:ss" ] + remove_field => [ "event_day", "event_month", "event_year", "event_time", "timestamp" ] + } + } + } + } + if [source.ip] { + geoip { + id => "setGeoIpSource" + source => "source.ip" + } + } else if [ip] { + geoip { + id => "setGeoIp" + source => "ip" + } + } + if [message] { + fingerprint { + id => "setSHA1" + target => "[@metadata][fingerprint]" + method => "SHA1" + key => "{{ inventory_hostname | to_uuid }}" + } + } + } + output { + if [@metadata][fingerprint] { + if [@metadata][version] { + elasticsearch { + id => "elasticsearchDocIDOutputPipeline" + document_id => "%{[@metadata][fingerprint]}" + hosts => ["{{ '127.0.0.1:' ~ elastic_port }}"] + sniffing => {{ (elastic_sniffing_enabled | default(not data_node)) | bool | string | lower }} + manage_template => {{ (data_node | bool) | lower }} + index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}" + } + } else if [@metadata][beat] { + elasticsearch 
{ + id => "elasticsearchLegacyDocIDOutputPipeline" + document_id => "%{[@metadata][fingerprint]}" + hosts => ["{{ '127.0.0.1:' ~ elastic_port }}"] + sniffing => {{ (elastic_sniffing_enabled | default(not data_node)) | bool | string | lower }} + manage_template => {{ (data_node | bool) | lower }} + index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}" + } + } else if "syslog" in [tags] { + elasticsearch { + id => "elasticsearchSyslogDocIDOutputPipeline" + document_id => "%{[@metadata][fingerprint]}" + hosts => ["{{ '127.0.0.1:' ~ elastic_port }}"] + sniffing => {{ (elastic_sniffing_enabled | default(not data_node)) | bool | string | lower }} + manage_template => {{ (data_node | bool) | lower }} + index => "syslog-%{+YYYY.MM.dd}" + } + } else if "collectd" in [tags] { + elasticsearch { + id => "elasticsearchCollectdDocIDOutputPipeline" + document_id => "%{[@metadata][fingerprint]}" + hosts => ["{{ '127.0.0.1:' ~ elastic_port }}"] + sniffing => {{ (elastic_sniffing_enabled | default(not data_node)) | bool | string | lower }} + manage_template => {{ (data_node | bool) | lower }} + index => "collectd-%{+YYYY.MM.dd}" + } + } else { + elasticsearch { + id => "elasticsearchUndefinedDocIDOutputPipeline" + document_id => "%{[@metadata][fingerprint]}" + hosts => ["{{ '127.0.0.1:' ~ elastic_port }}"] + sniffing => {{ (elastic_sniffing_enabled | default(not data_node)) | bool | string | lower }} + manage_template => {{ (data_node | bool) | lower }} + index => "undefined-%{+YYYY.MM.dd}" + } + } + } else { + if [@metadata][version] { + elasticsearch { + id => "elasticsearchOutputPipeline" + hosts => ["{{ '127.0.0.1:' ~ elastic_port }}"] + sniffing => {{ (elastic_sniffing_enabled | default(not data_node)) | bool | string | lower }} + manage_template => {{ (data_node | bool) | lower }} + index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}" + } + } else if [@metadata][beat] { + elasticsearch { + id => "elasticsearchLegacyOutputPipeline" + hosts => ["{{ '127.0.0.1:' ~ 
elastic_port }}"] + sniffing => {{ (elastic_sniffing_enabled | default(not data_node)) | bool | string | lower }} + manage_template => {{ (data_node | bool) | lower }} + index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}" + } + } else if "syslog" in [tags] { + elasticsearch { + id => "elasticsearchSyslogOutputPipeline" + hosts => ["{{ '127.0.0.1:' ~ elastic_port }}"] + sniffing => {{ (elastic_sniffing_enabled | default(not data_node)) | bool | string | lower }} + manage_template => {{ (data_node | bool) | lower }} + index => "syslog-%{+YYYY.MM.dd}" + } + } else if "collectd" in [tags] { + elasticsearch { + id => "elasticsearchCollectdOutputPipeline" + hosts => ["{{ '127.0.0.1:' ~ elastic_port }}"] + sniffing => {{ (elastic_sniffing_enabled | default(not data_node)) | bool | string | lower }} + manage_template => {{ (data_node | bool) | lower }} + index => "collectd-%{+YYYY.MM.dd}" + } + } else { + elasticsearch { + id => "elasticsearchUndefinedOutputPipeline" + hosts => ["{{ '127.0.0.1:' ~ elastic_port }}"] + sniffing => {{ (elastic_sniffing_enabled | default(not data_node)) | bool | string | lower }} + manage_template => {{ (data_node | bool) | lower }} + index => "undefined-%{+YYYY.MM.dd}" + } + } + } + +{% if logstash_kafka_options is defined %} + kafka { + {% for key, value in logstash_kafka_options.items() %} + {% if value is number %} + {{ key }} => {{ value }} + {% elif value is iterable and value is not string %} + {{ key }} => "{{ value | join(',') }}" + {% else %} + {{ key }} => "{{ value }}" + {% endif %} + {% endfor %} + } +{% endif %} + } diff --git a/elk_metrics_7x/templates/openstack-log-dashboard.json b/elk_metrics_7x/templates/openstack-log-dashboard.json new file mode 100644 index 00000000..cb932438 --- /dev/null +++ b/elk_metrics_7x/templates/openstack-log-dashboard.json @@ -0,0 +1,107 @@ +{ + "_index":".kibana", + "_type":"doc", + "_id":"visualization:439b9700-78a4-11e8-b957-6d408ad5f3d7", + "_score":1, + "_source":{ + "type":"visualization", + 
"visualization":{ + "title":"[OpenStack] Log Level", + "visState":"{\"title\":\"[OpenStack] Log Level\",\"type\":\"pie\",\"params\":{\"type\":\"pie\",\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":true,\"labels\":{\"show\":false,\"values\":true,\"last_level\":true,\"truncate\":100}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{\"customLabel\":\"\"}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"loglevel\",\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"OpenStack LOG\"}}]}", + "uiStateJSON":"{}", + "description":"", + "version":1, + "kibanaSavedObjectMeta":{ + "searchSourceJSON":"{\"index\":\"*\",\"filter\":[],\"query\":{\"query\":\"\",\"language\":\"lucene\"}}" + } + } + } +} +{ + "_index":".kibana", + "_type":"doc", + "_id":"visualization:aee19340-78b6-11e8-b957-6d408ad5f3d7", + "_score":1, + "_source":{ + "type":"visualization", + "updated_at":"2018-06-25T20:31:04.820Z", + "visualization":{ + "title":"[OpenStack] Log Level by Module Name", + "visState":"{\"title\":\"[OpenStack] Log Level by Module Name\",\"type\":\"pie\",\"params\":{\"addLegend\":true,\"addTooltip\":true,\"isDonut\":true,\"labels\":{\"last_level\":true,\"show\":false,\"truncate\":100,\"values\":true},\"legendPosition\":\"right\",\"type\":\"pie\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"module\",\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}]}", + "uiStateJSON":"{}", + "description":"", + "version":1, + "kibanaSavedObjectMeta":{ + 
"searchSourceJSON":"{\"index\":\"*\",\"filter\":[],\"query\":{\"language\":\"lucene\",\"query\":\"\"}}" + } + } + } +} +{ + "_index":".kibana", + "_type":"doc", + "_id":"visualization:7b6488c0-78b5-11e8-b957-6d408ad5f3d7", + "_score":1, + "_source":{ + "type":"visualization", + "updated_at":"2018-06-25T20:22:28.939Z", + "visualization":{ + "title":"[Openstack] Log Level Bar Per Host", + "visState":"{\"title\":\"[Openstack] Log Level Bar Per Host\",\"type\":\"histogram\",\"params\":{\"type\":\"histogram\",\"grid\":{\"categoryLines\":false,\"style\":{\"color\":\"#eee\"}},\"categoryAxes\":[{\"id\":\"CategoryAxis-1\",\"type\":\"category\",\"position\":\"bottom\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\"},\"labels\":{\"show\":true,\"truncate\":100},\"title\":{}}],\"valueAxes\":[{\"id\":\"ValueAxis-1\",\"name\":\"LeftAxis-1\",\"type\":\"value\",\"position\":\"left\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\",\"mode\":\"normal\"},\"labels\":{\"show\":true,\"rotate\":0,\"filter\":false,\"truncate\":100},\"title\":{\"text\":\"Count\"}}],\"seriesParams\":[{\"show\":true,\"mode\":\"stacked\",\"type\":\"histogram\",\"drawLinesBetweenPoints\":true,\"showCircles\":true,\"data\":{\"id\":\"2\",\"label\":\"Count\"},\"valueAxis\":\"ValueAxis-1\"}],\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"times\":[],\"addTimeMarker\":false},\"aggs\":[{\"id\":\"2\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"beat.hostname\",\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"size\":20,\"order\":\"desc\",\"orderBy\":\"2\"}},{\"id\":\"4\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"loglevel\",\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"size\":5,\"order\":\
"desc\",\"orderBy\":\"2\"}}]}", + "uiStateJSON":"{}", + "description":"", + "version":1, + "kibanaSavedObjectMeta":{ + "searchSourceJSON":"{\"index\":\"*\",\"filter\":[],\"query\":{\"language\":\"lucene\",\"query\":\"\"}}" + } + } + } +} +{ + "_index":".kibana", + "_type":"doc", + "_id":"search:51d3fb90-78b9-11e8-b957-6d408ad5f3d7", + "_score":1, + "_source":{ + "type":"search", + "updated_at":"2018-06-25T20:51:10.156Z", + "search":{ + "title":"AllLogs", + "description":"", + "hits":0, + "columns":[ + "_source" + ], + "sort":[ + "@timestamp", + "desc" + ], + "version":1, + "kibanaSavedObjectMeta":{ + "searchSourceJSON":"{\"index\":\"*\",\"highlightAll\":true,\"version\":true,\"query\":{\"language\":\"lucene\",\"query\":\"*\"},\"filter\":[]}" + } + } + } +} +{ + "_index":".kibana", + "_type":"doc", + "_id":"dashboard:1ffbe820-78b0-11e8-b957-6d408ad5f3d7", + "_score":1, + "_source":{ + "type":"dashboard", + "updated_at":"2018-06-25T20:52:44.699Z", + "dashboard":{ + "title":"[Openstack] Log Dashboard", + "hits":0, + "description":"", + "panelsJSON":"[{\"panelIndex\":\"1\",\"gridData\":{\"x\":0,\"y\":0,\"w\":6,\"h\":3,\"i\":\"1\"},\"id\":\"439b9700-78a4-11e8-b957-6d408ad5f3d7\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"2\",\"gridData\":{\"x\":0,\"y\":3,\"w\":12,\"h\":5,\"i\":\"2\"},\"id\":\"7b6488c0-78b5-11e8-b957-6d408ad5f3d7\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"3\",\"gridData\":{\"x\":6,\"y\":0,\"w\":6,\"h\":3,\"i\":\"3\"},\"id\":\"aee19340-78b6-11e8-b957-6d408ad5f3d7\",\"type\":\"visualization\",\"version\":\"6.2.4\"},{\"panelIndex\":\"5\",\"gridData\":{\"x\":0,\"y\":8,\"w\":12,\"h\":5,\"i\":\"5\"},\"version\":\"6.2.4\",\"type\":\"search\",\"id\":\"51d3fb90-78b9-11e8-b957-6d408ad5f3d7\"}]", + "optionsJSON":"{\"darkTheme\":false,\"hidePanelTitles\":false,\"useMargins\":true}", + "version":1, + "timeRestore":false, + "kibanaSavedObjectMeta":{ + 
"searchSourceJSON":"{\"query\":{\"language\":\"lucene\",\"query\":\"\"},\"filter\":[],\"highlightAll\":true,\"version\":true}" + } + } + } +} diff --git a/elk_metrics_7x/templates/systemd.general-overrides.conf.j2 b/elk_metrics_7x/templates/systemd.general-overrides.conf.j2 new file mode 100644 index 00000000..29acc685 --- /dev/null +++ b/elk_metrics_7x/templates/systemd.general-overrides.conf.j2 @@ -0,0 +1,12 @@ +[Service] +# This creates a specific slice to operate from. The accounting options give us +# the ability to see resource usage through the `systemd-cgtop` command and +# further isolate this service from the host machine. +Slice=elastic.slice +CPUAccounting=true +BlockIOAccounting=true +MemoryAccounting=true +TasksAccounting=true + +# Sandbox setup +PrivateTmp=true diff --git a/elk_metrics_7x/tests/_container-setup.yml b/elk_metrics_7x/tests/_container-setup.yml new file mode 100644 index 00000000..a1b724c7 --- /dev/null +++ b/elk_metrics_7x/tests/_container-setup.yml @@ -0,0 +1,73 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Setup host for nspawn + hosts: localhost + connection: local + become: true + vars: + nspawn_networks: + nspawn_address: + bridge: "nspawn0" + private_device: true + enable_dhcp: true + dhcp_range: 10.100.101.2,10.100.101.129 + address: 10.100.101.1 + netmask: 255.255.255.0 + macvlan_mode: bridge + + pre_tasks: + - name: Ensure root ssh key + user: + name: "{{ ansible_env.USER | default('root') }}" + generate_ssh_key: "yes" + ssh_key_bits: 2048 + ssh_key_file: ".ssh/id_rsa" + + - name: Get root ssh key + slurp: + src: '~/.ssh/id_rsa.pub' + register: _root_ssh_key + + - name: Prepare container ssh key fact + set_fact: + nspawn_container_ssh_key: "{{ _root_ssh_key['content'] | b64decode }}" + + - name: Ensure public ssh key is in authorized_keys + authorized_key: + user: "{{ ansible_env.USER | default('root') }}" + key: "{{ nspawn_container_ssh_key }}" + manage_dir: no + + roles: + - role: "nspawn_hosts" + + +- name: Create container(s) + hosts: all_containers + gather_facts: false + become: true + pre_tasks: + - name: Show container facts + debug: + var: hostvars + + roles: + - role: "nspawn_container_create" + + post_tasks: + - name: Rescan quotas + command: "btrfs quota rescan -w /var/lib/machines" + delegate_to: "{{ physical_host }}" diff --git a/elk_metrics_7x/tests/_key-setup.yml b/elk_metrics_7x/tests/_key-setup.yml new file mode 100644 index 00000000..8376ed35 --- /dev/null +++ b/elk_metrics_7x/tests/_key-setup.yml @@ -0,0 +1,41 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +- name: Setup host keys + hosts: physical_hosts + connection: local + become: true + tasks: + - name: Ensure root ssh key + user: + name: "{{ ansible_env.USER | default('root') }}" + generate_ssh_key: "yes" + ssh_key_bits: 2048 + ssh_key_file: ".ssh/id_rsa" + + - name: Get root ssh key + slurp: + src: '~/.ssh/id_rsa.pub' + register: _root_ssh_key + + - name: Prepare container ssh key fact + set_fact: + nspawn_container_ssh_key: "{{ _root_ssh_key['content'] | b64decode }}" + + - name: Ensure public ssh key is in authorized_keys + authorized_key: + user: "{{ ansible_env.USER | default('root') }}" + key: "{{ nspawn_container_ssh_key }}" + manage_dir: no diff --git a/elk_metrics_7x/tests/ansible-role-requirements.yml b/elk_metrics_7x/tests/ansible-role-requirements.yml new file mode 100644 index 00000000..85ba806d --- /dev/null +++ b/elk_metrics_7x/tests/ansible-role-requirements.yml @@ -0,0 +1,33 @@ +--- +- name: apt_package_pinning + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-apt_package_pinning + version: master +- name: config_template + scm: git + src: https://git.openstack.org/openstack/ansible-config_template + version: master +- name: nspawn_container_create + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-nspawn_container_create + version: master +- name: nspawn_hosts + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-nspawn_hosts + version: master +- name: plugins + scm: git + src: https://git.openstack.org/openstack/openstack-ansible-plugins + version: master +- name: systemd_mount + scm: git + src: https://git.openstack.org/openstack/ansible-role-systemd_mount + version: master +- name: systemd_networkd + scm: git + src: https://git.openstack.org/openstack/ansible-role-systemd_networkd + version: master +- name: systemd_service + scm: git + src: 
https://git.openstack.org/openstack/ansible-role-systemd_service + version: master diff --git a/elk_metrics_7x/tests/functional.yml b/elk_metrics_7x/tests/functional.yml new file mode 100644 index 00000000..9858d5fd --- /dev/null +++ b/elk_metrics_7x/tests/functional.yml @@ -0,0 +1,161 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +- import_playbook: run-setup.yml + +- name: Basic setup + hosts: "all" + become: true + + environment: + # ZUUL_PROJECT is used by tests/get-ansible-role-requirements to + # determine when CI provided repos should be used. 
+ ZUUL_PROJECT: "{{ zuul.project.short_name }}" + ANSIBLE_PACKAGE: "{{ ansible_package | default('') }}" + ANSIBLE_HOST_KEY_CHECKING: "False" + ANSIBLE_LOG_PATH: "/tmp/elk-metrics-6x-logs/ansible-elk-test.log" + ANSIBLE_ACTION_PLUGINS: "${HOME}/ansible_venv/repositories/roles/config_template/action" + ANSIBLE_CONNECTION_PLUGINS: "${HOME}/ansible_venv/repositories/roles/plugins/connection" + ANSIBLE_ROLES_PATH: "${HOME}/ansible_venv/repositories/roles" + + vars: + inventory_file: "inventory/test-{{ (container_inventory | bool) | ternary('container', 'metal') }}-inventory.yml" + + pre_tasks: + - name: Create swap file + command: "dd if=/dev/zero of=/swap.img bs=1M count=4096" + args: + creates: /swap.img + register: swap_create + + - name: Format the swap file + command: mkswap /swap.img + when: + - swap_create is changed + tags: + - swap-format + - skip_ansible_lint + + - name: Enable swap file + command: swapon /swap.img + failed_when: false + tags: + - swap-format + - skip_ansible_lint + + - name: Set system swappiness + sysctl: + name: vm.swappiness + value: 10 + state: present + reload: "yes" + sysctl_file: /etc/sysctl.d/99-elasticsearch.conf + + - name: Create tmp elk_metrics_6x dir + file: + path: "/tmp/elk-metrics-6x-logs" + state: directory + + - name: Flush iptables rules + command: "{{ item }}" + args: + creates: "/tmp/elk-metrics-6x-logs/iptables.flushed" + with_items: + - "iptables -F" + - "iptables -X" + - "iptables -t nat -F" + - "iptables -t nat -X" + - "iptables -t mangle -F" + - "iptables -t mangle -X" + - "iptables -P INPUT ACCEPT" + - "iptables -P FORWARD ACCEPT" + - "iptables -P OUTPUT ACCEPT" + - "touch /tmp/elk-metrics-6x-logs/iptables.flushed" + + - name: First ensure apt cache is always refreshed + apt: + update_cache: yes + when: + - ansible_pkg_mgr == 'apt' + + tasks: + - name: Run embedded ansible installation + become: yes + become_user: root + command: "./bootstrap-embedded-ansible.sh" + args: + chdir: "src/{{ current_test_repo 
}}/elk_metrics_7x" + + - name: Run ansible-galaxy (tests) + become: yes + become_user: root + command: "${HOME}/ansible_venv/bin/ansible-galaxy install --force --ignore-errors --roles-path=${HOME}/ansible_venv/repositories/roles -r ansible-role-requirements.yml" + args: + chdir: "src/{{ current_test_repo }}/elk_metrics_7x/tests" + + - name: Run ansible-galaxy (elk_metrics_7x) + become: yes + become_user: root + command: "${HOME}/ansible_venv/bin/ansible-galaxy install --force --ignore-errors --roles-path=${HOME}/ansible_venv/repositories/roles -r ansible-role-requirements.yml" + args: + chdir: "src/{{ current_test_repo }}/elk_metrics_7x" + + - name: Run environment setup + become: yes + become_user: root + command: "${HOME}/ansible_venv/bin/ansible-playbook -i {{ inventory_file }} -e @test-vars.yml _key-setup.yml" + environment: + ANSIBLE_LOG_PATH: "/tmp/elk-metrics-6x-logs/ansible-elk-test-container-setup.log" + args: + chdir: "src/{{ current_test_repo }}/elk_metrics_7x/tests" + when: + - ansible_service_mgr != 'systemd' or + not (container_inventory | bool) + + - name: Run environment setup + become: yes + become_user: root + command: "${HOME}/ansible_venv/bin/ansible-playbook -i {{ inventory_file }} -e @test-vars.yml _container-setup.yml" + environment: + ANSIBLE_LOG_PATH: "/tmp/elk-metrics-6x-logs/ansible-elk-test-container-setup.log" + args: + chdir: "src/{{ current_test_repo }}/elk_metrics_7x/tests" + when: + - ansible_service_mgr == 'systemd' + - container_inventory | bool + + - name: Wait 15 seconds + command: "sleep 15" + changed_when: false + when: + - ansible_service_mgr == 'systemd' + + - name: Run functional test + become: yes + become_user: root + command: "${HOME}/ansible_venv/bin/ansible-playbook -i tests/{{ inventory_file }} -e @tests/test-vars.yml site.yml" + environment: + ANSIBLE_LOG_PATH: "/tmp/elk-metrics-6x-logs/ansible-elk-test-deployment.log" + args: + chdir: "src/{{ current_test_repo }}/elk_metrics_7x" + + - name: Show cluster state + 
become: yes + become_user: root + command: "${HOME}/ansible_venv/bin/ansible-playbook -i tests/{{ inventory_file }} -e @tests/test-vars.yml showElasticCluster.yml" + environment: + ANSIBLE_LOG_PATH: "/tmp/elk-metrics-6x-logs/ansible-elk-test-show-cluster.log" + args: + chdir: "src/{{ current_test_repo }}/elk_metrics_7x" diff --git a/elk_metrics_7x/tests/inventory/test-container-inventory.yml b/elk_metrics_7x/tests/inventory/test-container-inventory.yml new file mode 100644 index 00000000..a46f16f5 --- /dev/null +++ b/elk_metrics_7x/tests/inventory/test-container-inventory.yml @@ -0,0 +1,54 @@ +--- +# The hosts group is used to target physical host machines. Enter all physical +# host machines here. +hosts: + children: + physical_hosts: + hosts: + localhost: + ansible_host: 127.0.0.1 + ansible_user: root + vars: + physical_host: localhost + management_cidr: "172.29.236.0/24" + container_networks: + management_address: + address: "172.29.236.1" + netmask: "255.255.255.0" + bridge: "{{ hostvars[physical_host]['ansible_default_ipv4']['alias'] }}" + + +all_containers: + vars: + physical_host: localhost + container_tech: nspawn + container_networks: + management_address: + address: "{{ ansible_host }}" + netmask: "255.255.255.0" + bridge: "{{ hostvars[physical_host]['ansible_default_ipv4']['alias'] }}" + # CI nodes have limited resources, locking the memory is impossible. 
+ elastic_memory_lock: false + + children: + elastic-logstash: + children: + kibana: + hosts: + elastic0: + ansible_host: 172.29.236.100 + ansible_user: root + + elastic1: + ansible_host: 172.29.236.101 + ansible_user: root + + elastic2: + ansible_host: 172.29.236.102 + ansible_user: root + + apm-server: + hosts: + apm0: + ansible_host: 172.29.236.120 + ansible_user: root diff --git a/elk_metrics_7x/tests/inventory/test-metal-inventory.yml b/elk_metrics_7x/tests/inventory/test-metal-inventory.yml new file mode 100644 index 00000000..97b6f536 --- /dev/null +++ b/elk_metrics_7x/tests/inventory/test-metal-inventory.yml @@ -0,0 +1,24 @@ +--- +# The hosts group is used to target physical host machines. Enter all physical +# host machines here. +hosts: + children: + physical_hosts: + hosts: + localhost: + ansible_host: 127.0.0.1 + ansible_user: root + vars: + physical_host: localhost + + elastic-logstash: + hosts: + localhost: {} + + kibana: + hosts: + localhost: {} + + apm-server: + hosts: + localhost: {} diff --git a/elk_metrics_7x/tests/manual-test.rc b/elk_metrics_7x/tests/manual-test.rc new file mode 100644 index 00000000..2222ac0c --- /dev/null +++ b/elk_metrics_7x/tests/manual-test.rc @@ -0,0 +1,16 @@ +export ANSIBLE_HOST_KEY_CHECKING="False" +export ANSIBLE_ROLES_PATH="${HOME}/ansible_venv/repositories/roles" +export ANSIBLE_ACTION_PLUGINS="${HOME}/ansible_venv/repositories/roles/config_template/action" +export ANSIBLE_CONNECTION_PLUGINS="${HOME}/ansible_venv/repositories/roles/plugins/connection" +export ANSIBLE_LOG_PATH="/tmp/elk-metrics-6x-logs/ansible-elk-test.log" + +if [[ ! 
-d "/tmp/elk-metrics-6x-logs" ]]; then + mkdir -pv "/tmp/elk-metrics-6x-logs" + chmod 0777 "/tmp/elk-metrics-6x-logs" +fi + +echo "To build a test environment run the following:" +echo -e "# ${HOME}/ansible_venv/bin/ansible-playbook -i tests/inventory/test-container-inventory.yml tests/test.yml --limit localhost\n" + +echo "Run manual functional tests by executing the following:" +echo -e "# ${HOME}/ansible_venv/bin/ansible-playbook -i tests/inventory/test-container-inventory.yml site.yml\n" diff --git a/elk_metrics_7x/tests/post-run.yml b/elk_metrics_7x/tests/post-run.yml new file mode 100644 index 00000000..a50c3539 --- /dev/null +++ b/elk_metrics_7x/tests/post-run.yml @@ -0,0 +1,27 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +- import_playbook: run-setup.yml + +- name: Run post tasks + hosts: "all" + tasks: + - name: Copy logs back to the executor + synchronize: + src: "/tmp/elk-metrics-6x-logs" + dest: "{{ zuul.executor.log_root }}/" + mode: pull + rsync_opts: + - "--quiet" diff --git a/elk_metrics_7x/tests/run-cleanup.sh b/elk_metrics_7x/tests/run-cleanup.sh new file mode 100755 index 00000000..478b4abb --- /dev/null +++ b/elk_metrics_7x/tests/run-cleanup.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +export TEST_DIR="$(readlink -f $(dirname ${0})/../../)" + +# Stop beat processes +pushd "${TEST_DIR}/elk_metrics_6x" + for i in $(ls -1 install*beat.yml); do + LOWER_BEAT="$(echo "${i}" | tr '[:upper:]' '[:lower:]')" + BEAT_PARTIAL="$(echo ${LOWER_BEAT} | awk -F'.' '{print $1}')" + BEAT="$(echo ${BEAT_PARTIAL} | awk -F'install' '{print $2}')" + echo "Stopping ${BEAT}" + (systemctl stop "${BEAT}" || true) & + apt remove --purge -y "${BEAT}" || true + if [[ -d "/etc/${BEAT}" ]]; then + rm -rf "/etc/${BEAT}" + fi + if [[ -d "/var/lib/${BEAT}" ]]; then + rm -rf "/var/lib/${BEAT}" + fi + if [[ -d "/etc/systemd/system/${BEAT}.service.d" ]]; then + rm -rf "/etc/systemd/system/${BEAT}.service.d" + fi + done +popd + +for i in $(grep -lri elastic /etc/apt/sources.list.d/); do + rm "${i}" +done + +# Stop and remove containers +for i in {1..3}; do + if machinectl list-images | grep -v ubuntu | awk '/sub/ {print $1}' | xargs -n 1 machinectl kill; then + sleep 1 + fi +done + +for i in {1..3}; do + if machinectl list-images | grep -v ubuntu | awk '/sub/ {print $1}' | xargs -n 1 machinectl remove; then + sleep 1 + fi +done diff --git a/elk_metrics_7x/tests/run-setup.yml b/elk_metrics_7x/tests/run-setup.yml new file mode 100644 index 00000000..bd5d179b --- /dev/null +++ b/elk_metrics_7x/tests/run-setup.yml @@ -0,0 +1,53 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +- name: Zuul facts + hosts: "all" + tasks: + - name: Set zuul fact + set_fact: + zuul: + project: + canonical_name: "openstack-ansible-ops" + short_name: "ops" + executor: + log_root: "{{ ansible_env.HOME }}/elk-test-logs" + when: + - zuul is not defined + + - name: Print zuul fact + debug: var=zuul + + - name: Set current test repo (cross-repo) + set_fact: + current_test_repo: "git.openstack.org/{{ osa_test_repo }}" + when: + - osa_test_repo is defined + + - name: Set current test repo (non-cross-repo) + set_fact: + current_test_repo: "{{ zuul.project.canonical_name }}" + when: + - osa_test_repo is not defined + + - name: Set inventory for test + set_fact: + container_inventory: "{{ test_clustered_elk | default(false) | bool }}" + + post_tasks: + - name: Ensure the log directory exists + file: + path: "/tmp/elk-metrics-6x-logs" + state: directory diff --git a/elk_metrics_7x/tests/run-tests.sh b/elk_metrics_7x/tests/run-tests.sh new file mode 100755 index 00000000..559f3c09 --- /dev/null +++ b/elk_metrics_7x/tests/run-tests.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +set -ve + +export TEST_DIR="$(readlink -f $(dirname ${0})/../../)" + +pushd "${HOME}" + if [[ ! -d "src" ]]; then + mkdir src + fi + pushd src + ln -sf "${TEST_DIR}" + popd +popd + +source "${TEST_DIR}/elk_metrics_7x/tests/manual-test.rc" + +source "${TEST_DIR}/elk_metrics_7x/bootstrap-embedded-ansible.sh" +deactivate + +${HOME}/ansible_venv/bin/ansible-galaxy install --force \ + --roles-path="${HOME}/ansible_venv/repositories/roles" \ + --role-file="${TEST_DIR}/elk_metrics_7x/tests/ansible-role-requirements.yml" + +if [[ ! -e "${TEST_DIR}/elk_metrics_7x/tests/src" ]]; then + ln -s ${TEST_DIR}/../ ${TEST_DIR}/elk_metrics_7x/tests/src +fi + +${HOME}/ansible_venv/bin/ansible-playbook -i 'localhost,' \ + -vv \ + -e ansible_connection=local \ + -e test_clustered_elk=${CLUSTERED:-no} \ + ${TEST_DIR}/elk_metrics_7x/tests/test.yml diff --git a/elk_metrics_7x/tests/test-vars.yml b/elk_metrics_7x/tests/test-vars.yml new file mode 100644 index 00000000..b86cac2b --- /dev/null +++ b/elk_metrics_7x/tests/test-vars.yml @@ -0,0 +1,24 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +physical_host: localhost + +# NOTE(cloudnull): Test configs used to minimize the impact of a +# multi-node install with limited resources. 
+q_storage: 1 +elastic_memory_lower_limit: 512 +elastic_memory_upper_limit: 512 + +osa_test_repo: "openstack/openstack-ansible-ops" diff --git a/elk_metrics_7x/tests/test.yml b/elk_metrics_7x/tests/test.yml new file mode 100644 index 00000000..fba19e51 --- /dev/null +++ b/elk_metrics_7x/tests/test.yml @@ -0,0 +1,18 @@ +--- +# Copyright 2018, Rackspace US, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +- import_playbook: functional.yml +- import_playbook: testLayout.yml +- import_playbook: testAPI.yml diff --git a/elk_metrics_7x/tests/testAPI.yml b/elk_metrics_7x/tests/testAPI.yml new file mode 100644 index 00000000..383db8bb --- /dev/null +++ b/elk_metrics_7x/tests/testAPI.yml @@ -0,0 +1,123 @@ +--- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Test apm api + hosts: apm-server + gather_facts: false + become: true + + environment: + ANSIBLE_LOG_PATH: "/tmp/elk-metrics-7x-logs/ansible-elk-test.log" + + tasks: + - name: Check for open TCP + wait_for: + port: "{{ item.port }}" + host: "{{ item.host }}" + timeout: 120 + with_items: + - port: 8200 + host: 127.0.0.1 + + +- name: Test elasticsearch api + hosts: elastic-logstash + gather_facts: false + become: true + + environment: + ANSIBLE_LOG_PATH: "/tmp/elk-metrics-7x-logs/ansible-elk-test.log" + + tasks: + - name: Check http + uri: + url: http://127.0.0.1:{{ item.port }}{{ item.path }} + method: "{{ item.method }}" + status_code: 200 + register: elk_test + until: elk_test is success + retries: 3 + delay: 10 + with_items: + - port: 9200 + path: "/_nodes/stats" + method: "GET" + + - name: Check for open TCP + wait_for: + port: "{{ item.port }}" + host: "{{ item.host }}" + timeout: 120 + with_items: + - port: 9300 + host: 127.0.0.1 + + +- name: Test kibana api + hosts: elastic-logstash + gather_facts: false + become: true + + environment: + ANSIBLE_LOG_PATH: "/tmp/elk-metrics-7x-logs/ansible-elk-test.log" + + tasks: + - name: Check http + uri: + url: http://127.0.0.1:{{ item.port }}{{ item.path }} + method: "{{ item.method }}" + status_code: 200 + register: elk_test + until: elk_test is success + retries: 3 + delay: 10 + with_items: + - port: 5601 + path: "/status" + method: "HEAD" + - port: 81 + path: "/status" + method: "HEAD" + + +- name: Test logstash api + hosts: elastic-logstash + gather_facts: false + become: true + + environment: + ANSIBLE_LOG_PATH: "/tmp/elk-metrics-7x-logs/ansible-elk-test.log" + + tasks: + - name: Check http + uri: + url: http://127.0.0.1:{{ item.port }}{{ item.path }} + method: "{{ item.method }}" + status_code: 200 + register: elk_test + until: elk_test is success + retries: 3 + delay: 10 + with_items: + - port: 9600 + path: "/_node" + method: "HEAD" + + - name: Check for open TCP + wait_for: + port: "{{ item.port }}" + 
host: "{{ item.host }}" + timeout: 120 + with_items: + - port: 5044 + host: 127.0.0.1 diff --git a/elk_metrics_7x/tests/testLayout.yml b/elk_metrics_7x/tests/testLayout.yml new file mode 100644 index 00000000..418a3457 --- /dev/null +++ b/elk_metrics_7x/tests/testLayout.yml @@ -0,0 +1,62 @@ +--- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +- import_playbook: run-setup.yml + +- name: Test host layout + hosts: localhost + become: true + + environment: + ANSIBLE_LOG_PATH: "/tmp/elk-metrics-7x-logs/ansible-elk-test.log" + + vars: + storage_node_count: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] + expected_masters: [1, 1, 3, 3, 3, 3, 3, 5, 5, 5, 5, 7, 7, 7, 7, 9, 9, 9, 9, 11] + + tasks: + + # the elasticsearch cluster elects one master from all those which are marked as master-eligible + # 1 node cluster can only have one master + # 2 node clusters have 1 master-eligible nodes to avoid split-brain + # 3 node clusters have 3 master-eligible nodes + # >3 node clusters have (nodes // 2) eligible masters rounded up to the next odd number + - name: Master node count fact + set_fact: + master_node_count: |- + {% set masters = 0 %} + {% if (item | int) < 3 %} + {% set masters = 1 %} + {% elif (item | int) == 3 %} + {% set masters = 3 %} + {% else %} + {% set masters = (item | int ) // 2 %} + {% if ((masters | int) % 2 == 0) %} + {% set masters = (masters | int) + 1 %} + {% endif %} + {% endif %} + {{ masters }} + with_items: "{{ storage_node_count }}" 
+ register: computed_masters + + - name: Gather results + set_fact: + masters: "{{ masters|default([]) + [ { 'nodes': item.item, 'masters': (item.ansible_facts.master_node_count|int) } ] }}" + with_items: "{{ computed_masters.results }}" + + - name: Check results + assert: + that: item.0 == item.1.masters + with_together: + - "{{ expected_masters }}" + - "{{ masters }}" diff --git a/elk_metrics_7x/vars/variables.yml b/elk_metrics_7x/vars/variables.yml new file mode 100644 index 00000000..f751f2d7 --- /dev/null +++ b/elk_metrics_7x/vars/variables.yml @@ -0,0 +1,378 @@ +--- +elastic_memory_lower_limit: "{{ (((ansible_memtotal_mb | int) * 0.15) // 1) | int }}" +elastic_memory_upper_limit: "{{ (((ansible_memtotal_mb | int) * 0.35) // 1) | int }}" + +# Option to set persistent queue storage in gigabytes +q_storage: "{{ (ansible_processor_count | int) * (ansible_processor_threads_per_core | int) * 2 }}" + +apm_port: 8200 +elastic_port: 9200 +elastic_hap_port: 9201 +elastic_create_rollup: false +logstash_beat_input_port: 5044 +logstash_syslog_input_port: 5140 +logstash_syslog_input_mode: udp +logstash_collectd_input_enabled: false +logstash_collectd_port: 25826 +logstash_collectd_buffer_size: 1452 +# Security level can be ["Sign", "Encrypt"]. +logstash_collectd_security_level: Sign +# To enable security the auth file is required. +#logstash_collectd_authfile: /etc/collectd/passwd +kibana_port: 5601 +kibana_nginx_port: 81 + +# This is the URL external services can use to communicate with the +# elasticsearch cluster. +elastic_vip_url: "http://127.0.0.1:19200" + +# Elasticsearch can query the itself and loadbalance requests across the cluster. +# This function is automatically enabled on non-data nodes however this setting +# can be used to override the default behaviour. 
+#elastic_sniffing_enabled: true + +# Beat options +heartbeat_services: + - group: "{{ groups['galera_all'] | default([]) }}" + name: galera + ports: + - 9200 + type: http + method: HEAD + path: "/" + - group: "{{ groups['galera_all'] | default([]) }}" + name: galera + ports: + - 3306 + type: tcp + - group: "{{ groups['repo_all'] | default([]) }}" + name: repo-git + ports: + - 9418 + type: tcp + - group: "{{ groups['repo_all'] | default([]) }}" + name: repo-server + ports: + - 8181 + type: http + method: HEAD + path: "/" + - group: "{{ groups['repo_all'] | default([]) }}" + name: repo-acng + ports: + - 3142 + type: http + method: HEAD + path: "/acng-report.html" + - group: "{{ groups['glance_api'] | default([]) }}" + name: glance-api + ports: + - 9292 + type: http + method: HEAD + path: "/healthcheck" + - group: "{{ groups['glance_api'] | default([]) }}" + name: glance-registry + ports: + - 9191 + type: http + method: HEAD + path: "/healthcheck" + - group: "{{ groups['gnocchi_all'] | default([]) }}" + name: gnocchi-api + ports: + - 8041 + type: http + method: HEAD + path: "/healthcheck" + - group: "{{ groups['heat_api_cfn'] | default([]) }}" + name: heat-cfn-api + ports: + - 8000 + type: http + method: HEAD + path: "/" + check_response: + status: 300 + - group: "{{ groups['heat_api'] | default([]) }}" + name: heat-api + ports: + - 8004 + type: http + method: HEAD + path: "/" + check_response: + status: 300 + - group: "{{ groups['keystone_all'] | default([]) }}" + name: keystone-api + ports: + - 5000 + type: http + method: HEAD + path: "/" + check_response: + status: 300 + - group: "{{ groups['neutron_server'] | default([]) }}" + name: neutron-server + ports: + - 9696 + type: http + method: GET + path: "/" + - group: "{{ groups['nova_api_metadata'] | default([]) }}" + name: nova-api-metadata + ports: + - 8775 + type: http + method: HEAD + path: "/" + - group: "{{ groups['nova_api_os_compute'] | default([]) }}" + name: nova-api-compute + ports: + - 8774 + type: http 
+ method: HEAD + path: "/" + - group: "{{ groups['nova_api_placement'] | default([]) }}" + name: nova-api-placement + ports: + - 8780 + type: http + method: GET + path: "/" + - group: "{{ groups['nova_console'] | default([]) }}" + name: nova-console + ports: + - 6080 + - 6082 + - 6083 + type: tcp + - group: "{{ groups['cinder_api'] | default([]) }}" + name: cinder-api + ports: + - 8776 + type: http + method: HEAD + path: "/" + check_response: + status: 300 + - group: "{{ groups['horizon_all'] | default([]) }}" + name: horizon + ports: + - 80 + - 443 + type: http + method: HEAD + path: "/" + - group: "{{ groups['sahara_api'] | default([]) }}" + name: sahara-api + ports: + - 8386 + type: http + method: HEAD + path: "/healthcheck" + - group: "{{ groups['swift_proxy'] | default([]) }}" + name: swift-proxy + ports: + - 8080 + type: http + method: HEAD + path: "/healthcheck" + - group: "{{ groups['aodh_api'] | default([]) }}" + name: aodh-api + ports: + - 8042 + type: http + method: HEAD + path: "/" + - group: "{{ groups['ironic_api'] | default([]) }}" + name: ironic-api + ports: + - 6385 + type: http + method: HEAD + path: "/" + - group: "{{ groups['rabbitmq_all'] | default([]) }}" + name: rabbitmq-management + ports: + - 15672 + type: http + method: HEAD + path: "/" + - group: "{{ groups['rabbitmq_all'] | default([]) }}" + name: rabbitmq-access + ports: + - 5672 + - 5671 + type: tcp + - group: "{{ groups['magnum_all'] | default([]) }}" + name: magnum-api + ports: + - 9511 + type: http + method: HEAD + path: "/" + - group: "{{ groups['trove_api'] | default([]) }}" + name: trove-api + ports: + - 8779 + type: http + method: HEAD + path: "/" + - group: "{{ groups['barbican_api'] | default([]) }}" + name: barbican-api + ports: + - 9311 + type: http + method: HEAD + path: "/" + - group: "{{ groups['designate_api'] | default([]) }}" + name: designate-api + ports: + - 9001 + type: http + method: HEAD + path: "/" + - group: "{{ groups['octavia_all'] | default([]) }}" + name: 
octavia-api + ports: + - 9876 + type: http + method: HEAD + path: "/" + - group: "{{ groups['tacker_all'] | default([]) }}" + name: tacker-api + ports: + - 9890 + type: http + method: HEAD + path: "/" + - group: "{{ groups['neutron_server'] | default([]) }}" + name: opendaylight + ports: + - 8180 + - 8185 + type: tcp + - group: "{{ groups['neutron_server'] | default([]) }}" + name: ceph-rgw + ports: + - 7980 + type: http + method: HEAD + path: "/" + +# Grafana +grafana_dashboards: + - dashboard_id: 5566 + revision_id: 5 + datasource: "metricbeat-Elasticsearch" + - dashboard_id: 5569 + revision_id: 3 + datasource: "filebeat-Elasticsearch" + +grafana_datasources: + - name: "all-Elasticsearch" + type: "elasticsearch" + access: "proxy" + url: "{{ elastic_vip_url }}" + basicAuth: false + basicAuthUser: "" + basicAuthPassword: "" + isDefault: true + database: "*" + jsonData: + esVersion: 56 + keepCookies: [] + maxConcurrentShardRequests: 256 + timeField: "@timestamp" + timeInterval: ">10s" + - name: "auditbeat-Elasticsearch" + type: "elasticsearch" + access: "proxy" + url: "{{ elastic_vip_url }}" + basicAuth: false + basicAuthUser: "" + basicAuthPassword: "" + isDefault: false + database: "auditbeat-*" + jsonData: + esVersion: 56 + keepCookies: [] + maxConcurrentShardRequests: 256 + timeField: "@timestamp" + timeInterval: ">10s" + - name: "filebeat-Elasticsearch" + type: "elasticsearch" + access: "proxy" + url: "{{ elastic_vip_url }}" + basicAuth: false + basicAuthUser: "" + basicAuthPassword: "" + isDefault: false + database: "filebeat-*" + jsonData: + esVersion: 56 + keepCookies: [] + maxConcurrentShardRequests: 256 + timeField: "@timestamp" + timeInterval: ">10s" + - name: "heartbeat-Elasticsearch" + type: "elasticsearch" + access: "proxy" + url: "{{ elastic_vip_url }}" + basicAuth: false + basicAuthUser: "" + basicAuthPassword: "" + isDefault: false + database: "heartbeat-*" + jsonData: + esVersion: 56 + keepCookies: [] + maxConcurrentShardRequests: 256 + timeField: 
"@timestamp" + timeInterval: ">10s" + - name: "metricbeat-Elasticsearch" + type: "elasticsearch" + access: "proxy" + url: "{{ elastic_vip_url }}" + basicAuth: false + basicAuthUser: "" + basicAuthPassword: "" + isDefault: false + database: "metricbeat-*" + jsonData: + esVersion: 56 + keepCookies: [] + maxConcurrentShardRequests: 256 + timeField: "@timestamp" + timeInterval: ">10s" + - name: "packetbeat-Elasticsearch" + type: "elasticsearch" + access: "proxy" + url: "{{ elastic_vip_url }}" + basicAuth: false + basicAuthUser: "" + basicAuthPassword: "" + isDefault: false + database: "packetbeat-*" + jsonData: + esVersion: 56 + keepCookies: [] + maxConcurrentShardRequests: 256 + timeField: "@timestamp" + timeInterval: ">10s" + - name: "monitorstack-Elasticsearch" + type: "elasticsearch" + access: "proxy" + url: "{{ elastic_vip_url }}" + basicAuth: false + basicAuthUser: "" + basicAuthPassword: "" + isDefault: false + database: "monitorstack-*" + jsonData: + esVersion: 56 + keepCookies: [] + maxConcurrentShardRequests: 256 + timeField: "@timestamp" + timeInterval: ">60s"