Add APM Server

The Elastic Stack can collect application performance metrics using
its APM Server. This change deploys the APM Server into an existing
ELK environment.
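Once deployed, the server can be smoke-checked over its intake API. A minimal
sketch of such a check (not part of this change; it assumes the default listen
address of localhost:8200 and the 6.x /healthcheck endpoint):

    # Hedged example task: poll the intake API's health endpoint.
    # /healthcheck and port 8200 are the apm-server 6.x defaults.
    - name: Smoke-check apm-server
      uri:
        url: "http://localhost:8200/healthcheck"
        status_code: 200
      register: apm_health
      until: apm_health.status == 200
      retries: 5
      delay: 3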

Change-Id: Ie6f533b81cfdb0c6a4ba2f33fd3b9f0a3e49a1fc
Signed-off-by: Kevin Carter <kevin.carter@rackspace.com>
Author: Kevin Carter
Date:   2018-04-13 09:43:01 -05:00
parent 390314e18b
commit 903b995d32
6 changed files with 482 additions and 2 deletions

View File

@@ -9,3 +9,11 @@ elastic-logstash_hosts:
kibana_hosts:
  logging01:
    ip: 172.22.8.27

apm-server_hosts:
  logging01:
    ip: 172.22.8.27
  logging02:
    ip: 172.22.8.28
  logging03:
    ip: 172.22.8.29

View File

@@ -1,5 +1,8 @@
---
component_skel:
  apm-server:
    belongs_to:
      - elk_all
  elastic-logstash:
    belongs_to:
      - elk_all
@@ -8,6 +11,11 @@ component_skel:
      - elk_all

container_skel:
  apm-server_container:
    belongs_to:
      - apm-server_containers
    contains:
      - apm-server
  elastic-logstash_container:
    belongs_to:
      - elastic-logstash_containers
@@ -20,6 +28,12 @@ container_skel:
      - kibana

physical_skel:
  apm-server_containers:
    belongs_to:
      - all_containers
  apm-server_hosts:
    belongs_to:
      - hosts
  elastic-logstash_containers:
    belongs_to:
      - all_containers
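Read together, the three skel sections wire the new service into the dynamic
inventory. A rough sketch of the resulting group nesting (illustrative only,
not literal inventory output; the container-to-host placement comes from the
apm-server_hosts entries in the conf.d file above):

    # component_skel: apm-server            -> grouped under elk_all
    # container_skel: apm-server_container  -> holds the apm-server component,
    #                                          grouped under apm-server_containers
    # physical_skel:  apm-server_containers -> grouped under all_containers
    #                 apm-server_hosts      -> grouped under hosts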

View File

@@ -0,0 +1,56 @@
---
- name: Install apm-server
  hosts: apm-server
  become: true
  vars:
    haproxy_ssl: false
  vars_files:
    - vars/variables.yml
  pre_tasks:
    - include_tasks: common_task_install_elk_repo.yml
    - name: Ensure apm-server is installed
      apt:
        name: "{{ item }}"
        state: present
        update_cache: true
      with_items:
        - apm-server
  post_tasks:
    - name: Drop apm-server conf file
      template:
        src: templates/apm-server.yml.j2
        dest: /etc/apm-server/apm-server.yml
    - name: Enable and restart APM Server
      systemd:
        name: "apm-server"
        enabled: true
        state: restarted

- name: Load apm-server Dashboards
  hosts: apm-server[0]
  become: true
  vars_files:
    - vars/variables.yml
  tasks:
    - name: Load templates
      shell: >-
        {% set IP_ARR=[] %}
        {% for host in groups['elastic-logstash'] %}
        {% if IP_ARR.insert(loop.index, hostvars[host]['ansible_host']) %}
        {% endif %}
        {% endfor %}
        {% set elasticsearch_hosts = [IP_ARR | map('regex_replace', '$', ':' ~ elastic_port | string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',')] %}
        apm-server setup
        {{ item }}
        -E 'apm-server.host=localhost:8200'
        -E 'output.elasticsearch.hosts={{ elasticsearch_hosts }}'
        -e -v
      with_items:
        - "--template"
        - "--dashboards"

View File

@@ -0,0 +1,395 @@
######################## APM Server Configuration #############################
############################# APM Server ######################################
apm-server:
  # Defines the host and port the server is listening on.
  #host: "localhost:8200"
  # Maximum permitted size in bytes of an unzipped request accepted by the server to be processed.
  #max_unzipped_size: 52428800
  # Maximum permitted size in bytes of a request's header accepted by the server to be processed.
  #max_header_size: 1048576
  # Maximum permitted duration in seconds for reading an entire request.
  #read_timeout: 2s
  # Maximum permitted duration in seconds for writing a response.
  #write_timeout: 2s
  # Maximum duration in seconds before releasing resources when shutting down the server.
  #shutdown_timeout: 5s
  # Maximum number of requests permitted to be sent to the server concurrently.
  #concurrent_requests: 40
  # Authorization token to be checked. If a token is set here, the agents must
  # send their token in the following format: Authorization: Bearer <secret-token>.
  # It is recommended to use an authorization token in combination with SSL enabled.
  secret_token: {{ apm_token }}
  #ssl.enabled: false
  #ssl.certificate: "path/to/cert"
  #ssl.key: "path/to/private_key"
  # Please be aware that frontend support is an experimental feature at the moment!
  frontend:
    # To enable experimental frontend support, set this to true.
    enabled: true
    # Rate limit per second and IP address for requests sent to the frontend endpoint.
    #rate_limit: 10
    # Comma-separated list of permitted origins for the frontend. User agents send
    # an Origin header that is validated against this list.
    # An origin is made of a protocol scheme, host and port, without the URL path.
    # Allowed origins in this setting can contain * to match anything (e.g. http://*.example.com).
    # If an item in the list is a single '*', everything is allowed.
    #allow_origins: ['*']
    # Regexp to be matched against a stacktrace frame's `file_name` and `abs_path` attributes.
    # If the regexp matches, the stacktrace frame is considered to be a library frame.
    #library_pattern: "node_modules|bower_components|~"
    # Regexp to be matched against a stacktrace frame's `file_name`.
    # If the regexp matches, the stacktrace frame is not used for calculating error groups.
    # The default pattern excludes stacktrace frames that have
    # - a filename starting with '/webpack'
    #exclude_from_grouping: "^/webpack"
    # If a source map has previously been uploaded, source mapping is automatically applied
    # to all error and transaction documents sent to the frontend endpoint.
    #source_mapping:
      # Source maps are fetched from Elasticsearch and then kept in an in-memory cache for a certain time.
      # `cache.expiration` determines how long a source map is cached before it is fetched again from Elasticsearch.
      # Note that values configured without a time unit are interpreted as seconds.
      #cache:
        #expiration: 5m
      # Source maps are stored in the same index as transaction and error documents.
      # If the default index pattern at 'outputs.elasticsearch.index' is changed,
      # a matching index pattern needs to be specified here.
      #index_pattern: "apm-*"
#================================ General ======================================
# Internal queue configuration for buffering events to be published.
#queue:
  # Queue type by name (default 'mem').
  # The memory queue will present all available events (up to the output's
  # bulk_max_size) to the output the moment the output is ready to serve
  # another batch of events.
  #mem:
    # Max number of events the queue can buffer.
    #events: 4096
    # Hints the minimum number of events stored in the queue,
    # before providing a batch of events to the outputs.
    # A value of 0 (the default) ensures events are immediately available
    # to be sent to the outputs.
    #flush.min_events: 2048
    # Maximum duration after which events are available to the outputs,
    # if the number of events stored in the queue is < min_flush_events.
    #flush.timeout: 1s
# Sets the maximum number of CPUs that can be executing simultaneously. The
# default is the number of logical CPUs available in the system.
#max_procs:
#================================ Outputs ======================================
# Configure what output to use when sending the data collected by the beat.
#-------------------------- Elasticsearch output -------------------------------
output.elasticsearch:
  # Boolean flag to enable or disable the output module.
  #enabled: true
  # Array of hosts to connect to.
  # Scheme and port can be left out and will be set to the default (http and 9200).
  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
{% if internal_lb_vip_address is defined %}
  hosts: "http://{{ internal_lb_vip_address }}:{{ elastic_hap_port }}"
{% else %}
  hosts: {% set IP_ARR=[] %}{% for host in groups['elastic-logstash'] %}{% if IP_ARR.insert(loop.index, hostvars[host]['ansible_host']) %}{% endif %}{% endfor %}[{{ IP_ARR | map('regex_replace', '$', ':' ~ elastic_port | string()) | map('regex_replace', '$', '"') | map('regex_replace', '^', '"') | list | join(',') }}]
{% endif %}
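  # Illustration (assumes the three elastic-logstash members from the example
  # conf.d file, and elastic_port left at Elasticsearch's default of 9200):
  # the else branch above renders to
  #   hosts: ["172.22.8.27:9200","172.22.8.28:9200","172.22.8.29:9200"]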
  # Set gzip compression level.
  #compression_level: 0
  # Optional protocol and basic auth credentials.
  #protocol: "https"
  #username: "elastic"
  #password: "changeme"
  # Dictionary of HTTP parameters to pass within the url with index operations.
  #parameters:
    #param1: value1
    #param2: value2
  # Number of workers per Elasticsearch host.
  #worker: 1
  # Optional index name. The default is "apm" plus date
  # and generates [apm-]YYYY.MM.DD keys.
  # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
  #index: "apm-%{[beat.version]}-%{+yyyy.MM.dd}"
  # Optional ingest node pipeline. By default no pipeline will be used.
  #pipeline: ""
  # Optional HTTP path.
  #path: "/elasticsearch"
  # Custom HTTP headers to add to each request.
  #headers:
    #X-My-Header: Contents of the header
  # Proxy server URL.
  #proxy_url: http://proxy:3128
  # The number of times a particular Elasticsearch index operation is attempted. If
  # the indexing operation doesn't succeed after this many retries, the events are
  # dropped. The default is 3.
  #max_retries: 3
  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
  # The default is 50.
  #bulk_max_size: 50
  # Configure the HTTP request timeout before failing a request to Elasticsearch.
  #timeout: 90
  # Use SSL settings for HTTPS. Default is true.
  #ssl.enabled: true
  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL-based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  #ssl.verification_mode: full
  # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
  # SSL configuration. It is off by default.
  # List of root certificates for HTTPS server verification.
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
  # Certificate for SSL client authentication.
  #ssl.certificate: "/etc/pki/client/cert.pem"
  # Client certificate key.
  #ssl.key: "/etc/pki/client/cert.key"
  # Optional passphrase for decrypting the certificate key.
  #ssl.key_passphrase: ''
  # Configure cipher suites to be used for SSL connections.
  #ssl.cipher_suites: []
  # Configure curve types for ECDHE-based cipher suites.
  #ssl.curve_types: []
  # Configure what types of renegotiation are supported. Valid options are
  # never, once, and freely. Default is never.
  #ssl.renegotiation: never
#================================= Paths ======================================
# The home path for the apm-server installation. This is the default base path
# for all other path settings and for miscellaneous files that come with the
# distribution (for example, the sample dashboards).
# If not set by a CLI flag or in the configuration file, the default for the
# home path is the location of the binary.
#path.home:
# The configuration path for the apm-server installation. This is the default
# base path for configuration files, including the main YAML configuration file
# and the Elasticsearch template file. If not set by a CLI flag or in the
# configuration file, the default for the configuration path is the home path.
#path.config: ${path.home}
# The data path for the apm-server installation. This is the default base path
# for all the files in which apm-server needs to store its data. If not set by a
# CLI flag or in the configuration file, the default for the data path is a data
# subdirectory inside the home path.
#path.data: ${path.home}/data
# The logs path for an apm-server installation. This is the default location for
# the Beat's log files. If not set by a CLI flag or in the configuration file,
# the default for the logs path is a logs subdirectory inside the home path.
#path.logs: ${path.home}/logs
#============================== Dashboards =====================================
# These settings control loading the sample dashboards into the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here, or by using the `-setup` CLI flag or the `setup` command.
setup.dashboards.enabled: true
# The directory from where to read the dashboards. The default is the `kibana`
# folder in the home path.
#setup.dashboards.directory: ${path.home}/kibana
# The URL from where to download the dashboards archive. It is used instead of
# the directory if it has a value.
#setup.dashboards.url:
# The file archive (zip file) from where to read the dashboards. It is used instead
# of the directory when it has a value.
#setup.dashboards.file:
# The name of the Kibana index to use for setting the configuration. Default is ".kibana"
#setup.dashboards.kibana_index: .kibana
# The Elasticsearch index name. This overwrites the index name defined in the
# dashboards and index pattern. Example: testbeat-*
#setup.dashboards.index:
# Always use the Kibana API for loading the dashboards instead of autodetecting
# how to install the dashboards by first querying Elasticsearch.
#setup.dashboards.always_kibana: false
#============================== Template =====================================
# A template is used to set the mapping in Elasticsearch
# By default template loading is enabled and the template is loaded.
# These settings can be adjusted to load your own template or overwrite existing ones.
# Set to false to disable template loading.
setup.template.enabled: true
# Template name. By default the template name is "apm-%{[beat.version]}".
# The template name and pattern have to be set if the Elasticsearch index pattern is modified.
setup.template.name: "apm-%{[beat.version]}"
# Template pattern. By default the template pattern is "apm-%{[beat.version]}-*", which applies to the default index settings.
# The first part is the version of the beat, and "-*" matches all daily indices.
# The template name and pattern have to be set if the Elasticsearch index pattern is modified.
setup.template.pattern: "apm-%{[beat.version]}-*"
# Path to fields.yml file to generate the template
setup.template.fields: "${path.config}/fields.yml"
# Overwrite existing template
setup.template.overwrite: true
# Elasticsearch template settings
setup.template.settings:
  # A dictionary of settings to place into the settings.index dictionary
  # of the Elasticsearch template. For more details, please check
  # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html
  index:
    number_of_shards: 3
    codec: best_compression
    #number_of_routing_shards: 30
  # A dictionary of settings for the _source field. For more details, please check
  # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html
  #_source:
    #enabled: false
#============================== Kibana =====================================
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:
  # Kibana host.
  # Scheme and port can be left out and will be set to the default (http and 5601).
  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
  host: "{{ hostvars[groups['kibana'][0]]['ansible_host'] }}:{{ kibana_port }}"
  # Optional protocol and basic auth credentials.
  #protocol: "https"
  #username: "elastic"
  #password: "changeme"
  # Optional HTTP path.
  #path: ""
  # Use SSL settings for HTTPS. Default is true.
  #ssl.enabled: true
  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL-based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  #ssl.verification_mode: full
  # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
  # SSL configuration. It is off by default.
  # List of root certificates for HTTPS server verification.
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
  # Certificate for SSL client authentication.
  #ssl.certificate: "/etc/pki/client/cert.pem"
  # Client certificate key.
  #ssl.key: "/etc/pki/client/cert.key"
  # Optional passphrase for decrypting the certificate key.
  #ssl.key_passphrase: ''
  # Configure cipher suites to be used for SSL connections.
  #ssl.cipher_suites: []
  # Configure curve types for ECDHE-based cipher suites.
  #ssl.curve_types: []
#================================ Logging ======================================
# There are three options for the log output: syslog, file, stderr.
# On Windows systems, logs are sent to the file output by default;
# on all other systems, to syslog by default.
# Sets log level. The default log level is info.
# Available log levels are: critical, error, warning, info, debug
#logging.level: info
# Enable debug output for selected components. To enable all selectors use ["*"]
# Other available selectors are "beat", "publish", "service"
# Multiple selectors can be chained.
#logging.selectors: [ ]
# Send all logging output to syslog. The default is false.
#logging.to_syslog: true
# If enabled, apm-server periodically logs its internal metrics that have changed
# in the last period. For each metric that changed, the delta from the value at
# the beginning of the period is logged. Also, the total values for
# all non-zero internal metrics are logged on shutdown. The default is true.
#logging.metrics.enabled: true
# The period after which to log the internal metrics. The default is 30s.
#logging.metrics.period: 30s
# Logging to rotating files. Set logging.to_files to false to disable logging to
# files.
logging.to_files: true
logging.files:
  # Configure the path where the logs are written. The default is the logs directory
  # under the home path (the binary location).
  path: /var/log/apm-server
  # The name of the files where the logs are written to.
  name: apm-server
  # Configure the log file size limit. If the limit is reached, the log file is
  # automatically rotated.
  #rotateeverybytes: 10485760 # = 10MB
  # Number of rotated log files to keep. The oldest files are deleted first.
  keepfiles: 2
  # The permissions mask to apply when rotating log files. The default value is 0600.
  # Must be a valid Unix-style file permissions mask expressed in octal notation.
  #permissions: 0600
# Set to true to log messages in JSON format.
#logging.json: false
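Since setup.template.* above controls the index template that the playbook's
`apm-server setup --template` step loads, one way to confirm it landed is to
ask Elasticsearch for templates matching the configured pattern. A minimal
sketch (assumes the first elastic-logstash node is reachable on elastic_port
and that no authentication is configured):

    - name: Verify the apm index template was loaded (sketch, not part of this change)
      hosts: apm-server[0]
      vars_files:
        - vars/variables.yml
      tasks:
        - name: Fetch templates matching apm-*
          uri:
            url: "http://{{ hostvars[groups['elastic-logstash'][0]]['ansible_host'] }}:{{ elastic_port }}/_template/apm-*"
            return_content: true
          register: apm_template
        - name: Show the returned template definitions
          debug:
            var: apm_template.json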

View File

@@ -12,8 +12,11 @@
# server.maxPayloadBytes: 1048576
# The URL of the Elasticsearch instance to use for all your queries.
elasticsearch.url: "http://{{ internal_lb_vip_address | default(hostvars[groups['elastic-logstash'][0]]['ansible_host']) }}:{{ elastic_hap_port }}"
{% if internal_lb_vip_address is defined %}
elasticsearch.url: "http://{{ internal_lb_vip_address }}:{{ elastic_hap_port }}"
{% else %}
elasticsearch.url: "http://{{ hostvars[groups['elastic-logstash'][0]]['ansible_host'] }}:{{ elastic_port }}"
{% endif %}
# When this setting's value is true, Kibana uses the hostname specified in the server.host
# setting. When the value of this setting is false, Kibana uses the hostname of the host
# that connects to this Kibana instance.

View File

@@ -18,6 +18,10 @@ kibana_server_name: "{{ ansible_hostname }}"
logstash_beat_input_port: 5044
# apm
apm_token: SuperSecrete
# Grafana
grafana_dashboards:
  - dashboard_id: 5566
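apm_token ships with a sample value; in a real deployment it should be
overridden per environment (and, per the comments in apm-server.yml.j2,
combined with SSL). A hedged sketch of an override, assuming an
openstack-ansible style user-variables file and an Ansible Vault variable
(both hypothetical here):

    # /etc/openstack_deploy/user_variables.yml (hypothetical path)
    # vault_apm_token lives in an encrypted vars file (assumption)
    apm_token: "{{ vault_apm_token }}"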