
The old install-docker upstream.yaml tasks installed apparmor for docker (it was originally a dependency, but then docker removed it as an explicit dependency while still implicitly depending on it, so we installed it manually). When we started deploying Noble nodes with podman via the install-docker role we didn't get apparmor, because podman doesn't appear to depend on it. However, when we got to production we found that the production images already come with apparmor, including profiles for things like podman and rsyslog, which have caused problems for us when deploying services with podman.

Attempt to catch these issues in CI by explicitly installing apparmor. This should be a noop for production because apparmor is already installed, and it should help us catch problems with podman in CI before we ever get to production. To ensure that apparmor is working properly, we capture apparmor_status output as part of our system-config-run job log collection.

Note that we remove the zuul lb test for haproxy.log being present, as current apparmor problems with the rsyslogd profile prevent that log from appearing on Noble. The next change will correct that issue and reinstate the test case.

Change-Id: Iea5966dbb2dcfbe1e51d9c00bad67a9d37e1b7e1
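The install task itself is not part of the log-collection playbook shown below. A minimal sketch of the explicit install, assuming the standard Ansible package module (the task name and its placement in the install-docker role are illustrative assumptions, not the actual change):

- name: Install apparmor  # sketch only; exact task name and placement are assumptions
  package:
    name: apparmor
    state: present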
88 lines, 2.4 KiB, YAML
- hosts: localhost
  tasks:
    - name: Make log directories for testing hosts
      file:
        path: "{{ zuul.executor.log_root }}/{{ item }}/logs"
        state: directory
        recurse: true
      loop: "{{ query('inventory_hostnames', 'all') }}"

- hosts: all
  tasks:
    - include_role:
        name: collect-container-logs
        # The zuul user isn't part of the docker group on our fake
        # production systems. Work around this by operating as root
        # when collecting logs. This collects podman containers
        # running as root; we may need to think about some flags for
        # this role for collecting logs from containers under other
        # users.
        apply:
          become: yes
      vars:
        container_command: "{{ item }}"
      loop:
        - docker
        - podman

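    # Record the AppArmor state (loaded profiles, enforce/complain
    # mode) in the job logs so we can see whether apparmor is working
    # on each host. failed_when is disabled below because
    # apparmor_status exits non-zero on hosts where AppArmor is not
    # enabled.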
    - name: Get AppArmor Status
      shell: 'apparmor_status | tee /var/log/apparmor_status'
      become: yes
      failed_when: false

    - include_role:
        name: stage-output

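# The remaining collection happens on the bastion, which is where the
# nested production Ansible runs and where ARA records its results.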
- hosts: prod_bastion[0]
  tasks:
    - name: Set log directory
      set_fact:
        log_dir: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}"

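    # The testinfra tox environment holds the assertions run against
    # the deployed test hosts; fetch its output so failures can be
    # inspected from the job logs.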
    - name: Collect tox output
      include_role:
        name: fetch-tox-output
      vars:
        tox_envlist: testinfra
        zuul_work_dir: src/opendev.org/opendev/system-config

    # Because during the test we run ansible as root, we need
    # to allow ourselves permissions to copy the results
    - name: Open ARA results permissions
      file:
        dest: /var/cache/ansible
        mode: u=rwX,g=rX,o=rX
        recurse: yes
      become: yes

    - name: Generate ARA static report
      command: /usr/ansible-venv/bin/ara-manage generate /root/.ara/static
      become: yes

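    # The generated report lives under /root, so pulling it requires
    # become; ignore_errors keeps log collection best-effort.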
    - name: Collect testing ARA results
      synchronize:
        dest: "{{ log_dir }}/ara-report"
        mode: pull
        src: "/root/.ara/static/"
        verify_host: true
      become: yes
      ignore_errors: true

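    # Point Zuul at the synced report so it is linked as a build
    # artifact alongside the job logs.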
    - name: Add ARA artifact
      zuul_return:
        data:
          zuul:
            artifacts:
              - name: ARA report
                url: '{{ groups["prod_bastion"][0] }}/ara-report/'

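    # Preserve /etc/ansible (the configuration and any inventory used
    # by the nested run); __pycache__ directories are excluded as
    # noise.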
    - name: Collect ansible configuration
      synchronize:
        dest: "{{ log_dir }}/etc"
        mode: pull
        src: "/etc/ansible"
        verify_host: true
        rsync_opts:
          - "--exclude=__pycache__"
      ignore_errors: true