# NOTE: job dependencies keep this running in parallel and are defined
# in projects.yaml because it's easier to keep an overall view of
# what's happening in there.
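# A minimal sketch of how such a dependency might be expressed in a Zuul
# project pipeline definition (illustrative only; the pipeline name and the
# exact layout of projects.yaml are assumptions, not a copy of it):
#
#   - project:
#       deploy:
#         jobs:
#           - infra-prod-base
#           - infra-prod-letsencrypt:
#               dependencies:
#                 - infra-prod-base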
# Make sure only one run of a system-config playbook happens at a time
- semaphore:
name: infra-prod-playbook
max: 1
- job:
name: infra-prod-playbook
parent: opendev-infra-prod-base
description: |
Run the specified playbook against production hosts.
This is a parent job designed to be inherited to enable
CD deployment of our infrastructure. Set playbook_name to
specify the playbook relative to
/home/zuul/src/opendev.org/opendev/system-config/playbooks
on the bastion host.
abstract: true
semaphores: infra-prod-playbook
run: playbooks/zuul/run-production-playbook.yaml
post-run: playbooks/zuul/run-production-playbook-post.yaml
required-projects:
- opendev/system-config
vars:
infra_prod_ansible_forks: 10
infra_prod_playbook_collect_log: false
infra_prod_playbook_encrypt_log: true
nodeset:
nodes: []
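# A minimal sketch of a child job inheriting infra-prod-playbook; the job
# name and paths here are hypothetical (real child jobs such as
# infra-prod-base appear later in this file):
#
#   - job:
#       name: infra-prod-example
#       parent: infra-prod-playbook
#       description: Run example.yaml playbook.
#       vars:
#         playbook_name: example.yaml
#       files:
#         - playbooks/example.yaml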
- job:
name: infra-prod-bootstrap-bridge
parent: opendev-infra-prod-setup-keys
description: |
Configure the bastion host (bridge)
This job does minimal configuration on the bastion host
(bridge.openstack.org) to allow it to run system-config
playbooks against our production hosts. It sets up Ansible
and root keys on the host.
Note that this is separate from infra-prod-service-bridge;
bridge, in its role as the bastion host, actually runs that
playbook against itself; it includes things not strictly needed
to make the host able to deploy system-config.
run: playbooks/zuul/run-production-bootstrap-bridge.yaml
files:
- playbooks/bootstrap-bridge.yaml
- playbooks/zuul/run-production-bootstrap-bridge.yaml
- playbooks/zuul/run-production-bootstrap-bridge-add-rootkey.yaml
- playbooks/roles/install-ansible/
- playbooks/roles/root-keys/
- inventory/base/hosts.yaml
- inventory/service/group_vars/bastion.yaml
nodeset:
nodes: []
- job:
name: infra-prod-base
parent: infra-prod-playbook
description: Run the base playbook everywhere.
vars:
playbook_name: base.yaml
infra_prod_ansible_forks: 50
files:
- inventory/
- inventory/service/host_vars/
- inventory/service/group_vars/
- playbooks/base.yaml
- playbooks/roles/base/
- job:
name: infra-prod-letsencrypt
parent: infra-prod-playbook
description: Run letsencrypt.yaml playbook.
vars:
playbook_name: letsencrypt.yaml
files:
- inventory/
- playbooks/letsencrypt.yaml
# Any change to host_vars or group_vars can substantively
# change the certs we issue, so match greedily here.
- inventory/service/host_vars/
- inventory/service/group_vars/
- playbooks/roles/letsencrypt
- playbooks/roles/logrotate/
- job:
name: infra-prod-manage-projects
parent: infra-prod-playbook
timeout: 4800
description: |
Create and update projects in Gerrit and Gitea.
allowed-projects:
- opendev/system-config
- openstack/project-config
required-projects:
- opendev/system-config
- openstack/project-config
vars:
playbook_name: manage-projects.yaml
infra_prod_ansible_forks: 10
infra_prod_playbook_collect_log: true
- job:
name: infra-prod-service-base
parent: infra-prod-playbook
description: Base job for most service playbooks.
abstract: true
- job:
name: infra-prod-service-bridge
parent: infra-prod-service-base
description: Run service-bridge.yaml playbook.
vars:
playbook_name: service-bridge.yaml
files:
- inventory/base
- playbooks/service-bridge.yaml
- inventory/service/group_vars/bastion.yaml
- playbooks/roles/logrotate/
- playbooks/roles/edit-secrets-script/
- playbooks/roles/install-kubectl/
- playbooks/roles/iptables/
- playbooks/roles/configure-kubectl/
- playbooks/roles/configure-openstacksdk/
- playbooks/templates/clouds/bridge_all_clouds.yaml.j2
- job:
name: infra-prod-service-gitea-lb
parent: infra-prod-service-base
description: Run service-gitea-lb.yaml playbook.
vars:
playbook_name: service-gitea-lb.yaml
files:
- inventory/base
- playbooks/service-gitea-lb.yaml
- inventory/service/group_vars/gitea-lb.yaml
- playbooks/roles/pip3/
- playbooks/roles/iptables/
- playbooks/roles/install-docker/
- playbooks/roles/haproxy/
- job:
name: infra-prod-service-nameserver
parent: infra-prod-service-base
description: Run service-nameserver.yaml playbook.
vars:
playbook_name: service-nameserver.yaml
files:
- inventory/base
- playbooks/service-nameserver.yaml
- inventory/service/group_vars/adns.yaml
- inventory/service/group_vars/adns-primary.yaml
- inventory/service/group_vars/adns-secondary.yaml
- playbooks/roles/master-nameserver/
- playbooks/roles/nameserver/
- playbooks/roles/iptables/
- job:
name: infra-prod-service-nodepool
parent: infra-prod-service-base
description: Run service-nodepool.yaml playbook.
vars:
playbook_name: service-nodepool.yaml
required-projects:
- opendev/system-config
- openstack/project-config
files:
- inventory/base
- playbooks/service-nodepool.yaml
- inventory/service/host_vars/nb
- inventory/service/host_vars/nl
- inventory/service/group_vars/nodepool
- playbooks/roles/configure-kubectl/
- playbooks/roles/configure-openstacksdk/
- playbooks/roles/install-docker/
- playbooks/roles/iptables/
- playbooks/roles/nodepool
- playbooks/templates/clouds/nodepool_
- job:
name: infra-prod-service-etherpad
parent: infra-prod-service-base
description: Run service-etherpad.yaml playbook.
vars:
playbook_name: service-etherpad.yaml
files:
- inventory/base
- playbooks/service-etherpad.yaml
- inventory/service/group_vars/etherpad.yaml
- playbooks/roles/install-docker/
- playbooks/roles/pip3/
- playbooks/roles/etherpad
- playbooks/roles/logrotate
- playbooks/roles/iptables/
- docker/etherpad/
- job:
name: infra-prod-service-keycloak
parent: infra-prod-service-base
description: Run service-keycloak.yaml playbook.
vars:
playbook_name: service-keycloak.yaml
files:
- inventory/base
- playbooks/service-keycloak.yaml
- inventory/service/group_vars/keycloak.yaml
- playbooks/roles/keycloak/
- playbooks/roles/install-docker/
- playbooks/roles/iptables/
- job:
name: infra-prod-service-meetpad
parent: infra-prod-service-base
description: Run service-meetpad.yaml playbook.
vars:
playbook_name: service-meetpad.yaml
files:
- inventory/base
- playbooks/service-meetpad.yaml
- inventory/service/host_vars/meetpad01.opendev.org.yaml
- inventory/service/group_vars/meetpad.yaml
- playbooks/roles/pip3/
- playbooks/roles/install-docker/
- playbooks/roles/iptables/
- playbooks/roles/jitsi-meet/
- job:
name: infra-prod-service-mirror-update
parent: infra-prod-service-base
description: Run service-mirror-update.yaml playbook.
vars:
playbook_name: service-mirror-update.yaml
files:
- inventory/base
- inventory/service/group_vars/mirror.yaml
- inventory/service/host_vars/mirror
- playbooks/service-mirror-update.yaml
- playbooks/roles/mirror-update/
- playbooks/roles/reprepro/
- playbooks/roles/iptables/
- playbooks/roles/logrotate/
- roles/kerberos-client/
- roles/openafs-client/
- job:
name: infra-prod-service-mirror
parent: infra-prod-service-base
description: Run service-mirror.yaml playbook.
vars:
playbook_name: service-mirror.yaml
files:
- inventory/base
- playbooks/service-mirror.yaml
- inventory/service/host_vars/mirror
- inventory/service/group_vars/mirror.yaml
- playbooks/roles/mirror/
- playbooks/roles/afs-release/
- playbooks/roles/afsmon/
- playbooks/roles/iptables/
- playbooks/roles/logrotate/
- roles/openafs-client/
- job:
name: infra-prod-service-paste
parent: infra-prod-service-base
description: Run service-paste.yaml playbook.
vars:
playbook_name: service-paste.yaml
files:
- inventory/base
- playbooks/service-paste.yaml
- inventory/service/group_vars/paste.yaml
- playbooks/roles/install-docker/
- playbooks/roles/pip3/
- playbooks/roles/lodgeit/
- playbooks/roles/iptables/
- job:
name: infra-prod-service-static
parent: infra-prod-service-base
description: Run service-static.yaml playbook.
vars:
playbook_name: service-static.yaml
files:
- inventory/base
- playbooks/service-static.yaml
- inventory/service/group_vars/static.yaml
- playbooks/roles/apache-ua-filter/
- playbooks/roles/iptables/
- playbooks/roles/static/
- playbooks/roles/zuul-user/
- roles/openafs-client/
- job:
name: infra-prod-service-tracing
parent: infra-prod-service-base
description: Run service-tracing.yaml playbook.
vars:
playbook_name: service-tracing.yaml
files:
- inventory/base
- playbooks/service-tracing.yaml
- inventory/service/group_vars/tracing.yaml
- playbooks/roles/jaeger/
- playbooks/roles/install-docker/
- playbooks/roles/iptables/
- job:
name: infra-prod-service-borg-backup
parent: infra-prod-service-base
description: Run service-borg-backup.yaml playbook.
vars:
playbook_name: service-borg-backup.yaml
files:
- inventory/base
- playbooks/service-borg-backup.yaml
- playbooks/roles/install-borg/
- playbooks/roles/borg-backup/
- playbooks/roles/borg-backup-server/
- playbooks/roles/iptables/
- job:
name: infra-prod-service-registry
parent: infra-prod-service-base
description: Run service-registry.yaml playbook.
vars:
playbook_name: service-registry.yaml
files:
- inventory/base
- playbooks/service-registry.yaml
- inventory/service/group_vars/registry.yaml
- playbooks/roles/pip3/
- playbooks/roles/install-docker/
- playbooks/roles/iptables/
- playbooks/roles/registry/
- job:
name: infra-prod-service-zuul-preview
parent: infra-prod-service-base
description: Run service-zuul-preview.yaml playbook.
vars:
playbook_name: service-zuul-preview.yaml
files:
- inventory/base
- playbooks/service-zuul-preview.yaml
- inventory/service/group_vars/zuul-preview.yaml
- playbooks/roles/pip3/
- playbooks/roles/install-docker/
- playbooks/roles/iptables/
- playbooks/roles/zuul-preview/
- job:
name: infra-prod-service-zookeeper
parent: infra-prod-service-base
description: Run service-zookeeper.yaml playbook.
vars:
playbook_name: service-zookeeper.yaml
files:
- inventory/base
- inventory/service/group_vars/zookeeper.yaml
- ^inventory/service/host_vars/zk\d+\..*
- playbooks/roles/pip3/
- playbooks/roles/install-docker/
- playbooks/roles/iptables/
- playbooks/roles/zookeeper/
- job:
name: infra-prod-service-zuul
parent: infra-prod-service-base
description: |
Run service-zuul.yaml playbook.
This configures the main Zuul cluster. It will perform a
smart-reconfigure of the scheduler if the tenant configuration
is changed.
vars:
playbook_name: service-zuul.yaml
files:
- inventory/base
- playbooks/service-zuul.yaml
- inventory/service/group_vars/zuul
- inventory/service/group_vars/zookeeper.yaml
- inventory/service/host_vars/zk\d+
- inventory/service/host_vars/zuul\d+.opendev.org
- playbooks/roles/install-docker/
- playbooks/roles/iptables/
- playbooks/roles/zookeeper/
- playbooks/roles/zuul
- roles/kerberos-client/
- roles/openafs-client/
- job:
name: infra-prod-service-zuul-db
parent: infra-prod-service-base
description: Run service-zuul-db.yaml playbook.
vars:
playbook_name: service-zuul-db.yaml
files:
- inventory/base
- playbooks/service-zuul-db.yaml
- inventory/service/group_vars/zuul-db.yaml
- playbooks/roles/iptables/
- playbooks/roles/install-docker/
- playbooks/roles/mariadb/
- job:
name: infra-prod-service-zuul-lb
parent: infra-prod-service-base
description: Run service-zuul-lb.yaml playbook.
vars:
playbook_name: service-zuul-lb.yaml
files:
- inventory/base
- playbooks/service-zuul-lb.yaml
- inventory/service/group_vars/zuul-lb.yaml
- playbooks/roles/pip3/
- playbooks/roles/iptables/
- playbooks/roles/install-docker/
- playbooks/roles/haproxy/
- job:
name: infra-prod-service-review
parent: infra-prod-service-base
description: Run service-review.yaml playbook.
vars:
playbook_name: service-review.yaml
files:
- inventory/base
- playbooks/service-review.yaml
- inventory/service/group_vars/review.yaml
- inventory/service/host_vars/review02.opendev.org.yaml
- playbooks/roles/pip3/
- playbooks/roles/install-docker/
- playbooks/roles/iptables/
- playbooks/roles/gerrit/
- zuul.d/docker-images/gerrit.yaml
- job:
name: infra-prod-service-refstack
parent: infra-prod-service-base
description: Run service-refstack.yaml playbook.
vars:
playbook_name: service-refstack.yaml
files:
- inventory/base
- playbooks/service-refstack.yaml
- inventory/service/group_vars/refstack.yaml
- inventory/service/host_vars/refstack[0-9][0-9]
- playbooks/roles/install-docker/
- playbooks/roles/pip3/
- playbooks/roles/refstack/
- playbooks/roles/iptables/
- playbooks/roles/logrotate/
- docker/refstack
- docker/python-base/
- job:
name: infra-prod-service-gitea
parent: infra-prod-service-base
description: Run service-gitea.yaml playbook.
vars:
playbook_name: service-gitea.yaml
files:
- inventory/base
- playbooks/service-gitea.yaml
- inventory/service/group_vars/gitea.yaml
- inventory/service/host_vars/gitea[0-9][0-9]
- playbooks/roles/apache-ua-filter/
- playbooks/roles/install-docker/
- playbooks/roles/pip3/
- playbooks/roles/gitea/
- playbooks/roles/iptables/
- playbooks/roles/logrotate/
- docker/gitea/
- docker/gitea-init/
- docker/jinja-init/
- docker/python-base/
- job:
name: infra-prod-service-eavesdrop
parent: infra-prod-service-base
description: Run service-eavesdrop.yaml playbook.
required-projects:
- opendev/system-config
- openstack/project-config
vars:
playbook_name: service-eavesdrop.yaml
files: &infra_prod_eavesdrop_files
- inventory/base
- playbooks/service-eavesdrop.yaml
- playbooks/run-accessbot.yaml
- inventory/service/group_vars/eavesdrop.yaml
- playbooks/roles/install-docker
- playbooks/roles/iptables/
- playbooks/roles/accessbot
- playbooks/roles/limnoria
- playbooks/roles/ptgbot
- playbooks/roles/statusbot
- playbooks/roles/logrotate
- playbooks/roles/matrix-eavesdrop
- playbooks/roles/matrix-gerritbot
- playbooks/zuul/templates/group_vars/eavesdrop.yaml.j2
- docker/accessbot/
- docker/ircbot
- docker/matrix-eavesdrop
- job:
name: infra-prod-run-accessbot
parent: infra-prod-service-base
description: Run run-accessbot.yaml playbook.
required-projects:
- opendev/system-config
- openstack/project-config
vars:
playbook_name: run-accessbot.yaml
files:
- accessbot/channels.yaml
- playbooks/run-accessbot.yaml
- playbooks/roles/accessbot
- docker/accessbot/
- job:
name: infra-prod-service-codesearch
parent: infra-prod-service-base
description: Run service-codesearch.yaml playbook.
vars:
playbook_name: service-codesearch.yaml
files:
- docker/hound/
- inventory/base
- playbooks/service-codesearch.yaml
- inventory/service/host_vars/codesearch01.opendev.org.yaml
- inventory/service/group_vars/codesearch
- playbooks/roles/install-docker/
- playbooks/roles/pip3/
- playbooks/roles/codesearch
- playbooks/roles/logrotate
- playbooks/roles/iptables
- job:
name: infra-prod-service-grafana
parent: infra-prod-service-base
description: Run service-grafana.yaml playbook.
vars:
playbook_name: service-grafana.yaml
files:
- inventory/base
- playbooks/service-grafana.yaml
- inventory/service/host_vars/grafana01.opendev.org.yaml
- inventory/service/group_vars/grafana
- playbooks/roles/install-docker/
- playbooks/roles/pip3/
- playbooks/roles/grafana
- playbooks/roles/logrotate
- playbooks/roles/iptables/
- job:
name: infra-prod-service-graphite
parent: infra-prod-service-base
description: Run service-graphite.yaml playbook.
vars:
playbook_name: service-graphite.yaml
files:
- inventory/base
- playbooks/service-graphite.yaml
- inventory/service/host_vars/graphite02.opendev.org.yaml
- inventory/service/group_vars/graphite
- playbooks/roles/install-docker/
- playbooks/roles/pip3/
- playbooks/roles/graphite/
- playbooks/roles/iptables/
- job:
name: infra-prod-service-lists3
parent: infra-prod-service-base
description: Run service-lists3.yaml playbook.
vars:
playbook_name: service-lists3.yaml
files:
- docker/mailman
- inventory/base
- inventory/service/group_vars/mailman3.yaml
- playbooks/roles/iptables/
- playbooks/roles/base/exim
- playbooks/roles/mailman3/
- playbooks/service-lists3.yaml
# Run AFS changes separately so we can make sure to only do one at a time
# (turns out quorum is nice to have)
- job:
name: infra-prod-service-afs
parent: infra-prod-service-base
description: Run AFS playbook.
vars:
playbook_name: service-afs.yaml
infra_prod_ansible_forks: 1
required-projects:
- opendev/system-config
files:
- inventory/base
- playbooks/service-afs.yaml
- inventory/service/group_vars/afs
- inventory/service/group_vars/mirror-update
- playbooks/roles/iptables/
- playbooks/roles/vos-release/
- playbooks/roles/openafs-server/
- modules/
- manifests/
- roles/kerberos-client/
- roles/openafs-client/
- job:
name: infra-prod-service-kerberos
parent: infra-prod-service-base
description: Run Kerberos playbook.
vars:
playbook_name: service-kerberos.yaml
infra_prod_ansible_forks: 1
required-projects:
- opendev/system-config
files:
- inventory/base
- playbooks/service-kerberos.yaml
- inventory/service/group_vars/kerberos-kdc.yaml
- playbooks/roles/kerberos-kdc/
- roles/kerberos-client/
- playbooks/roles/iptables/
- job:
name: infra-prod-remote-puppet-else
parent: infra-prod-service-base
description: Run remote_puppet_else.yaml playbook.
vars:
playbook_name: remote_puppet_else.yaml
infra_prod_ansible_forks: 50
required-projects:
- opendev/ansible-role-puppet
- opendev/system-config
files:
- Gemfile
- Rakefile
- modules.env
- install_modules.sh
- hiera/
- inventory/
- roles/puppet-install/
- playbooks/install_puppet.yaml
- playbooks/update_puppet_version.yaml
- playbooks/remote_puppet_else.yaml
- playbooks/roles/puppet-run/
- playbooks/roles/install-ansible-roles/
- playbooks/roles/disable-puppet-agent/
- playbooks/roles/puppet-setup-ansible/
- playbooks/roles/iptables/
- modules/
- manifests/
- job:
name: infra-prod-run-cloud-launcher
parent: infra-prod-service-base
description: Run cloud launcher playbook
vars:
playbook_name: run_cloud_launcher.yaml
infra_prod_ansible_forks: 1
required-projects:
- opendev/ansible-role-cloud-launcher
- opendev/system-config
files:
- playbooks/run_cloud_launcher.yaml
- inventory/service/group_vars/bastion.yaml
- job:
name: infra-prod-cloud-linaro
parent: infra-prod-service-base
description: Run management tasks against Linaro
vars:
playbook_name: service-cloud-linaro.yaml
required-projects:
- opendev/system-config
files:
- playbooks/service-cloud-linaro.yaml