Jesse Pretorius 0e8423d536 MNAIO: Make post-install storage provisioning idempotent
The swift and cinder hosts do not use containers for services,
so there is no need for the current process of shrinking the
volumes. Instead, we ensure that the lxc & machines mounts are
removed, along with their respective logical volumes.

When setting up the swift logical volumes, we do not need to
create the mount point directories, because the mount task will
do that for us. As such, we remove that task.

Change-Id: Ibbe6d0fede6b6965415e421161354e311708d113
2018-08-29 17:12:43 +01:00

---
# Copyright 2018, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: Gather facts
  hosts: vm_hosts
  gather_facts: "{{ gather_facts | default(true) }}"
  environment: "{{ deployment_environment_variables | default({}) }}"
  tags:
    - deploy-vms
  tasks:
    - name: Gather variables for each operating system
      include_vars: "{{ item }}"
      with_first_found:
        - "{{ playbook_dir }}/vars/{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower }}.yml"
        - "{{ playbook_dir }}/vars/{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower }}.yml"
        - "{{ playbook_dir }}/vars/{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version | lower }}.yml"
        - "{{ playbook_dir }}/vars/{{ ansible_distribution | lower }}.yml"
        - "{{ playbook_dir }}/vars/{{ ansible_os_family | lower }}.yml"
      tags:
        - always
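    # The 'default' storage pool path discovered here is used as the base
    # directory for all VM disk images below; /data/images is only the
    # fallback used when the pool does not report a path.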
    - name: Get info about the virt storage pools
      virt_pool:
        command: info
      register: _virt_pools
    - name: Stop running VMs
      virt:
        name: "{{ hostvars[item]['server_hostname'] }}"
        command: destroy
      failed_when: false
      when:
        - hostvars[item]['server_vm'] | default(false) | bool
      with_items: "{{ groups['pxe_servers'] }}"
    - name: Delete VM LV
      lvol:
        vg: "{{ default_vm_disk_vg }}"
        lv: "{{ hostvars[item]['server_hostname'] }}"
        state: absent
        force: yes
      failed_when: false
      when:
        - hostvars[item]['server_vm'] | default(false) | bool
      with_items: "{{ groups['pxe_servers'] }}"
    - name: Delete VM Disk Image
      file:
        path: "{{ _virt_pools.pools.default.path | default('/data/images') }}/{{ hostvars[item]['server_hostname'] }}.img"
        state: absent
      when:
        - hostvars[item]['server_vm'] | default(false) | bool
      with_items: "{{ groups['pxe_servers'] }}"
    - name: Undefine the VM
      virt:
        name: "{{ hostvars[item]['server_hostname'] }}"
        command: undefine
      failed_when: false
      when:
        - hostvars[item]['server_vm'] | default(false) | bool
      with_items: "{{ groups['pxe_servers'] }}"
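    # The tasks above tear down any pre-existing VM definition and its
    # backing storage; the tasks below rebuild everything from scratch,
    # which is what makes re-runs of this playbook safe.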
    - name: Create VM LV
      lvol:
        vg: "{{ default_vm_disk_vg }}"
        lv: "{{ hostvars[item]['server_hostname'] }}"
        size: "{{ default_vm_storage }}"
      when:
        - hostvars[item]['server_vm'] | default(false) | bool
        - default_vm_disk_mode == "lvm"
      with_items: "{{ groups['pxe_servers'] }}"
    - name: Setup/clean-up file-based disk images
      when:
        - default_vm_disk_mode == "file"
      block:
        - name: Find existing base image files
          find:
            paths: "{{ _virt_pools.pools.default.path | default('/data/images') }}"
            patterns: '*-base.img'
          register: _base_images
        - name: Enable/disable vm_use_snapshot based on whether there are base image files
          set_fact:
            vm_use_snapshot: "{{ _base_images['matched'] > 0 }}"
        - name: Clean up base image files if they are not being used
          file:
            path: "{{ item.path }}"
            state: absent
          with_items: "{{ _base_images.files }}"
          when:
            - not (vm_use_snapshot | bool)
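    # When a base image exists, qemu-img is passed '-b' so each VM disk is
    # created as a copy-on-write qcow2 overlay of that base image rather
    # than as a full-size standalone image.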
    - name: Create VM Disk Image
      command: >-
        qemu-img create
        -f qcow2
        {% if vm_use_snapshot | bool %}
        -b {{ _virt_pools.pools.default.path | default('/data/images') }}/{{ hostvars[item]['server_hostname'] }}-base.img
        {% endif %}
        {{ _virt_pools.pools.default.path | default('/data/images') }}/{{ hostvars[item]['server_hostname'] }}.img
        {{ default_vm_storage }}m
      when:
        - hostvars[item]['server_vm'] | default(false) | bool
      with_items: "{{ groups['pxe_servers'] }}"
    # Note (odyssey4me):
    # This will only work on a host which has
    # libguestfs >= 1.35.2 and >= 1.34.1
    # Ubuntu bionic works, but xenial does not (even with UCA).
    # ref: https://bugs.launchpad.net/ubuntu/+source/libguestfs/+bug/1615337
    - name: Inject the host ssh key into the VM disk image
      command: >-
        virt-sysprep
        --enable customize
        --ssh-inject root:file:/root/.ssh/id_rsa.pub
        --add {{ _virt_pools.pools.default.path | default('/data/images') }}/{{ hostvars[item]['server_hostname'] }}.img
      when:
        - vm_use_snapshot | bool
        - hostvars[item]['server_vm'] | default(false) | bool
      with_items: "{{ groups['pxe_servers'] }}"
    - name: Wait for guest capabilities to appear
      command: "virsh capabilities"
      register: virsh_caps
      until: "'<guest>' in virsh_caps.stdout"
      retries: 6
      delay: 10
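    # Snapshot-based builds reuse the domain XML saved alongside the base
    # image; fresh builds render the XML from the kvm-vm.xml.j2 template.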
    - name: Define the VM
      virt:
        name: "{{ hostvars[item]['server_hostname'] }}"
        command: define
        xml: >-
          {%- if vm_use_snapshot | bool %}
          {{ lookup('file', _virt_pools.pools.default.path | default('/data/images') ~ '/' ~ hostvars[item]['server_hostname'] ~ '.xml') }}
          {%- else %}
          {{ lookup('template', 'kvm/kvm-vm.xml.j2') }}
          {%- endif %}
      failed_when: false
      when:
        - hostvars[item]['server_vm'] | default(false) | bool
      with_items: "{{ groups['pxe_servers'] }}"
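    # Read back the live domain XML and keep a copy at libvirt's usual
    # configuration path so the definition can be inspected or restored.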
    - name: Get the VM xml
      virt:
        command: get_xml
        name: "{{ hostvars[item]['server_hostname'] }}"
      register: vm_xml
      when:
        - hostvars[item]['server_vm'] | default(false) | bool
      with_items: "{{ groups['pxe_servers'] }}"
    - name: Write the VM xml
      copy:
        content: "{{ item.get_xml }}"
        dest: "/etc/libvirt/qemu/{{ item.item }}.xml"
      with_items: "{{ vm_xml.results }}"
    - name: Start the VM
      virt:
        name: "{{ hostvars[item]['server_hostname'] }}"
        command: start
        state: running
      failed_when: false
      when:
        - hostvars[item]['server_vm'] | default(false) | bool
      with_items: "{{ groups['pxe_servers'] }}"
    - name: Add VM to /etc/hosts file
      lineinfile:
        path: "/etc/hosts"
        line: "{{ hostvars[item]['ansible_host'] }} {{ hostvars[item]['server_hostname'] }}"
      when:
        - hostvars[item]['server_vm'] | default(false) | bool
      with_items: "{{ groups['pxe_servers'] }}"
- name: Check VM Connectivity
  import_playbook: vm-status.yml
- name: Create vm_servers group
  hosts: localhost
  gather_facts: false
  environment: "{{ deployment_environment_variables | default({}) }}"
  tags:
    - deploy-vms
  tasks:
    - name: VM Servers group
      add_host:
        name: "{{ item }}"
        groups: vm_servers
      when:
        - hostvars[item]['server_vm'] | default(false) | bool
      with_items: "{{ groups['pxe_servers'] }}"
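# Distribute the deployment host's root SSH key pair to every VM; the
# later plays and the deployment itself rely on passwordless SSH between
# these hosts.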
- name: VM Host Setup
  hosts: vm_servers
  gather_facts: false
  any_errors_fatal: true
  tasks:
    - name: Copy Host Keys
      copy:
        src: "{{ item.src }}"
        dest: "{{ item.dest }}"
        mode: "0600"
      with_items:
        - src: "{{ lookup('env', 'HOME') }}/.ssh/id_rsa"
          dest: /root/.ssh/id_rsa
        - src: "{{ lookup('env', 'HOME') }}/.ssh/id_rsa.pub"
          dest: /root/.ssh/id_rsa.pub
# In vm-post-install-script.sh.j2 we chattr +i the interfaces file to prevent
# the preseed system from overwriting the file after we've modified it. The
# task below simply removes the immutable attribute.
- name: Remove immutable attr from /etc/network/interfaces
  hosts: vm_servers
  gather_facts: true
  environment: "{{ deployment_environment_variables | default({}) }}"
  tags:
    - deploy-vms
  tasks:
    - file:
        path: /etc/network/interfaces
        attr: ""
      when:
        - ansible_distribution | lower == "ubuntu"
        - ansible_distribution_release | lower == "trusty"
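# The stock OpenSSH limits (MaxStartups 10:30:100, MaxSessions 10) can drop
# connections when Ansible opens many sessions to the same VM at once, so
# both limits are raised on every VM.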
- name: Set MaxSessions and MaxStartups to reduce connection failures
  hosts: vm_servers
  gather_facts: "{{ gather_facts | default(true) }}"
  environment: "{{ deployment_environment_variables | default({}) }}"
  tags:
    - deploy-vms
  tasks:
    - name: Gather variables for each operating system
      include_vars: "{{ item }}"
      with_first_found:
        - "{{ playbook_dir }}/vars/{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower }}.yml"
        - "{{ playbook_dir }}/vars/{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower }}.yml"
        - "{{ playbook_dir }}/vars/{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version | lower }}.yml"
        - "{{ playbook_dir }}/vars/{{ ansible_distribution | lower }}.yml"
        - "{{ playbook_dir }}/vars/{{ ansible_os_family | lower }}.yml"
      tags:
        - always
    - lineinfile:
        path: /etc/ssh/sshd_config
        line: MaxStartups 100
        state: present
        regexp: '^MaxStartups.*$'
      notify:
        - restart sshd
    - lineinfile:
        path: /etc/ssh/sshd_config
        line: MaxSessions 100
        state: present
        regexp: '^MaxSessions.*$'
      notify:
        - restart sshd
  handlers:
    - name: restart sshd
      service:
        name: "{{ ssh_service_name }}"
        state: restarted
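# The swift and cinder hosts run their services directly on the VM rather
# than in LXC containers, so the lxc/machines mounts and their logical
# volumes are removed to free the space for the volumes created below.
# Both modules use state: absent, so re-runs are no-ops.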
- name: Make space for swift and cinder volumes
  hosts: cinder_hosts:swift_hosts
  gather_facts: "{{ gather_facts | default(true) }}"
  environment: "{{ deployment_environment_variables | default({}) }}"
  tags:
    - deploy-vms
  tasks:
    - name: Unmount unnecessary mounts
      mount:
        name: "{{ item }}"
        state: absent
      with_items:
        - "/var/lib/lxc"
        - "/var/lib/machines"
      register: _remove_mounts
    - name: Remove unnecessary logical volumes
      lvol:
        vg: vmvg00
        lv: "{{ item }}"
        force: true
        state: absent
      with_items:
        - "lxc00"
        - "machines00"
      register: _remove_lvs
    - name: Reload systemd to remove generated unit files for mount
      systemd:
        daemon_reload: yes
      when:
        - (_remove_mounts is changed) or (_remove_lvs is changed)
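# 'size: 100%FREE' together with 'shrink: false' keeps this idempotent: a
# re-run will not try to shrink the existing cinder-volumes00 LV back down
# to whatever free space then remains in the VG.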
- name: Setup cinder host volume
  hosts: cinder_hosts
  gather_facts: "{{ gather_facts | default(true) }}"
  environment: "{{ deployment_environment_variables | default({}) }}"
  tags:
    - deploy-vms
  tasks:
    - name: Create cinder-volumes lv
      lvol:
        vg: vmvg00
        lv: cinder-volumes00
        size: "100%FREE"
        shrink: false
    - name: Create data cinder-volumes group
      lvg:
        vg: cinder-volumes
        pvs: "/dev/vmvg00/cinder-volumes00"
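# The mount tasks below create the /srv/diskN mount point directories
# themselves, so no separate directory-creation task is needed.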
- name: Setup swift host volume
  hosts: swift_hosts
  gather_facts: "{{ gather_facts | default(true) }}"
  environment: "{{ deployment_environment_variables | default({}) }}"
  tags:
    - deploy-vms
  tasks:
    - name: Create swift disk LVs
      lvol:
        vg: vmvg00
        lv: "{{ item }}"
        size: 4G
      with_items:
        - disk1
        - disk2
        - disk3
    - name: Format swift drives
      filesystem:
        fstype: xfs
        dev: "/dev/vmvg00/{{ item }}"
      with_items:
        - disk1
        - disk2
        - disk3
    - name: Mount swift drives
      mount:
        name: "/srv/{{ item }}"
        src: "/dev/mapper/vmvg00-{{ item }}"
        fstype: xfs
        state: mounted
        opts: defaults,discard
      with_items:
        - disk1
        - disk2
        - disk3