Jesse Pretorius 484059205a MNAIO: Enable using a data disk for file-backed VMs
In order to make use of a data disk, we enable the 'file'
implementation of default_vm_disk_mode to place its disk
images on a data disk, much like the 'lvm' implementation
already does.
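
For illustration, selecting the mode might look like the
following (the variable name comes from this change; where it
is overridden, e.g. an extra-var or a vars file, is
deployment-specific and only assumed here):

  # Hypothetical override; the values used by this change are 'lvm' and 'file'.
  default_vm_disk_mode: file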

To simplify switching default_vm_disk_mode from lvm to file
and back again, the setup-host playbook removes any previous
implementation and replaces it. This is useful when testing
the different modes because it avoids having to clean up by
hand.
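
The cleanup is mode-agnostic, which is what makes the switch
painless: both the LVM volume and the file-backed disk image are
removed (ignoring failures) before each VM is rebuilt. A condensed
sketch of that pattern, as it appears in the VM deployment tasks
below (host loop and per-host guards omitted):

  - name: Delete VM LV
    lvol:
      vg: "{{ default_vm_disk_vg }}"
      lv: "{{ hostvars[item]['server_hostname'] }}"
      state: absent
      force: yes
    failed_when: false
  - name: Delete VM Disk Image
    file:
      path: "{{ _virt_pools.pools.default.path | default('/data') }}/{{ hostvars[item]['server_hostname'] }}.img"
      state: absent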

This patch also fixes the implementation of the virt
storage pool. Currently the tasks only execute when the
condition 'virt_data_volume.pools is not defined' holds,
but that variable is always defined, so the tasks never
execute. We now ensure that for both backing stores the
'default' storage pool is defined, started and set to
autostart (as three tasks, because the virt_pool module
can only do one thing at a time).
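
A minimal sketch of that three-task pattern using the virt_pool
module (the pool XML template name below is illustrative, not the
one shipped in the repo):

  - name: Define the default virt storage pool
    virt_pool:
      name: default
      command: define
      xml: "{{ lookup('template', 'default-pool.xml.j2') }}"
  - name: Start the default virt storage pool
    virt_pool:
      name: default
      state: active
  - name: Set the default virt storage pool to autostart
    virt_pool:
      name: default
      autostart: yes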

The pool implementation for the 'file'-backed VMs uses the
largest data disk it can find and creates the /data mount
for it. To cater for a different configuration, we ensure
that all references to the disk files use the path that is
configured in the pool, rather than assuming the path.
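
In practice this means querying libvirt for the pool once and
deriving every image path from the result, as the tasks below do;
a small sketch (the debug task is illustrative only):

  - name: Get info about the virt storage pools
    virt_pool:
      command: info
    register: _virt_pools
  - name: Show where a disk image would be created
    debug:
      msg: "{{ _virt_pools.pools.default.path | default('/data') }}/example-vm.img"

The default('/data') fallback simply covers hosts where no
'default' pool has been defined yet.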

Change-Id: If7e7e37df4d7c0ebe9d003e5b5b97811d41eff22
2018-08-15 16:58:50 +01:00

---
# Copyright 2018, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: Gather facts
  hosts: vm_hosts
  gather_facts: "{{ gather_facts | default(true) }}"
  environment: "{{ deployment_environment_variables | default({}) }}"
  tags:
    - deploy-vms
  tasks:
    - name: Gather variables for each operating system
      include_vars: "{{ item }}"
      with_first_found:
        - "{{ playbook_dir }}/vars/{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower }}.yml"
        - "{{ playbook_dir }}/vars/{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower }}.yml"
        - "{{ playbook_dir }}/vars/{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version | lower }}.yml"
        - "{{ playbook_dir }}/vars/{{ ansible_distribution | lower }}.yml"
        - "{{ playbook_dir }}/vars/{{ ansible_os_family | lower }}.yml"
      tags:
        - always
    - name: Get info about the virt storage pools
      virt_pool:
        command: info
      register: _virt_pools
    - name: Stop running VMs
      virt:
        name: "{{ hostvars[item]['server_hostname'] }}"
        command: destroy
      failed_when: false
      when:
        - hostvars[item]['server_vm'] | default(false) | bool
      with_items: "{{ groups['pxe_servers'] }}"
    - name: Delete VM LV
      lvol:
        vg: "{{ default_vm_disk_vg }}"
        lv: "{{ hostvars[item]['server_hostname'] }}"
        state: absent
        force: yes
      failed_when: false
      when:
        - hostvars[item]['server_vm'] | default(false) | bool
      with_items: "{{ groups['pxe_servers'] }}"
    - name: Delete VM Disk Image
      file:
        path: "{{ _virt_pools.pools.default.path | default('/data') }}/{{ hostvars[item]['server_hostname'] }}.img"
        state: absent
      when:
        - hostvars[item]['server_vm'] | default(false) | bool
      with_items: "{{ groups['pxe_servers'] }}"
    - name: Undefine the VM
      virt:
        name: "{{ hostvars[item]['server_hostname'] }}"
        command: undefine
      failed_when: false
      when:
        - hostvars[item]['server_vm'] | default(false) | bool
      with_items: "{{ groups['pxe_servers'] }}"
    - name: Create VM LV
      lvol:
        vg: "{{ default_vm_disk_vg }}"
        lv: "{{ hostvars[item]['server_hostname'] }}"
        size: "{{ default_vm_storage }}"
      when:
        - hostvars[item]['server_vm'] | default(false) | bool
        - default_vm_disk_mode == "lvm"
      with_items: "{{ groups['pxe_servers'] }}"
    - name: Setup file-based disk images
      when:
        - default_vm_disk_mode == "file"
      block:
        - name: Find existing base image files
          find:
            paths: "{{ _virt_pools.pools.default.path | default('/data') }}"
            patterns: '*-base.img'
          register: _base_images
        - name: Set vm_use_snapshot if it's not defined
          set_fact:
            vm_use_snapshot: "{{ _base_images['matched'] > 0 }}"
          when:
            - vm_use_snapshot is not defined
        - name: Create VM Disk Image
          command: >-
            qemu-img create
            -f qcow2
            {% if vm_use_snapshot | bool %}
            -b {{ _virt_pools.pools.default.path | default('/data') }}/{{ hostvars[item]['server_hostname'] }}-base.img
            {% endif %}
            {{ _virt_pools.pools.default.path | default('/data') }}/{{ hostvars[item]['server_hostname'] }}.img
            {{ default_vm_storage }}m
          when:
            - hostvars[item]['server_vm'] | default(false) | bool
          with_items: "{{ groups['pxe_servers'] }}"
    - name: Wait for guest capabilities to appear
      command: "virsh capabilities"
      register: virsh_caps
      until: "'<guest>' in virsh_caps.stdout"
      retries: 6
      delay: 10
    - name: Define the VM
      virt:
        name: "{{ hostvars[item]['server_hostname'] }}"
        command: define
        xml: "{{ lookup('template', 'kvm/kvm-vm.xml.j2') }}"
      failed_when: false
      when:
        - hostvars[item]['server_vm'] | default(false) | bool
      with_items: "{{ groups['pxe_servers'] }}"
    - name: Get the VM xml
      virt:
        command: get_xml
        name: "{{ hostvars[item]['server_hostname'] }}"
      register: vm_xml
      when:
        - hostvars[item]['server_vm'] | default(false) | bool
      with_items: "{{ groups['pxe_servers'] }}"
    - name: Write the VM xml
      copy:
        content: "{{ item.get_xml }}"
        dest: "/etc/libvirt/qemu/{{ item.item }}.xml"
      with_items: "{{ vm_xml.results }}"
    - name: Start the VM
      virt:
        name: "{{ hostvars[item]['server_hostname'] }}"
        command: start
        state: running
      failed_when: false
      when:
        - hostvars[item]['server_vm'] | default(false) | bool
      with_items: "{{ groups['pxe_servers'] }}"
    - name: Add VM to /etc/hosts file
      lineinfile:
        path: "/etc/hosts"
        line: "{{ hostvars[item]['ansible_host'] }} {{ hostvars[item]['server_hostname'] }}"
      when:
        - hostvars[item]['server_vm'] | default(false) | bool
      with_items: "{{ groups['pxe_servers'] }}"
- name: Check VM Connectivity
  import_playbook: vm-status.yml
- name: Create vm_servers group
  hosts: localhost
  gather_facts: false
  environment: "{{ deployment_environment_variables | default({}) }}"
  tags:
    - deploy-vms
  tasks:
    - name: VM Servers group
      add_host:
        name: "{{ item }}"
        groups: vm_servers
      when:
        - hostvars[item]['server_vm'] | default(false) | bool
      with_items: "{{ groups['pxe_servers'] }}"
- name: VM Host Setup
  hosts: vm_servers
  gather_facts: false
  any_errors_fatal: true
  tasks:
    - name: Copy Host Keys
      copy:
        src: "{{ item.src }}"
        dest: "{{ item.dest }}"
        mode: "0600"
      with_items:
        - src: "{{ lookup('env', 'HOME') }}/.ssh/id_rsa"
          dest: /root/.ssh/id_rsa
        - src: "{{ lookup('env', 'HOME') }}/.ssh/id_rsa.pub"
          dest: /root/.ssh/id_rsa.pub
# In vm-post-install-script.sh.j2 we chattr +i the interfaces file to prevent
# the preseed system from overwriting the file after we've modified it. The
# task below simply removes the immutable attribute.
- name: Remove immutable attr from /etc/network/interfaces
  hosts: vm_servers
  gather_facts: true
  environment: "{{ deployment_environment_variables | default({}) }}"
  tags:
    - deploy-vms
  tasks:
    - file:
        path: /etc/network/interfaces
        attr: ""
      when:
        - ansible_distribution | lower == "ubuntu"
        - ansible_distribution_release | lower == "trusty"
- name: Set MaxSessions and MaxStartups to reduce connection failures
  hosts: vm_servers
  gather_facts: "{{ gather_facts | default(true) }}"
  environment: "{{ deployment_environment_variables | default({}) }}"
  tags:
    - deploy-vms
  tasks:
    - name: Gather variables for each operating system
      include_vars: "{{ item }}"
      with_first_found:
        - "{{ playbook_dir }}/vars/{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower }}.yml"
        - "{{ playbook_dir }}/vars/{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower }}.yml"
        - "{{ playbook_dir }}/vars/{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version | lower }}.yml"
        - "{{ playbook_dir }}/vars/{{ ansible_distribution | lower }}.yml"
        - "{{ playbook_dir }}/vars/{{ ansible_os_family | lower }}.yml"
      tags:
        - always
    - lineinfile:
        path: /etc/ssh/sshd_config
        line: MaxStartups 100
        state: present
        regexp: '^MaxStartups.*$'
      notify:
        - restart sshd
    - lineinfile:
        path: /etc/ssh/sshd_config
        line: MaxSessions 100
        state: present
        regexp: '^MaxSessions.*$'
      notify:
        - restart sshd
  handlers:
    - name: restart sshd
      service:
        name: "{{ ssh_service_name }}"
        state: restarted
- name: Make space for swift and cinder volumes
  hosts: cinder_hosts:swift_hosts
  gather_facts: false
  environment: "{{ deployment_environment_variables | default({}) }}"
  tags:
    - deploy-vms
  tasks:
    - name: Unmount lxc and machines mounts
      mount:
        name: "{{ item }}"
        state: unmounted
      with_items:
        - "/var/lib/lxc"
        - "/var/lib/machines"
    - name: Remove lxc mount directory
      mount:
        name: "/var/lib/lxc"
        state: absent
    - name: Shrink machines00 logical volume
      lvol:
        vg: vmvg00
        lv: machines00
        size: 8192
        shrink: yes
        force: yes
      when:
        - default_container_tech | default('lxc') == 'nspawn'
    - name: Remove lxc00 logical volume
      lvol:
        vg: vmvg00
        lv: lxc00
        force: true
        state: absent
    - name: Re-mount machines00 logical volume
      mount:
        path: /var/lib/machines
        src: /dev/mapper/vmvg00-machines00
        state: mounted
        fstype: btrfs
- name: Setup cinder host volume
  hosts: cinder_hosts
  gather_facts: false
  environment: "{{ deployment_environment_variables | default({}) }}"
  tags:
    - deploy-vms
  tasks:
    - name: Create cinder-volumes lv
      lvol:
        vg: vmvg00
        lv: cinder-volumes00
        size: "100%FREE"
        shrink: false
    - name: Create data cinder-volumes group
      lvg:
        vg: cinder-volumes
        pvs: "/dev/vmvg00/cinder-volumes00"
- name: Setup swift host volume
  hosts: swift_hosts
  gather_facts: false
  environment: "{{ deployment_environment_variables | default({}) }}"
  tags:
    - deploy-vms
  tasks:
    - name: Remove deleteme lv
      lvol:
        vg: vmvg00
        lv: "{{ item }}"
        size: 4G
      with_items:
        - disk1
        - disk2
        - disk3
    - name: Format swift drives
      filesystem:
        fstype: xfs
        dev: "/dev/vmvg00/{{ item }}"
      with_items:
        - disk1
        - disk2
        - disk3
    - name: Create drive directories
      file:
        path: "/srv/{{ item }}"
        state: directory
        owner: "root"
        group: "root"
        mode: "0755"
      with_items:
        - disk1
        - disk2
        - disk3
    - name: Mount swift drives
      mount:
        name: "/srv/{{ item }}"
        src: "/dev/mapper/vmvg00-{{ item }}"
        fstype: xfs
        state: mounted
      with_items:
        - disk1
        - disk2
        - disk3