MNAIO: Enable using a data disk for file-backed VM's

The 'file' implementation of default_vm_disk_mode now makes use of
a data disk, much like the 'lvm' implementation already does.
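
For example, selecting the file-backed implementation is just a matter
of overriding the variable (illustrative only; where you set it, such
as extra-vars or group_vars, depends on how you drive the MNAIO
playbooks):

  # Illustrative override: pick the file-backed implementation instead
  # of the default 'lvm' one before running the MNAIO playbooks.
  default_vm_disk_mode: file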

To simplify changing from the default_vm_disk_mode of lvm
to file and back again, the setup-host playbook will remove
any previous implementation and replace it. This is useful
when testing the different modes, as it avoids having to
clean up by hand.
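
As a rough sketch of how that mismatch is detected (the real tasks are
in the setup-host.yml hunk further down; the debug task here is only a
stand-in), the check relies on the pool info reporting a format for the
LVM-backed pool but not for the directory-backed one:

  # Sketch only: assumes _virt_pools was registered beforehand with
  # 'virt_pool: command: info', as the playbook below does.
  - name: Check whether the existing 'default' pool matches default_vm_disk_mode
    debug:
      msg: "The existing 'default' pool belongs to the other mode and must be removed"
    when:
      - _virt_pools.pools.default is defined
      - (default_vm_disk_mode == "file" and _virt_pools.pools.default.format is defined) or
        (default_vm_disk_mode == "lvm" and _virt_pools.pools.default.format is not defined)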

This patch also fixes the implementation of the virt
storage pool. Currently the tasks only execute if
'virt_data_volume.pools is not defined', but it is always
defined so the tasks never execute. We now ensure that
for both backing stores the 'default' storage pool is
defined, started and set to autostart (as three separate
tasks, since the virt_pool module can only perform one
operation per invocation).
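
For the 'file' backing store those three tasks boil down to the
following (lifted from the setup-host.yml hunk further down; the
permissions element of the pool XML is trimmed here for brevity):

  - name: Define the default virt storage pool
    virt_pool:
      name: default
      state: present
      xml: |
        <pool type='dir'>
          <name>default</name>
          <target>
            <path>/data</path>
          </target>
        </pool>

  - name: Set default virt storage pool to active
    virt_pool:
      name: default
      state: active

  - name: Set default virt storage pool to autostart
    virt_pool:
      name: default
      autostart: yes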

The pool implementation for the 'file' backed VM's uses
the largest data disk it can find and creates the /data
mount for it. To cater for a different configuration, we
ensure that all references to the disk files use the path
that is configured in the pool, rather than assuming the
path.
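
The pattern the other playbooks use to resolve that path looks roughly
like this (sketch only; 'my_vm' is a hypothetical name standing in for
the per-host server_hostname values the real tasks loop over):

  - name: Get info about the virt storage pools
    virt_pool:
      command: info
    register: _virt_pools

  - name: Show the resolved disk image path
    debug:
      msg: "{{ _virt_pools.pools.default.path | default('/data') }}/my_vm.img"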

Change-Id: If7e7e37df4d7c0ebe9d003e5b5b97811d41eff22
Author: Jesse Pretorius
Date:   2018-08-15 14:18:28 +01:00
Parent: 8b508a9d90
Commit: 484059205a

5 changed files with 150 additions and 64 deletions

@@ -263,9 +263,9 @@ backing store) for the VM's, then set the following option before executing
 If you wish to save the current file-based images in order to implement a
 thin-provisioned set of VM's which can be saved and re-used, then use the
-``save-vms.yml`` playbook. This will stop the VM's and save the files to
-``/var/lib/libvirt/images/*-base.img``. Re-executing the ``deploy-vms.yml``
-playbook afterwards will rebuild the VMs from those images.
+``save-vms.yml`` playbook. This will stop the VM's and rename the files to
+``*-base.img``. Re-executing the ``deploy-vms.yml`` playbook afterwards will
+rebuild the VMs from those images.
 
 .. code-block:: bash

@@ -31,6 +31,11 @@
       tags:
         - always
 
+    - name: Get info about the virt storage pools
+      virt_pool:
+        command: info
+      register: _virt_pools
+
     - name: Stop running VMs
       virt:
         name: "{{ hostvars[item]['server_hostname'] }}"
@@ -53,7 +58,7 @@
     - name: Delete VM Disk Image
       file:
-        path: "/var/lib/libvirt/images/{{ hostvars[item]['server_hostname'] }}.img"
+        path: "{{ _virt_pools.pools.default.path | default('/data') }}/{{ hostvars[item]['server_hostname'] }}.img"
         state: absent
       when:
         - hostvars[item]['server_vm'] | default(false) | bool
@@ -84,7 +89,7 @@
       block:
         - name: Find existing base image files
           find:
-            paths: /var/lib/libvirt/images
+            paths: "{{ _virt_pools.pools.default.path | default('/data') }}"
            patterns: '*-base.img'
          register: _base_images
@@ -99,9 +104,9 @@
            qemu-img create
            -f qcow2
            {% if vm_use_snapshot | bool %}
-            -b /var/lib/libvirt/images/{{ hostvars[item]['server_hostname'] }}-base.img
+            -b {{ _virt_pools.pools.default.path | default('/data') }}/{{ hostvars[item]['server_hostname'] }}-base.img
            {% endif %}
-            /var/lib/libvirt/images/{{ hostvars[item]['server_hostname'] }}.img
+            {{ _virt_pools.pools.default.path | default('/data') }}/{{ hostvars[item]['server_hostname'] }}.img
            {{ default_vm_storage }}m
          when:
            - hostvars[item]['server_vm'] | default(false) | bool

@@ -41,7 +41,7 @@
 {% elif default_vm_disk_mode == "file" %}
     <disk type='file' device='disk'>
       <driver name='qemu' type='qcow2' cache='none' io='native'/>
-      <source file='/var/lib/libvirt/images/{{ hostvars[item]["server_hostname"] }}.img'/>
+      <source file='{{ _virt_pools.pools.default.path | default('/data') }}/{{ hostvars[item]["server_hostname"] }}.img'/>
 {% endif %}
       <target dev='vda' bus='virtio'/>
       <address type='pci' domain='0x0000' bus='0x00' slot='0x09' function='0x0'/>

@@ -31,6 +31,11 @@
       tags:
         - always
 
+    - name: Get info about the virt storage pools
+      virt_pool:
+        command: info
+      register: _virt_pools
+
     - name: Stop running VMs
       command: "virsh destroy {{ hostvars[item]['server_hostname'] }}"
       failed_when: false
@@ -41,8 +46,8 @@
     - name: Save VM Disk Image
       command: >-
         mv
-        /var/lib/libvirt/images/{{ hostvars[item]['server_hostname'] }}.img
-        /var/lib/libvirt/images/{{ hostvars[item]['server_hostname'] }}-base.img
+        {{ _virt_pools.pools.default.path | default('/data') }}/{{ hostvars[item]['server_hostname'] }}.img
+        {{ _virt_pools.pools.default.path | default('/data') }}/{{ hostvars[item]['server_hostname'] }}-base.img
       when:
         - hostvars[item]['server_vm'] | default(false) | bool
       with_items: "{{ groups['pxe_servers'] }}"

@@ -242,77 +242,153 @@
       with_dict: "{{ mnaio_host_networks }}"
       when: "item.value.iface not in vm_networks.list_nets"
 
-    - name: Locate data volume
-      command: "vgdisplay vg01"
-      failed_when: false
-      when:
-        - default_vm_disk_mode == "lvm"
-      register: data_volume
-
-    - name: Setup the data volume (LVM)
-      when:
-        - default_vm_disk_mode == "lvm"
-        - data_volume.rc != 0
-      block:
-        - name: Locate data disk
+    - name: Locate the largest writable data disk if mnaio_data_disk is not set
       shell: >
         lsblk -brndo NAME,TYPE,FSTYPE,RO,SIZE | awk '/d[b-z]+ disk +0/{ if ($4>m){m=$4; d=$1}}; END{print d}'
      register: lsblk
+      changed_when: false
      when:
        - mnaio_data_disk is undefined
 
-        - name: Create data disk label
-          command: "parted --script /dev/{{ mnaio_data_disk | default(lsblk.stdout) }} mklabel gpt"
-
-        - name: Create data disk partition
-          command: "parted --align optimal --script /dev/{{ mnaio_data_disk | default(lsblk.stdout) }} mkpart data1 ext4 0% 100%"
-
-        - name: Create data volume group
+    - name: Get info about existing virt storage pools
+      virt_pool:
+        command: info
+      register: _virt_pools
+
+    - name: If an existing virt pool does not match default_vm_disk_mode, remove it
+      when:
+        - _virt_pools.pools.default is defined
+        - (default_vm_disk_mode == "file" and _virt_pools.pools.default.format is defined) or
+          (default_vm_disk_mode == "lvm" and _virt_pools.pools.default.format is not defined)
+      block:
+        - name: Dismount the mount point if default_vm_disk_mode is 'lvm'
+          mount:
+            path: /data
+            state: unmounted
+          when:
+            - default_vm_disk_mode == "lvm"
+
+        - name: Stop the pool
+          virt_pool:
+            command: destroy
+            name: default
+
+        - name: Delete the pool, destroying its contents
+          virt_pool:
+            command: delete
+            name: default
+
+        - name: Undefine the pool
+          virt_pool:
+            command: undefine
+            name: default
+
+        - name: Remove the mount point if default_vm_disk_mode is 'lvm'
+          mount:
+            path: /data
+            state: absent
+          when:
+            - default_vm_disk_mode == "lvm"
+
+        - name: Reload systemd to remove generated unit files for mount
+          systemd:
+            daemon_reload: yes
+          when:
+            - default_vm_disk_mode == "lvm"
+
+        - name: Remove the volume group if default_vm_disk_mode is 'file'
+          lvg:
+            vg: vg01
+            state: absent
+          register: _remove_vg
+          when:
+            - default_vm_disk_mode == "file"
+
+        - name: Remove the existing disk partition
+          parted:
+            device: "/dev/{{ mnaio_data_disk | default(lsblk.stdout) }}"
+            number: 1
+            state: absent
+
+    - name: Setup the data disk partition
+      parted:
+        device: "/dev/{{ mnaio_data_disk | default(lsblk.stdout) }}"
+        label: gpt
+        number: 1
+        name: data1
+        state: present
+      register: _add_partition
+
+    - name: Prepare the data disk for 'lvm' default_vm_disk_mode
+      when:
+        - default_vm_disk_mode == "lvm"
+      block:
+        - name: Create the volume group
           lvg:
             vg: vg01
             pvs: "/dev/{{ mnaio_data_disk | default(lsblk.stdout) }}1"
 
-        - name: Locate virt data volume
+        - name: Define the default virt storage pool
           virt_pool:
-            name: "vg01"
-            command: info
-          failed_when: false
+            name: default
+            state: present
+            xml: |
+              <pool type='logical'>
+                <name>default</name>
+                <source>
+                  <name>vg01</name>
+                  <format type='lvm2'/>
+                </source>
+                <target>
+                  <path>/dev/vg01</path>
+                </target>
+              </pool>
+
+    - name: Prepare the data disk for 'file' default_vm_disk_mode
       when:
-            - default_vm_disk_mode == "lvm"
-          register: virt_data_volume
-
-        - name: Create /etc/libvirt/storage directory
-          file:
-            path: "/etc/libvirt/storage/"
-            state: "directory"
-
-        - name: Create virt data volume
+        - default_vm_disk_mode == "file"
       block:
-            - name: Create virt pool
-              virt_pool:
-                command: create
-                name: vg01
-
-            - name: Get virt pool xml
-              virt_pool:
-                command: get_xml
-                name: vg01
-              register: virt_pool_xml
-
-            - name: Write data volume xml
-              copy:
-                content: "{{ virt_pool_xml.get_xml }}"
-                dest: "/etc/libvirt/storage/vg01.xml"
-
-            - name: Define virt data volume
-              virt_pool:
-                command: define
-                name: vg01
-                xml: "/etc/libvirt/storage/vg01.xml"
-                autostart: true
+        - name: Prepare the data disk file system
+          filesystem:
+            fstype: ext4
+            dev: "/dev/{{ mnaio_data_disk | default(lsblk.stdout) }}1"
+            force: yes
          when:
-            - default_vm_disk_mode == "lvm"
-            - virt_data_volume.pools is not defined
+            - _add_partition is changed
+
+        - name: Mount the data disk
+          mount:
+            src: "/dev/{{ mnaio_data_disk | default(lsblk.stdout) }}1"
+            path: /data
+            state: mounted
+            fstype: ext4
+
+        - name: Define the default virt storage pool
+          virt_pool:
+            name: default
+            state: present
+            xml: |
+              <pool type='dir'>
+                <name>default</name>
+                <target>
+                  <path>/data</path>
+                  <permissions>
+                    <mode>0755</mode>
+                    <owner>0</owner>
+                    <group>0</group>
+                  </permissions>
+                </target>
+              </pool>
+
+    - name: Set default virt storage pool to active
+      virt_pool:
+        name: default
+        state: active
+
+    - name: Set default virt storage pool to autostart
+      virt_pool:
+        name: default
+        autostart: yes
 
     - name: Load virtio kernel modules
       shell: |