From 85627eca0cdfdb157e68560bc090054389cc0fa8 Mon Sep 17 00:00:00 2001
From: Chinasubbareddy Mallavarapu <cr3938@att.com>
Date: Thu, 5 Nov 2020 22:10:14 +0000
Subject: [PATCH] [ceph] OSH:Make sure loopback devices persistent across
 reboots

Change-Id: I85e4c29925ac63ea7656901f5b03b919984cf12f
---
 .../install/developer/cleaning-deployment.rst |  6 +++
 .../common/setup-ceph-loopback-device.sh      | 46 +++++++++++++++++--
 2 files changed, 49 insertions(+), 3 deletions(-)

diff --git a/doc/source/install/developer/cleaning-deployment.rst b/doc/source/install/developer/cleaning-deployment.rst
index 358fd9dc41..00811b7b22 100644
--- a/doc/source/install/developer/cleaning-deployment.rst
+++ b/doc/source/install/developer/cleaning-deployment.rst
@@ -75,6 +75,12 @@ containers before removing the directories used on the host by pods.
        losetup -d "$CEPH_OSD_DB_WAL_DEVICE"
     fi
   fi
+  echo "let's disable the service"
+  sudo systemctl disable loops-setup
+  echo "let's remove the service to setup loopback devices"
+  if [ -f "/etc/systemd/system/loops-setup.service" ]; then
+    sudo rm /etc/systemd/system/loops-setup.service
+  fi
 
   # NOTE(portdirect): Clean up mounts left behind by kubernetes pods
   sudo findmnt --raw | awk '/^\/var\/lib\/kubelet\/pods/ { print $1 }' | xargs -r -L1 -P16 sudo umount -f -l
diff --git a/tools/deployment/common/setup-ceph-loopback-device.sh b/tools/deployment/common/setup-ceph-loopback-device.sh
index 67dc6d7953..aa870d18d0 100755
--- a/tools/deployment/common/setup-ceph-loopback-device.sh
+++ b/tools/deployment/common/setup-ceph-loopback-device.sh
@@ -1,4 +1,7 @@
 #!/bin/bash
+
+set -e
+
 function setup_loopback_devices() {
   osd_data_device="$1"
   osd_wal_db_device="$2"
@@ -6,10 +9,47 @@ function setup_loopback_devices() {
   sudo mkdir -p /var/lib/openstack-helm/$namespace
   sudo truncate -s 10G /var/lib/openstack-helm/$namespace/ceph-osd-data-loopbackfile.img
   sudo truncate -s 8G /var/lib/openstack-helm/$namespace/ceph-osd-db-wal-loopbackfile.img
-  sudo losetup $osd_data_device /var/lib/openstack-helm/$namespace/ceph-osd-data-loopbackfile.img
-  sudo losetup $osd_wal_db_device /var/lib/openstack-helm/$namespace/ceph-osd-db-wal-loopbackfile.img
-  #lets verify the devices
+  sudo -E bash -c "cat <<EOF > /etc/systemd/system/loops-setup.service
+[Unit]
+Description=Setup loop devices
+DefaultDependencies=no
+Conflicts=umount.target
+Before=local-fs.target
+After=systemd-udevd.service
+Requires=systemd-udevd.service
+
+[Service]
+Type=oneshot
+ExecStart=/sbin/losetup $osd_data_device '/var/lib/openstack-helm/$namespace/ceph-osd-data-loopbackfile.img'
+ExecStart=/sbin/losetup $osd_wal_db_device '/var/lib/openstack-helm/$namespace/ceph-osd-db-wal-loopbackfile.img'
+ExecStop=/sbin/losetup -d $osd_data_device
+ExecStop=/sbin/losetup -d $osd_wal_db_device
+TimeoutSec=60
+RemainAfterExit=yes
+
+[Install]
+WantedBy=local-fs.target
+Also=systemd-udevd.service
+EOF"
+
+  sudo systemctl daemon-reload
+  sudo systemctl start loops-setup
+  sudo systemctl status loops-setup
+  sudo systemctl enable loops-setup
+  # let's verify the devices
   sudo losetup -a
+  if losetup | grep -i "$osd_data_device"; then
+    echo "ceph osd data disk got created successfully"
+  else
+    echo "could not find ceph osd data disk so exiting"
+    exit 1
+  fi
+  if losetup | grep -i "$osd_wal_db_device"; then
+    echo "ceph osd wal/db disk got created successfully"
+  else
+    echo "could not find ceph osd wal/db  disk so exiting"
+    exit 1
+  fi
 }
 
 while [[ "$#" > 0 ]]; do case $1 in