Cleanup old scripts

* get-values-overrides.sh and wait-for-pods.sh are not
  needed anymore because they are now a part of
  openstack-helm-plugin
* The functionality of setup-gateway.sh and
  setup-ceph-loopback-device.sh moved to deploy-env role

Change-Id: Ia4c4142f98bd914e91830109ae5a0adf83f9d6cf
This commit is contained in:
Vladimir Kozhukalov 2025-02-26 14:14:50 -06:00
parent 090878eba4
commit 86f5b0e36f
4 changed files with 0 additions and 234 deletions

View File

@ -1,18 +0,0 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -e

# Thin wrapper: delegate the values-overrides lookup to the shared helper
# shipped with openstack-helm-infra.
#   HELM_CHART_ROOT_PATH - where the openstack-helm charts live (helper reads it)
#   OSH_INFRA_PATH       - location of the openstack-helm-infra checkout
export HELM_CHART_ROOT_PATH=${HELM_CHART_ROOT_PATH:-"../openstack-helm"}

# Forward ALL arguments, quoted, so values containing whitespace survive and
# callers passing more than two args still work (the old invocation passed
# unquoted $1 $2 only, which word-split and silently dropped extras).
"${OSH_INFRA_PATH:-"../openstack-helm-infra"}"/tools/deployment/common/get-values-overrides.sh "$@"

View File

@ -1,87 +0,0 @@
#!/bin/bash
set -e
# Create two sparse-file-backed loopback block devices for ceph OSDs and
# install a systemd oneshot unit so they are re-attached after reboot.
# Arguments: $1 - loop device node for OSD data (e.g. /dev/loop0)
#            $2 - loop device node for OSD WAL/DB (e.g. /dev/loop1)
# Globals:   CEPH_NAMESPACE (read) - names the backing-file subdirectory
# Exits non-zero if either device does not show up in `losetup` afterwards.
function setup_loopback_devices() {
osd_data_device="$1"
osd_wal_db_device="$2"
namespace=${CEPH_NAMESPACE}
# Sparse backing files: 10G for OSD data, 8G for WAL/DB.
sudo mkdir -p /var/lib/openstack-helm/$namespace
sudo truncate -s 10G /var/lib/openstack-helm/$namespace/ceph-osd-data-loopbackfile.img
sudo truncate -s 8G /var/lib/openstack-helm/$namespace/ceph-osd-db-wal-loopbackfile.img
# NOTE: the heredoc delimiter is unquoted inside double quotes on purpose —
# $osd_data_device, $osd_wal_db_device and $namespace expand NOW, so the
# written unit file contains the literal device paths, not variable names.
sudo -E bash -c "cat <<EOF > /etc/systemd/system/loops-setup.service
[Unit]
Description=Setup loop devices
DefaultDependencies=no
Conflicts=umount.target
Before=local-fs.target
After=systemd-udevd.service
Requires=systemd-udevd.service
[Service]
Type=oneshot
ExecStart=/sbin/losetup $osd_data_device '/var/lib/openstack-helm/$namespace/ceph-osd-data-loopbackfile.img'
ExecStart=/sbin/losetup $osd_wal_db_device '/var/lib/openstack-helm/$namespace/ceph-osd-db-wal-loopbackfile.img'
ExecStop=/sbin/losetup -d $osd_data_device
ExecStop=/sbin/losetup -d $osd_wal_db_device
TimeoutSec=60
RemainAfterExit=yes
[Install]
WantedBy=local-fs.target
Also=systemd-udevd.service
EOF"
# Attach the devices now and enable the unit so they persist across boots.
sudo systemctl daemon-reload
sudo systemctl start loops-setup
sudo systemctl status loops-setup
sudo systemctl enable loops-setup
# let's verify the devices
sudo losetup -a
if losetup |grep -i $osd_data_device; then
echo "ceph osd data disk got created successfully"
else
echo "could not find ceph osd data disk so exiting"
exit 1
fi
if losetup |grep -i $osd_wal_db_device; then
echo "ceph osd wal/db disk got created successfully"
else
echo "could not find ceph osd wal/db disk so exiting"
exit 1
fi
}
# Parse CLI flags.
# NOTE: the old test was [[ "$#" > 0 ]], which is a lexicographic STRING
# comparison inside [[ ]] — use the numeric -gt operator instead.
while [[ "$#" -gt 0 ]]; do case $1 in
    -d|--ceph-osd-data) OSD_DATA_DEVICE="$2"; shift;shift;;
    -w|--ceph-osd-dbwal) OSD_DB_WAL_DEVICE="$2";shift;shift;;
    -v|--verbose) VERBOSE=1;shift;;
    *) echo "Unknown parameter passed: $1"; shift;;
esac; done

# Default the OSD data device to /dev/loop0; if the caller chose a device,
# refuse to proceed when it is already attached to a backing file.
if [ -z "$OSD_DATA_DEVICE" ]; then
    OSD_DATA_DEVICE=/dev/loop0
    echo "Ceph osd data device is not set so using ${OSD_DATA_DEVICE}"
else
    ceph_osd_disk_name=$(basename -- "$OSD_DATA_DEVICE")
    if losetup -a | grep -- "$ceph_osd_disk_name"; then
        echo "Ceph osd data device is already in use, please double check and correct the device name"
        exit 1
    fi
fi

# Same defaulting/in-use check for the WAL/DB device (/dev/loop1 by default).
if [ -z "$OSD_DB_WAL_DEVICE" ]; then
    OSD_DB_WAL_DEVICE=/dev/loop1
    echo "Ceph osd db/wal device is not set so using ${OSD_DB_WAL_DEVICE}"
else
    ceph_dbwal_disk_name=$(basename -- "$OSD_DB_WAL_DEVICE")
    if losetup -a | grep -- "$ceph_dbwal_disk_name"; then
        echo "Ceph osd dbwal device is already in use, please double check and correct the device name"
        exit 1
    fi
fi

: "${CEPH_NAMESPACE:="ceph"}"
# setup loopback devices for ceph osds (args quoted — device paths are
# caller-supplied)
setup_loopback_devices "$OSD_DATA_DEVICE" "$OSD_DB_WAL_DEVICE"

View File

@ -1,77 +0,0 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe

# Prepare the test host to act as the external gateway for the OpenStack
# provider network: demote remote OVN chassis, address br-ex, set up NAT,
# and run a dnsmasq container that resolves public service endpoints.

# By default we set enable-chassis-as-gw on all OVN controllers which means
# all nodes are connected to the provider network, but for test environment this is not
# the case.
if [[ "$FEATURES" =~ (,|^)ovn(,|$) ]]; then
HOSTNAME=$(hostname -f)
# read -r keeps any backslashes intact; $po is quoted — pod names are
# kubectl-generated but quoting costs nothing.
kubectl -n openstack get po --selector application=ovn,component=ovn-controller -o name | while read -r po; do
kubectl -n openstack exec "$po" -c controller -- bash -c "if [[ \$(hostname -f) != ${HOSTNAME} ]]; then ovs-vsctl set open . external-ids:ovn-cms-options=availability-zones=nova; fi"
done
fi
sleep 10

export OS_CLOUD=openstack_helm
openstack network agent list
openstack availability zone list --network

# Assign IP address to br-ex
: ${OSH_EXT_SUBNET:="172.24.4.0/24"}
: ${OSH_BR_EX_ADDR:="172.24.4.1/24"}
sudo ip addr replace "${OSH_BR_EX_ADDR}" dev br-ex
sudo ip link set br-ex up

: ${OPENSTACK_RELEASE:=xena}
: ${CONTAINER_DISTRO_NAME:=ubuntu}
: ${CONTAINER_DISTRO_VERSION:=focal}
: ${DNSMASQ_IMAGE:=docker.io/openstackhelm/neutron:${OPENSTACK_RELEASE}-${CONTAINER_DISTRO_NAME}_${CONTAINER_DISTRO_VERSION}}

# NOTE(portdirect): With Docker >= 1.13.1 the default FORWARD chain policy is
# configured to DROP, for the l3 agent to function as expected and for
# VMs to reach the outside world correctly this needs to be set to ACCEPT.
sudo iptables -P FORWARD ACCEPT

# Setup masquerading on default route dev to public subnet by searching for the
# interface with default routing, if multiple default routes exist then select
# the one with the lowest metric.
DEFAULT_ROUTE_DEV=$(route -n | awk '/^0.0.0.0/ { print $5 " " $NF }' | sort | awk '{ print $NF; exit }')
sudo iptables -t nat -A POSTROUTING -o "${DEFAULT_ROUTE_DEV}" -s "${OSH_EXT_SUBNET}" -j MASQUERADE

# Increase the number of inotify user instances
# otherwise we get the error "failed to create inotify: Too many open files"
# when trying to start the dnsmasq
sudo sysctl fs.inotify.max_user_instances=256

container_id="$(sudo docker ps -f name=br-ex-dns-server -q -a)"
# NOTE(portdirect): Setup DNS for public endpoints
# $container_id MUST be quoted: unquoted, an empty result still passed -z by
# luck, but multiple matching containers (several IDs, newline-separated)
# made `[` fail with "too many arguments" and aborted the script under -e.
if [ -z "$container_id" ]; then
sudo docker run -d \
  --name br-ex-dns-server \
  --net host \
  --cap-add=NET_ADMIN \
  --volume /etc/kubernetes/kubelet-resolv.conf:/etc/kubernetes/kubelet-resolv.conf:ro \
  --entrypoint dnsmasq \
  ${DNSMASQ_IMAGE} \
  --keep-in-foreground \
  --no-hosts \
  --bind-interfaces \
  --resolv-file=/etc/kubernetes/kubelet-resolv.conf \
  --address="/svc.cluster.local/${OSH_BR_EX_ADDR%/*}" \
  --listen-address="${OSH_BR_EX_ADDR%/*}"
else
echo "external bridge for dns already exists"
fi
sleep 3
sudo docker top br-ex-dns-server

View File

@ -1,52 +0,0 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -e

# Usage: wait-for-pods.sh <namespace> [timeout-seconds]
# Polls every 5s until no pod is Pending, every Running pod's containers are
# ready, and every job has reached its completion count — or until the
# timeout (default 900s) elapses, in which case diagnostics are printed and
# the script exits 1.

# Accept the timeout only when it is a positive integer; otherwise fall back
# to 900. (The old code set seconds=900 for bad input but then computed
# timeout=${2:-$seconds}, re-reading $2 — so "$2"=0 or garbage produced an
# instant or broken deadline.)
if [[ "${2:-}" =~ ^[1-9][0-9]*$ ]]; then
  timeout=$2
else
  timeout=900
fi
end=$(( $(date +%s) + timeout ))

while true; do
  # Any pod still in the Pending phase?
  kubectl get pods --namespace="$1" -o json | jq -r \
    '.items[].status.phase' | grep Pending > /dev/null && \
    PENDING="True" || PENDING="False"

  # Any Running pod with a container that is not yet ready?
  query='.items[]|select(.status.phase=="Running")'
  query="$query|.status.containerStatuses[].ready"
  kubectl get pods --namespace="$1" -o json | jq -r "$query" | \
    grep false > /dev/null && READY="False" || READY="True"

  # Any job that has not yet reached its completion count?
  kubectl get jobs --namespace="$1" -o json | jq -r \
    '.items[] | .spec.completions == .status.succeeded' | \
    grep false > /dev/null && JOBR="False" || JOBR="True"

  # Done when nothing is pending, all containers are ready and jobs passed.
  # ([[ ... && ... ]] replaces the deprecated, ambiguous `[ ... -a ... ]`.)
  [[ $PENDING == "False" && $READY == "True" && $JOBR == "True" ]] && \
    break || true

  sleep 5
  now=$(date +%s)
  if [ "$now" -gt "$end" ] ; then
    echo "Containers failed to start after $timeout seconds"
    echo
    kubectl get pods --namespace "$1" -o wide
    echo
    if [[ $PENDING == "True" ]] ; then
      echo "Some pods are in pending state:"
      kubectl get pods --field-selector=status.phase=Pending -n "$1" -o wide
    fi
    [[ $READY == "False" ]] && echo "Some pods are not ready"
    [[ $JOBR == "False" ]] && echo "Some jobs have not succeeded"
    # `exit -1` is non-portable (POSIX exit status is 0-255); 1 = failure.
    exit 1
  fi
done