diff --git a/ceph/templates/bin/_common_functions.sh.tpl b/ceph/templates/bin/_common_functions.sh.tpl new file mode 100644 index 0000000000..9e852384a2 --- /dev/null +++ b/ceph/templates/bin/_common_functions.sh.tpl @@ -0,0 +1,223 @@ +#!/bin/bash +set -ex + +# log arguments with timestamp +function log { + if [ -z "$*" ]; then + return 1 + fi + + TIMESTAMP=$(date '+%F %T') + echo "${TIMESTAMP} $0: $*" + return 0 +} + +# ceph config file exists or die +function check_config { + if [[ ! -e /etc/ceph/${CLUSTER}.conf ]]; then + log "ERROR- /etc/ceph/${CLUSTER}.conf must exist; get it from your existing mon" + exit 1 + fi +} + +# ceph admin key exists or die +function check_admin_key { + if [[ ! -e $ADMIN_KEYRING ]]; then + log "ERROR- $ADMIN_KEYRING must exist; get it from your existing mon" + exit 1 + fi +} + +# Given two strings, return the length of the shared prefix +function prefix_length { + local maxlen=${#1} + for ((i=maxlen-1;i>=0;i--)); do + if [[ "${1:0:i}" == "${2:0:i}" ]]; then + echo $i + return + fi + done +} + +# Test if a command line tool is available +function is_available { + command -v $@ &>/dev/null +} + +# create the mandatory directories +function create_mandatory_directories { + # Let's create the bootstrap directories + for keyring in $OSD_BOOTSTRAP_KEYRING $MDS_BOOTSTRAP_KEYRING $RGW_BOOTSTRAP_KEYRING; do + mkdir -p $(dirname $keyring) + done + + # Let's create the ceph directories + for directory in mon osd mds radosgw tmp mgr; do + mkdir -p /var/lib/ceph/$directory + done + + # Make the monitor directory + mkdir -p "$MON_DATA_DIR" + + # Create socket directory + mkdir -p /var/run/ceph + + # Creating rados directories + mkdir -p /var/lib/ceph/radosgw/${RGW_NAME} + + # Create the MDS directory + mkdir -p /var/lib/ceph/mds/${CLUSTER}-${MDS_NAME} + + # Create the MGR directory + mkdir -p /var/lib/ceph/mgr/${CLUSTER}-$MGR_NAME + + # Adjust the owner of all those directories + chown -R ceph. /var/run/ceph/ /var/lib/ceph/* +} + + +# Calculate proper device names, given a device and partition number +function dev_part { + local osd_device=${1} + local osd_partition=${2} + + if [[ -L ${osd_device} ]]; then + # This device is a symlink. 
Work out its actual device
+    local actual_device=$(readlink -f ${osd_device})
+    local bn=$(basename ${osd_device})
+    if [[ "${actual_device:0-1:1}" == [0-9] ]]; then
+      local desired_partition="${actual_device}p${osd_partition}"
+    else
+      local desired_partition="${actual_device}${osd_partition}"
+    fi
+    # Now search for a symlink in the directory of $osd_device
+    # that has the correct desired partition, and the longest
+    # shared prefix with the original symlink
+    local symdir=$(dirname ${osd_device})
+    local link=""
+    local pfxlen=0
+    for option in $(ls $symdir); do
+      if [[ $(readlink -f $symdir/$option) == $desired_partition ]]; then
+        local optprefixlen=$(prefix_length $option $bn)
+        if [[ $optprefixlen -gt $pfxlen ]]; then
+          link=$symdir/$option
+          pfxlen=$optprefixlen
+        fi
+      fi
+    done
+    if [[ $pfxlen -eq 0 ]]; then
+      >&2 log "Could not locate appropriate symlink for partition ${osd_partition} of ${osd_device}"
+      exit 1
+    fi
+    echo "$link"
+  elif [[ "${osd_device:0-1:1}" == [0-9] ]]; then
+    echo "${osd_device}p${osd_partition}"
+  else
+    echo "${osd_device}${osd_partition}"
+  fi
+}
+
+function osd_trying_to_determine_scenario {
+  if [ -z "${OSD_DEVICE}" ]; then
+    log "Bootstrapped OSD(s) found; using OSD directory"
+    source osd_directory.sh
+    osd_directory
+  elif $(parted --script ${OSD_DEVICE} print | egrep -sq '^ 1.*ceph data'); then
+    log "Bootstrapped OSD found; activating ${OSD_DEVICE}"
+    source osd_disk_activate.sh
+    osd_activate
+  else
+    log "Device detected, assuming ceph-disk scenario is desired"
+    log "Preparing and activating ${OSD_DEVICE}"
+    osd_disk
+  fi
+}
+
+function get_osd_dev {
+  for i in ${OSD_DISKS}
+  do
+    osd_id=$(echo ${i}|sed 's/\(.*\):\(.*\)/\1/')
+    osd_dev="/dev/$(echo ${i}|sed 's/\(.*\):\(.*\)/\2/')"
+    if [ ${osd_id} = ${1} ]; then
+      echo -n "${osd_dev}"
+    fi
+  done
+}
+
+function unsupported_scenario {
+  echo "ERROR: '${CEPH_DAEMON}' scenario or key/value store '${KV_TYPE}' is not supported by this distribution."
+  echo "ERROR: for the list of supported scenarios, please refer to your vendor."
+  exit 1
+}
+
+function is_integer {
+  # This function tells whether the passed argument is an integer
+  # It also supports negative integers
+  # We use $@ here to consider everything given as parameter and not only the
+  # first one: that is mainly for split strings like "10 10"
+  [[ $@ =~ ^-?[0-9]+$ ]]
+}
+
+# Transform any set of strings to lowercase
+function to_lowercase {
+  echo "${@,,}"
+}
+
+# Transform any set of strings to uppercase
+function to_uppercase {
+  echo "${@^^}"
+}
+
+# Replace commas with spaces in a comma-separated variable
+# e.g.: DEBUG=foo,bar will become:
+# echo ${DEBUG//,/ }
+# foo bar
+function comma_to_space {
+  echo "${@//,/ }"
+}
+
+# Get the base distro by discovering the package manager
+function get_package_manager {
+  if is_available rpm; then
+    OS_VENDOR=redhat
+  elif is_available dpkg; then
+    OS_VENDOR=ubuntu
+  fi
+}
+
+# Determine if current distribution is an Ubuntu-based distribution
+function is_ubuntu {
+  get_package_manager
+  [[ "$OS_VENDOR" == "ubuntu" ]]
+}
+
+# Determine if current distribution is a RedHat-based distribution
+function is_redhat {
+  get_package_manager
+  [[ "$OS_VENDOR" == "redhat" ]]
+}
+
+# Wait for a file to exist, regardless of the type
+function wait_for_file {
+  timeout 10 bash -c "while [ ! -e ${1} ]; do echo 'Waiting for ${1} to show up' && sleep 1 ; done"
+}
+
+function valid_scenarios {
+  log "Valid values for CEPH_DAEMON are $(to_uppercase $ALL_SCENARIOS)."
+ log "Valid values for the daemon parameter are $ALL_SCENARIOS" +} + +function invalid_ceph_daemon { + if [ -z "$CEPH_DAEMON" ]; then + log "ERROR- One of CEPH_DAEMON or a daemon parameter must be defined as the name of the daemon you want to deploy." + valid_scenarios + exit 1 + else + log "ERROR- unrecognized scenario." + valid_scenarios + fi +} + +function get_osd_path { + echo "$OSD_PATH_BASE-$1/" +} diff --git a/ceph/templates/bin/_entrypoint.sh.tpl b/ceph/templates/bin/_entrypoint.sh.tpl new file mode 100644 index 0000000000..8c5cac4289 --- /dev/null +++ b/ceph/templates/bin/_entrypoint.sh.tpl @@ -0,0 +1,136 @@ +#!/bin/bash +set -ex +export LC_ALL=C + +source variables_entrypoint.sh +source common_functions.sh +source debug.sh + +########################### +# CONFIGURATION GENERATOR # +########################### + +# Load in the bootstrapping routines +# based on the data store +case "$KV_TYPE" in + etcd) + # TAG: kv_type_etcd + source /config.kv.etcd.sh + ;; + k8s|kubernetes) + # TAG: kv_type_k8s + source /config.k8s.sh + ;; + *) + source /config.static.sh + ;; +esac + + +############### +# CEPH_DAEMON # +############### + +# Normalize DAEMON to lowercase +CEPH_DAEMON=$(to_lowercase ${CEPH_DAEMON}) + +create_mandatory_directories + +# If we are given a valid first argument, set the +# CEPH_DAEMON variable from it +case "$CEPH_DAEMON" in + populate_kvstore) + # TAG: populate_kvstore + source populate_kv.sh + populate_kv + ;; + mon) + # TAG: mon + source start_mon.sh + start_mon + ;; + osd) + # TAG: osd + source start_osd.sh + start_osd + ;; + osd_directory) + # TAG: osd_directory + source start_osd.sh + OSD_TYPE="directory" + start_osd + ;; + osd_directory_single) + # TAG: osd_directory_single + source start_osd.sh + OSD_TYPE="directory_single" + start_osd + ;; + osd_ceph_disk) + # TAG: osd_ceph_disk + source start_osd.sh + OSD_TYPE="disk" + start_osd + ;; + osd_ceph_disk_prepare) + # TAG: osd_ceph_disk_prepare + source start_osd.sh + OSD_TYPE="prepare" + start_osd + ;; + osd_ceph_disk_activate) + # TAG: osd_ceph_disk_activate + source start_osd.sh + OSD_TYPE="activate" + start_osd + ;; + osd_ceph_activate_journal) + # TAG: osd_ceph_activate_journal + source start_osd.sh + OSD_TYPE="activate_journal" + start_osd + ;; + mds) + # TAG: mds + source start_mds.sh + start_mds + ;; + rgw) + # TAG: rgw + source start_rgw.sh + start_rgw + ;; + rgw_user) + # TAG: rgw_user + source start_rgw.sh + create_rgw_user + ;; + restapi) + # TAG: restapi + source start_restapi.sh + start_restapi + ;; + nfs) + # TAG: nfs + echo "Temporarily disabled due to broken package dependencies with nfs-ganesha" + echo "For more info see: https://github.com/ceph/ceph-docker/pull/564" + exit 1 + source start_nfs.sh + start_nfs + ;; + zap_device) + # TAG: zap_device + source zap_device.sh + zap_device + ;; + mon_health) + # TAG: mon_health + source watch_mon_health.sh + watch_mon_health + ;; + *) + invalid_ceph_daemon + ;; +esac + +exit 0 diff --git a/ceph/templates/bin/_osd_activate_journal.sh.tpl b/ceph/templates/bin/_osd_activate_journal.sh.tpl new file mode 100644 index 0000000000..4c2cca33f7 --- /dev/null +++ b/ceph/templates/bin/_osd_activate_journal.sh.tpl @@ -0,0 +1,20 @@ +#!/bin/bash +set -ex + +function osd_activate_journal { + if [[ -z "${OSD_JOURNAL}" ]];then + log "ERROR- You must provide a device to build your OSD journal ie: /dev/sdb2" + exit 1 + fi + + # watch the udev event queue, and exit if all current events are handled + udevadm settle --timeout=600 + + # wait till partition exists + wait_for 
${OSD_JOURNAL}
+
+  chown ceph. ${OSD_JOURNAL}
+  ceph-disk -v --setuser ceph --setgroup disk activate-journal ${OSD_JOURNAL}
+
+  start_osd
+}
diff --git a/ceph/templates/bin/_osd_common.sh.tpl b/ceph/templates/bin/_osd_common.sh.tpl
new file mode 100644
index 0000000000..7d5682c225
--- /dev/null
+++ b/ceph/templates/bin/_osd_common.sh.tpl
@@ -0,0 +1,31 @@
+# Start the latest OSD
+# When using forego, we don't run ceph-osd here; start_forego will do it later
+function start_osd() {
+  mode=$1 #forego or empty
+
+  OSD_ID=$(cat /var/lib/ceph/osd/$(ls -ltr /var/lib/ceph/osd/ | tail -n1 | awk -v pattern="$CLUSTER" '$0 ~ pattern {print $9}')/whoami)
+  OSD_PATH=$(get_osd_path $OSD_ID)
+  OSD_KEYRING="$OSD_PATH/keyring"
+  OSD_WEIGHT=$(df -P -k $OSD_PATH | tail -1 | awk '{ d= $2/1073741824 ; r = sprintf("%.2f", d); print r }')
+  ceph ${CLI_OPTS} --name=osd.${OSD_ID} --keyring=$OSD_KEYRING osd crush create-or-move -- ${OSD_ID} ${OSD_WEIGHT} ${CRUSH_LOCATION}
+
+  # ceph-disk activate has exec'ed /usr/bin/ceph-osd ${CLI_OPTS} -f -i ${OSD_ID}
+  # wait till docker stop or ceph-osd is killed
+  OSD_PID=$(ps -ef |grep ceph-osd |grep osd.${OSD_ID} |awk '{print $2}')
+  if [ -n "${OSD_PID}" ]; then
+    log "OSD (PID ${OSD_PID}) is running, waiting till it exits"
+    while [ -e /proc/${OSD_PID} ]; do sleep 1;done
+  fi
+
+  if [[ "$mode" == "forego" ]]; then
+    echo "${CLUSTER}-${OSD_ID}: /usr/bin/ceph-osd ${CLI_OPTS} -f -i ${OSD_ID} --setuser ceph --setgroup disk" | tee -a /etc/forego/${CLUSTER}/Procfile
+  else
+    log "SUCCESS"
+    exec /usr/bin/ceph-osd ${CLI_OPTS} -f -i ${OSD_ID} --setuser ceph --setgroup disk
+  fi
+}
+
+# Start forego
+function start_forego() {
+  exec /usr/local/bin/forego start -f /etc/forego/${CLUSTER}/Procfile
+}
diff --git a/ceph/templates/bin/_osd_directory.sh.tpl b/ceph/templates/bin/_osd_directory.sh.tpl
new file mode 100644
index 0000000000..23159003e7
--- /dev/null
+++ b/ceph/templates/bin/_osd_directory.sh.tpl
@@ -0,0 +1,77 @@
+#!/bin/bash
+set -ex
+
+function osd_directory {
+  if [[ ! -d /var/lib/ceph/osd ]]; then
+    log "ERROR- could not find the osd directory, did you bind mount the OSD data directory?"
+    log "ERROR- use -v :/var/lib/ceph/osd"
+    exit 1
+  fi
+
+  if [ -z "${HOSTNAME}" ]; then
+    log "HOSTNAME not set; this prevents adding the OSD to the CRUSH map"
+    exit 1
+  fi
+
+  # check if anything is present, if not, create an osd and its directory
+  if [[ -n "$(find /var/lib/ceph/osd -prune -empty)" ]]; then
+    log "Creating osd with ceph --cluster ${CLUSTER} osd create"
+    OSD_ID=$(ceph --cluster ${CLUSTER} osd create)
+    if is_integer "$OSD_ID"; then
+      log "OSD created with ID: ${OSD_ID}"
+    else
+      log "OSD creation failed: ${OSD_ID}"
+      exit 1
+    fi
+
+    OSD_PATH=$(get_osd_path $OSD_ID)
+
+    # create the folder and own it
+    mkdir -p $OSD_PATH
+    chown ceph. $OSD_PATH
+    log "created folder $OSD_PATH"
+  fi
+
+  # create the directory and an empty Procfile
+  mkdir -p /etc/forego/${CLUSTER}
+  echo "" > /etc/forego/${CLUSTER}/Procfile
+
+  for OSD_ID in $(ls /var/lib/ceph/osd | sed 's/.*-//'); do
+    OSD_PATH=$(get_osd_path $OSD_ID)
+    OSD_KEYRING="$OSD_PATH/keyring"
+
+    if [ -n "${JOURNAL_DIR}" ]; then
+      OSD_J="${JOURNAL_DIR}/journal.${OSD_ID}"
+      chown -R ceph. ${JOURNAL_DIR}
+    else
+      if [ -n "${JOURNAL}" ]; then
+        OSD_J=${JOURNAL}
+        chown -R ceph. $(dirname ${JOURNAL})
+      else
+        OSD_J=${OSD_PATH}/journal
+      fi
+    fi
+    # check to see if our osd has been initialized
+    if [ ! -e ${OSD_PATH}/keyring ]; then
+      chown ceph.
$OSD_PATH + # create osd key and file structure + ceph-osd ${CLI_OPTS} -i $OSD_ID --mkfs --mkkey --mkjournal --osd-journal ${OSD_J} --setuser ceph --setgroup ceph + if [ ! -e $OSD_BOOTSTRAP_KEYRING ]; then + log "ERROR- $OSD_BOOTSTRAP_KEYRING must exist. You can extract it from your current monitor by running 'ceph auth get client.bootstrap-osd -o $OSD_BOOTSTRAP_KEYRING '" + exit 1 + fi + timeout 10 ceph ${CLI_OPTS} --name client.bootstrap-osd --keyring $OSD_BOOTSTRAP_KEYRING health || exit 1 + # add the osd key + ceph ${CLI_OPTS} --name client.bootstrap-osd --keyring $OSD_BOOTSTRAP_KEYRING auth add osd.${OSD_ID} -i ${OSD_KEYRING} osd 'allow *' mon 'allow profile osd' || log $1 + log "done adding key" + chown ceph. ${OSD_KEYRING} + chmod 0600 ${OSD_KEYRING} + # add the osd to the crush map + OSD_WEIGHT=$(df -P -k $OSD_PATH | tail -1 | awk '{ d= $2/1073741824 ; r = sprintf("%.2f", d); print r }') + ceph ${CLI_OPTS} --name=osd.${OSD_ID} --keyring=${OSD_KEYRING} osd crush create-or-move -- ${OSD_ID} ${OSD_WEIGHT} ${CRUSH_LOCATION} + fi + echo "${CLUSTER}-${OSD_ID}: /usr/bin/ceph-osd ${CLI_OPTS} -f -i ${OSD_ID} --osd-journal ${OSD_J} -k $OSD_KEYRING" | tee -a /etc/forego/${CLUSTER}/Procfile + done + log "SUCCESS" + start_forego +} diff --git a/ceph/templates/bin/_osd_directory_single.sh.tpl b/ceph/templates/bin/_osd_directory_single.sh.tpl new file mode 100644 index 0000000000..8fadc9dd61 --- /dev/null +++ b/ceph/templates/bin/_osd_directory_single.sh.tpl @@ -0,0 +1,33 @@ +#!/bin/bash +set -ex + +function osd_directory_single { + if [[ ! -d /var/lib/ceph/osd ]]; then + log "ERROR- could not find the osd directory, did you bind mount the OSD data directory?" + log "ERROR- use -v :/var/lib/ceph/osd" + exit 1 + fi + + # pick one osd and make sure no lock is held + for OSD_ID in $(ls /var/lib/ceph/osd | sed 's/.*-//'); do + OSD_PATH=$(get_osd_path $OSD_ID) + OSD_KEYRING="$OSD_PATH/keyring" + + if [[ -n "$(find $OSD_PATH -prune -empty)" ]]; then + log "Looks like OSD: ${OSD_ID} has not been bootstrapped yet, doing nothing, moving on to the next discoverable OSD" + else + # check if the osd has a lock, if yes moving on, if not we run it + # many thanks to Julien Danjou for the python piece + if python -c "import sys, fcntl, struct; l = fcntl.fcntl(open('${OSD_PATH}/fsid', 'a'), fcntl.F_GETLK, struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)); l_type, l_whence, l_start, l_len, l_pid, l_sysid = struct.unpack('hhllhh', l); sys.exit(0 if l_type == fcntl.F_UNLCK else 1)"; then + log "Looks like OSD: ${OSD_ID} is not started, starting it..." 
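+      # NOTE: the fcntl F_GETLK probe above reported F_UNLCK, i.e. no running ceph-osd
+      # holds a lock on ${OSD_PATH}/fsid, so it is safe to start this OSD in the foreground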
+ log "SUCCESS" + exec ceph-osd $DAEMON_OPTS -i ${OSD_ID} -k $OSD_KEYRING + break + fi + fi + done + log "Looks like all the OSDs are already running, doing nothing" + log "Exiting the container" + log "SUCCESS" + exit 0 +} diff --git a/ceph/templates/bin/_osd_disk_activate.sh.tpl b/ceph/templates/bin/_osd_disk_activate.sh.tpl new file mode 100644 index 0000000000..3a3df2c3bd --- /dev/null +++ b/ceph/templates/bin/_osd_disk_activate.sh.tpl @@ -0,0 +1,51 @@ +#!/bin/bash +set -ex + +function osd_activate { + if [[ -z "${OSD_DEVICE}" ]];then + log "ERROR- You must provide a device to build your OSD ie: /dev/sdb" + exit 1 + fi + + CEPH_DISK_OPTIONS="" + DATA_UUID=$(blkid -o value -s PARTUUID ${OSD_DEVICE}1) + LOCKBOX_UUID=$(blkid -o value -s PARTUUID ${OSD_DEVICE}3 || true) + JOURNAL_PART=$(dev_part ${OSD_DEVICE} 2) + ACTUAL_OSD_DEVICE=$(readlink -f ${OSD_DEVICE}) # resolve /dev/disk/by-* names + + # watch the udev event queue, and exit if all current events are handled + udevadm settle --timeout=600 + + # wait till partition exists then activate it + if [[ -n "${OSD_JOURNAL}" ]]; then + wait_for_file ${OSD_DEVICE} + chown ceph. ${OSD_JOURNAL} + else + wait_for_file $(dev_part ${OSD_DEVICE} 1) + chown ceph. $JOURNAL_PART + fi + + DATA_PART=$(dev_part ${OSD_DEVICE} 1) + MOUNTED_PART=${DATA_PART} + + if [[ ${OSD_DMCRYPT} -eq 1 ]]; then + echo "Mounting LOCKBOX directory" + # NOTE(leseb): adding || true so when this bug will be fixed the entrypoint will not fail + # Ceph bug tracker: http://tracker.ceph.com/issues/18945 + mkdir -p /var/lib/ceph/osd-lockbox/${DATA_UUID} + mount /dev/disk/by-partuuid/${LOCKBOX_UUID} /var/lib/ceph/osd-lockbox/${DATA_UUID} || true + CEPH_DISK_OPTIONS="$CEPH_DISK_OPTIONS --dmcrypt" + MOUNTED_PART="/dev/mapper/${DATA_UUID}" + fi + + ceph-disk -v --setuser ceph --setgroup disk activate ${CEPH_DISK_OPTIONS} --no-start-daemon ${DATA_PART} + + OSD_ID=$(grep "${MOUNTED_PART}" /proc/mounts | awk '{print $2}' | grep -oh '[0-9]*') + OSD_PATH=$(get_osd_path $OSD_ID) + OSD_KEYRING="$OSD_PATH/keyring" + OSD_WEIGHT=$(df -P -k $OSD_PATH | tail -1 | awk '{ d= $2/1073741824 ; r = sprintf("%.2f", d); print r }') + ceph ${CLI_OPTS} --name=osd.${OSD_ID} --keyring=$OSD_KEYRING osd crush create-or-move -- ${OSD_ID} ${OSD_WEIGHT} ${CRUSH_LOCATION} + + log "SUCCESS" + exec /usr/bin/ceph-osd ${CLI_OPTS} -f -i ${OSD_ID} --setuser ceph --setgroup disk +} diff --git a/ceph/templates/bin/_osd_disk_prepare.sh.tpl b/ceph/templates/bin/_osd_disk_prepare.sh.tpl new file mode 100644 index 0000000000..229360ac54 --- /dev/null +++ b/ceph/templates/bin/_osd_disk_prepare.sh.tpl @@ -0,0 +1,74 @@ +#!/bin/bash +set -ex + +function osd_disk_prepare { + if [[ -z "${OSD_DEVICE}" ]];then + log "ERROR- You must provide a device to build your OSD ie: /dev/sdb" + exit 1 + fi + + if [[ ! -e "${OSD_DEVICE}" ]]; then + log "ERROR- The device pointed by OSD_DEVICE ($OSD_DEVICE) doesn't exist !" + exit 1 + fi + + if [ ! -e $OSD_BOOTSTRAP_KEYRING ]; then + log "ERROR- $OSD_BOOTSTRAP_KEYRING must exist. You can extract it from your current monitor by running 'ceph auth get client.bootstrap-osd -o $OSD_BOOTSTRAP_KEYRING'" + exit 1 + fi + timeout 10 ceph ${CLI_OPTS} --name client.bootstrap-osd --keyring $OSD_BOOTSTRAP_KEYRING health || exit 1 + + # check device status first + if ! 
parted --script ${OSD_DEVICE} print > /dev/null 2>&1; then + if [[ ${OSD_FORCE_ZAP} -eq 1 ]]; then + log "It looks like ${OSD_DEVICE} isn't consistent, however OSD_FORCE_ZAP is enabled so we are zapping the device anyway" + ceph-disk -v zap ${OSD_DEVICE} + else + log "Regarding parted, device ${OSD_DEVICE} is inconsistent/broken/weird." + log "It would be too dangerous to destroy it without any notification." + log "Please set OSD_FORCE_ZAP to '1' if you really want to zap this disk." + exit 1 + fi + fi + + # then search for some ceph metadata on the disk + if [[ "$(parted --script ${OSD_DEVICE} print | egrep '^ 1.*ceph data')" ]]; then + if [[ ${OSD_FORCE_ZAP} -eq 1 ]]; then + log "It looks like ${OSD_DEVICE} is an OSD, however OSD_FORCE_ZAP is enabled so we are zapping the device anyway" + ceph-disk -v zap ${OSD_DEVICE} + else + log "INFO- It looks like ${OSD_DEVICE} is an OSD, set OSD_FORCE_ZAP=1 to use this device anyway and zap its content" + log "You can also use the zap_device scenario on the appropriate device to zap it" + log "Moving on, trying to activate the OSD now." + return + fi + fi + + if [[ ${OSD_BLUESTORE} -eq 1 ]]; then + ceph-disk -v prepare ${CLI_OPTS} --bluestore ${OSD_DEVICE} + elif [[ ${OSD_DMCRYPT} -eq 1 ]]; then + get_admin_key + check_admin_key + # the admin key must be present on the node + # in order to store the encrypted key in the monitor's k/v store + ceph-disk -v prepare ${CLI_OPTS} --journal-uuid ${OSD_JOURNAL_UUID} --lockbox-uuid ${OSD_LOCKBOX_UUID} --dmcrypt ${OSD_DEVICE} ${OSD_JOURNAL} + echo "Unmounting LOCKBOX directory" + # NOTE(leseb): adding || true so when this bug will be fixed the entrypoint will not fail + # Ceph bug tracker: http://tracker.ceph.com/issues/18944 + DATA_UUID=$(blkid -o value -s PARTUUID ${OSD_DEVICE}1) + umount /var/lib/ceph/osd-lockbox/${DATA_UUID} || true + else + ceph-disk -v prepare ${CLI_OPTS} --journal-uuid ${OSD_JOURNAL_UUID} ${OSD_DEVICE} ${OSD_JOURNAL} + fi + + # watch the udev event queue, and exit if all current events are handled + udevadm settle --timeout=600 + + if [[ -n "${OSD_JOURNAL}" ]]; then + wait_for_file ${OSD_JOURNAL} + chown ceph. ${OSD_JOURNAL} + else + wait_for_file $(dev_part ${OSD_DEVICE} 2) + chown ceph. $(dev_part ${OSD_DEVICE} 2) + fi +} diff --git a/ceph/templates/bin/_osd_disks.sh.tpl b/ceph/templates/bin/_osd_disks.sh.tpl new file mode 100644 index 0000000000..7316970930 --- /dev/null +++ b/ceph/templates/bin/_osd_disks.sh.tpl @@ -0,0 +1,66 @@ +#!/bin/bash +set -ex + +function osd_disks { + if [[ ! -d /var/lib/ceph/osd ]]; then + log "ERROR- could not find the osd directory, did you bind mount the OSD data directory?" + log "ERROR- use -v :/var/lib/ceph/osd" + exit 1 + fi + if [[ -z ${OSD_DISKS} ]]; then + log "ERROR- could not find the osd devices, did you configure OSD disks?" 
+ log "ERROR- use -e OSD_DISKS=\"0:sdd 1:sde 2:sdf\"" + exit 1 + fi + + # Create the directory and an empty Procfile + mkdir -p /etc/forego/${CLUSTER} + echo "" > /etc/forego/${CLUSTER}/Procfile + + # check if anything is there, if not create an osd with directory + if [[ -z "$(find /var/lib/ceph/osd -prune -empty)" ]]; then + log "Mount existing and prepared OSD disks for ceph-cluster ${CLUSTER}" + for OSD_ID in $(ls /var/lib/ceph/osd | sed 's/.*-//'); do + OSD_PATH=$(get_osd_path $OSD_ID) + OSD_KEYRING="$OSD_PATH/keyring" + OSD_DEV=$(get_osd_dev ${OSD_ID}) + if [[ -z ${OSD_DEV} ]]; then + log "No device mapping for ${CLUSTER}-${OSD_ID} for ceph-cluster ${CLUSTER}" + exit 1 + fi + mount ${MOUNT_OPTS} $(dev_part ${OSD_DEV} 1) $OSD_PATH + xOSD_ID=$(cat $OSD_PATH/whoami) + if [[ "${OSD_ID}" != "${xOSD_ID}" ]]; then + log "Device ${OSD_DEV} is corrupt for $OSD_PATH" + exit 1 + fi + echo "${CLUSTER}-${OSD_ID}: /usr/bin/ceph-osd ${CLI_OPTS} -f -i ${OSD_ID} --setuser ceph --setgroup disk" | tee -a /etc/forego/${CLUSTER}/Procfile + done + exec /usr/local/bin/forego start -f /etc/forego/${CLUSTER}/Procfile + fi + + # + # As per the exec in the first statement, we only reach here if there is some OSDs + # + for OSD_DISK in ${OSD_DISKS}; do + OSD_DEV="/dev/$(echo ${OSD_DISK}|sed 's/\(.*\):\(.*\)/\2/')" + + if [[ "$(parted --script ${OSD_DEV} print | egrep '^ 1.*ceph data')" ]]; then + if [[ ${OSD_FORCE_ZAP} -eq 1 ]]; then + ceph-disk -v zap ${OSD_DEV} + else + log "ERROR- It looks like the device ($OSD_DEV) is an OSD, set OSD_FORCE_ZAP=1 to use this device anyway and zap its content" + exit 1 + fi + fi + + ceph-disk -v prepare ${CLI_OPTS} ${OSD_DEV} ${OSD_JOURNAL} + + # prepare the OSDs configuration and start them later + start_osd forego + done + + log "SUCCESS" + # Actually, starting them as per forego configuration + start_forego +} diff --git a/ceph/templates/bin/_remove-mon.sh.tpl b/ceph/templates/bin/_remove-mon.sh.tpl new file mode 100644 index 0000000000..626bc5c043 --- /dev/null +++ b/ceph/templates/bin/_remove-mon.sh.tpl @@ -0,0 +1,5 @@ +#!/bin/bash + +set -ex + +ceph mon remove $(hostname -s) diff --git a/ceph/templates/bin/_start_mds.sh.tpl b/ceph/templates/bin/_start_mds.sh.tpl new file mode 100644 index 0000000000..3e92562ba4 --- /dev/null +++ b/ceph/templates/bin/_start_mds.sh.tpl @@ -0,0 +1,58 @@ +#!/bin/bash +set -ex + +function start_mds { + get_config + check_config + + # Check to see if we are a new MDS + if [ ! -e $MDS_KEYRING ]; then + + if [ -e $ADMIN_KEYRING ]; then + KEYRING_OPT="--name client.admin --keyring $ADMIN_KEYRING" + elif [ -e $MDS_BOOTSTRAP_KEYRING ]; then + KEYRING_OPT="--name client.bootstrap-mds --keyring $MDS_BOOTSTRAP_KEYRING" + else + log "ERROR- Failed to bootstrap MDS: could not find admin or bootstrap-mds keyring. You can extract it from your current monitor by running 'ceph auth get client.bootstrap-mds -o $MDS_BOOTSTRAP_KEYRING" + exit 1 + fi + + timeout 10 ceph ${CLI_OPTS} $KEYRING_OPT health || exit 1 + + # Generate the MDS key + ceph ${CLI_OPTS} $KEYRING_OPT auth get-or-create mds.$MDS_NAME osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o $MDS_KEYRING + chown ceph. 
$MDS_KEYRING + chmod 600 $MDS_KEYRING + + fi + + # NOTE (leseb): having the admin keyring is really a security issue + # If we need to bootstrap a MDS we should probably create the following on the monitors + # I understand that this handy to do this here + # but having the admin key inside every container is a concern + + # Create the Ceph filesystem, if necessary + if [ $CEPHFS_CREATE -eq 1 ]; then + + get_admin_key + check_admin_key + + if [[ "$(ceph ${CLI_OPTS} fs ls | grep -c name:.${CEPHFS_NAME},)" -eq 0 ]]; then + # Make sure the specified data pool exists + if ! ceph ${CLI_OPTS} osd pool stats ${CEPHFS_DATA_POOL} > /dev/null 2>&1; then + ceph ${CLI_OPTS} osd pool create ${CEPHFS_DATA_POOL} ${CEPHFS_DATA_POOL_PG} + fi + + # Make sure the specified metadata pool exists + if ! ceph ${CLI_OPTS} osd pool stats ${CEPHFS_METADATA_POOL} > /dev/null 2>&1; then + ceph ${CLI_OPTS} osd pool create ${CEPHFS_METADATA_POOL} ${CEPHFS_METADATA_POOL_PG} + fi + + ceph ${CLI_OPTS} fs new ${CEPHFS_NAME} ${CEPHFS_METADATA_POOL} ${CEPHFS_DATA_POOL} + fi + fi + + log "SUCCESS" + # NOTE: prefixing this with exec causes it to die (commit suicide) + /usr/bin/ceph-mds $DAEMON_OPTS -i ${MDS_NAME} +} diff --git a/ceph/templates/bin/_start_mon.sh.tpl b/ceph/templates/bin/_start_mon.sh.tpl new file mode 100644 index 0000000000..4a62a49757 --- /dev/null +++ b/ceph/templates/bin/_start_mon.sh.tpl @@ -0,0 +1,141 @@ +#!/bin/bash +set -ex + +IPV4_REGEXP='[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}' +IPV4_NETWORK_REGEXP="$IPV4_REGEXP/[0-9]\{1,2\}" + +function flat_to_ipv6 { + # Get a flat input like fe800000000000000042acfffe110003 and output fe80::0042:acff:fe11:0003 + # This input usually comes from the ipv6_route or if_inet6 files from /proc + + # First, split the string in set of 4 bytes with ":" as separator + value=$(echo "$@" | sed -e 's/.\{4\}/&:/g' -e '$s/\:$//') + + # Let's remove the useless 0000 and "::" + value=${value//0000/:}; + while $(echo $value | grep -q ":::"); do + value=${value//::/:}; + done + echo $value +} + +function get_ip { + NIC=$1 + # IPv4 is the default unless we specify it + IP_VERSION=${2:-4} + # We should avoid reporting any IPv6 "scope local" interface that would make the ceph bind() call to fail + if is_available ip; then + ip -$IP_VERSION -o a s $NIC | grep "scope global" | awk '{ sub ("/..", "", $4); print $4 }' || true + else + case "$IP_VERSION" in + 6) + # We don't want local scope, so let's remove field 4 if not 00 + ip=$(flat_to_ipv6 $(grep $NIC /proc/net/if_inet6 | awk '$4==00 {print $1}')) + # IPv6 IPs should be surrounded by brackets to let ceph-monmap being happy + echo "[$ip]" + ;; + *) + grep -o "$IPV4_REGEXP" /proc/net/fib_trie | grep -vEw "^127|255$|0$" | head -1 + ;; + esac + fi +} + +function get_network { + NIC=$1 + # IPv4 is the default unless we specify it + IP_VERSION=${2:-4} + + case "$IP_VERSION" in + 6) + if is_available ip; then + ip -$IP_VERSION route show dev $NIC | grep proto | awk '{ print $1 }' | grep -v default | grep -vi ^fe80 || true + else + # We don't want the link local routes + line=$(grep $NIC /proc/1/task/1/net/ipv6_route | awk '$2==40' | grep -v ^fe80 || true) + base=$(echo $line | awk '{ print $1 }') + base=$(flat_to_ipv6 $base) + mask=$(echo $line | awk '{ print $2 }') + echo "$base/$((16#$mask))" + fi + ;; + *) + if is_available ip; then + ip -$IP_VERSION route show dev $NIC | grep proto | awk '{ print $1 }' | grep -v default | grep "/" || true + else + grep -o "$IPV4_NETWORK_REGEXP" /proc/net/fib_trie | grep -vE "^127|^0" | head 
-1 + fi + ;; + esac +} + +function start_mon { + if [[ ${NETWORK_AUTO_DETECT} -eq 0 ]]; then + if [[ -z "$CEPH_PUBLIC_NETWORK" ]]; then + log "ERROR- CEPH_PUBLIC_NETWORK must be defined as the name of the network for the OSDs" + exit 1 + fi + + if [[ -z "$MON_IP" ]]; then + log "ERROR- MON_IP must be defined as the IP address of the monitor" + exit 1 + fi + else + NIC_MORE_TRAFFIC=$(grep -vE "lo:|face|Inter" /proc/net/dev | sort -n -k 2 | tail -1 | awk '{ sub (":", "", $1); print $1 }') + IP_VERSION=4 + if [ ${NETWORK_AUTO_DETECT} -gt 1 ]; then + MON_IP=$(get_ip ${NIC_MORE_TRAFFIC} ${NETWORK_AUTO_DETECT}) + CEPH_PUBLIC_NETWORK=$(get_network ${NIC_MORE_TRAFFIC} ${NETWORK_AUTO_DETECT}) + IP_VERSION=${NETWORK_AUTO_DETECT} + else # Means -eq 1 + MON_IP="[$(get_ip ${NIC_MORE_TRAFFIC} 6)]" + CEPH_PUBLIC_NETWORK=$(get_network ${NIC_MORE_TRAFFIC} 6) + IP_VERSION=6 + if [ -z "$MON_IP" ]; then + MON_IP=$(get_ip ${NIC_MORE_TRAFFIC}) + CEPH_PUBLIC_NETWORK=$(get_network ${NIC_MORE_TRAFFIC}) + IP_VERSION=4 + fi + fi + fi + + if [[ -z "$MON_IP" || -z "$CEPH_PUBLIC_NETWORK" ]]; then + log "ERROR- it looks like we have not been able to discover the network settings" + exit 1 + fi + + get_mon_config $IP_VERSION + + # If we don't have a monitor keyring, this is a new monitor + if [ ! -e "$MON_DATA_DIR/keyring" ]; then + if [ ! -e $MON_KEYRING ]; then + log "ERROR- $MON_KEYRING must exist. You can extract it from your current monitor by running 'ceph auth get mon. -o $MON_KEYRING' or use a KV Store" + exit 1 + fi + + if [ ! -e $MONMAP ]; then + log "ERROR- $MONMAP must exist. You can extract it from your current monitor by running 'ceph mon getmap -o $MONMAP' or use a KV Store" + exit 1 + fi + + # Testing if it's not the first monitor, if one key doesn't exist we assume none of them exist + for keyring in $OSD_BOOTSTRAP_KEYRING $MDS_BOOTSTRAP_KEYRING $RGW_BOOTSTRAP_KEYRING $ADMIN_KEYRING; do + ceph-authtool $MON_KEYRING --import-keyring $keyring + done + + # Prepare the monitor daemon's directory with the map and keyring + ceph-mon --setuser ceph --setgroup ceph --cluster ${CLUSTER} --mkfs -i ${MON_NAME} --inject-monmap $MONMAP --keyring $MON_KEYRING --mon-data "$MON_DATA_DIR" + else + log "Trying to get the most recent monmap..." 
+ # Ignore when we timeout, in most cases that means the cluster has no quorum or + # no mons are up and running yet + timeout 5 ceph ${CLI_OPTS} mon getmap -o $MONMAP || true + ceph-mon --setuser ceph --setgroup ceph --cluster ${CLUSTER} -i ${MON_NAME} --inject-monmap $MONMAP --keyring $MON_KEYRING --mon-data "$MON_DATA_DIR" + timeout 7 ceph ${CLI_OPTS} mon add "${MON_NAME}" "${MON_IP}:6789" || true + fi + + log "SUCCESS" + + # start MON + exec /usr/bin/ceph-mon $DAEMON_OPTS -i ${MON_NAME} --mon-data "$MON_DATA_DIR" --public-addr "${MON_IP}:6789" +} diff --git a/ceph/templates/bin/_start_osd.sh.tpl b/ceph/templates/bin/_start_osd.sh.tpl new file mode 100644 index 0000000000..6c78c4ee57 --- /dev/null +++ b/ceph/templates/bin/_start_osd.sh.tpl @@ -0,0 +1,61 @@ +#!/bin/bash +set -ex + +if is_redhat; then + source /etc/sysconfig/ceph +elif is_ubuntu; then + source /etc/default/ceph +fi + +function start_osd { + get_config + check_config + + if [ ${CEPH_GET_ADMIN_KEY} -eq 1 ]; then + get_admin_key + check_admin_key + fi + + case "$OSD_TYPE" in + directory) + source osd_directory.sh + source osd_common.sh + osd_directory + ;; + directory_single) + source osd_directory_single.sh + osd_directory_single + ;; + disk) + osd_disk + ;; + prepare) + source osd_disk_prepare.sh + osd_disk_prepare + ;; + activate) + source osd_disk_activate.sh + osd_activate + ;; + devices) + source osd_disks.sh + source osd_common.sh + osd_disks + ;; + activate_journal) + source osd_activate_journal.sh + source osd_common.sh + osd_activate_journal + ;; + *) + osd_trying_to_determine_scenario + ;; + esac +} + +function osd_disk { + source osd_disk_prepare.sh + source osd_disk_activate.sh + osd_disk_prepare + osd_activate +} diff --git a/ceph/templates/bin/_start_rgw.sh.tpl b/ceph/templates/bin/_start_rgw.sh.tpl new file mode 100644 index 0000000000..06efc2acd2 --- /dev/null +++ b/ceph/templates/bin/_start_rgw.sh.tpl @@ -0,0 +1,55 @@ +#!/bin/bash +set -ex + +function start_rgw { + get_config + check_config + + if [ ${CEPH_GET_ADMIN_KEY} -eq 1 ]; then + get_admin_key + check_admin_key + fi + + # Check to see if our RGW has been initialized + if [ ! -e $RGW_KEYRING ]; then + + if [ ! -e $RGW_BOOTSTRAP_KEYRING ]; then + log "ERROR- $RGW_BOOTSTRAP_KEYRING must exist. You can extract it from your current monitor by running 'ceph auth get client.bootstrap-rgw -o $RGW_BOOTSTRAP_KEYRING'" + exit 1 + fi + + timeout 10 ceph ${CLI_OPTS} --name client.bootstrap-rgw --keyring $RGW_BOOTSTRAP_KEYRING health || exit 1 + + # Generate the RGW key + ceph ${CLI_OPTS} --name client.bootstrap-rgw --keyring $RGW_BOOTSTRAP_KEYRING auth get-or-create client.rgw.${RGW_NAME} osd 'allow rwx' mon 'allow rw' -o $RGW_KEYRING + chown ceph. $RGW_KEYRING + chmod 0600 $RGW_KEYRING + fi + + log "SUCCESS" + + RGW_FRONTENDS="civetweb port=$RGW_CIVETWEB_PORT" + if [ "$RGW_REMOTE_CGI" -eq 1 ]; then + RGW_FRONTENDS="fastcgi socket_port=$RGW_REMOTE_CGI_PORT socket_host=$RGW_REMOTE_CGI_HOST" + fi + + exec /usr/bin/radosgw $DAEMON_OPTS -n client.rgw.${RGW_NAME} -k $RGW_KEYRING --rgw-socket-path="" --rgw-zonegroup="$RGW_ZONEGROUP" --rgw-zone="$RGW_ZONE" --rgw-frontends="$RGW_FRONTENDS" +} + +function create_rgw_user { + + # Check to see if our RGW has been initialized + if [ ! -e /var/lib/ceph/radosgw/keyring ]; then + log "ERROR- /var/lib/ceph/radosgw/keyring must exist. 
Please get it from your Rados Gateway" + exit 1 + fi + + mv /var/lib/ceph/radosgw/keyring $RGW_KEYRING + + USER_KEY="" + if [ -n "${RGW_USER_SECRET_KEY}" ]; then + USER_KEY="--access-key=${RGW_USER_USER_KEY} --secret=${RGW_USER_SECRET_KEY}" + fi + + exec radosgw-admin user create --uid=${RGW_USER} ${USER_KEY} --display-name="RGW ${RGW_USER} User" -c /etc/ceph/${CLUSTER}.conf +} diff --git a/ceph/templates/bin/_watch_mon_health.sh.tpl b/ceph/templates/bin/_watch_mon_health.sh.tpl new file mode 100644 index 0000000000..41c64b17d3 --- /dev/null +++ b/ceph/templates/bin/_watch_mon_health.sh.tpl @@ -0,0 +1,13 @@ +#!/bin/bash +set -ex + +function watch_mon_health { + + while [ true ] + do + log "checking for zombie mons" + /check_zombie_mons.py || true + log "sleep 30 sec" + sleep 30 + done +} diff --git a/ceph/templates/configmap-bin.yaml b/ceph/templates/configmap-bin.yaml index e7a892123f..83b9f2e77b 100644 --- a/ceph/templates/configmap-bin.yaml +++ b/ceph/templates/configmap-bin.yaml @@ -33,3 +33,33 @@ data: ceph-namespace-client-key.sh: |+ {{ tuple "bin/_ceph-namespace-client-key.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }} {{- end }} + common_functions.sh: |+ +{{ tuple "bin/_common_functions.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }} + entrypoint.sh: |+ +{{ tuple "bin/_entrypoint.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }} + osd_activate_journal.sh: |+ +{{ tuple "bin/_osd_activate_journal.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }} + osd_common.sh: |+ +{{ tuple "bin/_osd_common.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }} + osd_directory.sh: |+ +{{ tuple "bin/_osd_directory.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }} + osd_directory_single.sh: |+ +{{ tuple "bin/_osd_directory_single.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }} + osd_disk_activate.sh: |+ +{{ tuple "bin/_osd_disk_activate.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }} + osd_disk_prepare.sh: |+ +{{ tuple "bin/_osd_disk_prepare.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }} + osd_disks.sh: |+ +{{ tuple "bin/_osd_disks.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }} + remove-mon.sh: |+ +{{ tuple "bin/_remove-mon.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }} + start_mon.sh: |+ +{{ tuple "bin/_start_mon.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }} + start_osd.sh: |+ +{{ tuple "bin/_start_osd.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }} + start_mds.sh: |+ +{{ tuple "bin/_start_mds.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }} + start_rgw.sh: |+ +{{ tuple "bin/_start_rgw.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }} + watch_mon_health.sh: |+ +{{ tuple "bin/_watch_mon_health.sh.tpl" . 
| include "helm-toolkit.utils.template" | indent 4 }} diff --git a/ceph/templates/daemonset-osd.yaml b/ceph/templates/daemonset-osd.yaml index c52dfdcba5..5d611b7676 100644 --- a/ceph/templates/daemonset-osd.yaml +++ b/ceph/templates/daemonset-osd.yaml @@ -78,6 +78,22 @@ spec: - name: ceph mountPath: /var/lib/ceph readOnly: false + - name: ceph-bin + mountPath: /entrypoint.sh + subPath: entrypoint.sh + readOnly: true + - name: ceph-bin + mountPath: /start_osd.sh + subPath: start_osd.sh + readOnly: true + - name: ceph-bin + mountPath: /osd_directory.sh + subPath: osd_directory.sh + readOnly: true + - name: ceph-bin + mountPath: /common_functions.sh + subPath: common_functions.sh + readOnly: true - name: ceph-etc mountPath: /etc/ceph/ceph.conf subPath: ceph.conf @@ -111,6 +127,10 @@ spec: - name: ceph hostPath: path: {{ .Values.ceph.storage.var_directory }} + - name: ceph-bin + configMap: + name: ceph-bin + defaultMode: 0555 - name: ceph-etc configMap: name: ceph-etc diff --git a/ceph/templates/deployment-mds.yaml b/ceph/templates/deployment-mds.yaml index 85716df212..201dbdb590 100644 --- a/ceph/templates/deployment-mds.yaml +++ b/ceph/templates/deployment-mds.yaml @@ -64,6 +64,18 @@ spec: command: - /entrypoint.sh volumeMounts: + - name: ceph-bin + mountPath: /entrypoint.sh + subPath: entrypoint.sh + readOnly: true + - name: ceph-bin + mountPath: /start_mds.sh + subPath: start_mds.sh + readOnly: true + - name: ceph-bin + mountPath: /common_functions.sh + subPath: common_functions.sh + readOnly: true - name: ceph-etc mountPath: /etc/ceph/ceph.conf subPath: ceph.conf @@ -102,6 +114,10 @@ spec: configMap: name: ceph-etc defaultMode: 0444 + - name: ceph-bin + configMap: + name: ceph-bin + defaultMode: 0555 - name: ceph-client-admin-keyring secret: secretName: {{ .Values.secrets.keyrings.admin }} diff --git a/ceph/templates/deployment-moncheck.yaml b/ceph/templates/deployment-moncheck.yaml index c321dd34d1..371abe7016 100644 --- a/ceph/templates/deployment-moncheck.yaml +++ b/ceph/templates/deployment-moncheck.yaml @@ -64,6 +64,18 @@ spec: command: - /entrypoint.sh volumeMounts: + - name: ceph-bin + mountPath: /entrypoint.sh + subPath: entrypoint.sh + readOnly: true + - name: ceph-bin + mountPath: /watch_mon_health.sh + subPath: watch_mon_health.sh + readOnly: true + - name: ceph-bin + mountPath: /common_functions.sh + subPath: common_functions.sh + readOnly: true - name: ceph-etc mountPath: /etc/ceph/ceph.conf subPath: ceph.conf @@ -93,6 +105,10 @@ spec: configMap: name: ceph-etc defaultMode: 0444 + - name: ceph-bin + configMap: + name: ceph-bin + defaultMode: 0555 - name: ceph-client-admin-keyring secret: secretName: {{ .Values.secrets.keyrings.admin }} diff --git a/ceph/templates/deployment-rgw.yaml b/ceph/templates/deployment-rgw.yaml index b0471a9290..774fbe507a 100644 --- a/ceph/templates/deployment-rgw.yaml +++ b/ceph/templates/deployment-rgw.yaml @@ -65,6 +65,18 @@ spec: command: - /entrypoint.sh volumeMounts: + - name: ceph-bin + mountPath: /entrypoint.sh + subPath: entrypoint.sh + readOnly: true + - name: ceph-bin + mountPath: /start_rgw.sh + subPath: start_rgw.sh + readOnly: true + - name: ceph-bin + mountPath: /common_functions.sh + subPath: common_functions.sh + readOnly: true - name: ceph-etc mountPath: /etc/ceph/ceph.conf subPath: ceph.conf @@ -101,6 +113,10 @@ spec: port: {{ .Values.network.port.rgw_target }} timeoutSeconds: 5 volumes: + - name: ceph-bin + configMap: + name: ceph-bin + defaultMode: 0555 - name: ceph-etc configMap: name: ceph-etc diff --git 
a/ceph/templates/statefulset-mon.yaml b/ceph/templates/statefulset-mon.yaml index b6376d90eb..8111f9fa1b 100644 --- a/ceph/templates/statefulset-mon.yaml +++ b/ceph/templates/statefulset-mon.yaml @@ -96,6 +96,22 @@ spec: command: - "/remove-mon.sh" volumeMounts: + - name: ceph-bin + mountPath: /entrypoint.sh + subPath: entrypoint.sh + readOnly: true + - name: ceph-bin + mountPath: /start_mon.sh + subPath: start_mon.sh + readOnly: true + - name: ceph-bin + mountPath: /remove-mon.sh + subPath: remove-mon.sh + readOnly: true + - name: ceph-bin + mountPath: /common_functions.sh + subPath: common_functions.sh + readOnly: true - name: ceph-etc mountPath: /etc/ceph/ceph.conf subPath: ceph.conf @@ -130,6 +146,10 @@ spec: port: 6789 timeoutSeconds: 5 volumes: + - name: ceph-bin + configMap: + name: ceph-bin + defaultMode: 0555 - name: ceph-etc configMap: name: ceph-etc
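Note: a quick way to sanity-check these templates locally is to render the chart and confirm the new scripts land in the ceph-bin ConfigMap, then verify they are mounted executable inside a pod. This is only a sketch; the namespace and pod name below are placeholders, not values taken from this change:

    # render the chart from the repository root and list the script keys in the ConfigMaps
    helm template ceph | grep -E '^  [a-z_-]+\.sh: \|'

    # in a deployed cluster (namespace and pod name are examples), the scripts should
    # appear at the container root as read-only files with mode 0555 (the ConfigMap defaultMode)
    kubectl -n ceph exec <ceph-osd-pod> -- ls -l /entrypoint.sh /common_functions.sh /start_osd.sh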