labs: merge node-specific config/scripts.* files

Currently, we have one scripts file directing the build of each node.
Rebuilding just a single node hardly ever makes sense, however, so we
might as well remove the extra files and have the user rebuild the
whole cluster.

With this patch, every distribution will have a basedisk scripts
configuration file (as before) and one scripts configuration file for
building the cluster (or several for building additional, different
clusters).
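
As an illustration (flags per the usage message updated below; the main
script name is assumed here), building the whole cluster then comes down
to a single invocation:

    ./osbash.sh -b cluster    # builds the basedisk first if necessary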

Change-Id: I7f50d49d7337b790416a2a76c86be510990859fd
Author: Roger Luethi
Date: 2015-02-22 16:40:35 +01:00
parent a72d1d713f
commit 86e1462b62

8 changed files with 135 additions and 99 deletions

labs/config/config.base (new file)

@@ -0,0 +1,4 @@
# Base disk VM configuration. Used by osbash/wbatch (host and guest).
# Port forwarding
VM_SSH_PORT=2229
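
VM_SSH_PORT is the host-side port forwarded to the guest's sshd. As a
sketch of the VirtualBox NAT rule such a setting typically corresponds
to (the VM name "controller" is only an example; osbash sets the rule
up itself):

    VBoxManage modifyvm "controller" --natpf1 "ssh,tcp,127.0.0.1,2229,,22"
    ssh -p 2229 user@127.0.0.1    # reach the guest via the forwarded port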

labs/config/scripts.compute (deleted)

@@ -1,16 +0,0 @@
# Scripts for compute node
cmd init_node
cmd queue etc_hosts.sh
cmd queue osbash/enable_vagrant_ssh_keys.sh
cmd snapshot_cycle compute_node_init
cmd queue ubuntu/setup_nova_compute.sh
cmd snapshot_cycle nova-compute_installed
cmd queue ubuntu/setup_neutron_compute.sh
cmd queue ubuntu/setup_cinder_volumes.sh
cmd snapshot_cycle compute_node_installed
# Take snapshot of changes on controller VM, too
cmd queue shutdown_controller.sh
cmd boot
cmd wait_for_shutdown -n controller
cmd snapshot -n controller controller_-_compute_node_installed
cmd boot -n controller

labs/config/scripts.controller (deleted)

@@ -1,26 +0,0 @@
# Scripts for controller node
cmd init_node
cmd queue etc_hosts.sh
cmd queue osbash/enable_vagrant_ssh_keys.sh
cmd snapshot_cycle controller_node_init
cmd queue ubuntu/apt_install_mysql.sh
cmd queue ubuntu/install_rabbitmq.sh
cmd snapshot_cycle pre-openstack_installed
cmd queue ubuntu/setup_keystone.sh
cmd snapshot_cycle keystone_installed
cmd queue ubuntu/setup_glance.sh
cmd snapshot_cycle glance_installed
cmd queue ubuntu/setup_nova_controller.sh
cmd snapshot_cycle nova-controller_installed
cmd queue ubuntu/setup_neutron_controller.sh
cmd snapshot_cycle neutron-controller_installed
cmd queue ubuntu/setup_cinder_controller.sh
cmd snapshot_cycle cinder_installed
cmd queue ubuntu/setup_horizon.sh
cmd snapshot_cycle horizon_installed
cmd queue config_external_network.sh
cmd queue config_tenant_network.sh
cmd snapshot_cycle openstack_networks_configured
cmd queue setup_lbaas_controller.sh
cmd snapshot_cycle controller_node_installed
cmd boot

labs/config/scripts.network (deleted)

@@ -1,15 +0,0 @@
# Scripts for network node
cmd init_node
cmd queue etc_hosts.sh
cmd queue osbash/enable_vagrant_ssh_keys.sh
cmd snapshot_cycle network_node_init
cmd queue ubuntu/setup_neutron_network.sh
cmd snapshot_cycle neutron_configured
cmd queue ubuntu/setup_lbaas_network.sh
cmd snapshot_cycle network_node_installed
# Take snapshot of changes on controller VM, too
cmd queue shutdown_controller.sh
cmd boot
cmd wait_for_shutdown -n controller
cmd snapshot -n controller controller_-_network_node_installed
cmd boot -n controller

labs/config/scripts.ubuntu_cluster (new file)

@@ -0,0 +1,82 @@
#==============================================================================
# Scripts for controller node
cmd init_node -n controller
cmd queue etc_hosts.sh
cmd queue osbash/enable_vagrant_ssh_keys.sh
cmd snapshot_cycle -n controller controller_node_init
cmd queue ubuntu/apt_install_mysql.sh
cmd queue ubuntu/install_rabbitmq.sh
cmd snapshot_cycle -n controller pre-openstack_installed
cmd queue ubuntu/setup_keystone.sh
cmd snapshot_cycle -n controller keystone_installed
cmd queue ubuntu/setup_glance.sh
cmd snapshot_cycle -n controller glance_installed
cmd queue ubuntu/setup_nova_controller.sh
cmd snapshot_cycle -n controller nova-controller_installed
cmd queue ubuntu/setup_neutron_controller.sh
cmd snapshot_cycle -n controller neutron-controller_installed
cmd queue ubuntu/setup_cinder_controller.sh
cmd snapshot_cycle -n controller cinder_installed
cmd queue ubuntu/setup_horizon.sh
cmd snapshot_cycle -n controller horizon_installed
cmd queue config_external_network.sh
cmd queue config_tenant_network.sh
cmd snapshot_cycle -n controller openstack_networks_configured
cmd queue setup_lbaas_controller.sh
cmd snapshot_cycle -n controller controller_node_installed
cmd boot -n controller
#==============================================================================
# Scripts for compute node
cmd init_node -n compute
cmd queue etc_hosts.sh
cmd queue osbash/enable_vagrant_ssh_keys.sh
cmd snapshot_cycle -n compute compute_node_init
cmd queue ubuntu/setup_nova_compute.sh
cmd snapshot_cycle -n compute nova-compute_installed
cmd queue ubuntu/setup_neutron_compute.sh
cmd queue ubuntu/setup_cinder_volumes.sh
cmd snapshot_cycle -n compute compute_node_installed
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Take snapshot of database changes on controller VM, too
cmd queue shutdown_controller.sh
cmd boot -n compute
cmd wait_for_shutdown -n controller
cmd snapshot -n controller controller_-_compute_node_installed
cmd boot -n controller
#==============================================================================
# Scripts for network node
cmd init_node -n network
cmd queue etc_hosts.sh
cmd queue osbash/enable_vagrant_ssh_keys.sh
cmd snapshot_cycle -n network network_node_init
cmd queue ubuntu/setup_neutron_network.sh
cmd snapshot_cycle -n network neutron_configured
cmd queue ubuntu/setup_lbaas_network.sh
cmd snapshot_cycle -n network network_node_installed
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Take snapshot of database changes on controller VM, too
cmd queue shutdown_controller.sh
cmd boot -n network
cmd wait_for_shutdown -n controller
cmd snapshot -n controller controller_-_network_node_installed
cmd boot -n controller
#==============================================================================
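
The cluster file uses the same small "cmd" DSL as the per-node files it
replaces. Roughly, per command_from_config below (a summary, not the
complete semantics):

    cmd queue SCRIPT.sh              # append SCRIPT.sh to the autostart queue
    cmd boot -n NODE                 # boot NODE and run the queued scripts
    cmd snapshot_cycle -n NODE NAME  # queue a shutdown, boot, wait, snapshot as NAME
    cmd wait_for_shutdown -n NODE    # block until NODE has powered off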

@@ -38,6 +38,16 @@ function get_base_disk_path {
echo "$DISK_DIR/$(get_base_disk_name)"
}
# From DISTRO string (e.g., ubuntu-14.04-server-amd64), get first component
function get_distro_name {
# Match up to first dash
local re='([^-]*)'
if [[ $DISTRO =~ $re ]]; then
echo "${BASH_REMATCH[1]}"
fi
}
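
As an illustration of the match, using the example string from the
comment above:

    DISTRO=ubuntu-14.04-server-amd64
    re='([^-]*)'
    [[ $DISTRO =~ $re ]] && echo "${BASH_REMATCH[1]}"    # prints: ubuntu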
#-------------------------------------------------------------------------------
# ssh
#-------------------------------------------------------------------------------
@@ -150,7 +160,13 @@ function ssh_exec_script {
# Wait for sshd, prepare autostart dirs, and execute autostart scripts on VM
function ssh_process_autostart {
local vm_name=$1
# Run this function in sub-shell to protect our caller's environment
# (which might be _our_ environment if we get called again)
(
source "$CONFIG_DIR/config.$vm_name"
local ssh_port=$VM_SSH_PORT
wait_for_ssh "$ssh_port"
vm_ssh "$ssh_port" "rm -rf lib config autostart"
@@ -162,6 +178,8 @@ function ssh_process_autostart {
rm -f "$script_path" >&2
done
touch "$STATUS_DIR/done"
)
}
#-------------------------------------------------------------------------------
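
A minimal illustration (plain bash, independent of osbash) of why the
sub-shell protects the caller's variables:

    x=1
    ( x=2; echo "inside: $x" )    # inside: 2
    echo "outside: $x"            # outside: 1 -- the caller's x is untouched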
@@ -317,9 +335,8 @@ function command_from_config {
# Boot with queued autostart files now, wait for end of scripts
# processing
get_cmd_options $args
echo >&2 "VM_UI=$vm_ui _vbox_boot_with_autostart $vm_name " \
"$VM_SSH_PORT"
VM_UI=$vm_ui _vbox_boot_with_autostart "$vm_name" "$VM_SSH_PORT"
echo >&2 "VM_UI=$vm_ui _vbox_boot_with_autostart $vm_name"
VM_UI=$vm_ui _vbox_boot_with_autostart "$vm_name"
;;
snapshot)
# Format: snapshot [-n <node_name>] <snapshot_name>
@@ -341,7 +358,7 @@ function command_from_config {
local shot_name=$args
echo >&2 snapshot_cycle "$vm_name" "$shot_name"
_autostart_queue "osbash/shutdown.sh"
_vbox_boot_with_autostart "$vm_name" "$VM_SSH_PORT"
_vbox_boot_with_autostart "$vm_name"
vm_wait_for_shutdown "$vm_name"
vm_snapshot "$vm_name" "$shot_name"
;;

$OSBASH_LIB_DIR/virtualbox.install_nodes (renamed from virtualbox.install_node)

@@ -24,13 +24,12 @@ function _vbox_configure_ifs {
# Boot node VM; wait until autostart files are processed and VM is shut down
function _vbox_boot_with_autostart {
local vm_name=$1
local ssh_port=$2
vbox_boot "$vm_name"
# Wait for ssh connection and execute scripts in autostart directory
# (for wbatch, osbashauto does the processing instead)
${WBATCH:+:} ssh_process_autostart "$ssh_port" &
${WBATCH:+:} ssh_process_autostart "$vm_name" &
wait_for_autofiles
echo >&2 "VM \"$vm_name\": autostart files executed"
@@ -38,6 +37,11 @@ function _vbox_boot_with_autostart {
# Create a new node VM and run basic configuration scripts
function vm_init_node {
# XXX Run this function in sub-shell to protect our caller's environment
# (which might be _our_ environment if we get called again)
(
vm_name=$1
source "$CONFIG_DIR/config.$vm_name"
vm_create "$vm_name"
@@ -62,25 +66,21 @@ function vm_init_node {
vm_attach_disk_multi "$vm_name" "$BASE_DISK"
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Rename to pass the node name to the script
autostart_and_rename osbash init_xxx_node.sh "init_${NODE_NAME}_node.sh"
}
function vm_build_node {
# XXX Run this function in sub-shell to protect our caller's environment
# (which might be _our_ enviroment if we get called again)
(
NODE_NAME=$1
source "$CONFIG_DIR/config.$NODE_NAME"
${WBATCH:-:} wbatch_begin_node "$NODE_NAME"
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
autostart_reset
autostart_from_config "scripts.$NODE_NAME"
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
${WBATCH:-:} wbatch_end_file
autostart_and_rename osbash init_xxx_node.sh "init_${vm_name}_node.sh"
)
}
function vm_build_nodes {
CONFIG_NAME=$(get_distro_name "$DISTRO")_$1
echo "Configuration file: $CONFIG_NAME"
${WBATCH:-:} wbatch_begin_node "$CONFIG_NAME"
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
autostart_reset
autostart_from_config "scripts.$CONFIG_NAME"
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
${WBATCH:-:} wbatch_end_file
}
# vim: set ai ts=4 sw=4 et ft=sh:
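
Tracing the new naming: with DISTRO=ubuntu-14.04-server-amd64 as in the
example above, a call such as

    vm_build_nodes cluster    # CONFIG_NAME=ubuntu_cluster

would make autostart_from_config read config/scripts.ubuntu_cluster.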

@@ -21,8 +21,7 @@ source "$OSBASH_LIB_DIR/virtualbox.functions"
source "$OSBASH_LIB_DIR/virtualbox.install_base"
function usage {
echo "Usage: $0 {-b|-w} [-g GUI] [-n] {basedisk|NODE [NODE..]}"
echo " $0 [-e EXPORT] [-n] NODE [NODE..]"
echo "Usage: $0 {-b|-w} [-g GUI] [-n] {basedisk|cluster}"
# Don't advertise export until it is working properly
#echo " $0 [-e EXPORT] [-n] NODE [NODE..]"
echo ""
@@ -34,8 +33,9 @@ function usage {
#echo "-e EXPORT Export node VMs"
echo ""
echo "basedisk Build configured basedisk"
echo "NODE Build controller, compute, network, cluster [all three]"
echo " (builds basedisk if necessary)"
echo "cluster Build OpenStack cluster [all nodes]" \
"(and basedisk if necessary)"
echo
echo "GUI gui, sdl, or headless"
echo " (choose GUI type for VirtualBox)"
#echo "EXPORT ova (OVA package file) or dir (VM clone directory)"
@@ -47,7 +47,7 @@ function print_config {
echo "Target is base disk: $BASE_DISK"
else
echo "Base disk: $BASE_DISK"
echo "Nodes: $nodes"
echo "Distribution name: $(get_distro_name "$DISTRO")"
fi
if [ -n "${EXPORT_OVA:-}" ]; then
@@ -121,16 +121,8 @@ shift $(( OPTIND - 1 ));
if [ $# -eq 0 ]; then
# No argument given
usage
elif [ "$1" = basedisk ]; then
# Building the base disk only
CMD=$1
else
CMD=nodes
if [ "$1" = cluster ]; then
nodes="controller compute network"
else
nodes="$@"
fi
CMD=$1
fi
# Install over ssh by default
@@ -213,10 +205,8 @@ MGMT_NET_IF=$(create_network "$MGMT_NET")
DATA_NET_IF=$(create_network "$DATA_NET")
API_NET_IF=$(create_network "$API_NET")
#-------------------------------------------------------------------------------
source "$OSBASH_LIB_DIR/virtualbox.install_node"
for node in $nodes; do
vm_build_node "$node"
done
source "$OSBASH_LIB_DIR/virtualbox.install_nodes"
vm_build_nodes "$CMD"
#-------------------------------------------------------------------------------
ENDTIME=$(date +%s)
echo >&2 "$(date) osbash finished successfully"