From 86e1462b62372e51995f07e6789ea451852d7ba1 Mon Sep 17 00:00:00 2001
From: Roger Luethi
Date: Sun, 22 Feb 2015 16:40:35 +0100
Subject: [PATCH] labs: merge node-specific config/scripts.* files

Currently, we have a file directing the build for each node. However,
rebuilding just one node hardly ever makes sense, so we might as well
remove the extra files and have the user just rebuild the cluster.

With this patch, every distribution will have a basedisk scripts
configuration file (as before) and one scripts configuration file for
building the cluster (or several for building additional, different
clusters).

Change-Id: I7f50d49d7337b790416a2a76c86be510990859fd
---
 labs/config/config.base                       |  4 +
 labs/config/scripts.compute                   | 16 ----
 labs/config/scripts.controller                | 26 ------
 labs/config/scripts.network                   | 15 ----
 labs/config/scripts.ubuntu_cluster            | 82 +++++++++++++++++++
 labs/lib/osbash/functions.host                | 27 ++++--
 ....install_node => virtualbox.install_nodes} | 38 ++++-----
 labs/osbash.sh                                | 26 ++----
 8 files changed, 135 insertions(+), 99 deletions(-)
 create mode 100644 labs/config/config.base
 delete mode 100644 labs/config/scripts.compute
 delete mode 100644 labs/config/scripts.controller
 delete mode 100644 labs/config/scripts.network
 create mode 100644 labs/config/scripts.ubuntu_cluster
 rename labs/lib/osbash/{virtualbox.install_node => virtualbox.install_nodes} (86%)

diff --git a/labs/config/config.base b/labs/config/config.base
new file mode 100644
index 00000000..44ee0811
--- /dev/null
+++ b/labs/config/config.base
@@ -0,0 +1,4 @@
+# Base disk VM configuration. Used by osbash/wbatch (host and guest).
+
+# Port forwarding
+VM_SSH_PORT=2229
diff --git a/labs/config/scripts.compute b/labs/config/scripts.compute
deleted file mode 100644
index 3fcf3857..00000000
--- a/labs/config/scripts.compute
+++ /dev/null
@@ -1,16 +0,0 @@
-# Scripts for compute node
-cmd init_node
-cmd queue etc_hosts.sh
-cmd queue osbash/enable_vagrant_ssh_keys.sh
-cmd snapshot_cycle compute_node_init
-cmd queue ubuntu/setup_nova_compute.sh
-cmd snapshot_cycle nova-compute_installed
-cmd queue ubuntu/setup_neutron_compute.sh
-cmd queue ubuntu/setup_cinder_volumes.sh
-cmd snapshot_cycle compute_node_installed
-# Take snapshot of changes on controller VM, too
-cmd queue shutdown_controller.sh
-cmd boot
-cmd wait_for_shutdown -n controller
-cmd snapshot -n controller controller_-_compute_node_installed
-cmd boot -n controller
diff --git a/labs/config/scripts.controller b/labs/config/scripts.controller
deleted file mode 100644
index 4492daff..00000000
--- a/labs/config/scripts.controller
+++ /dev/null
@@ -1,26 +0,0 @@
-# Scripts for controller node
-cmd init_node
-cmd queue etc_hosts.sh
-cmd queue osbash/enable_vagrant_ssh_keys.sh
-cmd snapshot_cycle controller_node_init
-cmd queue ubuntu/apt_install_mysql.sh
-cmd queue ubuntu/install_rabbitmq.sh
-cmd snapshot_cycle pre-openstack_installed
-cmd queue ubuntu/setup_keystone.sh
-cmd snapshot_cycle keystone_installed
-cmd queue ubuntu/setup_glance.sh
-cmd snapshot_cycle glance_installed
-cmd queue ubuntu/setup_nova_controller.sh
-cmd snapshot_cycle nova-controller_installed
-cmd queue ubuntu/setup_neutron_controller.sh
-cmd snapshot_cycle neutron-controller_installed
-cmd queue ubuntu/setup_cinder_controller.sh
-cmd snapshot_cycle cinder_installed
-cmd queue ubuntu/setup_horizon.sh
-cmd snapshot_cycle horizon_installed
-cmd queue config_external_network.sh
-cmd queue config_tenant_network.sh
-cmd snapshot_cycle openstack_networks_configured
-cmd queue setup_lbaas_controller.sh
-cmd snapshot_cycle controller_node_installed
-cmd boot
diff --git a/labs/config/scripts.network b/labs/config/scripts.network
deleted file mode 100644
index de0c4eda..00000000
--- a/labs/config/scripts.network
+++ /dev/null
@@ -1,15 +0,0 @@
-# Scripts for network node
-cmd init_node
-cmd queue etc_hosts.sh
-cmd queue osbash/enable_vagrant_ssh_keys.sh
-cmd snapshot_cycle network_node_init
-cmd queue ubuntu/setup_neutron_network.sh
-cmd snapshot_cycle neutron_configured
-cmd queue ubuntu/setup_lbaas_network.sh
-cmd snapshot_cycle network_node_installed
-# Take snapshot of changes on controller VM, too
-cmd queue shutdown_controller.sh
-cmd boot
-cmd wait_for_shutdown -n controller
-cmd snapshot -n controller controller_-_network_node_installed
-cmd boot -n controller
diff --git a/labs/config/scripts.ubuntu_cluster b/labs/config/scripts.ubuntu_cluster
new file mode 100644
index 00000000..351abc6b
--- /dev/null
+++ b/labs/config/scripts.ubuntu_cluster
@@ -0,0 +1,82 @@
+#==============================================================================
+# Scripts for controller node
+cmd init_node -n controller
+
+cmd queue etc_hosts.sh
+cmd queue osbash/enable_vagrant_ssh_keys.sh
+cmd snapshot_cycle -n controller controller_node_init
+
+cmd queue ubuntu/apt_install_mysql.sh
+cmd queue ubuntu/install_rabbitmq.sh
+cmd snapshot_cycle -n controller pre-openstack_installed
+
+cmd queue ubuntu/setup_keystone.sh
+cmd snapshot_cycle -n controller keystone_installed
+
+cmd queue ubuntu/setup_glance.sh
+cmd snapshot_cycle -n controller glance_installed
+
+cmd queue ubuntu/setup_nova_controller.sh
+cmd snapshot_cycle -n controller nova-controller_installed
+
+cmd queue ubuntu/setup_neutron_controller.sh
+cmd snapshot_cycle -n controller neutron-controller_installed
+
+cmd queue ubuntu/setup_cinder_controller.sh
+cmd snapshot_cycle -n controller cinder_installed
+
+cmd queue ubuntu/setup_horizon.sh
+cmd snapshot_cycle -n controller horizon_installed
+
+cmd queue config_external_network.sh
+cmd queue config_tenant_network.sh
+cmd snapshot_cycle -n controller openstack_networks_configured
+
+cmd queue setup_lbaas_controller.sh
+cmd snapshot_cycle -n controller controller_node_installed
+
+cmd boot -n controller
+#==============================================================================
+# Scripts for compute node
+cmd init_node -n compute
+
+cmd queue etc_hosts.sh
+cmd queue osbash/enable_vagrant_ssh_keys.sh
+cmd snapshot_cycle -n compute compute_node_init
+
+cmd queue ubuntu/setup_nova_compute.sh
+cmd snapshot_cycle -n compute nova-compute_installed
+
+cmd queue ubuntu/setup_neutron_compute.sh
+cmd queue ubuntu/setup_cinder_volumes.sh
+cmd snapshot_cycle -n compute compute_node_installed
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+# Take snapshot of database changes on controller VM, too
+cmd queue shutdown_controller.sh
+cmd boot -n compute
+
+cmd wait_for_shutdown -n controller
+cmd snapshot -n controller controller_-_compute_node_installed
+cmd boot -n controller
+#==============================================================================
+# Scripts for network node
+cmd init_node -n network
+
+cmd queue etc_hosts.sh
+cmd queue osbash/enable_vagrant_ssh_keys.sh
+cmd snapshot_cycle -n network network_node_init
+
+cmd queue ubuntu/setup_neutron_network.sh
+cmd snapshot_cycle -n network neutron_configured
+
+cmd queue ubuntu/setup_lbaas_network.sh
+cmd snapshot_cycle -n network network_node_installed
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+# Take snapshot of database changes on controller VM, too
+cmd queue shutdown_controller.sh
+cmd boot -n network
+
+cmd wait_for_shutdown -n controller
+cmd snapshot -n controller controller_-_network_node_installed
+cmd boot -n controller
+#==============================================================================
diff --git a/labs/lib/osbash/functions.host b/labs/lib/osbash/functions.host
index 9a54123e..2fe7b3ec 100644
--- a/labs/lib/osbash/functions.host
+++ b/labs/lib/osbash/functions.host
@@ -38,6 +38,16 @@ function get_base_disk_path {
     echo "$DISK_DIR/$(get_base_disk_name)"
 }
 
+# From DISTRO string (e.g., ubuntu-14.04-server-amd64), get first component
+function get_distro_name {
+    # Match up to first dash
+    local re='([^-]*)'
+
+    if [[ $DISTRO =~ $re ]]; then
+        echo "${BASH_REMATCH[1]}"
+    fi
+}
+
 #-------------------------------------------------------------------------------
 # ssh
 #-------------------------------------------------------------------------------
@@ -150,7 +160,13 @@ function ssh_exec_script {
 
 # Wait for sshd, prepare autostart dirs, and execute autostart scripts on VM
 function ssh_process_autostart {
-    local ssh_port=$1
+    # Run this function in sub-shell to protect our caller's environment
+    # (which might be _our_ environment if we get called again)
+    (
+
+    source "$CONFIG_DIR/config.$vm_name"
+
+    local ssh_port=$VM_SSH_PORT
 
     wait_for_ssh "$ssh_port"
     vm_ssh "$ssh_port" "rm -rf lib config autostart"
@@ -162,6 +178,8 @@
         rm -f "$script_path" >&2
     done
     touch "$STATUS_DIR/done"
+
+    )
 }
 
 #-------------------------------------------------------------------------------
@@ -317,9 +335,8 @@ function command_from_config {
             # Boot with queued autostart files now, wait for end of scripts
             # processing
             get_cmd_options $args
-            echo >&2 "VM_UI=$vm_ui _vbox_boot_with_autostart $vm_name " \
-                "$VM_SSH_PORT"
-            VM_UI=$vm_ui _vbox_boot_with_autostart "$vm_name" "$VM_SSH_PORT"
+            echo >&2 "VM_UI=$vm_ui _vbox_boot_with_autostart $vm_name"
+            VM_UI=$vm_ui _vbox_boot_with_autostart "$vm_name"
             ;;
         snapshot)
             # Format: snapshot [-n ]
@@ -341,7 +358,7 @@
             local shot_name=$args
             echo >&2 snapshot_cycle "$vm_name" "$shot_name"
             _autostart_queue "osbash/shutdown.sh"
-            _vbox_boot_with_autostart "$vm_name" "$VM_SSH_PORT"
+            _vbox_boot_with_autostart "$vm_name"
             vm_wait_for_shutdown "$vm_name"
             vm_snapshot "$vm_name" "$shot_name"
             ;;
diff --git a/labs/lib/osbash/virtualbox.install_node b/labs/lib/osbash/virtualbox.install_nodes
similarity index 86%
rename from labs/lib/osbash/virtualbox.install_node
rename to labs/lib/osbash/virtualbox.install_nodes
index f740915d..6fd72e6e 100644
--- a/labs/lib/osbash/virtualbox.install_node
+++ b/labs/lib/osbash/virtualbox.install_nodes
@@ -24,13 +24,12 @@ function _vbox_configure_ifs {
 # Boot node VM; wait until autostart files are processed and VM is shut down
 function _vbox_boot_with_autostart {
     local vm_name=$1
-    local ssh_port=$2
 
     vbox_boot "$vm_name"
 
     # Wait for ssh connection and execute scripts in autostart directory
     # (for wbatch, osbashauto does the processing instead)
-    ${WBATCH:+:} ssh_process_autostart "$ssh_port" &
+    ${WBATCH:+:} ssh_process_autostart "$vm_name" &
 
     wait_for_autofiles
     echo >&2 "VM \"$vm_name\": autostart files executed"
@@ -38,6 +37,11 @@
 
 # Create a new node VM and run basic configuration scripts
 function vm_init_node {
+    # XXX Run this function in sub-shell to protect our caller's environment
+    # (which might be _our_ environment if we get called again)
+    (
+    source "$CONFIG_DIR/config.$vm_name"
+
     vm_name=$1
 
     vm_create "$vm_name"
@@ -62,25 +66,21 @@ function vm_init_node {
     vm_attach_disk_multi "$vm_name" "$BASE_DISK"
     #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     # Rename to pass the node name to the script
-    autostart_and_rename osbash init_xxx_node.sh "init_${NODE_NAME}_node.sh"
-}
-
-function vm_build_node {
-    # XXX Run this function in sub-shell to protect our caller's environment
-    # (which might be _our_ enviroment if we get called again)
-    (
-
-    NODE_NAME=$1
-    source "$CONFIG_DIR/config.$NODE_NAME"
-
-    ${WBATCH:-:} wbatch_begin_node "$NODE_NAME"
-    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-    autostart_reset
-    autostart_from_config "scripts.$NODE_NAME"
-    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-    ${WBATCH:-:} wbatch_end_file
+    autostart_and_rename osbash init_xxx_node.sh "init_${vm_name}_node.sh"
 
     )
 }
 
+function vm_build_nodes {
+    CONFIG_NAME=$(get_distro_name "$DISTRO")_$1
+    echo "Configuration file: $CONFIG_NAME"
+
+    ${WBATCH:-:} wbatch_begin_node "$CONFIG_NAME"
+    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+    autostart_reset
+    autostart_from_config "scripts.$CONFIG_NAME"
+    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+    ${WBATCH:-:} wbatch_end_file
+}
+
 # vim: set ai ts=4 sw=4 et ft=sh:
diff --git a/labs/osbash.sh b/labs/osbash.sh
index a7d7e415..fd0f0e93 100755
--- a/labs/osbash.sh
+++ b/labs/osbash.sh
@@ -21,8 +21,7 @@ source "$OSBASH_LIB_DIR/virtualbox.functions"
 source "$OSBASH_LIB_DIR/virtualbox.install_base"
 
 function usage {
-    echo "Usage: $0 {-b|-w} [-g GUI] [-n] {basedisk|NODE [NODE..]}"
-    echo "       $0 [-e EXPORT] [-n] NODE [NODE..]"
+    echo "Usage: $0 {-b|-w} [-g GUI] [-n] {basedisk|cluster}"
     # Don't advertise export until it is working properly
     #echo "       $0 [-e EXPORT] [-n] NODE [NODE..]"
     echo ""
@@ -34,8 +33,9 @@
     #echo "-e EXPORT  Export node VMs"
     echo ""
     echo "basedisk   Build configured basedisk"
-    echo "NODE       Build controller, compute, network, cluster [all three]"
-    echo "           (builds basedisk if necessary)"
+    echo "cluster    Build OpenStack cluster [all nodes]" \
+        "(and basedisk if necessary)"
+    echo
     echo "GUI        gui, sdl, or headless"
     echo "           (choose GUI type for VirtualBox)"
     #echo "EXPORT     ova (OVA package file) or dir (VM clone directory)"
@@ -47,7 +47,7 @@ function print_config {
         echo "Target is base disk: $BASE_DISK"
     else
         echo "Base disk: $BASE_DISK"
-        echo "Nodes: $nodes"
+        echo "Distribution name: $(get_distro_name "$DISTRO")"
     fi
 
     if [ -n "${EXPORT_OVA:-}" ]; then
@@ -121,16 +121,8 @@ shift $(( OPTIND - 1 ));
 if [ $# -eq 0 ]; then
     # No argument given
    usage
-elif [ "$1" = basedisk ]; then
-    # Building the base disk only
-    CMD=$1
 else
-    CMD=nodes
-    if [ "$1" = cluster ]; then
-        nodes="controller compute network"
-    else
-        nodes="$@"
-    fi
+    CMD=$1
 fi
 
 # Install over ssh by default
@@ -213,10 +205,8 @@ MGMT_NET_IF=$(create_network "$MGMT_NET")
 DATA_NET_IF=$(create_network "$DATA_NET")
 API_NET_IF=$(create_network "$API_NET")
 #-------------------------------------------------------------------------------
-source "$OSBASH_LIB_DIR/virtualbox.install_node"
-for node in $nodes; do
-    vm_build_node "$node"
-done
+source "$OSBASH_LIB_DIR/virtualbox.install_nodes"
+vm_build_nodes "$CMD"
 #-------------------------------------------------------------------------------
 ENDTIME=$(date +%s)
 echo >&2 "$(date) osbash finished successfully"
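
Usage note (illustrative sketch, not part of the commit): with the per-node
scripts.* files merged, a full cluster build becomes a single osbash run, and
vm_build_nodes selects the scripts file by distribution name. Assuming the
DISTRO example given in get_distro_name (ubuntu-14.04-server-amd64), the flow
looks roughly like this:

    # Build the base disk if necessary, then all cluster nodes in one run
    ./osbash.sh -b cluster

    # osbash.sh sets CMD=cluster and calls vm_build_nodes "$CMD", which
    # resolves CONFIG_NAME=$(get_distro_name "$DISTRO")_cluster, i.e.
    # "ubuntu_cluster", so the build is driven by
    # labs/config/scripts.ubuntu_cluster.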