
Add script launch_instance.sh for basic instance VM tests. The script tries to deal with a number of failures that have turned up in testing (e.g., services failing to start, instance not launching). The changeset includes three scripts in a new tools directory. 1) To run a test once, use test-once.sh: $ ./tools/test-once.sh scripts/test/launch_instance.sh 2) To restore (and boot) the cluster to an earlier snapshot, use restore-cluster.sh. The argument selects the snapshot used for the controller node VM. To select the most recently used snapshot: $ ./tools/restore-cluster.sh current To select the controller snapshot, "controller_node_installed": $ ./tools/restore-cluster.sh controller_node_installed 3) To run the same test repeatedly, use repeat-test.sh. The test script name is hard-coded (launch_instance.sh). The argument determines whether the cluster is rebuilt for each test or whether a snapshot of the cluster is restored. The controller snapshot is hard-coded (controller_node_installed); this particular snapshot is of interest because it does not seem to result in a reliable cluster. Log files are stored in log/test-results. The repeat-test.sh script also saves log files from each node's /var/log/upstart to help with analyzing failures. $ ./tools/repeat-test.sh restore After running a number of tests, you can get some simple stats using a command like this: $ grep -h SUM log/test-results/*/test.log|LC_ALL=C sort|uniq -c Co-Authored-By: Pranav Salunke <dguitarbite@gmail.com> Change-Id: I20b7273683b281bf7822ef66e311b955b8c5ec8a
76 lines
1.9 KiB
Bash
Executable File
76 lines
1.9 KiB
Bash
Executable File
#!/bin/bash

# restore-cluster.sh: power off the cluster VMs (controller, compute,
# network), restore each one to a snapshot, and boot them again.
#
# Usage: restore-cluster.sh {current|<controller_snapshot_name>|list-snapshots}

# Abort on uncaught command failure and on use of unset variables.
set -o errexit -o nounset

# Absolute path of the repository root (the parent of this script's directory).
TOP_DIR=$(cd "$(dirname "$0")/.." && pwd)

# Project path configuration; presumably defines CONFIG_DIR and
# OSBASH_LIB_DIR used below (file not visible here — confirm).
source "$TOP_DIR/config/paths"

# Deployment settings for the osbash/VirtualBox cluster.
source "$CONFIG_DIR/deploy.osbash"

# Host-side helper functions shared by the osbash scripts.
source "$OSBASH_LIB_DIR/functions.host"
|
|
|
|
# Names of the cluster node VMs as registered with VirtualBox; vboxmanage
# addresses the VMs by these names. Marked readonly: nothing in this script
# may reassign them.
readonly CONTROLLER_VM=controller
readonly NETWORK_VM=network
readonly COMPUTE_VM=compute
|
|
|
|
# Print usage information to stdout and terminate the script.
# Exits with status 1: usage is only shown when the arguments are wrong, and
# the original bare "exit" returned the status of the last echo (i.e. 0),
# falsely signalling success to callers.
function usage {
    # Setting to empty string selects latest (current snapshot)
    echo "Usage: $0 {current|<controller_snapshot_name>} {list-snapshots}"
    echo " current: restore to currently active snapshot"
    echo " list-snapshots: to list the snapshots of the vms"
    exit 1
}
|
|
|
|
# Helper: power off one VM and restore it to a snapshot.
# Arguments: $1 - VM name
#            $2 - snapshot name (optional; empty/missing = current snapshot)
function _restore_node {
    local vm=$1
    local snapshot=${2:-}
    # poweroff fails when the VM is not running; that is fine here, so the
    # failure is ignored explicitly (the original captured $? into an unused
    # rc variable for the same purpose).
    vboxmanage controlvm "$vm" poweroff >/dev/null 2>&1 || true
    # Give VirtualBox a moment to release the VM before touching snapshots.
    sleep 1
    if [ -n "$snapshot" ]; then
        vboxmanage snapshot "$vm" restore "$snapshot"
    else
        vboxmanage snapshot "$vm" restorecurrent
    fi
}

# Power off all three cluster VMs and restore each to a snapshot.
# The controller is restored to "$CONTROLLER_SNAPSHOT" when that variable is
# non-empty, otherwise to its current snapshot; the compute and network nodes
# are always restored to their current snapshot.
# Globals: CONTROLLER_SNAPSHOT (read), CONTROLLER_VM, COMPUTE_VM, NETWORK_VM
function cluster_restore {
    if [ -n "$CONTROLLER_SNAPSHOT" ]; then
        echo "Restoring $CONTROLLER_SNAPSHOT."
    else
        echo "Restoring current snapshot."
    fi
    _restore_node "$CONTROLLER_VM" "$CONTROLLER_SNAPSHOT"

    _restore_node "$COMPUTE_VM"

    _restore_node "$NETWORK_VM"
}
|
|
|
|
# Boot every cluster node VM headless (no GUI console window), in the
# order controller, compute, network.
function cluster_start {
    local node
    for node in "$CONTROLLER_VM" "$COMPUTE_VM" "$NETWORK_VM"; do
        vboxmanage startvm "$node" -t headless
    done
}
|
|
|
|
# Print the snapshot list of every cluster node VM, then terminate the
# script successfully.
function list_snapshots {
    local node

    for node in "$CONTROLLER_VM" "$COMPUTE_VM" "$NETWORK_VM"; do
        echo -e "\n$node node's Snapshot"
        vboxmanage snapshot "$node" list
        # Two blank lines between nodes, matching the original echo; echo.
        printf '\n\n'
        sleep 1
    done

    exit 0
}
|
|
|
|
# Call the main brains: parse the single command-line argument, then restore
# the cluster snapshots and boot the VMs.
if [ $# -eq 0 ]; then
    usage
fi

case "$1" in
    list-snapshots)
        # Informational mode: list_snapshots exits the script itself.
        list_snapshots
        ;;
    current)
        # Empty snapshot name makes cluster_restore use the current snapshot.
        CONTROLLER_SNAPSHOT=""
        ;;
    *)
        # Anything else is taken as the controller snapshot name.
        CONTROLLER_SNAPSHOT=$1
        ;;
esac

echo "Restoring cluster snapshots."
cluster_restore

echo "Starting VMs."
cluster_start >/dev/null