diff --git a/doc/training-guides/basic-install-guide/lab_compute-node.xml b/doc/training-guides/basic-install-guide/lab_compute-node.xml
index 2adeca7a..28ed830f 100644
--- a/doc/training-guides/basic-install-guide/lab_compute-node.xml
+++ b/doc/training-guides/basic-install-guide/lab_compute-node.xml
@@ -24,18 +24,90 @@
>https://docs.google.com/drawings/d/1GX3FXmkz3c_tUDpZXUVMpyIxicWuHs5fNsHvYNjwNNk/edit?usp=sharing
Vboxnet0, Vboxnet1, Vboxnet2 - are virtual networks setup up by virtual
- box with your host machine. This is the way your host can
+ >Vboxnet2 - are virtual networks set up by VirtualBox
+ with your host machine. This is the way the host can
communicate with the virtual machines. These networks are in turn
- used by virtual box VM’s for OpenStack networks, so that
+ used by VirtualBox VMs for OpenStack networks, so that
OpenStack’s services can communicate with each other.
- Compute Node
- Start your Controller Node (the one you setup in the previous
- section).
- Preparing Ubuntu 12.04
+ Compute node
+ Start the controller node which was set up in a previous section.
+
+ After a reboot, the VM may lose internet and network
+ connectivity. Restart the networking service and use the
+ ping command to verify connectivity for the VM.
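+ For example (a minimal check; pinging the controller
+ management IP 10.10.10.51 used throughout this guide):
+ # service networking restart
+ # ping -c 4 10.10.10.51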
+
+
+ Take regular snapshots of the VirtualBox virtual machines after
+ each section. If a VM breaks, you can revert to the
+ snapshot to save time and effort.
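+ Snapshots can also be taken from the host command line; a
+ minimal sketch, assuming the VM is named compute-node:
+ $ VBoxManage snapshot "compute-node" take "after-networking"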
+
+ Controller node
+
+ Start the controller node which was set up in a previous section.
+
+ Preparing Ubuntu 14.04
+ Networking:
+ Configure your network by editing the
+ /etc/network/interfaces file
- After you install Ubuntu Server, go in sudo mode
+ Open /etc/network/interfaces and edit the
+ file as shown below:
+
+# This file is for the OpenStack compute node for OpenStack training project.
+# Note: Selection of the IP addresses is important.
+# Any changes to the IP addresses may break OpenStack related services.
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+# The primary network interface - VirtualBox NAT connection
+# (VirtualBox Network Adapter 1)
+auto eth0
+iface eth0 inet dhcp
+
+# VirtualBox vboxnet0 - OpenStack management network
+# (VirtualBox Network Adapter 2)
+auto eth1
+iface eth1 inet static
+address 10.10.10.53
+netmask 255.255.255.0
+
+# VirtualBox vboxnet2 - OpenStack VM data/communication network
+# (VirtualBox Network Adapter 3)
+auto eth2
+iface eth2 inet static
+address 10.20.20.53
+netmask 255.255.255.0
+
+
+
+ After saving the interfaces file, restart the networking
+ service:
+ # service networking restart
+ # ifconfig
+
+
+ Verify that each network interface is assigned the IP
+ address configured above.
+
+
+ SSH from host
+
+
+ To SSH into the compute node from the host machine, run the
+ following commands:
+ $ ssh compute@10.10.10.53
+ $ sudo su
+
+
+
+ Preparing Ubuntu 14.04
+
+
+ After installing Ubuntu Server, switch to the root user:
$ sudo su
@@ -43,35 +115,44 @@
Add Icehouse repositories:
- # apt-get install ubuntu-cloud-keyring python-software-properties software-properties-common python-keyring
- # echo deb http://ubuntu-cloud.archive.canonical.com/ubuntu precise-updates/icehouse main >> /etc/apt/sources.list.d/icehouse.list
+ # apt-get install ubuntu-cloud-keyring python-software-properties software-properties-common python-keyring
+ # add-apt-repository cloud-archive:icehouse
Update your system:
- # apt-get update
- # apt-get upgrade
- # apt-get dist-update
+ # apt-get update
+ # apt-get upgrade
+ # apt-get dist-upgrade
- Install NTP and other services:
+ Restart the machine to apply the changes:
+ # reboot
+
+
+ Install the vlan and bridge-utils packages:
+ # apt-get install vlan bridge-utils
+
+
+ Install NTP:
- # apt-get install ntp vlan bridge-utils
+ # apt-get install ntp
- Configure NTP Server to Controller Node:
-
- # sed -i 's/server 0.ubuntu.pool.ntp.org/#server0.ubuntu.pool.ntp.org/g' /etc/ntp.conf
- # sed -i 's/server 1.ubuntu.pool.ntp.org/#server1.ubuntu.pool.ntp.org/g' /etc/ntp.conf
- # sed -i 's/server 2.ubuntu.pool.ntp.org/#server2.ubuntu.pool.ntp.org/g' /etc/ntp.conf
- # sed -i 's/server 3.ubuntu.pool.ntp.org/#server3.ubuntu.pool.ntp.org/g' /etc/ntp.conf
-
-
-
- Enable IP Forwarding by adding the following to /etc/sysctl.conf
+ Configure the NTP server to point to the controller node:
+
+ # sed -i 's/server 0.ubuntu.pool.ntp.org/#server 0.ubuntu.pool.ntp.org/g' /etc/ntp.conf
+ # sed -i 's/server 1.ubuntu.pool.ntp.org/#server 1.ubuntu.pool.ntp.org/g' /etc/ntp.conf
+ # sed -i 's/server 2.ubuntu.pool.ntp.org/#server 2.ubuntu.pool.ntp.org/g' /etc/ntp.conf
+ # sed -i 's/server 3.ubuntu.pool.ntp.org/#server 3.ubuntu.pool.ntp.org/g' /etc/ntp.conf
+ # sed -i 's/server ntp.ubuntu.com/server 10.10.10.51/g' /etc/ntp.conf
+
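+ After editing ntp.conf, restart the NTP service so the new
+ server takes effect, and optionally check the peer list (a
+ suggested follow-up):
+ # service ntp restart
+ # ntpq -p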
+
+
+ Enable IP forwarding by adding the following to /etc/sysctl.conf:
net.ipv4.ip_forward=1
net.ipv4.conf.all.rp_filter=0
@@ -87,202 +168,119 @@ net.ipv4.conf.default.rp_filter=0
# sysctl -p
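+ To confirm the setting took effect (an optional check):
+ # sysctl net.ipv4.ip_forward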
-
- KVM
-
+
+Nova and KVM
+
- Install KVM:
-
- # apt-get install -y kvm libvirt-bin pm-utils
-
+ Install the Compute packages:
+ # apt-get install nova-compute-kvm python-guestfs
+ # dpkg-statoverride --update --add root root 0644 /boot/vmlinuz-$(uname -r)
- Edit /etc/libvirt/qemu.conf
- cgroup_device_acl = [
-"/dev/null", "/dev/full", "/dev/zero",
-"/dev/random", "/dev/urandom",
-"/dev/ptmx", "/dev/kvm", "/dev/kqemu",
-"/dev/rtc", "/dev/hpet","/dev/net/tun"
-]
-
-
- Delete Default Virtual Bridge
-
- # virsh net-destroy default
- # virsh net-undefine default
-
-
-
- To Enable Live Migration Edit /etc/libvirt/libvirtd.conf
- listen_tls = 0
-listen_tcp = 1
-auth_tcp = "none"
-
-
- Edit /etc/init/libvirt-bin.conf
- env libvirtd_opts="-d -l"
-
-
- Edit /etc/default/libvirt-bin
- libvirtd_opts="-d -l"
-
-
- Restart libvirt
-
- # service dbus restart
- # service libvirt-bin restart
-
-
-
- Neutron and OVS
-
-
- Install Open vSwitch
- # apt-get install -y openvswitch-switch openvswitch-datapath-dkms
-
-
- Create bridges:
-
- # ovs-vsctl add-br br-int
-
-
-
- Neutron
- Install the Neutron Open vSwitch agent:
-
- # apt-get -y install neutron-plugin-openvswitch-agent
-
-
-
- Edit /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
- #Under the database section
+ Configure /etc/nova/nova.conf
+ [DEFAULT]
+auth_strategy = keystone
+rpc_backend = rabbit
+rabbit_host = 10.10.10.51
+my_ip = 10.10.10.53
+vnc_enabled = True
+vncserver_listen = 0.0.0.0
+vncserver_proxyclient_address = 10.10.10.53
+novncproxy_base_url = http://10.10.10.51:6080/vnc_auto.html
+glance_host = 10.10.10.51
+network_api_class = nova.network.neutronv2.api.API
+neutron_url = http://10.10.10.51:9696
+neutron_auth_strategy = keystone
+neutron_admin_tenant_name = service
+neutron_admin_username = neutron
+neutron_admin_password = service_pass
+neutron_admin_auth_url = http://10.10.10.51:35357/v2.0
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+
[database]
-connection = mysql://neutronUser:neutronPass@10.10.10.51/neutron
-#Under the OVS section
-[ovs]
-tenant_network_type = gre
-tunnel_id_ranges = 1:1000
-integration_bridge = br-int
-tunnel_bridge = br-tun
-local_ip = 10.10.10.53
-enable_tunneling = True
-tunnel_type=gre
-[agent]
-tunnel_types = gre
-#Firewall driver for realizing quantum security group function
-[SECURITYGROUP]
-firewall_driver =
-neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
-
-
- Edit /etc/neutron/neutron.conf
- rabbit_host = 10.10.10.51
-#And update the keystone_authtoken section
+# The SQLAlchemy connection string used to connect to the database
+connection = mysql://novaUSER:novaPass@10.10.10.51/nova
+
[keystone_authtoken]
-auth_host = 10.10.10.51
-auth_port = 35357
-auth_protocol = http
-admin_tenant_name = service
-admin_user = quantum
-admin_password = service_pass
-signing_dir = /var/lib/quantum/keystone-signing
-[database]
-connection = mysql://neutronUser:neutronPass@192.168.100.51/neutron
-
-
- Restart all the services:
-
- # service neutron-plugin-openvswitch-agent restart
-
-
-
- Nova
-
-
- Install Nova
-
- # apt-get install nova-compute-kvm python-guestfs
- # chmod 0644 /boot/vmlinuz*
-
-
-
- Edit /etc/nova/api-paste.ini
- [filter:authtoken]
-paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
+auth_uri = http://10.10.10.51:5000
auth_host = 10.10.10.51
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = nova
-admin_password = service_pass
-signing_dirname = /tmp/keystone-signing-nova
-# Workaround for https://bugs.launchpad.net/nova/+bug/1154809
-auth_version = v2.0
+admin_password = service_pass
- Edit /etc/nova/nova-compute.conf
+ Edit /etc/nova/nova-compute.conf
+ [libvirt]
+virt_type = qemu
+
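+ virt_type is set to qemu because the compute node itself runs
+ inside VirtualBox, where hardware virtualization is usually not
+ available to the guest. A quick optional check:
+ # egrep -c '(vmx|svm)' /proc/cpuinfo
+ If the output is 0, the CPU does not expose hardware
+ acceleration and qemu is the correct choice.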
+
+ Restart the Compute service:
+ # service nova-compute restart
+
+
+
+ Neutron and OVS
+
+
+ Install the Networking components (ML2 plug-in and Open vSwitch agent):
+ # apt-get install -y neutron-common neutron-plugin-ml2 neutron-plugin-openvswitch-agent
+
+
+ Edit /etc/neutron/plugins/ml2/ml2_conf.ini
+ [ml2]
+type_drivers = gre
+tenant_network_types = gre
+mechanism_drivers = openvswitch
+
+[ml2_type_gre]
+tunnel_id_ranges = 1:1000
+
+[ovs]
+local_ip = 10.20.20.53
+tunnel_type = gre
+enable_tunneling = True
+
+[securitygroup]
+firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+enable_security_group = True
+
+
+ Edit /etc/neutron/neutron.conf
[DEFAULT]
-libvirt_type=qemu
-libvirt_ovs_bridge=br-int
-libvirt_vif_type=ethernet
-libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
-libvirt_use_virtio_for_bridges=True
+auth_strategy = keystone
+rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rabbit_host = 10.10.10.51
+rabbit_password = RABBIT_PASS
+core_plugin = ml2
+service_plugins = router
+allow_overlapping_ips = True
+
+[keystone_authtoken]
+...
+auth_uri = http://10.10.10.51:5000
+auth_host = 10.10.10.51
+auth_protocol = http
+auth_port = 35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = service_pass
- Edit /etc/nova/nova.conf
- [DEFAULT]
-logdir=/var/log/nova
-state_path=/var/lib/nova
-lock_path=/run/lock/nova
-verbose=True
-api_paste_config=/etc/nova/api-paste.ini
-compute_scheduler_driver=nova.scheduler.simple.SimpleScheduler
-rabbit_host=10.10.10.51
-nova_url=http://10.10.10.51:8774/v1.1/
-sql_connection=mysql://novaUser:novaPass@10.10.10.51/nova
-root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
-# Auth
-use_deprecated_auth=false
-auth_strategy=keystone
-# Imaging service
-glance_api_servers=10.10.10.51:9292
-image_service=nova.image.glance.GlanceImageService
-# Vnc configuration
-novnc_enabled=true
-novncproxy_base_url=http://10.10.10.51:6080/vnc_auto.html
-novncproxy_port=6080
-vncserver_proxyclient_address=10.10.10.53
-vncserver_listen=0.0.0.0
-# Network settings
-network_api_class=nova.network.neutronv2.api.API
-neutron_url=http://10.10.10.51:9696
-neutron_auth_strategy=keystone
-neutron_admin_tenant_name=service
-neutron_admin_username=neutron
-neutron_admin_password=service_pass
-neutron_admin_auth_url=http://10.10.10.51:35357/v2.0
-libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
-linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver
-#If you want Neutron + Nova Security groups
-firewall_driver=nova.virt.firewall.NoopFirewallDriver
-security_group_api=neutron
-#If you want Nova Security groups only, comment the two lines above and uncomment line -1-.
-#-1-firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver
-#Metadata
-service_neutron_metadata_proxy = True
-neutron_metadata_proxy_shared_secret = helloOpenStack
-# Compute #
-compute_driver=libvirt.LibvirtDriver
-# Cinder #
-volume_api_class=nova.volume.cinder.API
-osapi_volume_listen_port=5900
-cinder_catalog_info=volume:cinder:internalURL
+ Restart the Open vSwitch service:
+
+ # service openvswitch-switch restart
+
- Restart nova services
- # cd /etc/init.d/; for i in $( ls nova-* ); do service $i restart; done
+ Add the integration bridge:
+ # ovs-vsctl add-br br-int
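+ Then restart the Open vSwitch agent installed above and confirm
+ that the bridge exists (suggested checks; the service name
+ matches the neutron-plugin-openvswitch-agent package):
+ # service neutron-plugin-openvswitch-agent restart
+ # ovs-vsctl show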
+ Nova
List nova services (Check for the Smiley Faces to know if the services are running):
# nova-manage service list
diff --git a/doc/training-guides/basic-install-guide/lab_control-node.xml b/doc/training-guides/basic-install-guide/lab_control-node.xml
index 16187535..4c9982eb 100644
--- a/doc/training-guides/basic-install-guide/lab_control-node.xml
+++ b/doc/training-guides/basic-install-guide/lab_control-node.xml
@@ -140,7 +140,23 @@ netmask 255.255.255.0
# reboot
- My SQL
+ Install the vlan and bridge-utils packages:
+
+ # apt-get install vlan bridge-utils
+
+ Install NTP:
+
+ # apt-get install ntp
+
+ Configure the NTP server to point to the controller node:
+
+ # sed -i 's/server 0.ubuntu.pool.ntp.org/#server 0.ubuntu.pool.ntp.org/g' /etc/ntp.conf
+ # sed -i 's/server 1.ubuntu.pool.ntp.org/#server 1.ubuntu.pool.ntp.org/g' /etc/ntp.conf
+ # sed -i 's/server 2.ubuntu.pool.ntp.org/#server 2.ubuntu.pool.ntp.org/g' /etc/ntp.conf
+ # sed -i 's/server 3.ubuntu.pool.ntp.org/#server 3.ubuntu.pool.ntp.org/g' /etc/ntp.conf
+ # sed -i 's/server ntp.ubuntu.com/server 10.10.10.51/g' /etc/ntp.conf
+
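+ After editing ntp.conf, restart the NTP service so the new
+ configuration takes effect (a suggested step):
+ # service ntp restart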
+ MySQL
Install MySQL: