From ba268f3a8ba4e500b6b7a6c8c70987a1246ff2b9 Mon Sep 17 00:00:00 2001
From: Pranav Salunke
Date: Mon, 18 Aug 2014 16:31:00 +0530
Subject: [PATCH] Updates Controller Node

Updates the controller node instructions. Mostly fixes the ML2 plug-in
configuration for Neutron, and updates the configuration files and
install commands to match current OpenStack, with reference to the
install guides in openstack-manuals.

Change-Id: I0d6fa91b4245ed38ff86d86724bd6cc11d4a32dc
---
 .../basic-install-guide/lab_control-node.xml | 356 ++++++------------
 1 file changed, 125 insertions(+), 231 deletions(-)

diff --git a/doc/training-guides/basic-install-guide/lab_control-node.xml b/doc/training-guides/basic-install-guide/lab_control-node.xml
index 0c6fe8e6..36f90c2e 100644
--- a/doc/training-guides/basic-install-guide/lab_control-node.xml
+++ b/doc/training-guides/basic-install-guide/lab_control-node.xml
@@ -68,20 +68,20 @@ iface lo inet loopback
 
 # The primary network interface - Virtual Box NAT connection
 # (Virtual Box Network Adapter 3)
-auto eth2
-iface eth2 inet dhcp
+auto eth0
+iface eth0 inet dhcp
 
 # Virtual Box vboxnet0 - OpenStack Management Network
 # (Virtual Box Network Adapter 1)
-auto eth0
-iface eth0 inet static
+auto eth1
+iface eth1 inet static
 address 10.10.10.51
 netmask 255.255.255.0
 
 # Virtual Box vboxnet2 - for exposing OpenStack API over external network
 # (Virtual Box Network Adapter 2)
-auto eth1
-iface eth1 inet static
+auto eth2
+iface eth2 inet static
 address 192.168.100.51
 netmask 255.255.255.0
@@ -188,29 +188,6 @@ netmask 255.255.255.0
 Other
-Install other services:
-# apt-get install -y vlan bridge-utils
-
-Enable IP_Forwarding:
-# sed -i 's/#net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/' /etc/sysctl.conf
-
-Also add the following two lines into /etc/sysctl.conf:
-net.ipv4.conf.all.rp_filter=0
-net.ipv4.conf.default.rp_filter=0
-
-To save you from reboot, perform the following:
-# sysctl net.ipv4.ip_forward=1
-# sysctl net.ipv4.conf.all.rp_filter=0
-# sysctl net.ipv4.conf.default.rp_filter=0
-# sysctl -p
 Keystone
 Keystone is an OpenStack project that provides Identity, Token, Catalog and Policy services for use specifically by projects in the OpenStack family.
@@ -336,7 +313,13 @@ admin_password = service_pass
 Update the /etc/glance/glance-api.conf:
-sql_connection = mysql://glanceUser:glancePass@10.10.10.51/glance
+
+[DEFAULT]
+rpc_backend = rabbit
+rabbit_host = 10.10.10.51
+
+[database]
+sql_connection = mysql://glanceUser:glancePass@10.10.10.51/glance
 [keystone_authtoken]
 auth_host = 10.10.10.51
 auth_port = 35357
 admin_tenant_name = service
 admin_user = glance
 admin_password = service_pass
 flavor = keystone
@@ -351,7 +334,8 @@ flavor = keystone
 Update the /etc/glance/glance-registry.conf:
-sql_connection = mysql://glanceUser:glancePass@10.10.10.51/glance
+[database]
+sql_connection = mysql://glanceUser:glancePass@10.10.10.51/glance
 [keystone_authtoken]
 auth_host = 10.10.10.51
 auth_port = 35357
 admin_tenant_name = service
 admin_user = glance
 admin_password = service_pass
 flavor = keystone
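The two Glance hunks above move the database URI into a [database] section and point glance-api at RabbitMQ. A quick end-to-end check after restarting both services is to register a small test image and list it. This is a sketch, not part of the patch: it assumes admin credentials are exported in the shell, and the CirrOS image name and URL are illustrative choices.

# service glance-registry restart; service glance-api restart
# glance image-create --name cirros-test --is-public true \
    --container-format bare --disk-format qcow2 \
    --location http://download.cirros-cloud.net/0.3.2/cirros-0.3.2-x86_64-disk.img
# glance image-list   # the new image should reach status "active"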
@@ -382,7 +366,74 @@ flavor = keystone
 
 $ glance image-list
 
-Neutron
+Nova
+Nova is the project name for OpenStack Compute, a cloud computing fabric controller, the main part of an IaaS system. Individuals and organizations can use Nova to host and manage their own cloud computing systems. Nova originated as a project out of NASA Ames Research Laboratory.
+Install nova components:
+# apt-get install -y nova-api nova-cert nova-conductor nova-consoleauth nova-novncproxy nova-scheduler python-novaclient
+Edit /etc/nova/nova.conf:
+[database]
+connection = mysql://novaUser:novaPass@10.10.10.51/nova
+
+[DEFAULT]
+rpc_backend = rabbit
+rabbit_host = 10.10.10.51
+my_ip = 10.10.10.51
+vncserver_listen = 10.10.10.51
+vncserver_proxyclient_address = 10.10.10.51
+auth_strategy = keystone
+
+network_api_class = nova.network.neutronv2.api.API
+neutron_url = http://10.10.10.51:9696
+neutron_auth_strategy = keystone
+neutron_admin_tenant_name = service
+neutron_admin_username = neutron
+neutron_admin_password = service_pass
+neutron_admin_auth_url = http://10.10.10.51:35357/v2.0
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+
+[keystone_authtoken]
+auth_uri = http://10.10.10.51:5000
+auth_host = 10.10.10.51
+auth_port = 35357
+auth_protocol = http
+admin_tenant_name = service
+admin_user = nova
+admin_password = service_pass
+
+Synchronize your database:
+# nova-manage db sync
+Restart all nova-* services:
+# service nova-api restart
+# service nova-cert restart
+# service nova-consoleauth restart
+# service nova-scheduler restart
+# service nova-conductor restart
+# service nova-novncproxy restart
+Check for the smiling faces on the nova-* services to confirm your installation:
+# nova-manage service list
+Neutron
 Neutron is an OpenStack project to provide “network connectivity as a service” between interface devices (e.g., vNICs) managed by other OpenStack services (e.g., nova).
 Install the Neutron Server and the Open vSwitch package collection:
-# apt-get install -y neutron-server
+# apt-get install -y neutron-server neutron-plugin-ml2
 Edit the
-/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini:
-[database]
-connection = mysql://neutronUser:neutronPass@10.10.10.51/neutron
+/etc/neutron/plugins/ml2/ml2_conf.ini:
+[ml2]
+type_drivers = gre
+tenant_network_types = gre
+mechanism_drivers = openvswitch
 
-#Under the OVS section
-[ovs]
-tenant_network_type = gre
+[ml2_type_gre]
 tunnel_id_ranges = 1:1000
-enable_tunneling = True
 
-[agent]
-tunnel_types = gre
-#Firewall driver for realizing neutron security group function
 [securitygroup]
 firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+enable_security_group = True
 
 Edit the /etc/neutron/api-paste.ini:
 [filter:authtoken]
@@ -428,7 +476,18 @@ admin_password = service_pass
 
 Edit the /etc/neutron/neutron.conf:
-rabbit_host = 10.10.10.51
+[DEFAULT]
+auth_strategy = keystone
+rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rabbit_host = 10.10.10.51
+notify_nova_on_port_status_changes = True
+notify_nova_on_port_data_changes = True
+nova_url = http://10.10.10.51:8774/v2
+nova_admin_username = nova
+nova_admin_tenant_id = SERVICE_TENANT_ID
+nova_admin_password = service_pass
+nova_admin_auth_url = http://10.10.10.51:35357/v2.0
+
 [keystone_authtoken]
 auth_host = 10.10.10.51
 auth_port = 35357
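In the neutron.conf hunk above, SERVICE_TENANT_ID stands for the actual UUID of the service tenant, which differs on every installation. One way to look it up and substitute it, sketched under the assumption that admin credentials are exported and the keystone CLI from python-keystoneclient is installed:

# keystone tenant-get service   # the "id" row holds the UUID
# SERVICE_TENANT_ID=$(keystone tenant-get service | awk '/ id / { print $4 }')
# sed -i "s/SERVICE_TENANT_ID/$SERVICE_TENANT_ID/" /etc/neutron/neutron.conf

Once neutron-server has been restarted in the next hunk's steps, running neutron ext-list is a quick way to confirm that the server comes up and authenticates against Keystone.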
@@ -442,210 +501,53 @@ signing_dir = /var/lib/neutron/keystone-signing
 
 connection = mysql://neutronUser:neutronPass@10.10.10.51/neutron
 
+Restart the Nova services:
+# service nova-api restart
+# service nova-scheduler restart
+# service nova-conductor restart
 Restart Neutron services:
 # service neutron-server restart
-Nova
-Nova is the project name for OpenStack Compute, a cloud computing fabric controller, the main part of an IaaS system. Individuals and organizations can use Nova to host and manage their own cloud computing systems. Nova originated as a project out of NASA Ames Research Laboratory.
-Nova is written with the following design guidelines in mind:
-Component based architecture: Quickly adds new behaviors.
-Highly available: Scales to very serious workloads.
-Fault-Tolerant: Isolated processes avoid cascading failures.
-Recoverable: Failures should be easy to diagnose, debug, and rectify.
-Open standards: Be a reference implementation for a community-driven api.
-API compatibility: Nova strives to be API-compatible with popular systems like Amazon EC2.
-Install nova components:
-# apt-get install -y nova-novncproxy novnc nova-api nova-ajax-console-proxy nova-cert nova-conductor nova-consoleauth nova-doc nova-scheduler python-novaclient
-Edit /etc/nova/api-paste.ini
-[filter:authtoken]
-paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
-auth_host = 10.10.10.51
-auth_port = 35357
-auth_protocol = http
-admin_tenant_name = service
-admin_user = nova
-admin_password = service_pass
-signing_dir = /tmp/keystone-signing-nova
-# Workaround for https://bugs.launchpad.net/nova/+bug/1154809
-auth_version = v2.0
-Edit /etc/nova/nova.conf
-[DEFAULT]
-logdir=/var/log/nova
-state_path=/var/lib/nova
-lock_path=/run/lock/nova
-verbose=True
-api_paste_config=/etc/nova/api-paste.ini
-compute_scheduler_driver=nova.scheduler.simple.SimpleScheduler
-rabbit_host=10.10.10.51
-nova_url=http://10.10.10.51:8774/v1.1/
-sql_connection=mysql://novaUser:novaPass@10.10.10.51/nova
-root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
-
-# Auth
-use_deprecated_auth=false
-auth_strategy=keystone
-
-# Imaging service
-glance_api_servers=10.10.10.51:9292
-image_service=nova.image.glance.GlanceImageService
-
-# Vnc configuration
-novnc_enabled=true
-novncproxy_base_url=http://192.168.1.51:6080/vnc_auto.html
-novncproxy_port=6080
-vncserver_proxyclient_address=10.10.10.51
-vncserver_listen=0.0.0.0
-
-# Network settings
-network_api_class=nova.network.neutronv2.api.API
-neutron_url=http://10.10.10.51:9696
-neutron_auth_strategy=keystone
-neutron_admin_tenant_name=service
-neutron_admin_username=neutron
-neutron_admin_password=service_pass
-neutron_admin_auth_url=http://10.10.10.51:35357/v2.0
-libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
-linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver
-
-#If you want Neutron + Nova Security groups
-firewall_driver=nova.virt.firewall.NoopFirewallDriver
-security_group_api=neutron
-#If you want Nova Security groups only, comment the two lines above and uncomment line -1-.
-#-1-firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver
-
-#Metadata
-service_neutron_metadata_proxy = True
-neutron_metadata_proxy_shared_secret = helloOpenStack
-
-# Compute #
-compute_driver=libvirt.LibvirtDriver
-
-# Cinder #
-volume_api_class=nova.volume.cinder.API
-osapi_volume_listen_port=5900
-Synchronize your database:
-# nova-manage db sync
-Restart nova-* services (all nova services):
-# cd /etc/init.d/; for i in $( ls nova-* ); do service $i restart; done
-Check for the smiling faces on nova-* services to confirm your installation:
-# nova-manage service list
 Cinder
 Cinder is an OpenStack project that provides “block storage as a service”.
-Component based architecture: Quickly adds new behavior.
-Highly available: Scales to very serious workloads.
-Fault-Tolerant: Isolated processes avoid cascading failures.
-Recoverable: Failures should be easy to diagnose, debug and rectify.
-Open standards: Be a reference implementation for a community-driven API.
-API compatibility: Cinder strives to be API-compatible with popular systems like Amazon EC2.
 Install Cinder components:
-# apt-get install -y cinder-api cinder-scheduler cinder-volume iscsitarget open-iscsi iscsitarget-dkms
+# apt-get install -y cinder-api cinder-scheduler cinder-volume lvm2
-Configure the iSCSI services:
-# sed -i 's/false/true/g' /etc/default/iscsitarget
-Restart the services:
-# service iscsitarget start
-# service open-iscsi start
-Edit /etc/cinder/api-paste.ini:
-[filter:authtoken]
-paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
-service_protocol = http
-service_host = 192.168.100.51
-service_port = 5000
+Edit /etc/cinder/cinder.conf:
+[database]
+sql_connection = mysql://cinderUser:cinderPass@10.10.10.51/cinder
+
+[keystone_authtoken]
+auth_uri = http://10.10.10.51:5000
 auth_host = 10.10.10.51
 auth_port = 35357
 auth_protocol = http
 admin_tenant_name = service
 admin_user = cinder
 admin_password = service_pass
-signing_dir = /var/lib/cinder
-
-Edit /etc/cinder/cinder.conf:
-[DEFAULT]
-rootwrap_config=/etc/cinder/rootwrap.conf
-sql_connection = mysql://cinderUser:cinderPass@10.10.10.51/cinder
-api_paste_config = /etc/cinder/api-paste.ini
-iscsi_helper=ietadm
-volume_name_template = volume-%s
-volume_group = cinder-volumes
-verbose = True
-auth_strategy = keystone
-iscsi_ip_address=10.10.10.51
+
+[DEFAULT]
 rpc_backend = cinder.openstack.common.rpc.impl_kombu
 rabbit_host = 10.10.10.51
-rabbit_port = 5672
+rabbit_port = 5672
+rabbit_userid = guest
+
 Then, synchronize Cinder database:
 # cinder-manage db sync
+Restart the Cinder services:
+# service cinder-scheduler restart
+# service cinder-api restart
 Finally, create a volume group and name it cinder-volumes:
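The commands that actually build the cinder-volumes group are unchanged by this patch and therefore sit between the hunks. For orientation, a minimal loopback-backed sketch in the spirit of this lab; the backing file name, the 2 GB size, and /dev/loop2 are illustrative assumptions:

# dd if=/dev/zero of=cinder-volumes bs=1 count=0 seek=2G   # sparse 2 GB backing file
# losetup /dev/loop2 cinder-volumes                        # attach it as a block device
# pvcreate /dev/loop2                                      # mark it as an LVM physical volume
# vgcreate cinder-volumes /dev/loop2                       # the group the volume service expects

A loopback-backed group vanishes on reboot, so save the VM state instead of shutting it down. Once the group exists, creating and deleting a 1 GB test volume exercises the scheduler and volume service end to end:

$ cinder create --display-name testVolume 1
$ cinder list   # status should move from "creating" to "available"
$ cinder delete testVolume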
@@ -673,14 +575,6 @@
 down.
 
-Restart the Cinder services:
-# cd /etc/init.d/; for i in $( ls cinder-* ); do service $i restart; done
-Verify that the Cinder services are running:
-# cd /etc/init.d/; for i in $( ls cinder-* ); do service $i status; done
 
 Horizon
 Horizon is the canonical implementation of OpenStack’s