Updates Controller Node
Updates to controller node. Mostly fix ML2 plugin for neutron and also update the configuration files and install commands as per the changes in OpenStack with reference to install-guides in openstack-manuals. Change-Id: I0d6fa91b4245ed38ff86d86724bd6cc11d4a32dc
This commit is contained in:
parent
dcb47407a2
commit
ba268f3a8b
@ -68,20 +68,20 @@ iface lo inet loopback
|
||||
|
||||
# The primary network interface - Virtual Box NAT connection
|
||||
# (Virtual Box Network Adapter 3)
|
||||
auto eth2
|
||||
iface eth2 inet dhcp
|
||||
auto eth0
|
||||
iface eth0 inet dhcp
|
||||
|
||||
# Virtual Box vboxnet0 - OpenStack Management Network
|
||||
# (Virtual Box Network Adapter 1)
|
||||
auto eth0
|
||||
iface eth0 inet static
|
||||
auto eth1
|
||||
iface eth1 inet static
|
||||
address 10.10.10.51
|
||||
netmask 255.255.255.0
|
||||
|
||||
# Virtual Box vboxnet2 - for exposing OpenStack API over external network
|
||||
# (Virtual Box Network Adapter 2)
|
||||
auto eth1
|
||||
iface eth1 inet static
|
||||
auto eth2
|
||||
iface eth2 inet static
|
||||
address 192.168.100.51
|
||||
netmask 255.255.255.0
|
||||
</programlisting>
|
||||
@ -188,29 +188,6 @@ netmask 255.255.255.0
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para><emphasis role="bold">Other</emphasis></para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Install other services:</para>
|
||||
<screen><prompt>#</prompt> <userinput>apt-get install -y vlan bridge-utils</userinput></screen>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Enable IP_Forwarding:</para>
|
||||
<screen><prompt>#</prompt> <userinput>sed -i 's/#net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/' /etc/sysctl.conf</userinput></screen>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Also add the following two lines
|
||||
into <filename>/etc/sysctl.conf</filename>:</para>
|
||||
<programlisting>net.ipv4.conf.all.rp_filter=0</programlisting>
|
||||
<programlisting>net.ipv4.conf.default.rp_filter=0</programlisting>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>To apply these settings without a reboot, run the following:</para>
|
||||
<screen><prompt>#</prompt> <userinput>sysctl net.ipv4.ip_forward=1</userinput></screen>
|
||||
<screen><prompt>#</prompt> <userinput>sysctl net.ipv4.conf.all.rp_filter=0</userinput></screen>
|
||||
<screen><prompt>#</prompt> <userinput>sysctl net.ipv4.conf.default.rp_filter=0</userinput></screen>
|
||||
<screen><prompt>#</prompt> <userinput>sysctl -p</userinput></screen>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para><emphasis role="bold">Keystone</emphasis></para>
|
||||
<para>Keystone is an OpenStack project that provides Identity,
|
||||
Token, Catalog and Policy services for use specifically by
|
||||
@ -336,7 +313,13 @@ admin_password = service_pass</programlisting>
|
||||
<listitem>
|
||||
<para>Update the
|
||||
<filename>/etc/glance/glance-api.conf</filename>:</para>
|
||||
<programlisting>sql_connection = mysql://glanceUser:glancePass@10.10.10.51/glance
|
||||
<programlisting>
|
||||
[DEFAULT]
|
||||
rpc_backend = rabbit
|
||||
rabbit_host = 10.10.10.51
|
||||
|
||||
[database]
|
||||
sql_connection = mysql://glanceUser:glancePass@10.10.10.51/glance
|
||||
[keystone_authtoken]
|
||||
auth_host = 10.10.10.51
|
||||
auth_port = 35357
|
||||
@ -351,7 +334,8 @@ flavor = keystone</programlisting>
|
||||
<listitem>
|
||||
<para>Update the
|
||||
<filename>/etc/glance/glance-registry.conf</filename>:</para>
|
||||
<programlisting>sql_connection = mysql://glanceUser:glancePass@10.10.10.51/glance
|
||||
<programlisting>[database]
|
||||
sql_connection = mysql://glanceUser:glancePass@10.10.10.51/glance
|
||||
[keystone_authtoken]
|
||||
auth_host = 10.10.10.51
|
||||
auth_port = 35357
|
||||
@ -382,7 +366,74 @@ flavor = keystone</programlisting>
|
||||
<screen><prompt>$</prompt> <userinput>glance image-list</userinput></screen>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para><emphasis role="bold">Neutron</emphasis></para>
|
||||
<para><emphasis role="bold">Nova</emphasis></para>
|
||||
<para>Nova is the project name for OpenStack Compute, a cloud
|
||||
computing fabric controller, the main part of an IaaS system.
|
||||
Individuals and organizations can use Nova to host and manage
|
||||
their own cloud computing systems. Nova originated as a project
|
||||
out of NASA Ames Research Laboratory.</para>
|
||||
<para>Nova is written with the following design guidelines in
|
||||
mind:</para>
|
||||
<para>Install nova components:</para>
|
||||
<screen>
|
||||
<prompt>#</prompt> <userinput>apt-get install -y nova-api nova-cert nova-conductor nova-consoleauth nova-novncproxy nova-scheduler python-novaclient</userinput>
|
||||
</screen>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Edit <filename>/etc/nova/nova.conf</filename></para>
|
||||
<programlisting>[database]
|
||||
connection = mysql://novaUser:novaPass@10.10.10.51/nova
|
||||
|
||||
[DEFAULT]
|
||||
rpc_backend = rabbit
|
||||
rabbit_host = 10.10.10.51
|
||||
my_ip = 10.10.10.51
|
||||
vncserver_listen = 10.10.10.51
|
||||
vncserver_proxyclient_address = 10.10.10.51
|
||||
auth_strategy = keystone
|
||||
|
||||
network_api_class = nova.network.neutronv2.api.API
|
||||
neutron_url = http://10.10.10.51:9696
|
||||
neutron_auth_strategy = keystone
|
||||
neutron_admin_tenant_name = service
|
||||
neutron_admin_username = neutron
|
||||
neutron_admin_password = service_pass
|
||||
neutron_admin_auth_url = http://10.10.10.51:35357/v2.0
|
||||
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
|
||||
firewall_driver = nova.virt.firewall.NoopFirewallDriver
|
||||
security_group_api = neutron
|
||||
|
||||
[keystone_authtoken]
|
||||
auth_uri = http://10.10.10.51:5000
|
||||
auth_host = controller
|
||||
auth_port = 35357
|
||||
auth_protocol = http
|
||||
admin_tenant_name = service
|
||||
admin_user = nova
|
||||
admin_password = service_pass
|
||||
</programlisting>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Synchronize your database:</para>
|
||||
<screen><prompt>#</prompt> <userinput>nova-manage db sync</userinput></screen>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Restart nova-* services (all nova services):</para>
|
||||
<screen><prompt>#</prompt><userinput>service nova-api restart</userinput></screen>
|
||||
<screen><prompt>#</prompt><userinput>service nova-cert restart</userinput></screen>
|
||||
<screen><prompt>#</prompt><userinput>service nova-consoleauth restart</userinput></screen>
|
||||
<screen><prompt>#</prompt><userinput>service nova-scheduler restart</userinput></screen>
|
||||
<screen><prompt>#</prompt><userinput>service nova-conductor restart</userinput></screen>
|
||||
<screen><prompt>#</prompt><userinput>service nova-novncproxy restart</userinput></screen>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Check for the smiling faces on
|
||||
<systemitem class="service">nova-*</systemitem> services to confirm your
|
||||
installation:</para>
|
||||
<screen><prompt>#</prompt> <userinput>nova-manage service list</userinput></screen>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para><emphasis role="bold">Neutron</emphasis></para>
|
||||
<para>Neutron is an OpenStack project to provide “network
|
||||
connectivity as a service” between interface devices (e.g., vNICs)
|
||||
managed by other OpenStack services (e.g., nova).</para>
|
||||
@ -390,26 +441,23 @@ flavor = keystone</programlisting>
|
||||
<listitem>
|
||||
<para>Install the Neutron Server and the Open vSwitch package
|
||||
collection:</para>
|
||||
<screen><prompt>#</prompt> <userinput>apt-get install -y neutron-server</userinput></screen>
|
||||
<screen><prompt>#</prompt> <userinput>apt-get install -y neutron-server neutron-plugin-ml2</userinput></screen>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Edit the
|
||||
<filename>/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini</filename>:</para>
|
||||
<programlisting>[database]
|
||||
connection = mysql://neutronUser:neutronPass@10.10.10.51/neutron
|
||||
<filename>/etc/neutron/plugins/ml2/ml2_conf.ini</filename>:</para>
|
||||
<programlisting>[ml2]
|
||||
type_drivers = gre
|
||||
tenant_network_types = gre
|
||||
mechanism_drivers = openvswitch
|
||||
|
||||
#Under the OVS section
|
||||
[ovs]
|
||||
tenant_network_type = gre
|
||||
[ml2_type_gre]
|
||||
tunnel_id_ranges = 1:1000
|
||||
enable_tunneling = True
|
||||
[agent]
|
||||
tunnel_types = gre
|
||||
|
||||
#Firewall driver for realizing neutron security group function
|
||||
[securitygroup]
|
||||
firewall_driver =
|
||||
neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver</programlisting>
|
||||
neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
|
||||
enable_security_group = True</programlisting>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Edit the
|
||||
@ -428,7 +476,18 @@ admin_password = service_pass</programlisting>
|
||||
<listitem>
|
||||
<para>Edit the
|
||||
<filename>/etc/neutron/neutron.conf</filename>:</para>
|
||||
<programlisting>rabbit_host = 10.10.10.51
|
||||
<programlisting>[DEFAULT]
|
||||
auth_strategy = keystone
|
||||
rpc_backend = neutron.openstack.common.rpc.impl_kombu
|
||||
rabbit_host = controller
|
||||
notify_nova_on_port_status_changes = True
|
||||
notify_nova_on_port_data_changes = True
|
||||
nova_url = http://controller:8774/v2
|
||||
nova_admin_username = nova
|
||||
nova_admin_tenant_id = SERVICE_TENANT_ID
|
||||
nova_admin_password = NOVA_PASS
|
||||
nova_admin_auth_url = http://controller:35357/v2.0
|
||||
|
||||
[keystone_authtoken]
|
||||
auth_host = 10.10.10.51
|
||||
auth_port = 35357
|
||||
@ -442,210 +501,53 @@ signing_dir = /var/lib/neutron/keystone-signing
|
||||
connection = mysql://neutronUser:neutronPass@10.10.10.51/neutron</programlisting>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Restart Nova Services</para>
|
||||
<screen><prompt>#</prompt> <userinput>service nova-api restart</userinput></screen>
|
||||
<screen><prompt>#</prompt> <userinput>service nova-scheduler restart</userinput></screen>
|
||||
<screen><prompt>#</prompt> <userinput>service nova-conductor restart</userinput></screen>
|
||||
<para>Restart Neutron services:</para>
|
||||
<screen><prompt>#</prompt> <userinput>service neutron-server restart</userinput></screen>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para><emphasis role="bold">Nova</emphasis></para>
|
||||
<para>Nova is the project name for OpenStack Compute, a cloud
|
||||
computing fabric controller, the main part of an IaaS system.
|
||||
Individuals and organizations can use Nova to host and manage
|
||||
their own cloud computing systems. Nova originated as a project
|
||||
out of NASA Ames Research Laboratory.</para>
|
||||
<para>Nova is written with the following design guidelines in
|
||||
mind:</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Component based architecture: Quickly adds new
|
||||
behaviors.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Highly available: Scales to very serious workloads.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Fault-Tolerant: Isolated processes avoid cascading
|
||||
failures.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Recoverable: Failures should be easy to diagnose, debug,
|
||||
and rectify.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Open standards: Be a reference implementation for a
|
||||
community-driven api.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>API compatibility: Nova strives to be API-compatible with
|
||||
popular systems like Amazon EC2.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Install nova components:</para>
|
||||
<screen><prompt>#</prompt> <userinput>apt-get install -y nova-novncproxy novnc nova-api nova-ajax-console-proxy nova-cert nova-conductor nova-consoleauth nova-doc nova-scheduler python-novaclient</userinput></screen>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Edit <filename>/etc/nova/api-paste.ini</filename></para>
|
||||
<programlisting>[filter:authtoken]
|
||||
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
|
||||
auth_host = 10.10.10.51
|
||||
auth_port = 35357
|
||||
auth_protocol = http
|
||||
admin_tenant_name = service
|
||||
admin_user = nova
|
||||
admin_password = service_pass
|
||||
signing_dir = /tmp/keystone-signing-nova
|
||||
|
||||
# Workaround for https://bugs.launchpad.net/nova/+bug/1154809
|
||||
auth_version = v2.0</programlisting>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Edit <filename>/etc/nova/nova.conf</filename></para>
|
||||
<programlisting>[DEFAULT]
|
||||
logdir=/var/log/nova
|
||||
state_path=/var/lib/nova
|
||||
lock_path=/run/lock/nova
|
||||
verbose=True
|
||||
api_paste_config=/etc/nova/api-paste.ini
|
||||
compute_scheduler_driver=nova.scheduler.simple.SimpleScheduler
|
||||
rabbit_host=10.10.10.51
|
||||
nova_url=http://10.10.10.51:8774/v1.1/
|
||||
sql_connection=mysql://novaUser:novaPass@10.10.10.51/nova
|
||||
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
|
||||
|
||||
# Auth
|
||||
use_deprecated_auth=false
|
||||
auth_strategy=keystone
|
||||
|
||||
# Imaging service
|
||||
glance_api_servers=10.10.10.51:9292
|
||||
image_service=nova.image.glance.GlanceImageService
|
||||
|
||||
# Vnc configuration
|
||||
novnc_enabled=true
|
||||
novncproxy_base_url=http://192.168.1.51:6080/vnc_auto.html
|
||||
novncproxy_port=6080
|
||||
vncserver_proxyclient_address=10.10.10.51
|
||||
vncserver_listen=0.0.0.0
|
||||
|
||||
# Network settings
|
||||
network_api_class=nova.network.neutronv2.api.API
|
||||
neutron_url=http://10.10.10.51:9696
|
||||
neutron_auth_strategy=keystone
|
||||
neutron_admin_tenant_name=service
|
||||
neutron_admin_username=neutron
|
||||
neutron_admin_password=service_pass
|
||||
neutron_admin_auth_url=http://10.10.10.51:35357/v2.0
|
||||
libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
|
||||
linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver
|
||||
|
||||
#If you want Neutron + Nova Security groups
|
||||
firewall_driver=nova.virt.firewall.NoopFirewallDriver
|
||||
security_group_api=neutron
|
||||
#If you want Nova Security groups only, comment the two lines above and
|
||||
#uncomment line -1-.
|
||||
#-1-firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver
|
||||
|
||||
#Metadata
|
||||
service_neutron_metadata_proxy = True
|
||||
neutron_metadata_proxy_shared_secret = helloOpenStack
|
||||
|
||||
# Compute #
|
||||
compute_driver=libvirt.LibvirtDriver
|
||||
|
||||
# Cinder #
|
||||
volume_api_class=nova.volume.cinder.API
|
||||
osapi_volume_listen_port=5900</programlisting>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Synchronize your database:</para>
|
||||
<screen><prompt>#</prompt> <userinput>nova-manage db sync</userinput></screen>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Restart nova-* services (all nova services):</para>
|
||||
<screen><prompt>#</prompt> <userinput>cd /etc/init.d/; for i in $( ls nova-* ); do service $i restart; done</userinput></screen>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Check for the smiling faces on <systemitem class="service"
|
||||
>nova-*</systemitem> services to confirm your
|
||||
installation:</para>
|
||||
<screen><prompt>#</prompt> <userinput>nova-manage service list</userinput></screen>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para><emphasis role="bold">Cinder</emphasis></para>
|
||||
<para>Cinder is an OpenStack project that provides “block storage as a
|
||||
service”.</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>Component based architecture: Quickly adds new
|
||||
behavior.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Highly available: Scales to very serious workloads.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Fault-Tolerant: Isolated processes avoid cascading
|
||||
failures.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Recoverable: Failures should be easy to diagnose, debug
|
||||
and rectify.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Open standards: Be a reference implementation for a
|
||||
community-driven API.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>API compatibility: Cinder strives to be API-compatible
|
||||
with popular systems like Amazon EC2.</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Install Cinder components:</para>
|
||||
<screen><prompt>#</prompt> <userinput>apt-get install -y cinder-api cinder-scheduler cinder-volume iscsitarget open-iscsi iscsitarget-dkms</userinput></screen>
|
||||
<screen><prompt>#</prompt> <userinput>apt-get install -y cinder-api cinder-scheduler cinder-volume lvm2</userinput></screen>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Configure the iSCSI services:</para>
|
||||
<screen><prompt>#</prompt> <userinput>sed -i 's/false/true/g' /etc/default/iscsitarget</userinput></screen>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Restart the services:</para>
|
||||
<screen><prompt>#</prompt> <userinput>service iscsitarget start</userinput></screen>
|
||||
<screen><prompt>#</prompt> <userinput>service open-iscsi start</userinput></screen>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Edit
|
||||
<filename>/etc/cinder/api-paste.ini</filename>:</para>
|
||||
<programlisting>[filter:authtoken]
|
||||
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
|
||||
service_protocol = http
|
||||
service_host = 192.168.100.51
|
||||
service_port = 5000
|
||||
<para>Edit <filename>/etc/cinder/cinder.conf</filename>:</para>
|
||||
<programlisting>[database]
|
||||
sql_connection = mysql://cinderUser:cinderPass@10.10.10.51/cinder
|
||||
|
||||
[keystone_authtoken]
|
||||
auth_uri = http://10.10.10.51:5000
|
||||
auth_host = 10.10.10.51
|
||||
auth_port = 35357
|
||||
auth_protocol = http
|
||||
admin_tenant_name = service
|
||||
admin_user = cinder
|
||||
admin_password = service_pass
|
||||
signing_dir = /var/lib/cinder</programlisting>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Edit <filename>/etc/cinder/cinder.conf</filename>:</para>
|
||||
<programlisting>[DEFAULT]
|
||||
rootwrap_config=/etc/cinder/rootwrap.conf
|
||||
sql_connection = mysql://cinderUser:cinderPass@10.10.10.51/cinder
|
||||
api_paste_config = /etc/cinder/api-paste.ini
|
||||
iscsi_helper=ietadm
|
||||
volume_name_template = volume-%s
|
||||
volume_group = cinder-volumes
|
||||
verbose = True
|
||||
auth_strategy = keystone
|
||||
iscsi_ip_address=10.10.10.51
|
||||
|
||||
[DEFAULT]
|
||||
rpc_backend = cinder.openstack.common.rpc.impl_kombu
|
||||
rabbit_host = 10.10.10.51
|
||||
rabbit_port = 5672</programlisting>
|
||||
rabbit_port = 5672
|
||||
rabbit_userid = guest
|
||||
</programlisting>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Then, synchronize Cinder database:</para>
|
||||
<screen><prompt>#</prompt> <userinput>cinder-manage db sync</userinput></screen>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Restart Cinder Services</para>
|
||||
<screen><prompt>#</prompt> <userinput>service cinder-scheduler restart</userinput></screen>
|
||||
<screen><prompt>#</prompt> <userinput>service cinder-api restart</userinput></screen>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Finally, create a volume group and name it
|
||||
<literal>cinder-volumes</literal>:</para>
|
||||
@ -673,14 +575,6 @@ rabbit_port = 5672</programlisting>
|
||||
down.</para>
|
||||
</note>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Restart the Cinder services:</para>
|
||||
<screen><prompt>#</prompt> <userinput>cd /etc/init.d/; for i in $( ls cinder-* ); do service $i restart; done</userinput></screen>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>Verify that the Cinder services are running:</para>
|
||||
<screen><prompt>#</prompt> <userinput>cd /etc/init.d/; for i in $( ls cinder-* ); do service $i status; done</userinput></screen>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
<para><emphasis role="bold">Horizon</emphasis></para>
|
||||
<para>Horizon is the canonical implementation of OpenStack’s
|
||||
|
Loading…
x
Reference in New Issue
Block a user