From 688a2a9f253eca2f812c51be3dfb2858df3d6c11 Mon Sep 17 00:00:00 2001 From: Pranav Salunke Date: Fri, 14 Nov 2014 14:14:09 +0100 Subject: [PATCH] Updates basic install guides Update basic install guides from install guides. Need this for rough draft for POC for basic install guides as discussed during the openstack kilo summit. Change-Id: Iacbc0297ffe26932a1c1fc847554796c478250f1 --- .../basic-install-guide/app_reserved_uids.xml | 9 +- .../bk-openstack-basic-install-guide.xml | 85 --- .../ch_basic_environment.xml | 53 ++ .../ch_basic_networking.xml | 37 ++ .../basic-install-guide/ch_ceilometer.xml | 3 +- .../basic-install-guide/ch_cinder.xml | 27 +- .../basic-install-guide/ch_clients.xml | 4 - .../basic-install-guide/ch_glance.xml | 1 - .../basic-install-guide/ch_heat.xml | 1 - .../basic-install-guide/ch_horizon.xml | 54 +- .../basic-install-guide/ch_keystone.xml | 2 +- .../basic-install-guide/ch_networking.xml | 21 +- .../basic-install-guide/ch_nova.xml | 1 - .../basic-install-guide/ch_overview.xml | 49 +- .../basic-install-guide/ch_sahara.xml | 18 + .../basic-install-guide/ch_swift.xml | 22 +- .../basic-install-guide/ch_trove.xml | 1 - .../lab_virtualbox-basics.xml | 4 - .../section_object-storage-install.xml | 32 +- ...ection_object-storage-sys-requirements.xml | 4 - .../section_start-storage-node-services.xml | 14 +- .../section_swift-controller-node.xml | 195 +++++++ .../section_swift-example-arch.xml | 56 ++ .../section_swift-finalize-installation.xml | 134 +++++ .../section_swift-initial-rings.xml | 190 ++++++ .../section_swift-storage-node.xml | 256 ++++++++ .../section_swift-system-reqs.xml | 103 ++++ .../object-storage/section_swift-verify.xml | 50 ++ .../basic-install-guide/roadmap.rst | 11 +- .../section_basics-database.xml | 171 +++--- .../section_basics-networking-neutron.xml | 18 +- .../section_basics-networking-nova.xml | 19 +- .../section_basics-networking.xml | 52 +- .../section_basics-ntp.xml | 52 +- .../section_basics-packages.xml | 215 ++++--- 
.../section_basics-prerequisites.xml | 16 +- .../section_basics-queue.xml | 26 +- .../section_basics-security.xml | 130 +++++ .../section_ceilometer-cinder.xml | 35 +- .../section_ceilometer-controller.xml | 384 ++++++++++++ .../section_ceilometer-glance.xml | 20 +- .../section_ceilometer-nova.xml | 159 +++-- .../section_ceilometer-swift.xml | 28 +- .../section_ceilometer-verify.xml | 2 +- .../section_cinder-controller-node.xml | 264 +++++++++ .../section_cinder-storage-node.xml | 264 +++++++++ .../section_cinder-verify.xml | 59 +- .../section_dashboard-install.xml | 308 +++++----- .../section_dashboard-verify.xml | 24 + .../section_debconf-api-endpoints.xml | 30 +- .../section_debconf-concepts.xml | 2 +- .../section_debconf-keystone_authtoken.xml | 11 +- .../section_debconf-preseeding.xml | 2 +- .../section_glance-install.xml | 152 +++-- .../section_glance-verify.xml | 24 +- .../section_heat-install.xml | 221 ++++--- .../section_heat-verify.xml | 2 +- .../section_keystone-install.xml | 185 +++--- .../section_keystone-openrc.xml | 52 ++ .../section_keystone-services.xml | 34 +- .../section_keystone-users.xml | 22 +- .../section_keystone-verify.xml | 12 +- .../section_launch-instance-neutron.xml | 101 +++- .../section_launch-instance-nova.xml | 101 +++- .../section_neutron-compute-node.xml | 334 +++++++++++ .../section_neutron-concepts.xml | 24 +- .../section_neutron-controller-node.xml | 448 ++++++++++++++ .../section_neutron-initial-networks.xml | 80 +-- .../section_neutron-ml2-compute-node.xml | 9 - .../section_neutron-ml2-controller-node.xml | 10 - .../section_neutron-ml2-network-node.xml | 9 - .../section_neutron-network-node.xml | 550 ++++++++++++++++++ .../section_neutron-ovs-network-node.xml | 11 - .../section_nova-compute-install.xml | 124 ++-- .../section_nova-controller-install.xml | 126 ++-- .../section_nova-networking-compute-node.xml | 67 +-- ...ection_nova-networking-controller-node.xml | 34 +- ...ection_nova-networking-initial-network.xml | 4 +- 
.../section_nova-verify.xml | 20 +- .../section_sahara-install.xml | 97 +++ .../section_sahara-verify.xml | 26 + .../section_trove-install.xml | 190 +++--- .../section_trove-verify.xml | 2 +- doc/training-guides/bk_preface.xml | 2 +- doc/training-guides/st-training-guides.xml | 4 +- 85 files changed, 5299 insertions(+), 1506 deletions(-) delete mode 100644 doc/training-guides/basic-install-guide/bk-openstack-basic-install-guide.xml create mode 100644 doc/training-guides/basic-install-guide/ch_basic_environment.xml create mode 100644 doc/training-guides/basic-install-guide/ch_basic_networking.xml create mode 100644 doc/training-guides/basic-install-guide/ch_sahara.xml create mode 100644 doc/training-guides/basic-install-guide/object-storage/section_swift-controller-node.xml create mode 100644 doc/training-guides/basic-install-guide/object-storage/section_swift-example-arch.xml create mode 100644 doc/training-guides/basic-install-guide/object-storage/section_swift-finalize-installation.xml create mode 100644 doc/training-guides/basic-install-guide/object-storage/section_swift-initial-rings.xml create mode 100644 doc/training-guides/basic-install-guide/object-storage/section_swift-storage-node.xml create mode 100644 doc/training-guides/basic-install-guide/object-storage/section_swift-system-reqs.xml create mode 100644 doc/training-guides/basic-install-guide/object-storage/section_swift-verify.xml create mode 100644 doc/training-guides/basic-install-guide/section_basics-security.xml create mode 100644 doc/training-guides/basic-install-guide/section_ceilometer-controller.xml create mode 100644 doc/training-guides/basic-install-guide/section_cinder-controller-node.xml create mode 100644 doc/training-guides/basic-install-guide/section_cinder-storage-node.xml create mode 100644 doc/training-guides/basic-install-guide/section_dashboard-verify.xml create mode 100644 doc/training-guides/basic-install-guide/section_keystone-openrc.xml create mode 100644 
doc/training-guides/basic-install-guide/section_neutron-compute-node.xml create mode 100644 doc/training-guides/basic-install-guide/section_neutron-controller-node.xml create mode 100644 doc/training-guides/basic-install-guide/section_neutron-network-node.xml create mode 100644 doc/training-guides/basic-install-guide/section_sahara-install.xml create mode 100644 doc/training-guides/basic-install-guide/section_sahara-verify.xml diff --git a/doc/training-guides/basic-install-guide/app_reserved_uids.xml b/doc/training-guides/basic-install-guide/app_reserved_uids.xml index 1fe1422c..0905f81b 100644 --- a/doc/training-guides/basic-install-guide/app_reserved_uids.xml +++ b/doc/training-guides/basic-install-guide/app_reserved_uids.xml @@ -3,14 +3,13 @@ xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" - xml:id="reserved_uids"> + xml:id="reserved_user_ids"> Reserved user IDs - In OpenStack, certain user IDs are reserved and used to run - specific OpenStack services and own specific OpenStack - files. These users are set up according to the distribution - packages. The following table gives an overview. + OpenStack reserves certain user IDs to run specific services and + own specific files. These user IDs are set up according to the + distribution packages. The following table gives an overview. 
diff --git a/doc/training-guides/basic-install-guide/bk-openstack-basic-install-guide.xml b/doc/training-guides/basic-install-guide/bk-openstack-basic-install-guide.xml deleted file mode 100644 index 3944ba68..00000000 --- a/doc/training-guides/basic-install-guide/bk-openstack-basic-install-guide.xml +++ /dev/null @@ -1,85 +0,0 @@ - - - OpenStack Installation Guide for - <phrase os="ubuntu">Ubuntu 12.04/14.04 (LTS)</phrase> - - - - OpenStack Installation Guide for - Red Hat Enterprise Linux, CentOS, and Fedora - Ubuntu 12.04/14.04 (LTS) - openSUSE and SUSE Linux Enterprise Server - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/doc/training-guides/basic-install-guide/ch_basic_environment.xml b/doc/training-guides/basic-install-guide/ch_basic_environment.xml new file mode 100644 index 00000000..74c05a23 --- /dev/null +++ b/doc/training-guides/basic-install-guide/ch_basic_environment.xml @@ -0,0 +1,53 @@ + + + + Basic environment + + + + The trunk version of this guide focuses on the future Juno + release and will not work for the current Icehouse release. If + you want to install Icehouse, you must use the Icehouse version + of this guide instead. + + + + This chapter explains how to configure each node in the + example architectures + including the + two-node architecture with legacy networking and + three-node + architecture with OpenStack Networking (neutron). + + Although most environments include OpenStack Identity, Image Service, + Compute, at least one networking service, and the dashboard, OpenStack + Object Storage can operate independently of most other services. If your + use case only involves Object Storage, you can skip to + . However, the dashboard will not run without + at least OpenStack Image Service and Compute. + + + You must use an account with administrative privileges to configure + each node. Either run the commands as the root user + or configure the sudo utility. 
+ + + + The systemctl enable call on openSUSE outputs + a warning message when the service uses SysV Init scripts + instead of native systemd files. This warning can be ignored. + + + + + + + + + + diff --git a/doc/training-guides/basic-install-guide/ch_basic_networking.xml b/doc/training-guides/basic-install-guide/ch_basic_networking.xml new file mode 100644 index 00000000..d1ff6636 --- /dev/null +++ b/doc/training-guides/basic-install-guide/ch_basic_networking.xml @@ -0,0 +1,37 @@ + + + Add a networking component + This chapter explains how to install and configure either + OpenStack Networking (neutron) or the legacy nova-network networking service. + The nova-network service + enables you to deploy one network type per instance and is + suitable for basic network functionality. OpenStack Networking + enables you to deploy multiple network types per instance and + includes plug-ins for a + variety of products that support virtual + networking. + For more information, see the Networking chapter of the OpenStack Cloud + Administrator Guide. +
+ OpenStack Networking (neutron) + + + + + +
+
+ Next steps + Your OpenStack environment now includes the core components + necessary to launch a basic instance. You can launch an instance or add + more OpenStack services to your environment. +
+
diff --git a/doc/training-guides/basic-install-guide/ch_ceilometer.xml b/doc/training-guides/basic-install-guide/ch_ceilometer.xml index 359116c1..0af09578 100644 --- a/doc/training-guides/basic-install-guide/ch_ceilometer.xml +++ b/doc/training-guides/basic-install-guide/ch_ceilometer.xml @@ -8,8 +8,7 @@ Telemetry provides a framework for monitoring and metering the OpenStack cloud. It is also known as the ceilometer project. - - + diff --git a/doc/training-guides/basic-install-guide/ch_cinder.xml b/doc/training-guides/basic-install-guide/ch_cinder.xml index f7ade20b..28ad1341 100644 --- a/doc/training-guides/basic-install-guide/ch_cinder.xml +++ b/doc/training-guides/basic-install-guide/ch_cinder.xml @@ -5,22 +5,25 @@ version="5.0" xml:id="ch_cinder"> Add the Block Storage service - The OpenStack Block Storage service works through the - interaction of a series of daemon processes named cinder-* that reside persistently on - the host machine or machines. You can run the binaries from a - single node or across multiple nodes. You can also run them on the - same node as other OpenStack services. The following sections - introduce Block Storage service components and concepts. They will also show - you how to configure and install the Block Storage service. - - - + The OpenStack Block Storage service provides block storage devices + to instances using various backends. The Block Storage API and scheduler + services run on the controller node and the volume service runs on one + or more storage nodes. Storage nodes provide volumes to instances using + local block storage devices or SAN/NAS backends with the appropriate + drivers. For more information, see the + Configuration Reference. + + This chapter omits the backup manager because it depends on the + Object Storage service. + + +
Next steps Your OpenStack environment now includes Block Storage. You can launch an instance or add more - services to your environment in the next chapters. + services to your environment in the following chapters.
diff --git a/doc/training-guides/basic-install-guide/ch_clients.xml b/doc/training-guides/basic-install-guide/ch_clients.xml index cb079018..c3f98d19 100644 --- a/doc/training-guides/basic-install-guide/ch_clients.xml +++ b/doc/training-guides/basic-install-guide/ch_clients.xml @@ -13,10 +13,6 @@ Configure the clients on your desktop rather than on the server so that you have a similar experience to your users. - - - -
Create openrc.sh files diff --git a/doc/training-guides/basic-install-guide/ch_glance.xml b/doc/training-guides/basic-install-guide/ch_glance.xml index 46d6a2ca..a085c86c 100644 --- a/doc/training-guides/basic-install-guide/ch_glance.xml +++ b/doc/training-guides/basic-install-guide/ch_glance.xml @@ -25,7 +25,6 @@ >Configuration Reference. - diff --git a/doc/training-guides/basic-install-guide/ch_heat.xml b/doc/training-guides/basic-install-guide/ch_heat.xml index f6033723..a1d53b95 100644 --- a/doc/training-guides/basic-install-guide/ch_heat.xml +++ b/doc/training-guides/basic-install-guide/ch_heat.xml @@ -7,7 +7,6 @@ Add the Orchestration module The Orchestration module (heat) uses a heat orchestration template (HOT) to create and manage cloud resources. -
diff --git a/doc/training-guides/basic-install-guide/ch_horizon.xml b/doc/training-guides/basic-install-guide/ch_horizon.xml index adfd6ead..4a626c3a 100644 --- a/doc/training-guides/basic-install-guide/ch_horizon.xml +++ b/doc/training-guides/basic-install-guide/ch_horizon.xml @@ -7,38 +7,38 @@ Add the dashboard The OpenStack dashboard, also known as Horizon, is a web interface that enables cloud - administrators and users to manage various OpenStack resources and - services. + >Horizon, is a Web interface that enables cloud + administrators and users to manage various OpenStack resources and + services. The dashboard enables web-based interactions with the - OpenStack Compute cloud controller through the OpenStack - APIs. - These instructions show an example deployment, configured with - an Apache web server. - After you install and - configure the dashboard, you can complete the following - tasks: - - - Customize your dashboard. See section Customize the dashboard in the OpenStack Cloud Administrator - Guide. - - - Set up session storage for the dashboard. See . - - + OpenStack Compute cloud controller through the OpenStack + APIs. + Horizon enables you to customize the brand of the dashboard. + Horizon provides a set of core classes and reusable templates and tools. + This example deployment uses an Apache web server. - +
Next steps Your OpenStack environment now includes the dashboard. You can - launch an instance and add more - services to your environment in the following chapters. + launch an instance or add + more services to your environment in the following chapters. + After you install and configure the dashboard, you can + complete the following tasks: + + + Customize your dashboard. See section Customize the dashboard in the OpenStack Cloud Administrator Guide + for information on setting up colors, logos, and site titles. + + + Set up session storage. See section Set up session storage for the dashboard + in the OpenStack Cloud Administrator Guide for information on user + session data. + +
diff --git a/doc/training-guides/basic-install-guide/ch_keystone.xml b/doc/training-guides/basic-install-guide/ch_keystone.xml index ed96f44a..653b2605 100644 --- a/doc/training-guides/basic-install-guide/ch_keystone.xml +++ b/doc/training-guides/basic-install-guide/ch_keystone.xml @@ -5,9 +5,9 @@ version="5.0" xml:id="ch_keystone"> Add the Identity service - + diff --git a/doc/training-guides/basic-install-guide/ch_networking.xml b/doc/training-guides/basic-install-guide/ch_networking.xml index 134759b1..8cf07d1a 100644 --- a/doc/training-guides/basic-install-guide/ch_networking.xml +++ b/doc/training-guides/basic-install-guide/ch_networking.xml @@ -5,16 +5,16 @@ version="5.0" xml:id="ch_networking"> Add a networking component - This chapter explains how to install and configure + This chapter explains how to install and configure either OpenStack Networking (neutron) or the legacy nova-network service. + class="service">nova-network networking service. The nova-network service enables you to deploy one network type per instance and is suitable for basic network functionality. OpenStack Networking enables you to deploy multiple network types per instance and includes plug-ins for a variety of products that support virtual - networking. + networking. For more information, see the Networking chapter of the OpenStack Cloud @@ -22,13 +22,10 @@
OpenStack Networking (neutron) -
- Modular Layer 2 (ML2) plug-in - - - - -
+ + + +
Legacy networking (nova-network) @@ -39,8 +36,8 @@
Next steps Your OpenStack environment now includes the core components - necessary to launch an instance. You can launch an instance and add + necessary to launch a basic instance. You can launch an instance or add more OpenStack services to your environment.
diff --git a/doc/training-guides/basic-install-guide/ch_nova.xml b/doc/training-guides/basic-install-guide/ch_nova.xml index a0c41458..0b1211d6 100644 --- a/doc/training-guides/basic-install-guide/ch_nova.xml +++ b/doc/training-guides/basic-install-guide/ch_nova.xml @@ -6,7 +6,6 @@ xml:id="ch_nova"> Add the Compute service - diff --git a/doc/training-guides/basic-install-guide/ch_overview.xml b/doc/training-guides/basic-install-guide/ch_overview.xml index fb813912..5c802571 100644 --- a/doc/training-guides/basic-install-guide/ch_overview.xml +++ b/doc/training-guides/basic-install-guide/ch_overview.xml @@ -18,8 +18,6 @@ services. Each service offers an application programming interface (API) that facilitates this integration. The following table provides a list of OpenStack services: - This guide describes how to deploy these services in a functional test environment and, by example, teaches you how to build a production environment. @@ -29,15 +27,6 @@ Launching a virtual machine or instance involves many interactions among several services. The following diagram provides the conceptual architecture of a typical OpenStack environment. -
- Conceptual architecture - - - - - -
Example architectures @@ -48,8 +37,6 @@ architectures: - Three-node architecture with OpenStack Networking (neutron). - To be implemented The basic controller node runs the Identity service, Image @@ -65,27 +52,41 @@ your environment. - The network node runs the Networking plug-in, layer 2 agent, - and several layer 3 agents that provision and operate tenant - networks. Layer 2 services include provisioning of virtual - networks and tunnels. Layer 3 services include routing, - NAT - , and DHCP. This node also handles - external (internet) connectivity for tenant virtual machines + The network node runs the Networking plug-in, layer-2 agent, + and several layer-3 agents that provision and operate tenant + networks. Layer-2 services include provisioning of virtual + networks and tunnels. Layer-3 services include routing, + NAT, + and DHCP. This node also handles + external (Internet) connectivity for tenant virtual machines or instances. The compute node runs the hypervisor portion of Compute, which operates tenant virtual machines or instances. By default Compute uses KVM as the hypervisor. The compute node also runs - the Networking plug-in and layer 2 agent which operate tenant + the Networking plug-in and layer-2 agent which operate tenant networks and implement security groups. You can run more than one compute node. Optionally, the compute node also runs the Telemetry agent. This component provides additional features for your environment. + + The optional storage node contains the disks that the Block + Storage service uses to serve volumes. You can run more than one + storage node. + Optionally, the storage node also runs the Telemetry + agent. This component provides additional features for + your environment. + + + When you implement this architecture, skip + To use optional services, you + might need to install additional nodes, as described in + subsequent chapters. +
Three-node architecture with OpenStack Networking (neutron) @@ -97,6 +98,7 @@
+ Two-node architecture with legacy networking (nova-network). See The basic @@ -126,6 +128,11 @@ your environment. + + When you implement this architecture, skip + might need to install additional nodes, as described in + subsequent chapters. +
Two-node architecture with legacy networking (nova-network) diff --git a/doc/training-guides/basic-install-guide/ch_sahara.xml b/doc/training-guides/basic-install-guide/ch_sahara.xml new file mode 100644 index 00000000..e615a13e --- /dev/null +++ b/doc/training-guides/basic-install-guide/ch_sahara.xml @@ -0,0 +1,18 @@ + + + Add the Data processing service + The Data processing service (sahara) enables users to provide a + scalable data processing stack and associated management interfaces. + This includes provision and operation of data processing clusters as + well as scheduling and operation of data processing jobs. + + + This chapter is a work in progress. It may contain + incorrect information, and will be updated frequently. + + + diff --git a/doc/training-guides/basic-install-guide/ch_swift.xml b/doc/training-guides/basic-install-guide/ch_swift.xml index b8f0b048..341af6f3 100644 --- a/doc/training-guides/basic-install-guide/ch_swift.xml +++ b/doc/training-guides/basic-install-guide/ch_swift.xml @@ -7,26 +7,22 @@ Add Object Storage The OpenStack Object Storage services work together to provide object storage and retrieval through a REST API. For this example - architecture, as a prerequisite, you should already have the Identity - Service, also known as Keystone, installed. - + architecture, you must have already installed the Identity + Service, also known as Keystone. + href="object-storage/section_swift-system-reqs.xml"/> + href="object-storage/section_swift-example-arch.xml"/> - + href="object-storage/section_swift-controller-node.xml"/> + href="object-storage/section_swift-storage-node.xml"/> + href="object-storage/section_swift-initial-rings.xml"/> + href="object-storage/section_swift-finalize-installation.xml"/> - + href="object-storage/section_swift-verify.xml"/>
Next steps Your OpenStack environment now includes Object Storage. You can diff --git a/doc/training-guides/basic-install-guide/ch_trove.xml b/doc/training-guides/basic-install-guide/ch_trove.xml index c8a8fedd..4bb8375c 100644 --- a/doc/training-guides/basic-install-guide/ch_trove.xml +++ b/doc/training-guides/basic-install-guide/ch_trove.xml @@ -10,7 +10,6 @@ integrated project name is trove. This chapter is a work in progress. It may contain incorrect information, and will be updated frequently. - diff --git a/doc/training-guides/basic-install-guide/lab_virtualbox-basics.xml b/doc/training-guides/basic-install-guide/lab_virtualbox-basics.xml index b13b895d..4ec9c7f2 100644 --- a/doc/training-guides/basic-install-guide/lab_virtualbox-basics.xml +++ b/doc/training-guides/basic-install-guide/lab_virtualbox-basics.xml @@ -1,8 +1,4 @@ - -%openstack; -]> http://devstack.org for all-in-one including authentication with the Identity Service (keystone) v2.0 API. - - In this guide we recommend installing and configuring the Identity - service so that it implements Identity API v2.0. The Object Storage - service is unaware of domains when implementing Access Control Lists - (ACLs), so you must use the v2.0 API to avoid having identical user - names in different domains, which would enable two users to access - the same objects. -
Before you begin Have a copy of the operating system installation media available if you are installing on a new server. These steps assume you have set up repositories for packages for - your operating system as shown in OpenStack Packages. + your operating system as shown in + . This document demonstrates how to install a cluster by using the following types of nodes: @@ -69,15 +61,16 @@ the swift user. Use the service tenant and give the user the admin role: - $ keystone user-create --name=swift --pass=SWIFT_PASS \ - --email=swift@example.com -$ keystone user-role-add --user=swift --tenant=service --role=admin + $ keystone user-create --name swift --pass SWIFT_PASS +$ keystone user-role-add --user swift --tenant service --role admin + Replace SWIFT_PASS with a + suitable password. Create a service entry for the Object Storage Service: - $ keystone service-create --name=swift --type=object-store \ - --description="OpenStack Object Storage" + $ keystone service-create --name swift --type object-store \ + --description "OpenStack Object Storage" +-------------+----------------------------------+ | Property | Value | +-------------+----------------------------------+ @@ -98,10 +91,11 @@ API. 
In this guide, the controller host name is used: $ keystone endpoint-create \ - --service-id=$(keystone service-list | awk '/ object-store / {print $2}') \ - --publicurl='http://controller:8080/v1/AUTH_%(tenant_id)s' \ - --internalurl='http://controller:8080/v1/AUTH_%(tenant_id)s' \ - --adminurl=http://controller:8080 + --service-id $(keystone service-list | awk '/ object-store / {print $2}') \ + --publicurl 'http://controller:8080/v1/AUTH_%(tenant_id)s' \ + --internalurl 'http://controller:8080/v1/AUTH_%(tenant_id)s' \ + --adminurl http://controller:8080 \ + --region regionOne +-------------+---------------------------------------------------+ | Property | Value | +-------------+---------------------------------------------------+ diff --git a/doc/training-guides/basic-install-guide/object-storage/section_object-storage-sys-requirements.xml b/doc/training-guides/basic-install-guide/object-storage/section_object-storage-sys-requirements.xml index 9dcddc74..010f9c79 100644 --- a/doc/training-guides/basic-install-guide/object-storage/section_object-storage-sys-requirements.xml +++ b/doc/training-guides/basic-install-guide/object-storage/section_object-storage-sys-requirements.xml @@ -1,8 +1,4 @@ - -%openstack; -]>
- # for service in \ + # for service in \ + openstack-swift-object openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object-auditor \ + openstack-swift-container openstack-swift-container-replicator openstack-swift-container-updater openstack-swift-container-auditor \ + openstack-swift-account openstack-swift-account-replicator openstack-swift-account-reaper openstack-swift-account-auditor; do \ + systemctl enable $service.service; systemctl start $service.service; done + On SLES: + # for service in \ openstack-swift-object openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object-auditor \ openstack-swift-container openstack-swift-container-replicator openstack-swift-container-updater openstack-swift-container-auditor \ openstack-swift-account openstack-swift-account-replicator openstack-swift-account-reaper openstack-swift-account-auditor; do \ service $service start; chkconfig $service on; done + On openSUSE: + # for service in \ + openstack-swift-object openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object-auditor \ + openstack-swift-container openstack-swift-container-replicator openstack-swift-container-updater openstack-swift-container-auditor \ + openstack-swift-account openstack-swift-account-replicator openstack-swift-account-reaper openstack-swift-account-auditor; do \ + systemctl enable $service.service; systemctl start $service.service; done To start all swift services at once, run the command: # swift-init all start diff --git a/doc/training-guides/basic-install-guide/object-storage/section_swift-controller-node.xml b/doc/training-guides/basic-install-guide/object-storage/section_swift-controller-node.xml new file mode 100644 index 00000000..5f14b945 --- /dev/null +++ b/doc/training-guides/basic-install-guide/object-storage/section_swift-controller-node.xml @@ -0,0 +1,195 @@ + +
+ Install and configure the controller node + This section describes how to install and configure the proxy + service that handles requests for the account, container, and object + services operating on the storage nodes. For simplicity, this + guide installs and configures the proxy service on the controller node. + However, you can run the proxy service on any node with network + connectivity to the storage nodes. Additionally, you can install and + configure the proxy service on multiple nodes to increase performance + and redundancy. For more information, see the + Deployment Guide. + + To configure prerequisites + The proxy service relies on an authentication and authorization + mechanism such as the Identity service. However, unlike other services, + it also offers an internal mechanism that allows it to operate without + any other OpenStack services. However, for simplicity, this guide + references the Identity service in . Before + you configure the Object Storage service, you must create Identity + service credentials including endpoints. + + The Object Storage service does not use a SQL database on + the controller node. + + + To create the Identity service credentials, complete these + steps: + + + Create a swift user: + $ keystone user-create --name swift --pass SWIFT_PASS ++----------+----------------------------------+ +| Property | Value | ++----------+----------------------------------+ +| email | | +| enabled | True | +| id | d535e5cbd2b74ac7bfb97db9cced3ed6 | +| name | swift | +| username | swift | ++----------+----------------------------------+ + Replace SWIFT_PASS with a suitable + password. + + + Link the swift user to the + service tenant and admin + role: + $ keystone user-role-add --user swift --tenant service --role admin + + This command provides no output. 
+ + + + Create the swift service: + $ keystone service-create --name swift --type object-store \ + --description "OpenStack Object Storage" ++-------------+----------------------------------+ +| Property | Value | ++-------------+----------------------------------+ +| description | OpenStack Object Storage | +| enabled | True | +| id | 75ef509da2c340499d454ae96a2c5c34 | +| name | swift | +| type | object-store | ++-------------+----------------------------------+ + + + + + Create the Identity service endpoints: + $ keystone endpoint-create \ + --service-id $(keystone service-list | awk '/ object-store / {print $2}') \ + --publicurl 'http://controller:8080/v1/AUTH_%(tenant_id)s' \ + --internalurl 'http://controller:8080/v1/AUTH_%(tenant_id)s' \ + --adminurl http://controller:8080 \ + --region regionOne ++-------------+---------------------------------------------------+ +| Property | Value | ++-------------+---------------------------------------------------+ +| adminurl | http://controller:8080/ | +| id | af534fb8b7ff40a6acf725437c586ebe | +| internalurl | http://controller:8080/v1/AUTH_%(tenant_id)s | +| publicurl | http://controller:8080/v1/AUTH_%(tenant_id)s | +| region | regionOne | +| service_id | 75ef509da2c340499d454ae96a2c5c34 | ++-------------+---------------------------------------------------+ + + + + To install and configure the controller node components + + Install the packages: + + Complete OpenStack environments already include some of these + packages. + + # apt-get install swift swift-proxy python-swiftclient python-keystoneclient memcached + # yum install openstack-swift-proxy python-swiftclient python-keystone-auth-token memcached + # zypper install openstack-swift-proxy python-swiftclient python-keystoneclient memcached python-xml + + + Create the /etc/swift directory. 
+ 
+ 
+ Obtain the proxy service configuration file from the Object
+ Storage source repository:
+ # curl -o /etc/swift/proxy-server.conf \
+ https://raw.githubusercontent.com/openstack/swift/stable/juno/etc/proxy-server.conf-sample
+ 
+ 
+ Edit the /etc/swift/proxy-server.conf
+ file and complete the following actions:
+ 
+ 
+ In the [DEFAULT] section, configure
+ the bind port, user, and configuration directory:
+ [DEFAULT]
+...
+bind_port = 8080
+user = swift
+swift_dir = /etc/swift
+ 
+ 
+ In the [pipeline:main] section, enable
+ the appropriate modules:
+ [pipeline:main]
+pipeline = authtoken cache healthcheck keystoneauth proxy-logging proxy-server
+ 
+ For more information on other modules that enable
+ additional features, see the
+ Deployment Guide.
+ 
+ 
+ 
+ In the [app:proxy-server] section, enable
+ account management:
+ [app:proxy-server]
+...
+allow_account_management = true
+account_autocreate = true
+ 
+ 
+ In the [filter:keystoneauth] section,
+ configure the operator roles:
+ [filter:keystoneauth]
+use = egg:swift#keystoneauth
+...
+operator_roles = admin,_member_
+ 
+ You might need to uncomment this section.
+ 
+ 
+ 
+ In the [filter:authtoken] section,
+ configure Identity service access:
+ [filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+...
+auth_uri = http://controller:5000/v2.0
+identity_uri = http://controller:35357
+admin_tenant_name = service
+admin_user = swift
+admin_password = SWIFT_PASS
+delay_auth_decision = true
+ Replace SWIFT_PASS with the
+ password you chose for the swift user in the
+ Identity service.
+ 
+ You might need to uncomment this section.
+ 
+ 
+ Comment out any auth_host,
+ auth_port, and
+ auth_protocol options because the
+ identity_uri option replaces them.
+ 
+ 
+ 
+ In the [filter:cache] section, configure
+ the memcached location:
+ [filter:cache]
+...
+memcache_servers = 127.0.0.1:11211
+ 
+ 
+ 
+
diff --git a/doc/training-guides/basic-install-guide/object-storage/section_swift-example-arch.xml b/doc/training-guides/basic-install-guide/object-storage/section_swift-example-arch.xml new file mode 100644 index 00000000..b9e6fe62 --- /dev/null +++ b/doc/training-guides/basic-install-guide/object-storage/section_swift-example-arch.xml @@ -0,0 +1,56 @@ + +
+ Example architecture + In a production environment, the Object Storage service requires + at least two proxy nodes and five storage nodes. For simplicity, this + guide uses a minimal architecture with the proxy service running on + the existing OpenStack controller node and two storage nodes. However, + these concepts still apply. + + + Node: A host machine that runs one or more OpenStack + Object Storage services. + + + Proxy node: Runs proxy services. + + + Storage node: Runs account, container, and object + services. Contains the SQLite databases. + + + Ring: A set of mappings between OpenStack Object + Storage data to physical devices. + + + Replica: A copy of an object. By default, three + copies are maintained in the cluster. + + + Zone (optional): A logically separate section of the cluster, + related to independent failure characteristics. + + + Region (optional): A logically separate section of + the cluster, representing distinct physical locations + such as cities or countries. Similar to zones, but + representing physical locations of portions of the + cluster rather than logical segments. + + + To increase reliability and performance, you can add + additional proxy servers. + The following diagram shows one possible architecture for a + minimal production environment: + + + + + + + +
diff --git a/doc/training-guides/basic-install-guide/object-storage/section_swift-finalize-installation.xml b/doc/training-guides/basic-install-guide/object-storage/section_swift-finalize-installation.xml new file mode 100644 index 00000000..629f0ca4 --- /dev/null +++ b/doc/training-guides/basic-install-guide/object-storage/section_swift-finalize-installation.xml @@ -0,0 +1,134 @@ + +
+ Finalize installation
+ 
+ Configure hashes and default storage policy
+ 
+ Obtain the /etc/swift/swift.conf file from
+ the Object Storage source repository:
+ # curl -o /etc/swift/swift.conf \
+ https://raw.githubusercontent.com/openstack/swift/stable/juno/etc/swift.conf-sample
+ 
+ 
+ Edit the /etc/swift/swift.conf file and
+ complete the following actions:
+ 
+ 
+ In the [swift-hash] section, configure
+ the hash path prefix and suffix for your environment.
+ [swift-hash]
+...
+swift_hash_path_suffix = HASH_PATH_SUFFIX
+swift_hash_path_prefix = HASH_PATH_PREFIX
+ Replace HASH_PATH_PREFIX and
+ HASH_PATH_SUFFIX with unique
+ values.
+ 
+ Keep these values secret and do not change or lose
+ them.
+ 
+ 
+ 
+ In the [storage-policy:0] section,
+ configure the default storage policy:
+ [storage-policy:0]
+...
+name = Policy-0
+default = yes
+ 
+ 
+ 
+ Copy the swift.conf file to
+ the /etc/swift directory on each storage node
+ and any additional nodes running the proxy service.
+ 
+ 
+ On all nodes, ensure proper ownership of the configuration
+ directory:
+ # chown -R swift:swift /etc/swift
+ 
+ 
+ On the controller node and any other nodes running the proxy
+ service, restart the Object Storage proxy service including
+ its dependencies:
+ # service memcached restart
+# service swift-proxy restart
+ 
+ 
+ On the controller node and any other nodes running the proxy
+ service, start the Object Storage proxy service including its
+ dependencies and configure them to start when the system boots:
+ # systemctl enable openstack-swift-proxy.service memcached.service
+# systemctl start openstack-swift-proxy.service memcached.service
+ On SLES:
+ # service memcached start
+# service openstack-swift-proxy start
+# chkconfig memcached on
+# chkconfig openstack-swift-proxy on
+ On openSUSE:
+ # systemctl enable openstack-swift-proxy.service memcached.service
+# systemctl start openstack-swift-proxy.service memcached.service
+ 
+ 
+ On the storage nodes, start the Object Storage services:
+ 
# swift-init all start + + The storage node runs many Object Storage services and the + swift-init command makes them easier to + manage. You can ignore errors from services not running on the + storage node. + + + + On the storage nodes, start the Object Storage services and + configure them to start when the system boots: + # systemctl enable openstack-swift-account.service openstack-swift-account-auditor.service \ + openstack-swift-account-reaper.service openstack-swift-account-replicator.service +# systemctl start openstack-swift-account.service openstack-swift-account-auditor.service \ + openstack-swift-account-reaper.service openstack-swift-account-replicator.service +# systemctl enable openstack-swift-container.service openstack-swift-container-auditor.service \ + openstack-swift-container-replicator.service openstack-swift-container-updater.service +# systemctl start openstack-swift-container.service openstack-swift-container-auditor.service \ + openstack-swift-container-replicator.service openstack-swift-container-updater.service +# systemctl enable openstack-swift-object.service openstack-swift-object-auditor.service \ + openstack-swift-object-replicator.service openstack-swift-object-updater.service +# systemctl start openstack-swift-object.service openstack-swift-object-auditor.service \ + openstack-swift-object-replicator.service openstack-swift-object-updater.service + + + On the storage nodes, start the Object Storage services and + configure them to start when the system boots: + On SLES: + # for service in \ + openstack-swift-account openstack-swift-account-auditor \ + openstack-swift-account-reaper openstack-swift-account-replicator; do \ + service $service start; chkconfig $service on; done +# for service in \ + openstack-swift-container openstack-swift-container-auditor \ + openstack-swift-container-replicator openstack-swift-container-updater; do \ + service $service start; chkconfig $service on; done +# for service in \ + 
openstack-swift-object openstack-swift-object-auditor \ + openstack-swift-object-replicator openstack-swift-object-updater; do \ + service $service start; chkconfig $service on; done + On openSUSE: + # systemctl enable openstack-swift-account.service openstack-swift-account-auditor.service \ + openstack-swift-account-reaper.service openstack-swift-account-replicator.service +# systemctl start openstack-swift-account.service openstack-swift-account-auditor.service \ + openstack-swift-account-reaper.service openstack-swift-account-replicator.service +# systemctl enable openstack-swift-container.service openstack-swift-container-auditor.service \ + openstack-swift-container-replicator.service openstack-swift-container-updater.service +# systemctl start openstack-swift-container.service openstack-swift-container-auditor.service \ + openstack-swift-container-replicator.service openstack-swift-container-updater.service +# systemctl enable openstack-swift-object.service openstack-swift-object-auditor.service \ + openstack-swift-object-replicator.service openstack-swift-object-updater.service +# systemctl start openstack-swift-object.service openstack-swift-object-auditor.service \ + openstack-swift-object-replicator.service openstack-swift-object-updater.service + + +
diff --git a/doc/training-guides/basic-install-guide/object-storage/section_swift-initial-rings.xml b/doc/training-guides/basic-install-guide/object-storage/section_swift-initial-rings.xml new file mode 100644 index 00000000..05f5b6cf --- /dev/null +++ b/doc/training-guides/basic-install-guide/object-storage/section_swift-initial-rings.xml @@ -0,0 +1,190 @@ + +
+ Create initial rings + Before starting the Object Storage services, you must create + the initial account, container, and object rings. The ring builder + creates configuration files that each node uses to determine and + deploy the storage architecture. For simplicity, this guide uses one + region and zone with 2^10 (1024) maximum partitions, 3 replicas of each + object, and 1 hour minimum time between moving a partition more than + once. For Object Storage, a partition indicates a directory on a storage + device rather than a conventional partition table. For more information, + see the + Deployment Guide. +
+ Account ring + The account server uses the account ring to maintain lists + of containers. + + To create the ring + + Perform these steps on the controller node. + + + Change to the /etc/swift directory. + + + Create the base account.builder file: + # swift-ring-builder account.builder create 10 3 1 + + + Add each storage node to the ring: + # swift-ring-builder account.builder \ + add r1z1-STORAGE_NODE_MANAGEMENT_INTERFACE_IP_ADDRESS:6002/DEVICE_NAME DEVICE_WEIGHT + Replace + STORAGE_NODE_MANAGEMENT_INTERFACE_IP_ADDRESS + with the IP address of the management network on the storage node. + Replace DEVICE_NAME with a storage + device name on the same storage node. For example, using the first + storage node in + with the + /dev/sdb1 storage device and weight of 100: + # swift-ring-builder account.builder add r1z1-10.0.0.51:6002/sdb1 100 + Repeat this command for each storage device on each storage + node. The example architecture requires four variations of this + command. + + + Verify the ring contents: + # swift-ring-builder account.builder +account.builder, build version 4 +1024 partitions, 3.000000 replicas, 1 regions, 1 zones, 4 devices, 0.00 balance +The minimum number of hours before a partition can be reassigned is 1 +Devices: id region zone ip address port replication ip replication port name weight partitions balance meta + 0 1 1 10.0.0.51 6002 10.0.0.51 6002 sdb1 100.00 768 0.00 + 1 1 1 10.0.0.51 6002 10.0.0.51 6002 sdc1 100.00 768 0.00 + 2 1 1 10.0.0.52 6002 10.0.0.52 6002 sdb1 100.00 768 0.00 + 3 1 1 10.0.0.52 6002 10.0.0.52 6002 sdc1 100.00 768 0.00 + + + Rebalance the ring: + # swift-ring-builder account.builder rebalance + + This process can take a while. + + + +
+
+ Container ring + The container server uses the container ring to maintain lists + of objects. However, it does not track object locations. + + To create the ring + + Perform these steps on the controller node. + + + Change to the /etc/swift directory. + + + Create the base container.builder + file: + # swift-ring-builder container.builder create 10 3 1 + + + Add each storage node to the ring: + # swift-ring-builder container.builder \ + add r1z1-STORAGE_NODE_MANAGEMENT_INTERFACE_IP_ADDRESS:6001/DEVICE_NAME DEVICE_WEIGHT + Replace + STORAGE_NODE_MANAGEMENT_INTERFACE_IP_ADDRESS + with the IP address of the management network on the storage node. + Replace DEVICE_NAME with a storage + device name on the same storage node. For example, using the first + storage node in + with the + /dev/sdb1 storage device and weight of 100: + # swift-ring-builder container.builder add r1z1-10.0.0.51:6001/sdb1 100 + Repeat this command for each storage device on each storage + node. The example architecture requires four variations of this + command. + + + Verify the ring contents: + # swift-ring-builder container.builder +container.builder, build version 4 +1024 partitions, 3.000000 replicas, 1 regions, 1 zones, 4 devices, 0.00 balance +The minimum number of hours before a partition can be reassigned is 1 +Devices: id region zone ip address port replication ip replication port name weight partitions balance meta + 0 1 1 10.0.0.51 6001 10.0.0.51 6001 sdb1 100.00 768 0.00 + 1 1 1 10.0.0.51 6001 10.0.0.51 6001 sdc1 100.00 768 0.00 + 2 1 1 10.0.0.52 6001 10.0.0.52 6001 sdb1 100.00 768 0.00 + 3 1 1 10.0.0.52 6001 10.0.0.52 6001 sdc1 100.00 768 0.00 + + + Rebalance the ring: + # swift-ring-builder container.builder rebalance + + This process can take a while. + + + +
+
+ Object ring + The object server uses the object ring to maintain lists + of object locations on local devices. + + To create the ring + + Perform these steps on the controller node. + + + Change to the /etc/swift directory. + + + Create the base object.builder file: + # swift-ring-builder object.builder create 10 3 1 + + + Add each storage node to the ring: + # swift-ring-builder object.builder \ + add r1z1-STORAGE_NODE_MANAGEMENT_INTERFACE_IP_ADDRESS:6000/DEVICE_NAME DEVICE_WEIGHT + Replace + STORAGE_NODE_MANAGEMENT_INTERFACE_IP_ADDRESS + with the IP address of the management network on the storage node. + Replace DEVICE_NAME with a storage + device name on the same storage node. For example, using the first + storage node in + with the + /dev/sdb1 storage device and weight of 100: + # swift-ring-builder object.builder add r1z1-10.0.0.51:6000/sdb1 100 + Repeat this command for each storage device on each storage + node. The example architecture requires four variations of this + command. + + + Verify the ring contents: + # swift-ring-builder object.builder +object.builder, build version 4 +1024 partitions, 3.000000 replicas, 1 regions, 1 zones, 4 devices, 0.00 balance +The minimum number of hours before a partition can be reassigned is 1 +Devices: id region zone ip address port replication ip replication port name weight partitions balance meta + 0 1 1 10.0.0.51 6000 10.0.0.51 6000 sdb1 100.00 768 0.00 + 1 1 1 10.0.0.51 6000 10.0.0.51 6000 sdc1 100.00 768 0.00 + 2 1 1 10.0.0.52 6000 10.0.0.52 6000 sdb1 100.00 768 0.00 + 3 1 1 10.0.0.52 6000 10.0.0.52 6000 sdc1 100.00 768 0.00 + + + Rebalance the ring: + # swift-ring-builder object.builder rebalance + + This process can take a while. + + + +
+
+ Distribute ring configuration files + Copy the account.ring.gz, + container.ring.gz, and + object.ring.gz files to the + /etc/swift directory on each storage node and + any additional nodes running the proxy service. +
+
diff --git a/doc/training-guides/basic-install-guide/object-storage/section_swift-storage-node.xml b/doc/training-guides/basic-install-guide/object-storage/section_swift-storage-node.xml new file mode 100644 index 00000000..09da2dcc --- /dev/null +++ b/doc/training-guides/basic-install-guide/object-storage/section_swift-storage-node.xml @@ -0,0 +1,256 @@ + +
+ Install and configure the storage nodes + This section describes how to install and configure storage nodes + that operate the account, container, and object services. For + simplicity, this configuration references two storage nodes, each + containing two empty local block storage devices. Each of the + devices, /dev/sdb and /dev/sdc, + must contain a suitable partition table with one partition occupying + the entire device. Although the Object Storage service supports any + file system with extended attributes (xattr), + testing and benchmarking indicate the best performance and reliability + on XFS. For more information on horizontally + scaling your environment, see the + Deployment Guide. + + To configure prerequisites + You must configure each storage node before you install and + configure the Object Storage service on it. Similar to the controller + node, each storage node contains one network interface on the + management network. Optionally, each storage + node can contain a second network interface on a separate network for + replication. For more information, see + . + + Configure unique items on the first storage node: + + + Configure the management interface: + IP address: 10.0.0.51 + Network mask: 255.255.255.0 (or /24) + Default gateway: 10.0.0.1 + + + Set the hostname of the node to + object1. + + + + + Configure unique items on the second storage node: + + + Configure the management interface: + IP address: 10.0.0.52 + Network mask: 255.255.255.0 (or /24) + Default gateway: 10.0.0.1 + + + Set the hostname of the node to + object2. + + + + + Configure shared items on both storage nodes: + + + Copy the contents of the /etc/hosts file + from the controller node and add the following to it: + # object1 +10.0.0.51 object1 + +# object2 +10.0.0.52 object2 + Also add this content to the /etc/hosts + file on all other nodes in your environment. + + + Install and configure + NTP + using the instructions in + . 
+ + + Install the supporting utility packages: + # apt-get install xfsprogs rsync + # yum install xfsprogs rsync + # zypper install xfsprogs rsync xinetd + + + Format the /dev/sdb1 and + /dev/sdc1 partitions as XFS: + # mkfs.xfs /dev/sdb1 +# mkfs.xfs /dev/sdc1 + + + Create the mount point directory structure: + # mkdir -p /srv/node/sdb1 +# mkdir -p /srv/node/sdc1 + + + Edit the /etc/fstab file and add the + following to it: + /dev/sdb1 /srv/node/sdb1 xfs noatime,nodiratime,nobarrier,logbufs=8 0 2 +/dev/sdc1 /srv/node/sdc1 xfs noatime,nodiratime,nobarrier,logbufs=8 0 2 + + + Mount the devices: + # mount /srv/node/sdb1 +# mount /srv/node/sdc1 + + + + + Edit the /etc/rsyncd.conf file and add the + following to it: + uid = swift +gid = swift +log file = /var/log/rsyncd.log +pid file = /var/run/rsyncd.pid +address = MANAGEMENT_INTERFACE_IP_ADDRESS + +[account] +max connections = 2 +path = /srv/node/ +read only = false +lock file = /var/lock/account.lock + +[container] +max connections = 2 +path = /srv/node/ +read only = false +lock file = /var/lock/container.lock + +[object] +max connections = 2 +path = /srv/node/ +read only = false +lock file = /var/lock/object.lock + Replace MANAGEMENT_INTERFACE_IP_ADDRESS + with the IP address of the management network on the storage + node. + + The rsync service + requires no authentication, so consider running it on a private + network. 
+ + + + Edit the /etc/default/rsync file and enable + the rsync service: + RSYNC_ENABLE=true + + + Edit the /etc/xinetd.d/rsync file and enable + the rsync service: + disable = no + + + Start the rsync + service: + # service rsync start + + + Start the rsyncd service + and configure it to start when the system boots: + # systemctl enable rsyncd.service +# systemctl start rsyncd.service + + + Start the xinetd service + and configure it to start when the system boots: + On SLES: + # service xinetd start +# chkconfig xinetd on + On openSUSE: + # systemctl enable xinetd.service +# systemctl start xinetd.service + + + + Install and configure storage node components + + Perform these steps on each storage node. + + + Install the packages: + # apt-get install swift swift-account swift-container swift-object + # yum install openstack-swift-account openstack-swift-container \ + openstack-swift-object + # zypper install openstack-swift-account openstack-swift-container \ + openstack-swift-object python-xml + + + Obtain the accounting, container, and object service configuration + files from the Object Storage source repository: + # curl -o /etc/swift/account-server.conf \ + https://raw.githubusercontent.com/openstack/swift/stable/juno/etc/account-server.conf-sample + # curl -o /etc/swift/container-server.conf \ + https://raw.githubusercontent.com/openstack/swift/stable/juno/etc/container-server.conf-sample + # curl -o /etc/swift/object-server.conf \ + https://raw.githubusercontent.com/openstack/swift/stable/juno/etc/object-server.conf-sample + + + Edit the + /etc/swift/account-server.conf, + /etc/swift/container-server.conf, and + /etc/swift/object-server.conf files and + complete the following actions: + + + In the [DEFAULT] section, configure the + bind IP address, bind port, user, configuration directory, and + mount point directory: + [DEFAULT] +... 
+bind_ip = MANAGEMENT_INTERFACE_IP_ADDRESS
+bind_port = 6002
+user = swift
+swift_dir = /etc/swift
+devices = /srv/node
+ Replace
+ MANAGEMENT_INTERFACE_IP_ADDRESS
+ with the IP address of the management network on the storage
+ node.
+ The bind_port value of 6002 applies to the
+ account-server.conf file; use 6001 in the
+ container-server.conf file and 6000 in the
+ object-server.conf file to match the ports used
+ when building the rings.
+ 
+ 
+ In the [pipeline:main] section, enable
+ the appropriate modules:
+ [pipeline:main]
+pipeline = healthcheck recon account-server
+ In the container-server.conf file, end the
+ pipeline with container-server; in the
+ object-server.conf file, end it with
+ object-server.
+ 
+ For more information on other modules that enable
+ additional features, see the
+ Deployment Guide.
+ 
+ 
+ 
+ In the [filter:recon] section, configure
+ the recon (metrics) cache directory:
+ [filter:recon]
+...
+recon_cache_path = /var/cache/swift
+ 
+ 
+ 
+ 
+ Ensure proper ownership of the mount point directory
+ structure:
+ # chown -R swift:swift /srv/node
+ 
+ 
+ Create the recon directory and ensure proper
+ ownership of it:
+ # mkdir -p /var/cache/swift
+# chown -R swift:swift /var/cache/swift
+ 
+ 
diff --git a/doc/training-guides/basic-install-guide/object-storage/section_swift-system-reqs.xml b/doc/training-guides/basic-install-guide/object-storage/section_swift-system-reqs.xml new file mode 100644 index 00000000..0c31e721 --- /dev/null +++ b/doc/training-guides/basic-install-guide/object-storage/section_swift-system-reqs.xml @@ -0,0 +1,103 @@ + +
+ + System requirements + Hardware: OpenStack Object + Storage is designed to run on commodity hardware. + + When you install only the Object Storage and Identity + Service, you cannot use the dashboard unless you also + install Compute and the Image Service. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Hardware recommendations
ServerRecommended HardwareNotes
Object Storage object servers + Processor: dual quad + coreMemory: 8 or 12GB RAM + Disk space: optimized for cost per GB + Network: one 1GB Network Interface Card + (NIC)The amount of disk space depends on how much + you can fit into the rack efficiently. You + want to optimize these for best cost per GB + while still getting industry-standard failure + rates. At Rackspace, our storage servers are + currently running fairly generic 4U servers + with 24 2T SATA drives and 8 cores of + processing power. RAID on the storage drives + is not required and not recommended. Swift's + disk usage pattern is the worst case possible + for RAID, and performance degrades very + quickly using RAID 5 or 6. + As an example, Rackspace runs Cloud Files + storage servers with 24 2T SATA drives and 8 + cores of processing power. Most services + support either a worker or concurrency value + in the settings. This allows the services to + make effective use of the cores + available.
Object Storage container/account + servers + Processor: dual quad core + Memory: 8 or 12GB RAM + Network: one 1GB Network Interface Card + (NIC)Optimized for IOPS due to tracking with + SQLite databases.
Object Storage proxy server + Processor: dual quad + coreNetwork: one 1 GB Network + Interface Card (NIC)Higher network throughput offers better + performance for supporting many API + requests. + Optimize your proxy servers for best CPU + performance. The Proxy Services are more CPU + and network I/O intensive. If you are using 10 + GB networking to the proxy, or are terminating + SSL traffic at the proxy, greater CPU power is + required.
+ Operating system: OpenStack + Object Storage currently runs on Ubuntu, RHEL, CentOS, Fedora, + openSUSE, or SLES. + Networking: 1 Gbps or 10 + Gbps is suggested internally. For OpenStack Object Storage, an + external network should connect the outside world to the proxy + servers, and the storage network is intended to be isolated on + a private network or multiple private networks. + Database: For OpenStack + Object Storage, a SQLite database is part of the OpenStack + Object Storage container and account management + process. + Permissions: You can + install OpenStack Object Storage either as root or as a user + with sudo permissions if you configure the sudoers file to + enable all the permissions. +
diff --git a/doc/training-guides/basic-install-guide/object-storage/section_swift-verify.xml b/doc/training-guides/basic-install-guide/object-storage/section_swift-verify.xml new file mode 100644 index 00000000..65d58aa7 --- /dev/null +++ b/doc/training-guides/basic-install-guide/object-storage/section_swift-verify.xml @@ -0,0 +1,50 @@ + +
+ Verify operation + This section describes how to verify operation of the Object + Storage service. + + + Perform these steps on the controller node. + + + Source the demo tenant credentials: + $ source demo-openrc.sh + + + Show the service status: + $ swift stat +Account: AUTH_11b9758b7049476d9b48f7a91ea11493 +Containers: 0 + Objects: 0 + Bytes: 0 +Content-Type: text/plain; charset=utf-8 +X-Timestamp: 1381434243.83760 +X-Trans-Id: txdcdd594565214fb4a2d33-0052570383 +X-Put-Timestamp: 1381434243.83760 + + + Upload a test file: + $ swift upload demo-container1 FILE + Replace FILE with the name of a local + file to upload to the demo-container1 + container. + + + List containers: + $ swift list +demo-container1 + + + Download a test file: + $ swift download demo-container1 FILE + Replace FILE with the name of the + file uploaded to the demo-container1 + container. + + +
diff --git a/doc/training-guides/basic-install-guide/roadmap.rst b/doc/training-guides/basic-install-guide/roadmap.rst index a417abbe..423a246a 100644 --- a/doc/training-guides/basic-install-guide/roadmap.rst +++ b/doc/training-guides/basic-install-guide/roadmap.rst @@ -11,20 +11,23 @@ This guide has an overall blueprint with spec at: https://wiki.openstack.org/wiki/Documentation/InstallationGuideImprovements To do tasks: -- Remove openstack-config (crudini) commands; standardize manual install + - Unify chapter and section names (such as Overview) - Add sample output of each command and highlight important parts - Mention project as standard but tenant must be used for CLI params -- Refer to generic SQL database and update for MariaDB (RHEL), MySQL, and -PostgreSQL +- Refer to generic SQL database and update for MariaDB (RHEL), MySQL, + and PostgreSQL - Provide sample configuration files for each node - Compute and network nodes should reference server on controller node - Update password list - Add audience information; who is this book intended for Ongoing tasks: + - Ensure it meets conventions and standards - Continually update with latest release information relevant to install Wishlist tasks: -- Replace all individual client commands (like keystone, nova) with openstack client commands + +- Replace all individual client commands (like keystone, nova) with + openstack client commands diff --git a/doc/training-guides/basic-install-guide/section_basics-database.xml b/doc/training-guides/basic-install-guide/section_basics-database.xml index f6992340..78269e83 100644 --- a/doc/training-guides/basic-install-guide/section_basics-database.xml +++ b/doc/training-guides/basic-install-guide/section_basics-database.xml @@ -6,111 +6,86 @@ xml:id="basics-database"> Database - Most OpenStack - services require a database to store information. These examples - use a MySQL database that runs on the controller node. You must - install the MySQL database on the controller node. 
You must - install the MySQL Python library on any additional nodes that - access MySQL. - Most OpenStack services require a - database to store information. This guide uses a MySQL database - on SUSE Linux Enterprise Server and a compatible database on - openSUSE running on the controller node. This compatible - database for openSUSE is MariaDB. You must install the MariaDB - database on the controller node. You must install the MySQL - Python library on any additional nodes that access MySQL or MariaDB. - -
- Controller setup - For SUSE Linux Enterprise Server: - On the controller node, install the MySQL client and - server packages, and the Python library. - # zypper install mysql-client mysql python-mysql - For openSUSE: On the controller node, - install the MariaDB client and database server packages, - and the MySQL Python library. - # zypper install mariadb-client mariadb python-mysql - # apt-get install python-mysqldb mysql-server - # yum install mysql mysql-server MySQL-python - - When you install the server package, you are prompted - for the root password for the database. Choose a strong - password and remember it. - - The MySQL configuration requires some changes to work with - OpenStack. - - - Edit the - /etc/mysql/my.cnf file: - Edit the - /etc/my.cnf file: - - - Under the [mysqld] section, set the - bind-address key to the management IP - address of the controller node to enable access by other - nodes via the management network: - [mysqld] + Most OpenStack services use an SQL database to store information. + The database typically runs on the controller node. The procedures in + this guide use MariaDB or + MySQL depending on the distribution. + OpenStack services also support other SQL databases including + PostgreSQL. + + To install and configure the database server + + Install the packages: + + The Python MySQL library is compatible with MariaDB. + + # apt-get install mariadb-server python-mysqldb + # apt-get install mysql-server python-mysqldb + # yum install mariadb mariadb-server MySQL-python + On openSUSE: + # zypper install mariadb-client mariadb python-mysql + On SLES: + # zypper install mysql-client mysql python-mysql + + + Choose a suitable password for the database root account. 
+ + + Edit the + /etc/mysql/my.cnf file and complete the + following actions: + Edit the + /etc/my.cnf file and complete the following + actions: + + + In the [mysqld] section, set the + bind-address key to the management IP + address of the controller node to enable access by other + nodes via the management network: + [mysqld] ... bind-address = 10.0.0.11 - - - Under the [mysqld] section, set the - following keys to enable InnoDB, UTF-8 character set, and - UTF-8 collation by default: - [mysqld] + + + In the [mysqld] section, set the + following keys to enable useful options and the UTF-8 + character set: + [mysqld] ... default-storage-engine = innodb innodb_file_per_table collation-server = utf8_general_ci init-connect = 'SET NAMES utf8' character-set-server = utf8 - - - - - Restart the MySQL service to apply - the changes: - # service mysql restart - Start the MySQL - MariaDB or MySQL database - server and set it to start automatically when the system - boots: - # service mysqld start -# chkconfig mysqld on - # service mysql start + + + + + + To finalize installation + + Restart the database service: + # service mysql restart + + + Start the database service and configure it to start when the + system boots: + # systemctl enable mariadb.service +# systemctl start mariadb.service + On SLES: + # service mysql start # chkconfig mysql on - Finally, you should - set a root password for your MySQL - MariaDB or MySQL database. - The OpenStack programs that set up databases and tables prompt - you for this password if it is set. - You must - delete the anonymous users that are created when the database is - first started. Otherwise, database connection problems occur - when you follow the instructions in this guide. To do this, use - the mysql_secure_installation command. 
- Note that if mysql_secure_installation fails - you might need to use mysql_install_db first: - # mysql_install_db -# mysql_secure_installation - If you have - not already set a root database password, press - ENTER when you are prompted for the - password. This command presents a number of options - for you to secure your database installation. Respond - yes to all prompts unless you have a - good reason to do otherwise. -
-
- Node setup - On all nodes other than the controller node, install the - MySQL Python library: - # apt-get install python-mysqldb - # yum install MySQL-python - # zypper install python-mysql -
+ On openSUSE: + # systemctl start mysql.service +# systemctl enable mysql.service + + + Secure the database service: + Secure the database + service including choosing a suitable password for the root + account: + # mysql_secure_installation + +
diff --git a/doc/training-guides/basic-install-guide/section_basics-networking-neutron.xml b/doc/training-guides/basic-install-guide/section_basics-networking-neutron.xml index e78f6c71..b6fd3872 100644 --- a/doc/training-guides/basic-install-guide/section_basics-networking-neutron.xml +++ b/doc/training-guides/basic-install-guide/section_basics-networking-neutron.xml @@ -54,6 +54,9 @@ Network mask: 255.255.255.0 (or /24) Default gateway: 10.0.0.1
+ + Reboot the system to activate the changes. + To configure name resolution: @@ -133,9 +136,7 @@ BOOTPROTO='static' - Restart networking: - # service networking stop && service networking start - # service network restart + Reboot the system to activate the changes. @@ -185,6 +186,9 @@ BOOTPROTO='static' and so on. + + Reboot the system to activate the changes. + To configure name resolution: @@ -211,12 +215,12 @@ BOOTPROTO='static'
Verify connectivity - We recommend that you verify network connectivity to the internet + We recommend that you verify network connectivity to the Internet and among the nodes before proceeding further. From the controller node, - ping a site on the internet: + ping a site on the Internet: # ping -c 4 openstack.org PING openstack.org (174.143.194.225) 56(84) bytes of data. 64 bytes from 174.143.194.225: icmp_seq=1 ttl=54 time=18.3 ms @@ -260,7 +264,7 @@ rtt min/avg/max/mdev = 0.202/0.217/0.263/0.030 ms From the network node, - ping a site on the internet: + ping a site on the Internet: # ping -c 4 openstack.org PING openstack.org (174.143.194.225) 56(84) bytes of data. 64 bytes from 174.143.194.225: icmp_seq=1 ttl=54 time=18.3 ms @@ -304,7 +308,7 @@ rtt min/avg/max/mdev = 0.202/0.217/0.263/0.030 ms From the compute node, - ping a site on the internet: + ping a site on the Internet: # ping -c 4 openstack.org PING openstack.org (174.143.194.225) 56(84) bytes of data. 64 bytes from 174.143.194.225: icmp_seq=1 ttl=54 time=18.3 ms diff --git a/doc/training-guides/basic-install-guide/section_basics-networking-nova.xml b/doc/training-guides/basic-install-guide/section_basics-networking-nova.xml index 2fe0ff3d..0e78f71b 100644 --- a/doc/training-guides/basic-install-guide/section_basics-networking-nova.xml +++ b/doc/training-guides/basic-install-guide/section_basics-networking-nova.xml @@ -50,6 +50,9 @@ Network mask: 255.255.255.0 (or /24) Default gateway: 10.0.0.1 + + Reboot the system to activate the changes. + To configure name resolution: @@ -120,13 +123,11 @@ BOOTPROTO="none" file to contain the following: STARTMODE='auto' BOOTPROTO='static' - - - - Restart networking: - # service networking stop && service networking start - # service network restart + + + + Reboot the system to activate the changes. @@ -151,12 +152,12 @@ BOOTPROTO='static'
Verify connectivity - We recommend that you verify network connectivity to the internet + We recommend that you verify network connectivity to the Internet and among the nodes before proceeding further. From the controller node, - ping a site on the internet: + ping a site on the Internet: # ping -c 4 openstack.org PING openstack.org (174.143.194.225) 56(84) bytes of data. 64 bytes from 174.143.194.225: icmp_seq=1 ttl=54 time=18.3 ms @@ -185,7 +186,7 @@ rtt min/avg/max/mdev = 0.202/0.217/0.263/0.030 ms From the compute node, - ping a site on the internet: + ping a site on the Internet: # ping -c 4 openstack.org PING openstack.org (174.143.194.225) 56(84) bytes of data. 64 bytes from 174.143.194.225: icmp_seq=1 ttl=54 time=18.3 ms diff --git a/doc/training-guides/basic-install-guide/section_basics-networking.xml b/doc/training-guides/basic-install-guide/section_basics-networking.xml index 43ea47ed..b5e09c9f 100644 --- a/doc/training-guides/basic-install-guide/section_basics-networking.xml +++ b/doc/training-guides/basic-install-guide/section_basics-networking.xml @@ -28,19 +28,8 @@ openSUSE documentation. - - To disable <systemitem class="service">NetworkManager</systemitem> - and enable the <systemitem class="service">network</systemitem> - service: - - # service NetworkManager stop -# service network start -# chkconfig NetworkManager off -# chkconfig network on - - - To disable <systemitem class="service">NetworkManager</systemitem>: + To disable Network Manager: Use the YaST network module: # yast2 network @@ -52,28 +41,23 @@ - RHEL and derivatives including CentOS and Scientific - Linux enable a restrictive firewall by default. - During this installation, certain steps will fail unless you alter or - disable the firewall. For further information about securing your - installation, refer to the - - OpenStack Security Guide. - On Fedora, firewalld replaces - iptables as the default firewall system. 
While you - can use firewalld successfully, this guide - references iptables for compatibility with other - distributions. - - To disable <literal>firewalld</literal> and enable - <literal>iptables</literal>: - - # service firewalld stop -# service iptables start -# chkconfig firewalld off -# chkconfig iptables on - - + RHEL and CentOS enable a restrictive + firewall by default. During the installation + process, certain steps will fail unless you alter or disable the + firewall. For more information about securing your environment, refer + to the OpenStack + Security Guide. + openSUSE and SLES enable a restrictive + firewall by default. During the installation + process, certain steps will fail unless you alter or disable the + firewall. For more information about securing your environment, refer + to the OpenStack + Security Guide. + Your distribution does not enable a + restrictive firewall by default. For more + information about securing your environment, refer to the + OpenStack + Security Guide. Proceed to network configuration for the example OpenStack Networking (neutron) or legacy diff --git a/doc/training-guides/basic-install-guide/section_basics-ntp.xml b/doc/training-guides/basic-install-guide/section_basics-ntp.xml index 46e1fdee..474b49c0 100644 --- a/doc/training-guides/basic-install-guide/section_basics-ntp.xml +++ b/doc/training-guides/basic-install-guide/section_basics-ntp.xml @@ -9,10 +9,10 @@ You must install NTP to properly synchronize services among nodes. We recommend that you configure - the controller node to reference upstream servers and other nodes to - reference the controller node. + the controller node to reference more accurate (lower stratum) servers and + other nodes to reference the controller node.
- Configure controller node + Controller node To install the NTP service @@ -28,12 +28,21 @@ /etc/ntp.conf file to configure alternative servers such as those provided by your organization. - Edit the /etc/ntp.conf file: - Add, change, or remove the server keys as - necessary for your environment. Replace - NTP_SERVER with the hostname or IP address - of suitable NTP server. - server NTP_SERVER iburst + Edit the /etc/ntp.conf file and add, + change, or remove the following keys as necessary for your + environment: + server NTP_SERVER iburst +restrict -4 default kod notrap nomodify +restrict -6 default kod notrap nomodify + Replace NTP_SERVER with the + hostname or IP address of a suitable more accurate (lower stratum) + NTP server. The configuration supports multiple + server keys. + + For the restrict keys, you essentially + remove the nopeer and noquery + options. + Remove the /var/lib/ntp/ntp.conf.dhcp file if it exists. @@ -46,15 +55,19 @@ Start the NTP service and configure it to start when the system boots: - # service ntpd start -# chkconfig ntpd on - # service ntp start + # systemctl enable ntpd.service +# systemctl start ntpd.service + On SLES: + # service ntp start # chkconfig ntp on + On openSUSE: + # systemctl enable ntp.service +# systemctl start ntp.service
- Configure other nodes + Other nodes To install the NTP service @@ -71,7 +84,7 @@ Edit the /etc/ntp.conf file: Comment out or remove all but one server key and change it to reference the controller node. - server controller iburst + server controller iburst Remove the /var/lib/ntp/ntp.conf.dhcp file if it exists. @@ -84,10 +97,14 @@ Start the NTP service and configure it to start when the system boots: - # service ntpd start -# chkconfig ntpd on - # service ntp start + # systemctl enable ntpd.service +# systemctl start ntpd.service + On SLES: + # service ntp start # chkconfig ntp on + On openSUSE: + # systemctl enable ntp.service +# systemctl start ntp.service
@@ -97,7 +114,6 @@ further. Some nodes, particularly those that reference the controller node, can take several minutes to synchronize. - To verify NTP synchronization Run this command on the controller node: diff --git a/doc/training-guides/basic-install-guide/section_basics-packages.xml b/doc/training-guides/basic-install-guide/section_basics-packages.xml index a27719e6..222b4823 100644 --- a/doc/training-guides/basic-install-guide/section_basics-packages.xml +++ b/doc/training-guides/basic-install-guide/section_basics-packages.xml @@ -6,133 +6,85 @@ xml:id="basics-packages"> OpenStack packages - Distributions might release OpenStack packages as part of - their distribution or through other methods because the - OpenStack and distribution release times are independent of each - other. - This section describes the configuration you must - complete after you configure machines to install the latest - OpenStack packages. - The examples in this guide use the - OpenStack packages from the RDO repository. These packages work - on Red Hat Enterprise Linux 6, compatible versions of CentOS, - and Fedora 20. - - Install the yum-plugin-priorities plug-in. This package - allows the assignment of relative priorities to the configured software - repositories. This functionality is used by the RDO release packages: - - # yum install yum-plugin-priorities - - To enable the RDO repository, download and - install the rdo-release-juno - package: - # yum install http://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm - The EPEL package includes GPG keys - for package signing and repository information. This should only - be installed on Red Hat Enterprise Linux and CentOS, not Fedora. - Install the latest epel-release package (see - http://download.fedoraproject.org/pub/epel/6/x86_64/repoview/epel-release.html). 
- For example: - # yum install http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm - The - openstack-utils package contains utility - programs that make installation and configuration easier. These - programs are used throughout this guide. Install - openstack-utils. This verifies that you can - access the RDO repository: - # yum install openstack-utils - Use the Open Build Service repositories - for Juno based on your openSUSE or - SUSE Linux Enterprise Server version. - For openSUSE 13.1 use: - # zypper addrepo -f obs://Cloud:OpenStack:Juno/openSUSE_13.1 Juno - If you use SUSE Linux Enterprise Server 11 SP3, - use: - # zypper addrepo -f obs://Cloud:OpenStack:Juno/SLE_11_SP3 Juno - The packages are signed by GPG key 893A90DAD85F9316. You should verify the fingerprint of the imported GPG key before using it. -Key ID: 893A90DAD85F9316 + Distributions release OpenStack packages as part of the distribution + or using other methods because of differing release schedules. Perform + these procedures on all nodes. + + Disable or remove any automatic update services because they can + impact your OpenStack environment. + + + To configure prerequisites + + Install the python-software-properties package + to ease repository management: + # apt-get install python-software-properties + + + + To enable the OpenStack repository + + Enable the Ubuntu Cloud archive repository: + # add-apt-repository cloud-archive:juno + + + + To configure prerequisites + + Install the yum-plugin-priorities package to + enable assignment of relative priorities within repositories: + # yum install yum-plugin-priorities + + + Install the epel-release package to enable the + EPEL repository: + # yum install http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-2.noarch.rpm + + Fedora does not require this package. 
+ + + + + To enable the OpenStack repository + + Install the rdo-release-juno package to enable + the RDO repository: + # yum install http://rdo.fedorapeople.org/openstack-juno/rdo-release-juno.rpm + + + + To enable the OpenStack repository + + Enable the Open Build Service repositories based on your openSUSE + or SLES version: + On openSUSE 13.1: + # zypper addrepo -f obs://Cloud:OpenStack:Juno/openSUSE_13.1 Juno + On SLES 11 SP3: + # zypper addrepo -f obs://Cloud:OpenStack:Juno/SLE_11_SP3 Juno + + The packages are signed by GPG key 893A90DAD85F9316. You should + verify the fingerprint of the imported GPG key before using + it. + Key ID: 893A90DAD85F9316 Key Name: Cloud:OpenStack OBS Project <Cloud:OpenStack@build.opensuse.org> Key Fingerprint: 35B34E18ABC1076D66D5A86B893A90DAD85F9316 Key Created: Tue Oct 8 13:34:21 2013 Key Expires: Thu Dec 17 13:34:21 2015 - - The openstack-utils - package contains utility programs that make installation and - configuration easier. These programs are used throughout this - guide. Install openstack-utils. This verifies - that you can access the Open Build Service repository: - # zypper install openstack-utils - - The openstack-config program - in the openstack-utils package uses - crudini to manipulate configuration - files. However, crudini version 0.3 - does not support multi valued options. See - https://bugs.launchpad.net/openstack-manuals/+bug/1269271. - As a work around, you must manually set any multi valued - options or the new value overwrites the previous value instead - of creating a new option. - - The - openstack-selinux package includes the - policy files that are required to configure SELinux during - OpenStack installation on RHEL and CentOS. This step is not required during - OpenStack installation on Fedora. 
- Install openstack-selinux: - # yum install openstack-selinux - Upgrade your system packages: - # yum upgrade - # zypper refresh -# zypper dist-upgrade - If the upgrade included a new - kernel package, reboot the system to ensure the new kernel is running: - # reboot - - To use the Ubuntu Cloud Archive for Juno - The Ubuntu Cloud Archive is a special repository that - allows you to install newer releases of OpenStack on the - stable supported version of Ubuntu. - - Install the Ubuntu Cloud Archive for - Juno: - # apt-get install python-software-properties -# add-apt-repository cloud-archive:juno - - - Update the package database and upgrade your system: - # apt-get update -# apt-get dist-upgrade - - - If you intend to use OpenStack Networking with Ubuntu 12.04, - you should install a backported Linux kernel to improve the - stability of your system. This installation is not needed if you - intend to use the legacy networking service. - Install the Ubuntu 13.10 backported kernel: - # apt-get install linux-image-generic-lts-saucy linux-headers-generic-lts-saucy - - - Reboot the system for all changes to take effect: - # reboot + - + To use the Debian Wheezy backports archive for Juno The Juno release is available - only in Debian Sid - (otherwise called Unstable). However, the Debian maintainers + only in Debian Experimental (otherwise called rc-buggy), + as Jessie is frozen soon, and will contain Icehouse. + However, the Debian maintainers of OpenStack also maintain a non-official Debian repository for OpenStack containing Wheezy backports. - Install the Debian Wheezy backport repository + On all nodes, install the Debian Wheezy backport repository Juno: # echo "deb http://archive.gplhost.com/debian juno-backports main" >>/etc/apt/sources.list @@ -158,7 +110,7 @@ Key Expires: Thu Dec 17 13:34:21 2015 mirrors is available at http://archive.gplhost.com/readme.mirrors. -
+ Manually install python-argparse The Debian OpenStack packages are maintained on Debian Sid (also known as Debian Unstable) - the current development @@ -172,6 +124,7 @@ Key Expires: Thu Dec 17 13:34:21 2015 Python 2.7, this package is installed by default. Unfortunately, in Python 2.7, this package does not include Provides: python-argparse directive. + Because the packages are maintained in Sid where the Provides: python-argparse directive causes an error, and the Debian OpenStack maintainer wants to maintain one @@ -183,5 +136,33 @@ Key Expires: Thu Dec 17 13:34:21 2015 # apt-get install python-argparse This caveat applies to most OpenStack packages in Wheezy. -
+
+
+ + To finalize installation + + Upgrade the packages on your system: + # apt-get update && apt-get dist-upgrade + # yum upgrade + # zypper refresh && zypper dist-upgrade + + If the upgrade process includes a new kernel, reboot your system + to activate it. + + + + RHEL and CentOS enable SELinux by + default. Install the openstack-selinux package + to automatically manage security policies for OpenStack + services: + # yum install openstack-selinux + + Fedora does not require this package. + + + The installation process for this package can take a + while. + + +
diff --git a/doc/training-guides/basic-install-guide/section_basics-prerequisites.xml b/doc/training-guides/basic-install-guide/section_basics-prerequisites.xml index aadd2a81..d17695c6 100644 --- a/doc/training-guides/basic-install-guide/section_basics-prerequisites.xml +++ b/doc/training-guides/basic-install-guide/section_basics-prerequisites.xml @@ -6,7 +6,7 @@ xml:id="basics-prerequisites"> Before you begin - For a functional environment, OpenStack does not require a + For a functional environment, OpenStack does not require a significant amount of resources. We recommend that your environment meets or exceeds the following minimum requirements which can support several minimal CirrOS instances: @@ -28,7 +28,7 @@ recommend a minimal installation of your Linux distribution. Also, we strongly recommend that you install a 64-bit version of your distribution on at least the compute node. If you install a 32-bit version of your - distribution on the compute node, starting an instance using + distribution on the compute node, attempting to start an instance using a 64-bit image will fail. A single disk partition on each node works for most basic @@ -38,20 +38,20 @@ Many users build their test environments on virtual machines - (VMs). The primary benefits of this method include the + (VMs). The primary benefits of VMs include the following: - One physical server can support multiple nodes with almost + One physical server can support multiple nodes, each with almost any number of network interfaces. - The ability to take periodic "snapshots" throughout the installation + Ability to take periodic "snapshots" throughout the installation process and "roll back" to a working configuration in the event of a problem. - VMs can result in slow instances, particularly + However, VMs will reduce performance of your instances, particularly if your hypervisor and/or processor lacks support for hardware acceleration of nested VMs. 
@@ -59,7 +59,5 @@ permits promiscuous mode on the external network. - For more information about system requirements, see the OpenStack Operations - Guide. + For more information about system requirements, see the OpenStack Operations Guide.
diff --git a/doc/training-guides/basic-install-guide/section_basics-queue.xml b/doc/training-guides/basic-install-guide/section_basics-queue.xml index 436fc928..5cd5cd7d 100644 --- a/doc/training-guides/basic-install-guide/section_basics-queue.xml +++ b/doc/training-guides/basic-install-guide/section_basics-queue.xml @@ -3,7 +3,7 @@ xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" - xml:id="basics-queue"> + xml:id="basics-messaging-server"> Messaging server OpenStack uses a message broker to coordinate @@ -11,7 +11,7 @@ service typically runs on the controller node. OpenStack supports several message brokers including RabbitMQ, Qpid, and ZeroMQ. - Most distributions that package OpenStack support a particular + However, most distributions that package OpenStack support a particular message broker. This guide covers the RabbitMQ message broker which is supported by each distribution. If you prefer to implement a different message broker, consult the documentation associated @@ -41,11 +41,17 @@ To configure the message broker service - - Start the message broker service and enable it to start when the + + Start the message broker service and configure it to start when the system boots: - # service rabbitmq-server start + # systemctl enable rabbitmq-server.service +# systemctl start rabbitmq-server.service + On SLES: + # service rabbitmq-server start # chkconfig rabbitmq-server on + On openSUSE: + # systemctl enable rabbitmq-server.service +# systemctl start rabbitmq-server.service The message broker creates a default account that uses @@ -55,17 +61,19 @@ Run the following command: Replace RABBIT_PASS with a suitable password. - # rabbitmqctl change_password guest RABBIT_PASS + # rabbitmqctl change_password guest RABBIT_PASS +Changing password for user "guest" ... +...done. You must configure the rabbit_password key in the configuration file for each OpenStack service that uses the message broker. 
For production environments, you should create a unique account - with a suitable password. For more information on securing the + with a suitable password. For more information on securing the message broker, see the documentation. - If you decide to create a unique account with a suitable password + If you decide to create a unique account with a suitable password for your test environment, you must configure the rabbit_userid and rabbit_password keys in the configuration file @@ -73,6 +81,6 @@ - Congratulations, you are now ready to install OpenStack + Congratulations, you are now ready to install OpenStack services!
diff --git a/doc/training-guides/basic-install-guide/section_basics-security.xml b/doc/training-guides/basic-install-guide/section_basics-security.xml new file mode 100644 index 00000000..766e7fed --- /dev/null +++ b/doc/training-guides/basic-install-guide/section_basics-security.xml @@ -0,0 +1,130 @@ + +
+ + Security + OpenStack services support various security methods including + password, policy, and encryption. Additionally, supporting services + including the database server and message broker support at least + password security. + To ease the installation process, this guide only covers password + security where applicable. You can create secure passwords manually, + generate them using a tool such as + pwgen, or + by running the following command: + $ openssl rand -hex 10 + For OpenStack services, this guide uses + SERVICE_PASS to reference service account + passwords and SERVICE_DBPASS to reference + database passwords. + The following table provides a list of services that require + passwords and their associated references in the guide: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Passwords
Password nameDescription
Database password (no variable used)Root password for the database
RABBIT_PASSPassword of user guest of RabbitMQ
KEYSTONE_DBPASSDatabase password of Identity service
DEMO_PASSPassword of user demo
ADMIN_PASSPassword of user admin
GLANCE_DBPASSDatabase password for Image Service
GLANCE_PASSPassword of Image Service user glance
NOVA_DBPASSDatabase password for Compute service
NOVA_PASSPassword of Compute service user nova
DASH_DBPASSDatabase password for the dashboard
CINDER_DBPASSDatabase password for the Block Storage service
CINDER_PASSPassword of Block Storage service user cinder
NEUTRON_DBPASSDatabase password for the Networking service
NEUTRON_PASSPassword of Networking service user neutron
HEAT_DBPASSDatabase password for the Orchestration service
HEAT_PASSPassword of Orchestration service user heat
CEILOMETER_DBPASSDatabase password for the Telemetry service
CEILOMETER_PASSPassword of Telemetry service user ceilometer
TROVE_DBPASSDatabase password of Database service
TROVE_PASSPassword of Database Service user trove
+
+ OpenStack and supporting services require administrative privileges + during installation and operation. In some cases, services perform + modifications to the host that can interfere with deployment automation + tools such as Ansible, Chef, and Puppet. For example, some OpenStack + services add a root wrapper to sudo that can interfere + with security policies. See the + Cloud Administrator Guide + for more information. Also, the Networking service assumes default values + for kernel network parameters and modifies firewall rules. To avoid most + issues during your initial installation, we recommend using a stock + deployment of a supported distribution on your hosts. However, if you + choose to automate deployment of your hosts, review the configuration + and policies applied to them before proceeding further. +
diff --git a/doc/training-guides/basic-install-guide/section_ceilometer-cinder.xml b/doc/training-guides/basic-install-guide/section_ceilometer-cinder.xml index 58e94c56..da54bba5 100644 --- a/doc/training-guides/basic-install-guide/section_ceilometer-cinder.xml +++ b/doc/training-guides/basic-install-guide/section_ceilometer-cinder.xml @@ -3,37 +3,44 @@ xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" - xml:id="ceilometer-install-cinder"> + xml:id="ceilometer-agent-cinder"> Add the Block Storage service agent for Telemetry To retrieve volume samples, you must configure the Block Storage service to send notifications to the bus. - - Edit /etc/cinder/cinder.conf + Edit /etc/cinder/cinder.conf and add in the [DEFAULT] section on the controller and volume nodes: - control_exchange = cinder + control_exchange = cinder notification_driver = cinder.openstack.common.notifier.rpc_notifier - - Run the following commands on - the controller and volume nodes: - # openstack-config --set /etc/cinder/cinder.conf DEFAULT control_exchange cinder -# openstack-config --set /etc/cinder/cinder.conf DEFAULT notification_driver cinder.openstack.common.notifier.rpc_notifier - Restart the Block Storage services with their new settings. 
On the controller node: - # service cinder-api restart + # service cinder-api restart # service cinder-scheduler restart - # service openstack-cinder-api restart + # systemctl restart openstack-cinder-api.service openstack-cinder-scheduler.service + On SLES: + # service openstack-cinder-api restart # service openstack-cinder-scheduler restart - On the volume node: - # service openstack-cinder-volume restart + # service openstack-cinder-volume restart + On openSUSE: + # systemctl restart openstack-cinder-api.service openstack-cinder-scheduler.service + On the storage node: # service cinder-volume restart + # systemctl restart openstack-cinder-volume.service + On SLES: + # service openstack-cinder-volume restart + On openSUSE: + # systemctl restart openstack-cinder-volume.service + + If you want to collect OpenStack Block Storage notifications on demand, + you can use cinder-volume-usage-audit from OpenStack Block Storage. + For more information, see Block Storage audit script setup to get notifications. +
diff --git a/doc/training-guides/basic-install-guide/section_ceilometer-controller.xml b/doc/training-guides/basic-install-guide/section_ceilometer-controller.xml new file mode 100644 index 00000000..6d70adc2 --- /dev/null +++ b/doc/training-guides/basic-install-guide/section_ceilometer-controller.xml @@ -0,0 +1,384 @@ + +
+ Install and configure controller node + This section describes how to install and configure the Telemetry + module, code-named ceilometer, on the controller node. The Telemetry + module uses separate agents to collect measurements from each OpenStack + service in your environment. + + To configure prerequisites + Before you install and configure Telemetry, you must install + MongoDB, create a MongoDB database, and + create Identity service credentials including endpoints. + + Enable the Open Build Service repositories for MongoDB based on + your openSUSE or SLES version: + On openSUSE: + # zypper addrepo -f obs://server:database/openSUSE_13.1 Database + On SLES: + # zypper addrepo -f obs://server:database/SLE_11_SP3 Database + + The packages are signed by GPG key + 562111AC05905EA8. You should + verify the fingerprint of the imported GPG key before using + it. + Key Name: server:database OBS Project <server:database@build.opensuse.org> +Key Fingerprint: 116EB86331583E47E63CDF4D562111AC05905EA8 +Key Created: Thu Oct 11 20:08:39 2012 +Key Expires: Sat Dec 20 20:08:39 2014 + + + + Install the MongoDB package: + # yum install mongodb-server mongodb + # zypper install mongodb + # apt-get install mongodb-server + + + Edit the /etc/mongodb.conf file and + complete the following actions: + + + Configure the bind_ip key to use the + management interface IP address of the controller node. + bind_ip = 10.0.0.11 + + + By default, MongoDB creates several 1GB journal files + in the /var/lib/mongodb/journal + directory. If you want to reduce the size of each journal file + to 128MB and limit total journal space consumption to + 512MB, assert the smallfiles key: + smallfiles = true + If you change the journaling configuration, + stop the MongoDB service, remove the initial journal files, and + start the service: + # service mongodb stop +# rm /var/lib/mongodb/journal/prealloc.* +# service mongodb start + You can also disable journaling. 
For more information, see + the MongoDB manual. + + + Restart the MongoDB service: + # service mongodb restart + + + Start the MongoDB services and configure them to start when + the system boots: + On SLES: + # service mongodb start +# chkconfig mongodb on + On openSUSE: + # systemctl enable mongodb.service +# systemctl start mongodb.service + + # service mongod start +# chkconfig mongod on + + + + + Create the ceilometer database: + # mongo --host controller --eval ' + db = db.getSiblingDB("ceilometer"); + db.addUser({user: "ceilometer", + pwd: "CEILOMETER_DBPASS", + roles: [ "readWrite", "dbAdmin" ]})' + Replace CEILOMETER_DBPASS with a + suitable password. + + + Source the admin credentials to gain access + to admin-only CLI commands: + $ source admin-openrc.sh + + + To create the Identity service credentials: + + + Create the ceilometer user: + $ keystone user-create --name ceilometer --pass CEILOMETER_PASS + Replace CEILOMETER_PASS with a + suitable password. + + + Link the ceilometer user to the + service tenant and admin + role: + $ keystone user-role-add --user ceilometer --tenant service --role admin + + + Create the ceilometer service: + $ keystone service-create --name ceilometer --type metering \ + --description "Telemetry" + + + Create the Identity service endpoints: + $ keystone endpoint-create \ + --service-id $(keystone service-list | awk '/ metering / {print $2}') \ + --publicurl http://controller:8777 \ + --internalurl http://controller:8777 \ + --adminurl http://controller:8777 \ + --region regionOne + + + + + + To configure prerequisites + Before you install and configure Telemetry, you must install + MongoDB. + + Install the MongoDB package: + # apt-get install mongodb-server + + + Edit the /etc/mongodb.conf file and + complete the following actions: + + + Configure the bind_ip key to use the + management interface IP address of the controller node. 
+ bind_ip = 10.0.0.11 + + + By default, MongoDB creates several 1GB journal files + in the /var/lib/mongodb/journal + directory. If you want to reduce the size of each journal file + to 128MB and limit total journal space consumption to + 512MB, assert the smallfiles key: + smallfiles = true + If you change the journaling configuration, stop the MongoDB + service, remove the initial journal files, and start the + service: + # service mongodb stop +# rm /var/lib/mongodb/journal/prealloc.* +# service mongodb start + You can also disable journaling. For more information, see + the MongoDB manual. + + + Restart the MongoDB service: + # service mongodb restart + + + + + + To install and configure the Telemetry module components + + Install the packages: + # apt-get install ceilometer-api ceilometer-collector ceilometer-agent-central \ + ceilometer-agent-notification ceilometer-alarm-evaluator ceilometer-alarm-notifier \ + python-ceilometerclient + # yum install openstack-ceilometer-api openstack-ceilometer-collector \ + openstack-ceilometer-notification openstack-ceilometer-central openstack-ceilometer-alarm \ + python-ceilometerclient + # zypper install openstack-ceilometer-api openstack-ceilometer-collector \ + openstack-ceilometer-agent-notification openstack-ceilometer-agent-central python-ceilometerclient \ + openstack-ceilometer-alarm-evaluator openstack-ceilometer-alarm-notifier + + + Generate a random value to use as the metering secret: + # openssl rand -hex 10 + # openssl rand 10 | hexdump -e '1/1 "%.2x"' + + + Edit the /etc/ceilometer/ceilometer.conf file + and complete the following actions: + + + In the [database] section, + configure database access: + [database] +... +connection = mongodb://ceilometer:CEILOMETER_DBPASS@controller:27017/ceilometer + Replace CEILOMETER_DBPASS with + the password you chose for the Telemetry module database. + + + In the [DEFAULT] section, configure + RabbitMQ message broker access: + [DEFAULT] +... 
+rpc_backend = rabbit
+rabbit_host = controller
+rabbit_password = RABBIT_PASS
+ Replace RABBIT_PASS with the password
+ you chose for the guest account in
+ RabbitMQ.
+ 
+ 
+ In the [DEFAULT] and
+ [keystone_authtoken] sections, configure
+ Identity service access:
+ [DEFAULT]
+...
+auth_strategy = keystone
+
+[keystone_authtoken]
+...
+auth_uri = http://controller:5000/v2.0
+identity_uri = http://controller:35357
+admin_tenant_name = service
+admin_user = ceilometer
+admin_password = CEILOMETER_PASS
+ Replace CEILOMETER_PASS with the
+ password you chose for the ceilometer
+ user in the Identity service.
+ 
+ Comment out any auth_host,
+ auth_port, and
+ auth_protocol options because the
+ identity_uri option replaces them.
+ 
+ 
+ 
+ In the [service_credentials]
+ section, configure service credentials:
+ [service_credentials]
+...
+os_auth_url = http://controller:5000/v2.0
+os_username = ceilometer
+os_tenant_name = service
+os_password = CEILOMETER_PASS
+ Replace CEILOMETER_PASS with
+ the password you chose for the ceilometer
+ user in the Identity service.
+ 
+ 
+ In the [publisher] section, configure
+ the metering secret:
+ [publisher]
+...
+metering_secret = METERING_SECRET
+ Replace METERING_SECRET with the
+ random value that you generated in a previous step.
+ 
+ 
+ In the [DEFAULT] section, configure the log
+ directory:
+ [DEFAULT]
+...
+log_dir = /var/log/ceilometer
+ 
+ 
+ In the [collector] section, configure the
+ dispatcher:
+ 
+ [collector]
+...
+dispatcher = database
+ 
+ 
+ 
+ 
+ 
+ 
+ To install and configure the Telemetry module components
+ 
+ Install the packages:
+ # apt-get install ceilometer-api ceilometer-collector ceilometer-agent-central \
+ ceilometer-agent-notification ceilometer-alarm-evaluator ceilometer-alarm-notifier \
+ python-ceilometerclient
+ 
+ 
+ Respond to prompts for
+ database management,
+ Identity service
+ credentials,
+ service endpoint
+ registration, and
+ message broker
+ credentials. 
+ + + Generate a random value to use as the metering secret: + # openssl rand -hex 10 + + + Edit the /etc/ceilometer/ceilometer.conf file + and complete the following actions: + + + In the [publisher] section, configure + the metering secret: + [publisher] +... +metering_secret = METERING_SECRET + Replace METERING_SECRET with the + random value that you generated in a previous step. + + + In the [service_credentials] + section, configure service credentials: + [service_credentials] +... +os_auth_url = http://controller:5000/v2.0 +os_username = ceilometer +os_tenant_name = service +os_password = CEILOMETER_PASS + Replace CEILOMETER_PASS with + the password you chose for the ceilometer + user in the Identity service. + + + + + + To finalize installation + + Restart the Telemetry services: + # service ceilometer-agent-central restart +# service ceilometer-agent-notification restart +# service ceilometer-api restart +# service ceilometer-collector restart +# service ceilometer-alarm-evaluator restart +# service ceilometer-alarm-notifier restart + + + Start the Telemetry services and configure them to start when the + system boots: + # systemctl enable openstack-ceilometer-api.service openstack-ceilometer-notification.service \ + openstack-ceilometer-central.service openstack-ceilometer-collector.service \ + openstack-ceilometer-alarm-evaluator.service openstack-ceilometer-alarm-notifier.service +# systemctl start openstack-ceilometer-api.service openstack-ceilometer-notification.service \ + openstack-ceilometer-central.service openstack-ceilometer-collector.service \ + openstack-ceilometer-alarm-evaluator.service openstack-ceilometer-alarm-notifier.service + On SLES: + # service openstack-ceilometer-api start +# service openstack-ceilometer-agent-notification start +# service openstack-ceilometer-agent-central start +# service openstack-ceilometer-collector start +# service openstack-ceilometer-alarm-evaluator start +# service openstack-ceilometer-alarm-notifier start 
+# chkconfig openstack-ceilometer-api on +# chkconfig openstack-ceilometer-agent-notification on +# chkconfig openstack-ceilometer-agent-central on +# chkconfig openstack-ceilometer-collector on +# chkconfig openstack-ceilometer-alarm-evaluator on +# chkconfig openstack-ceilometer-alarm-notifier on + On openSUSE: + # systemctl enable openstack-ceilometer-api.service +# systemctl enable openstack-ceilometer-agent-notification.service +# systemctl enable openstack-ceilometer-agent-central.service +# systemctl enable openstack-ceilometer-collector.service +# systemctl enable openstack-ceilometer-alarm-evaluator.service +# systemctl enable openstack-ceilometer-alarm-notifier.service +# systemctl start openstack-ceilometer-api.service +# systemctl start openstack-ceilometer-agent-notification.service +# systemctl start openstack-ceilometer-agent-central.service +# systemctl start openstack-ceilometer-collector.service +# systemctl start openstack-ceilometer-alarm-evaluator.service +# systemctl start openstack-ceilometer-alarm-notifier.service + + +
diff --git a/doc/training-guides/basic-install-guide/section_ceilometer-glance.xml b/doc/training-guides/basic-install-guide/section_ceilometer-glance.xml index 2faddac0..38eaed86 100644 --- a/doc/training-guides/basic-install-guide/section_ceilometer-glance.xml +++ b/doc/training-guides/basic-install-guide/section_ceilometer-glance.xml @@ -3,35 +3,31 @@ xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" - xml:id="ceilometer-install-glance"> + xml:id="ceilometer-agent-glance"> Configure the Image Service for Telemetry To retrieve image samples, you must configure the Image Service to send notifications to the bus. - - Edit + Edit /etc/glance/glance-api.conf and modify the [DEFAULT] section: - notification_driver = messaging + notification_driver = messaging rpc_backend = rabbit rabbit_host = controller rabbit_password = RABBIT_PASS - - Run the following commands: - # openstack-config --set /etc/glance/glance-api.conf DEFAULT notification_driver messaging -# openstack-config --set /etc/glance/glance-api.conf DEFAULT rpc_backend rabbit -# openstack-config --set /etc/glance/glance-api.conf DEFAULT rabbit_host controller -# openstack-config --set /etc/glance/glance-api.conf DEFAULT rabbit_password RABBIT_PASS - Restart the Image Services with their new settings: # service glance-registry restart # service glance-api restart - # service openstack-glance-api restart + # systemctl restart openstack-glance-api.service openstack-glance-registry.service + On SLES: + # service openstack-glance-api restart # service openstack-glance-registry restart + On openSUSE: + # systemctl restart openstack-glance-api.service openstack-glance-registry.service
diff --git a/doc/training-guides/basic-install-guide/section_ceilometer-nova.xml b/doc/training-guides/basic-install-guide/section_ceilometer-nova.xml index f96f1701..61443036 100644 --- a/doc/training-guides/basic-install-guide/section_ceilometer-nova.xml +++ b/doc/training-guides/basic-install-guide/section_ceilometer-nova.xml @@ -3,143 +3,118 @@ xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" - xml:id="ceilometer-install-nova"> + xml:id="ceilometer-agent-nova"> Install the Compute agent for Telemetry + Telemetry is composed of an API service, a collector and a range + of disparate agents. This section explains how to install and configure + the agent that runs on the compute node. - Telemetry provides an API service that provides a - collector and a range of disparate agents. This procedure - details how to install the agent that runs on the compute - node. + To configure prerequisites - Install the Telemetry service on the compute node: + Install the package: # apt-get install ceilometer-agent-compute # yum install openstack-ceilometer-compute python-ceilometerclient python-pecan # zypper install openstack-ceilometer-agent-compute - Set the following - options in the /etc/nova/nova.conf - file: - # openstack-config --set /etc/nova/nova.conf DEFAULT \ - instance_usage_audit True -# openstack-config --set /etc/nova/nova.conf DEFAULT \ - instance_usage_audit_period hour -# openstack-config --set /etc/nova/nova.conf DEFAULT \ - notify_on_state_change vm_and_task_state - - The option is a multi - valued option, which - openstack-config cannot set - properly. See . - - - Edit the - /etc/nova/nova.conf file and add the - following lines to the [DEFAULT] + Edit the /etc/nova/nova.conf file and + add the following lines to the [DEFAULT] section: - [DEFAULT] + [DEFAULT] ... 
instance_usage_audit = True instance_usage_audit_period = hour notify_on_state_change = vm_and_task_state notification_driver = nova.openstack.common.notifier.rpc_notifier -notification_driver = ceilometer.compute.nova_notifier - [DEFAULT] -... -notification_driver = nova.openstack.common.notifier.rpc_notifier notification_driver = ceilometer.compute.nova_notifier Restart the Compute service: - # service openstack-nova-compute restart # service nova-compute restart + # systemctl restart openstack-nova-compute.service + On SLES: + # service openstack-nova-compute restart + On openSUSE: + # systemctl restart openstack-nova-compute.service + + + To configure the Compute agent for Telemetry + Edit the /etc/ceilometer/ceilometer.conf + file and complete the following actions: - You must set the secret key that you defined previously. - The Telemetry service nodes share this key as a shared - secret: - # openstack-config --set /etc/ceilometer/ceilometer.conf publisher \ - metering_secret CEILOMETER_TOKEN - Edit the - /etc/ceilometer/ceilometer.conf file - and change these lines in the [publisher] - section. Replace CEILOMETER_TOKEN with - the ceilometer token that you created previously: - [publisher] + In the [publisher] section, set the + secret key for Telemetry service nodes: + [publisher] # Secret value for signing metering messages (string value) metering_secret = CEILOMETER_TOKEN + Replace CEILOMETER_TOKEN with + the ceilometer token that you created previously. 
- - - Configure the RabbitMQ access: - # openstack-config --set /etc/ceilometer/ceilometer.conf DEFAULT rabbit_host controller -# openstack-config --set /etc/ceilometer/ceilometer.conf DEFAULT rabbit_password RABBIT_PASS - Edit the /etc/ceilometer/ceilometer.conf file and update the [DEFAULT] section: - [DEFAULT] + + In the [DEFAULT] section, configure + RabbitMQ broker access: + [DEFAULT] rabbit_host = controller rabbit_password = RABBIT_PASS + Replace RABBIT_PASS with the password + you chose for the guest account in RabbitMQ. - - - Add the Identity service credentials: - # openstack-config --set /etc/ceilometer/ceilometer.conf \ - keystone_authtoken auth_host controller -# openstack-config --set /etc/ceilometer/ceilometer.conf \ - keystone_authtoken admin_user ceilometer -# openstack-config --set /etc/ceilometer/ceilometer.conf \ - keystone_authtoken admin_tenant_name service -# openstack-config --set /etc/ceilometer/ceilometer.conf \ - keystone_authtoken auth_protocol http -# openstack-config --set /etc/ceilometer/ceilometer.conf \ - keystone_authtoken admin_password CEILOMETER_PASS -# openstack-config --set /etc/ceilometer/ceilometer.conf \ - service_credentials os_username ceilometer -# openstack-config --set /etc/ceilometer/ceilometer.conf \ - service_credentials os_tenant_name service -# openstack-config --set /etc/ceilometer/ceilometer.conf \ - service_credentials os_password CEILOMETER_PASS -# openstack-config --set /etc/ceilometer/ceilometer.conf \ - service_credentials os_auth_url http://controller:5000/v2.0 - Edit the - /etc/ceilometer/ceilometer.conf file - and change the [keystone_authtoken] - section: - [keystone_authtoken] -auth_host = controller -auth_port = 35357 -auth_protocol = http + + In the [keystone_authtoken] section, + configure Identity service access: + [keystone_authtoken] +auth_uri = http://controller:5000/v2.0 +identity_uri = http://controller:35357 admin_tenant_name = service admin_user = ceilometer admin_password = CEILOMETER_PASS 
- Also set the
- [service_credentials] section:
- [service_credentials]
+ Replace CEILOMETER_PASS with the
+ password you chose for the ceilometer user in the Identity service.
+ 
+ Comment out the auth_host,
+ auth_port, and auth_protocol
+ keys, since they are replaced by the identity_uri
+ and auth_uri keys.
+ 
+ 
+ 
+ In the [service_credentials] section,
+ configure service credentials:
+ [service_credentials]
 os_auth_url = http://controller:5000/v2.0
 os_username = ceilometer
 os_tenant_name = service
-os_password = CEILOMETER_PASS
+os_password = CEILOMETER_PASS
+os_endpoint_type = internalURL
+ Replace CEILOMETER_PASS with the password you chose for the
+ ceilometer user in the Identity service.
 
- 
- Configure the log directory.
- Edit the /etc/ceilometer/ceilometer.conf file
- and update the [DEFAULT] section:
- [DEFAULT]
+ In the [DEFAULT] section, configure the
+ log directory:
+ [DEFAULT]
 log_dir = /var/log/ceilometer
 
- 
+ 
+ 
+ To finish installation
 Restart the service with its new settings:
 # service ceilometer-agent-compute restart
- 
+ 
 Start the service and configure it to start when the system boots:
- # service openstack-ceilometer-agent-compute start
+ # systemctl enable openstack-ceilometer-compute.service
+# systemctl start openstack-ceilometer-compute.service
+ On SLES:
+ # service openstack-ceilometer-agent-compute start
 # chkconfig openstack-ceilometer-agent-compute on
- # service openstack-ceilometer-compute start
-# chkconfig openstack-ceilometer-compute on
+ On openSUSE:
+ # systemctl enable openstack-ceilometer-compute.service
+# systemctl start openstack-ceilometer-compute.service
 
 
diff --git a/doc/training-guides/basic-install-guide/section_ceilometer-swift.xml b/doc/training-guides/basic-install-guide/section_ceilometer-swift.xml
index ed5e83a9..abfaa6d2 100644
--- a/doc/training-guides/basic-install-guide/section_ceilometer-swift.xml
+++ b/doc/training-guides/basic-install-guide/section_ceilometer-swift.xml
@@ -3,16 +3,23 @@ 
xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" - xml:id="ceilometer-install-swift"> + xml:id="ceilometer-agent-swift"> Configure the Object Storage service for Telemetry + + Install the python-ceilometerclient + package on your Object Storage proxy server: + # apt-get install python-ceilometerclient + # yum install python-ceilometerclient + # zypper install python-ceilometerclient + To retrieve object store statistics, the Telemetry service needs access to Object Storage with the ResellerAdmin role. Give this role to your os_username user for the os_tenant_name tenant: - $ keystone role-create --name=ResellerAdmin + $ keystone role-create --name ResellerAdmin +----------+----------------------------------+ | Property | Value | +----------+----------------------------------+ @@ -38,10 +45,25 @@ use = egg:ceilometer#swift [pipeline:main] pipeline = healthcheck cache authtoken keystoneauth ceilometer proxy-server + + Add the system user swift to the system group + ceilometer to give Object Storage access to the + ceilometer.conf file. 
+ # usermod -a -G ceilometer swift + + + Add ResellerAdmin to the + operator_roles parameter of that same file: + operator_roles = Member,admin,swiftoperator,_member_,ResellerAdmin + Restart the service with its new settings: # service swift-proxy restart - # service openstack-swift-proxy restart + # systemctl restart openstack-swift-proxy.service + On SLES: + # service openstack-swift-proxy restart + On openSUSE: + # systemctl restart openstack-swift-proxy.service diff --git a/doc/training-guides/basic-install-guide/section_ceilometer-verify.xml b/doc/training-guides/basic-install-guide/section_ceilometer-verify.xml index 9e130887..aaa5b31e 100644 --- a/doc/training-guides/basic-install-guide/section_ceilometer-verify.xml +++ b/doc/training-guides/basic-install-guide/section_ceilometer-verify.xml @@ -22,7 +22,7 @@ Download an image from the Image Service: - $ glance image-download "cirros-0.3.2-x86_64" > cirros.img + $ glance image-download "cirros-0.3.3-x86_64" > cirros.img Call the ceilometer meter-list command again to diff --git a/doc/training-guides/basic-install-guide/section_cinder-controller-node.xml b/doc/training-guides/basic-install-guide/section_cinder-controller-node.xml new file mode 100644 index 00000000..03796f12 --- /dev/null +++ b/doc/training-guides/basic-install-guide/section_cinder-controller-node.xml @@ -0,0 +1,264 @@ + +
+ Install and configure controller node + This section describes how to install and configure the Block + Storage service, code-named cinder, on the controller node. This + service requires at least one additional storage node that provides + volumes to instances. + + To configure prerequisites + Before you install and configure the Block Storage service, you must + create a database and Identity service credentials including + endpoints. + + To create the database, complete these steps: + + + Use the database access client to connect to the database + server as the root user: + $ mysql -u root -p + + + Create the cinder database: + CREATE DATABASE cinder; + + + Grant proper access to the cinder + database: + GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \ + IDENTIFIED BY 'CINDER_DBPASS'; +GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \ + IDENTIFIED BY 'CINDER_DBPASS'; + Replace CINDER_DBPASS with + a suitable password. + + + Exit the database access client. + + + + + Source the admin credentials to gain access to + admin-only CLI commands: + $ source admin-openrc.sh + + + To create the Identity service credentials, complete these + steps: + + + Create a cinder user: + $ keystone user-create --name cinder --pass CINDER_PASS ++----------+----------------------------------+ +| Property | Value | ++----------+----------------------------------+ +| email | | +| enabled | True | +| id | 881ab2de4f7941e79504a759a83308be | +| name | cinder | +| username | cinder | ++----------+----------------------------------+ + Replace CINDER_PASS with a suitable + password. + + + Link the cinder user to the + service tenant and admin + role: + $ keystone user-role-add --user cinder --tenant service --role admin + + This command provides no output. 
+ + + + Create the cinder services: + $ keystone service-create --name cinder --type volume \ + --description "OpenStack Block Storage" ++-------------+----------------------------------+ +| Property | Value | ++-------------+----------------------------------+ +| description | OpenStack Block Storage | +| enabled | True | +| id | 1e494c3e22a24baaafcaf777d4d467eb | +| name | cinder | +| type | volume | ++-------------+----------------------------------+ +$ keystone service-create --name cinderv2 --type volumev2 \ + --description "OpenStack Block Storage" ++-------------+----------------------------------+ +| Property | Value | ++-------------+----------------------------------+ +| description | OpenStack Block Storage | +| enabled | True | +| id | 16e038e449c94b40868277f1d801edb5 | +| name | cinderv2 | +| type | volumev2 | ++-------------+----------------------------------+ + + The Block Storage service requires two different services + to support API versions 1 and 2. + + + + Create the Block Storage service endpoints: + $ keystone endpoint-create \ + --service-id $(keystone service-list | awk '/ volume / {print $2}') \ + --publicurl http://controller:8776/v1/%\(tenant_id\)s \ + --internalurl http://controller:8776/v1/%\(tenant_id\)s \ + --adminurl http://controller:8776/v1/%\(tenant_id\)s \ + --region regionOne ++-------------+-----------------------------------------+ +| Property | Value | ++-------------+-----------------------------------------+ +| adminurl | http://controller:8776/v1/%(tenant_id)s | +| id | d1b7291a2d794e26963b322c7f2a55a4 | +| internalurl | http://controller:8776/v1/%(tenant_id)s | +| publicurl | http://controller:8776/v1/%(tenant_id)s | +| region | regionOne | +| service_id | 1e494c3e22a24baaafcaf777d4d467eb | ++-------------+-----------------------------------------+ +$ keystone endpoint-create \ + --service-id $(keystone service-list | awk '/ volumev2 / {print $2}') \ + --publicurl http://controller:8776/v2/%\(tenant_id\)s \ + 
--internalurl http://controller:8776/v2/%\(tenant_id\)s \ + --adminurl http://controller:8776/v2/%\(tenant_id\)s \ + --region regionOne ++-------------+-----------------------------------------+ +| Property | Value | ++-------------+-----------------------------------------+ +| adminurl | http://controller:8776/v2/%(tenant_id)s | +| id | 097b4a6fc8ba44b4b10d4822d2d9e076 | +| internalurl | http://controller:8776/v2/%(tenant_id)s | +| publicurl | http://controller:8776/v2/%(tenant_id)s | +| region | regionOne | +| service_id | 16e038e449c94b40868277f1d801edb5 | ++-------------+-----------------------------------------+ + + The Block Storage service requires two different endpoints + to support API versions 1 and 2. + + + + + + + To install and configure Block Storage controller components + + Install the packages: + # apt-get install cinder-api cinder-scheduler python-cinderclient + # yum install openstack-cinder python-cinderclient python-oslo-db + # zypper install openstack-cinder-api openstack-cinder-scheduler python-cinderclient + + + Edit the /etc/cinder/cinder.conf file and + complete the following actions: + + + In the [database] section, configure + database access: + [database] +... +connection = mysql://cinder:CINDER_DBPASS@controller/cinder + Replace CINDER_DBPASS with the + password you chose for the Block Storage database. + + + In the [DEFAULT] section, configure + RabbitMQ message broker access: + [DEFAULT] +... +rpc_backend = rabbit +rabbit_host = controller +rabbit_password = RABBIT_PASS + Replace RABBIT_PASS with the + password you chose for the guest account in + RabbitMQ. + + + In the [DEFAULT] and + [keystone_authtoken] sections, + configure Identity service access: + [DEFAULT] +... +auth_strategy = keystone + +[keystone_authtoken] +... 
+auth_uri = http://controller:5000/v2.0 +identity_uri = http://controller:35357 +admin_tenant_name = service +admin_user = cinder +admin_password = CINDER_PASS + Replace CINDER_PASS with the + password you chose for the cinder user in the + Identity service. + + Comment out any auth_host, + auth_port, and + auth_protocol options because the + identity_uri option replaces them. + + + + In the [DEFAULT] section, configure the + my_ip option to use the management interface IP + address of the controller node: + [DEFAULT] +... +my_ip = 10.0.0.11 + + + (Optional) To assist with troubleshooting, + enable verbose logging in the [DEFAULT] + section: + [DEFAULT] +... +verbose = True + + + + + Populate the Block Storage database: + # su -s /bin/sh -c "cinder-manage db sync" cinder + + + + To install and configure Block Storage controller components + + Install the packages: + # apt-get install cinder-api cinder-scheduler python-cinderclient + + + + To finalize installation + + Restart the Block Storage services: + # service cinder-scheduler restart +# service cinder-api restart + + + Start the Block Storage services and configure them to start when + the system boots: + # systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service +# systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service + On SLES: + # service openstack-cinder-api start +# service openstack-cinder-scheduler start +# chkconfig openstack-cinder-api on +# chkconfig openstack-cinder-scheduler on + On openSUSE: + # systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service +# systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service + + + By default, the Ubuntu packages create an SQLite database. + Because this configuration uses a SQL database server, you can + remove the SQLite database file: + # rm -f /var/lib/cinder/cinder.sqlite + + +
diff --git a/doc/training-guides/basic-install-guide/section_cinder-storage-node.xml b/doc/training-guides/basic-install-guide/section_cinder-storage-node.xml new file mode 100644 index 00000000..cea1faae --- /dev/null +++ b/doc/training-guides/basic-install-guide/section_cinder-storage-node.xml @@ -0,0 +1,264 @@ + +
+ + Install and configure a storage node + This section describes how to install and configure storage nodes + for the Block Storage service. For simplicity, this configuration + references one storage node with an empty local block storage device + /dev/sdb that contains a suitable partition table with + one partition /dev/sdb1 occupying the entire device. + The service provisions logical volumes on this device using the + LVM driver and provides them to instances via + iSCSI transport. You can follow these instructions with + minor modifications to horizontally scale your environment with + additional storage nodes. + + To configure prerequisites + You must configure the storage node before you install and + configure the volume service on it. Similar to the controller node, + the storage node contains one network interface on the + management network. The storage node also + needs an empty block storage device of suitable size for your + environment. For more information, see + + + Configure the management interface: + IP address: 10.0.0.41 + Network mask: 255.255.255.0 (or /24) + Default gateway: 10.0.0.1 + + + Set the hostname of the node to + block1. + + + Copy the contents of the /etc/hosts file from + the controller node to the storage node and add the following + to it: + # block1 +10.0.0.41 block1 + Also add this content to the /etc/hosts file + on all other nodes in your environment. + + + Install the LVM packages: + # apt-get install lvm2 + # yum install lvm2 + + Some distributions include LVM by default. + + + + Start the LVM metadata service and configure it to start when the + system boots: + # systemctl enable lvm2-lvmetad.service +# systemctl start lvm2-lvmetad.service + + + Create the LVM physical volume /dev/sdb1: + # pvcreate /dev/sdb1 + Physical volume "/dev/sdb1" successfully created + + If your system uses a different device name, adjust these + steps accordingly. 
+ + + + Create the LVM volume group + cinder-volumes: + # vgcreate cinder-volumes /dev/sdb1 + Volume group "cinder-volumes" successfully created + The Block Storage service creates logical volumes in this + volume group. + + + Only instances can access Block Storage volumes. However, the + underlying operating system manages the devices associated with + the volumes. By default, the LVM volume scanning tool scans the + /dev directory for block storage devices that + contain volumes. If tenants use LVM on their volumes, the scanning + tool detects these volumes and attempts to cache them which can cause + a variety of problems with both the underlying operating system + and tenant volumes. You must reconfigure LVM to scan only the devices + that contain the cinder-volume volume group. Edit + the /etc/lvm/lvm.conf file and complete the + following actions: + + + In the devices section, add a filter + that accepts the /dev/sdb device and rejects + all other devices: + devices { +... +filter = [ "a/sdb/", "r/.*/"] + Each item in the filter array begins with a + for accept or r for + reject and includes a regular expression + for the device name. The array must end with + r/.*/ to reject any remaining + devices. You can use the vgs -vvvv + command to test filters. + + If your storage nodes use LVM on the operating system disk, + you must also add the associated device to the filter. For + example, if the /dev/sda device contains + the operating system: + filter = [ "a/sda", "a/sdb/", "r/.*/"] + Similarly, if your compute nodes use LVM on the operating + system disk, you must also modify the filter in the + /etc/lvm/lvm.conf file on those nodes to + include only the operating system disk. 
For example, if the + /dev/sda device contains the operating + system: + filter = [ "a/sda", "r/.*/"] + + + + + + + Install and configure Block Storage volume components + + Install the packages: + # apt-get install cinder-volume python-mysqldb + # yum install openstack-cinder targetcli python-oslo-db MySQL-python + # zypper install openstack-cinder-volume tgt python-mysql + + + Edit the /etc/cinder/cinder.conf file + and complete the following actions: + + + In the [database] section, configure + database access: + [database] +... +connection = mysql://cinder:CINDER_DBPASS@controller/cinder + Replace CINDER_DBPASS with + the password you chose for the Block Storage database. + + + In the [DEFAULT] section, configure + RabbitMQ message broker access: + [DEFAULT] +... +rpc_backend = rabbit +rabbit_host = controller +rabbit_password = RABBIT_PASS + Replace RABBIT_PASS with the + password you chose for the guest account in + RabbitMQ. + + + In the [DEFAULT] and + [keystone_authtoken] sections, + configure Identity service access: + [DEFAULT] +... +auth_strategy = keystone + +[keystone_authtoken] +... +auth_uri = http://controller:5000/v2.0 +identity_uri = http://controller:35357 +admin_tenant_name = service +admin_user = cinder +admin_password = CINDER_PASS + Replace CINDER_PASS with the + password you chose for the cinder user in the + Identity service. + + Comment out any auth_host, + auth_port, and + auth_protocol options because the + identity_uri option replaces them. + + + + In the [DEFAULT] section, configure the + my_ip option: + [DEFAULT] +... +my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS + Replace + MANAGEMENT_INTERFACE_IP_ADDRESS with + the IP address of the management network interface on your + storage node, typically 10.0.0.41 for the first node in the + example + architecture. + + + In the [DEFAULT] section, configure the + location of the Image Service: + [DEFAULT] +... 
+glance_host = controller + + + In the [DEFAULT] section, configure Block + Storage to use the lioadm iSCSI + service: + [DEFAULT] +... +iscsi_helper = lioadm + + + (Optional) To assist with troubleshooting, + enable verbose logging in the [DEFAULT] + section: + [DEFAULT] +... +verbose = True + + + + + + Install and configure Block Storage volume components + + Install the packages: + # apt-get install cinder-volume python-mysqldb + + + Respond to prompts for the volume group to associate with the + Block Storage service. The script scans for volume groups and + attempts to use the first one. If your system only contains the + cinder-volumes volume group, the script should + automatically choose it. + + + + To finalize installation + + Restart the Block Storage volume service including its + dependencies: + # service tgt restart +# service cinder-volume restart + + + Start the Block Storage volume service including its dependencies + and configure them to start when the system boots: + # systemctl enable openstack-cinder-volume.service target.service +# systemctl start openstack-cinder-volume.service target.service + On SLES: + # service tgtd start +# chkconfig tgtd on +# service openstack-cinder-volume start +# chkconfig openstack-cinder-volume on + On openSUSE: + # systemctl enable openstack-cinder-volume.service tgtd.service +# systemctl start openstack-cinder-volume.service tgtd.service + + + By default, the Ubuntu packages create an SQLite database. + Because this configuration uses a SQL database server, remove + the SQLite database file: + # rm -f /var/lib/cinder/cinder.sqlite + + +
diff --git a/doc/training-guides/basic-install-guide/section_cinder-verify.xml b/doc/training-guides/basic-install-guide/section_cinder-verify.xml index 8c538a26..76612396 100644 --- a/doc/training-guides/basic-install-guide/section_cinder-verify.xml +++ b/doc/training-guides/basic-install-guide/section_cinder-verify.xml @@ -4,32 +4,51 @@ xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="cinder-verify"> - Verify the Block Storage installation - To verify that the Block Storage is installed and configured properly, - create a new volume. + Verify operation + This section describes how to verify operation of the Block Storage + service by creating a volume. For more information about how to manage volumes, see the OpenStack User Guide. + >OpenStack User Guide.
+ + Perform these commands on the controller node. + - Source the demo-openrc.sh file: + Source the admin credentials to gain access to + admin-only CLI commands: + $ source admin-openrc.sh + + + List service components to verify successful launch of each + process: + $ cinder service-list ++------------------+------------+------+---------+-------+----------------------------+-----------------+ +| Binary | Host | Zone | Status | State | Updated_at | Disabled Reason | ++------------------+------------+------+---------+-------+----------------------------+-----------------+ +| cinder-scheduler | controller | nova | enabled | up | 2014-10-18T01:30:54.000000 | None | +| cinder-volume | block1 | nova | enabled | up | 2014-10-18T01:30:57.000000 | None | ++------------------+------------+------+---------+-------+----------------------------+-----------------+ + + + Source the demo tenant credentials to perform + the following steps as a non-administrative tenant: $ source demo-openrc.sh - - Use the cinder create command to create a new volume: - $ cinder create --display-name myVolume 1 + Create a 1 GB volume: + $ cinder create --display-name demo-volume1 1 +---------------------+--------------------------------------+ | Property | Value | +---------------------+--------------------------------------+ | attachments | [] | | availability_zone | nova | | bootable | false | -| created_at | 2014-04-17T10:28:19.615050 | +| created_at | 2014-10-14T23:11:50.870239 | | display_description | None | -| display_name | myVolume | +| display_name | demo-volume1 | | encrypted | False | -| id | 5e691b7b-12e3-40b6-b714-7f17550db5d1 | +| id | 158bea89-07db-4ac2-8115-66c0d6a4bb48 | | metadata | {} | | size | 1 | | snapshot_id | None | @@ -39,18 +58,22 @@ +---------------------+--------------------------------------+ - Make sure that the volume has been correctly created with the - cinder list command: + Verify creation and availability of the volume: $ cinder list 
--------------------------------------+-----------+--------------+------+-------------+----------+-------------+ | ID | Status | Display Name | Size | Volume Type | Bootable | Attached to | +--------------------------------------+-----------+--------------+------+-------------+----------+-------------+ -| 5e691b7b-12e3-40b6-b714-7f17550db5d1 | available | myVolume | 1 | None | false | | +| 158bea89-07db-4ac2-8115-66c0d6a4bb48 | available | demo-volume1 | 1 | None | false | | +--------------------------------------+-----------+--------------+------+-------------+----------+-------------+ - If the status value is not available, the volume - creation failed. Check the log files in the - /var/log/cinder/ directory on the controller and - volume nodes to get information about the failure. + If the status does not indicate available, + check the logs in the /var/log/cinder directory + on the controller and volume nodes for more information. + + The + launch an instance + chapter includes instructions for attaching this volume to an + instance. + diff --git a/doc/training-guides/basic-install-guide/section_dashboard-install.xml b/doc/training-guides/basic-install-guide/section_dashboard-install.xml index e1a77fa5..eb61410f 100644 --- a/doc/training-guides/basic-install-guide/section_dashboard-install.xml +++ b/doc/training-guides/basic-install-guide/section_dashboard-install.xml @@ -4,188 +4,140 @@ xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="install_dashboard"> - - Install the dashboard - Before you can install and configure the dashboard, meet the - requirements in . - - When you install only Object Storage and the Identity - Service, even if you install the dashboard, it does not - pull up projects and is unusable. - - For more information about how to deploy the dashboard, see - deployment topics in the developer - documentation. 
- - - Install the dashboard on the node that can contact - the Identity Service as root: - # apt-get install apache2 memcached libapache2-mod-wsgi openstack-dashboard - # yum install memcached python-memcached mod_wsgi openstack-dashboard - # zypper install memcached python-python-memcached apache2-mod_wsgi openstack-dashboard openstack-dashboard-test - - Note for Ubuntu users - - Remove the - openstack-dashboard-ubuntu-theme - package. This theme prevents translations, several - menus as well as the network map from rendering - correctly: - # apt-get remove --purge openstack-dashboard-ubuntu-theme - - - - Note for Debian users - To install the Apache package: - # apt-get install openstack-dashboard-apache - This command installs and configures Apache - correctly, provided that the user asks for it - during the debconf prompts. The - default SSL certificate is self-signed, and it is - probably wise to have it signed by a root - Certificate Authority (CA). - - - - Modify the value of - CACHES['default']['LOCATION'] - in /etc/openstack-dashboard/local_settings.py/etc/openstack-dashboard/local_settings/srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py - to match the ones set in /etc/memcached.conf/etc/sysconfig/memcached. - Open /etc/openstack-dashboard/local_settings.py - /etc/openstack-dashboard/local_settings - and look for this line: - CACHES = { -'default': { -'BACKEND' : 'django.core.cache.backends.memcached.MemcachedCache', -'LOCATION' : '127.0.0.1:11211' -} -} - - Notes - - - The address and port must match the ones - set in /etc/memcached.conf/etc/sysconfig/memcached. - If you change the memcached settings, - you must restart the Apache web server for - the changes to take effect. - - - You can use options other than memcached - option for session storage. Set the - session back-end through the - SESSION_ENGINE - option. 
- - - To change the timezone, use the - dashboard or edit the /etc/openstack-dashboard/local_settings/etc/openstack-dashboard/local_settings.py/srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py - file. - Change the following parameter: - TIME_ZONE = "UTC" - - - - - - Update the ALLOWED_HOSTS in - local_settings.py to include - the addresses you wish to access the dashboard - from. - Edit /etc/openstack-dashboard/local_settings/etc/openstack-dashboard/local_settings.py/srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py: - ALLOWED_HOSTS = ['localhost', 'my-desktop'] - - - - This guide assumes that you are running the - Dashboard on the controller node. You can easily run - the dashboard on a separate server, by changing the - appropriate settings in - local_settings.py. - Edit /etc/openstack-dashboard/local_settings/etc/openstack-dashboard/local_settings.py/srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py - and change OPENSTACK_HOST to the - hostname of your Identity Service: - OPENSTACK_HOST = "controller" - - - - Setup Apache configuration: - # cp /etc/apache2/conf.d/openstack-dashboard.conf.sample \ - /etc/apache2/conf.d/openstack-dashboard.conf + + Install and configure + This section describes how to install and configure the dashboard + on the controller node. + Before you proceed, verify that your system meets the requirements + in . Also, the dashboard + relies on functional core services including Identity, Image Service, + Compute, and either Networking (neutron) or legacy networking + (nova-network). Environments with stand-alone services such as Object + Storage cannot use the dashboard. For more information, see the + developer documentation. 
+ + To install the dashboard components + + Install the packages: + # apt-get install openstack-dashboard apache2 libapache2-mod-wsgi memcached python-memcache + # yum install openstack-dashboard httpd mod_wsgi memcached python-memcached + # zypper install openstack-dashboard apache2-mod_wsgi memcached python-python-memcached \ + openstack-dashboard-test + + Ubuntu installs the + openstack-dashboard-ubuntu-theme package + as a dependency. Some users reported issues with this theme in + previous releases. If you encounter issues, remove this package + to restore the original OpenStack theme. + + + + + To install the dashboard components + + Install the packages: + # apt-get install openstack-dashboard-apache + + + Respond to prompts for web server configuration. + + The automatic configuration process generates a self-signed + SSL certificate. Consider obtaining an official certificate for + production environments. + + + + + To configure the dashboard + + Configure the web server: + # cp /etc/apache2/conf.d/openstack-dashboard.conf.sample \ + /etc/apache2/conf.d/openstack-dashboard.conf # a2enmod rewrite;a2enmod ssl;a2enmod wsgi - - - - By default, the - openstack-dashboard - package enables a database as session store. Before - you continue, either change the session store set up - as described in - or finish the setup of the database session store as - explained in . - - - Ensure that the SELinux policy of the system is - configured to allow network connections to the HTTP - server. 
- # setsebool -P httpd_can_network_connect on + + + Edit the + /etc/openstack-dashboard/local_settings.py + file and complete the following actions: + Edit the + /etc/openstack-dashboard/local_settings + file and complete the following actions: + Edit the + /srv/www/openstack-dashboard/openstack_dashboard/local/local_settings.py + file and complete the following actions: + + + Configure the dashboard to use OpenStack services on the + controller node: + OPENSTACK_HOST = "controller" + + + Allow all hosts to access the dashboard: + ALLOWED_HOSTS = ['*'] + + + Configure the memcached session + storage service: + CACHES = { + 'default': { + 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', + 'LOCATION': '127.0.0.1:11211', + } +} + + Comment out any other session storage configuration. + + + By default, SLES and openSUSE use a SQL database for session + storage. For simplicity, we recommend changing the configuration + to use memcached for session + storage. + - Start the Apache web server and memcached: - # service apache2 start + Optionally, configure the time zone: + TIME_ZONE = "TIME_ZONE" + Replace TIME_ZONE with an + appropriate time zone identifier. For more information, see the + list of time zones. + + + + + + To finalize installation + + On RHEL and CentOS, configure SELinux to permit the web server + to connect to OpenStack services: + # setsebool -P httpd_can_network_connect on + + + Due to a packaging bug, the dashboard CSS fails to load properly. + Run the following command to resolve this issue: + # chown -R apache:apache /usr/share/openstack-dashboard/static + For more information, see the + bug report. 
+ + + Restart the web server and session storage service: + # service apache2 restart +# service memcached restart + + + Start the web server and session storage service and configure + them to start when the system boots: + # systemctl enable httpd.service memcached.service +# systemctl start httpd.service memcached.service + On SLES: + # service apache2 start # service memcached start # chkconfig apache2 on # chkconfig memcached on - # service httpd start -# service memcached start -# chkconfig httpd on -# chkconfig memcached on - # service apache2 restart -# service memcached restart - - - You can now access the dashboard at http://controller/horizon - https://controller/ - http://controller/dashboard - http://controller. - Login with credentials for any user that you created - with the OpenStack Identity Service. - - + On openSUSE: + # systemctl enable apache2.service memcached.service +# systemctl start apache2.service memcached.service +
+ diff --git a/doc/training-guides/basic-install-guide/section_dashboard-verify.xml b/doc/training-guides/basic-install-guide/section_dashboard-verify.xml new file mode 100644 index 00000000..6214e125 --- /dev/null +++ b/doc/training-guides/basic-install-guide/section_dashboard-verify.xml @@ -0,0 +1,24 @@ + +
+ + Verify operation + This section describes how to verify operation of the + dashboard. + + + Access the dashboard using a web browser: + http://controller/horizon + https://controller/ + http://controller/dashboard + http://controller. + + + Authenticate using admin or + demo user credentials. + + +
diff --git a/doc/training-guides/basic-install-guide/section_debconf-api-endpoints.xml b/doc/training-guides/basic-install-guide/section_debconf-api-endpoints.xml index af4cbbed..805495b3 100644 --- a/doc/training-guides/basic-install-guide/section_debconf-api-endpoints.xml +++ b/doc/training-guides/basic-install-guide/section_debconf-api-endpoints.xml @@ -7,19 +7,19 @@ Register API endpoints All Debian packages for API services, except the heat-api package, register the service in the - Identity service catalog. This feature is helpful because API - endpoints can be difficult to remember. + Identity Service catalog. This feature is helpful because API + endpoints are difficult to remember. - The heat-common package, not the - heat-api package, configures the + The heat-common package and not the + heat-api package configures the Orchestration service. When you install a package for an API service, you are - prompted to register that service. After you install or + prompted to register that service. However, after you install or upgrade the package for an API service, Debian immediately removes your response to this prompt from the debconf database. Consequently, you are prompted to re-register the - service with the Identity service. If you already registered the + service with the Identity Service. If you already registered the API service, respond no when you upgrade. @@ -31,7 +31,7 @@ - This screen registers packages in the Identity service + This screen registers packages in the Identity Service catalog: @@ -42,8 +42,8 @@ - You are prompted for the Identity service - admin_token value. The Identity service uses + You are prompted for the Identity Service + admin_token value. The Identity Service uses this value to register the API service. When you set up the keystone package, this value is configured automatically. 
@@ -87,17 +87,17 @@ below commands for you: PKG_SERVICE_ID=$(pkgos_get_id keystone --os-token ${AUTH_TOKEN} \ --os-endpoint http://${KEYSTONE_ENDPOINT_IP}:35357/v2.0/ service-create \ - --name=${SERVICE_NAME} --type=${SERVICE_TYPE} --description="${SERVICE_DESC}") + --name ${SERVICE_NAME} --type ${SERVICE_TYPE} --description "${SERVICE_DESC}") keystone --os-token ${AUTH_TOKEN} \ --os-endpoint http://${KEYSTONE_ENDPOINT_IP}:35357/v2.0/ endpoint-create \ - --region "${REGION_NAME}" --service_id=${PKG_SERVICE_ID} \ - --publicurl=http://${PKG_ENDPOINT_IP}:${SERVICE_PORT}${SERVICE_URL} \ - --internalurl=http://${PKG_ENDPOINT_IP}:${SERVICE_PORT}${SERVICE_URL} \ - --adminurl=http://${PKG_ENDPOINT_IP}:${SERVICE_PORT}${SERVICE_URL}) + --region "${REGION_NAME}" --service_id ${PKG_SERVICE_ID} \ + --publicurl http://${PKG_ENDPOINT_IP}:${SERVICE_PORT}${SERVICE_URL} \ + --internalurl http://${PKG_ENDPOINT_IP}:${SERVICE_PORT}${SERVICE_URL} \ + --adminurl http://${PKG_ENDPOINT_IP}:${SERVICE_PORT}${SERVICE_URL}) The values of AUTH_TOKEN, KEYSTONE_ENDPOINT_IP, PKG_ENDPOINT_IP and REGION_NAME depend on the - answer you will provide to the debconf prompts. The values of SERVICE_NAME, + answer you will provide to the debconf prompts. But the values of SERVICE_NAME, SERVICE_TYPE, SERVICE_DESC and SERVICE_URL are already pre-wired in each package, so you don't have to remember them. 
diff --git a/doc/training-guides/basic-install-guide/section_debconf-concepts.xml b/doc/training-guides/basic-install-guide/section_debconf-concepts.xml index 7f401513..20562f2f 100644 --- a/doc/training-guides/basic-install-guide/section_debconf-concepts.xml +++ b/doc/training-guides/basic-install-guide/section_debconf-concepts.xml @@ -3,7 +3,7 @@ xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" - xml:id="debconf-general-principles"> + xml:id="debconf-concepts"> debconf concepts This chapter explains how to use the Debian Generally, this section looks like this: [keystone_authtoken] -auth_host = 127.0.0.1 -auth_port = 35357 -auth_protocol = http +auth_uri = http://controller:5000/v2.0 +identity_uri = http://controller:35357 admin_tenant_name = %SERVICE_TENANT_NAME% admin_user = %SERVICE_USER% admin_password = %SERVICE_PASSWORD% The debconf system helps users configure the - auth_host, admin_tenant_name, - admin_user and admin_password - options. + auth_uri, identity_uri, + admin_tenant_name, admin_user and + admin_password options. The following screens show an example Image Service configuration: diff --git a/doc/training-guides/basic-install-guide/section_debconf-preseeding.xml b/doc/training-guides/basic-install-guide/section_debconf-preseeding.xml index b78f0dfa..3a85b3a8 100644 --- a/doc/training-guides/basic-install-guide/section_debconf-preseeding.xml +++ b/doc/training-guides/basic-install-guide/section_debconf-preseeding.xml @@ -3,7 +3,7 @@ xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" - xml:id="debconf-preseeding"> + xml:id="debconf-preseed-prompts"> Pre-seed debconf prompts You can pre-seed all debconf prompts. 
To pre-seed means diff --git a/doc/training-guides/basic-install-guide/section_glance-install.xml b/doc/training-guides/basic-install-guide/section_glance-install.xml index a53f19c0..f5cec9a0 100644 --- a/doc/training-guides/basic-install-guide/section_glance-install.xml +++ b/doc/training-guides/basic-install-guide/section_glance-install.xml @@ -8,12 +8,6 @@ This section describes how to install and configure the Image Service, code-named glance, on the controller node. For simplicity, this configuration stores images on the local file system. - - This section assumes proper installation, configuration, and - operation of the Identity service as described in - and - . - To configure prerequisites Before you install and configure the Image Service, you must create @@ -28,19 +22,20 @@ Create the glance database: - mysql> CREATE DATABASE glance; + CREATE DATABASE glance; Grant proper access to the glance database: - mysql> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'GLANCE_DBPASS'; -mysql> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'GLANCE_DBPASS'; + GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \ + IDENTIFIED BY 'GLANCE_DBPASS'; +GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \ + IDENTIFIED BY 'GLANCE_DBPASS'; Replace GLANCE_DBPASS with a suitable password. - Exit the database access client: - mysql> exit + Exit the database access client. 
@@ -50,35 +45,67 @@ $ source admin-openrc.sh - To create the Identity service credentials, complete these steps: + To create the Identity service credentials, complete these + steps: Create the glance user: - $ keystone user-create --name=glance --pass=GLANCE_PASS --email=EMAIL_ADDRESS + $ keystone user-create --name glance --pass GLANCE_PASS ++----------+----------------------------------+ +| Property | Value | ++----------+----------------------------------+ +| email | | +| enabled | True | +| id | f89cca5865dc42b18e2421fa5f5cce66 | +| name | glance | +| username | glance | ++----------+----------------------------------+ Replace GLANCE_PASS with a suitable - password and EMAIL_ADDRESS with - a suitable e-mail address. + password. Link the glance user to the service tenant and admin role: - $ keystone user-role-add --user=glance --tenant=service --role=admin + $ keystone user-role-add --user glance --tenant service --role admin + + This command provides no output. + Create the glance service: - $ keystone service-create --name=glance --type=image \ - --description="OpenStack Image Service" + $ keystone service-create --name glance --type image \ + --description "OpenStack Image Service" ++-------------+----------------------------------+ +| Property | Value | ++-------------+----------------------------------+ +| description | OpenStack Image Service | +| enabled | True | +| id | 23f409c4e79f4c9e9d23d809c50fbacf | +| name | glance | +| type | image | ++-------------+----------------------------------+ Create the Identity service endpoints: $ keystone endpoint-create \ - --service-id=$(keystone service-list | awk '/ image / {print $2}') \ - --publicurl=http://controller:9292 \ - --internalurl=http://controller:9292 \ - --adminurl=http://controller:9292 + --service-id $(keystone service-list | awk '/ image / {print $2}') \ + --publicurl http://controller:9292 \ + --internalurl http://controller:9292 \ + --adminurl http://controller:9292 \ + --region regionOne 
++-------------+----------------------------------+ +| Property | Value | ++-------------+----------------------------------+ +| adminurl | http://controller:9292 | +| id | a2ee818c69cb475199a1ca108332eb35 | +| internalurl | http://controller:9292 | +| publicurl | http://controller:9292 | +| region | regionOne | +| service_id | 23f409c4e79f4c9e9d23d809c50fbacf | ++-------------+----------------------------------+ @@ -102,18 +129,6 @@ connection = mysql://glance:GLANCE_DBPASS@Replace GLANCE_DBPASS with the password you chose for the Image Service database. - - In the [DEFAULT] section, configure - RabbitMQ message broker access: - [DEFAULT] -... -rpc_backend = rabbit -rabbit_host = controller -rabbit_password = RABBIT_PASS - Replace RABBIT_PASS with the password - you chose for the guest account in - RabbitMQ. - In the [keystone_authtoken] and [paste_deploy] sections, configure Identity @@ -121,9 +136,7 @@ rabbit_password = RABBIT_PASS [keystone_authtoken] ... auth_uri = http://controller:5000/v2.0 -auth_host = controller -auth_port = 35357 -auth_protocol = http +identity_uri = http://controller:35357 admin_tenant_name = service admin_user = glance admin_password = GLANCE_PASS @@ -134,6 +147,28 @@ flavor = keystone Replace GLANCE_PASS with the password you chose for the glance user in the Identity service. + + Comment out any auth_host, + auth_port, and + auth_protocol options because the + identity_uri option replaces them. + + + + In the [glance_store] section, configure + the local file system store and location of image files: + [glance_store] +... +default_store = file +filesystem_store_datadir = /var/lib/glance/images/ + + + (Optional) To assist with troubleshooting, + enable verbose logging in the [DEFAULT] + section: + [DEFAULT] +... +verbose = True @@ -157,25 +192,36 @@ connection = mysql://glance:GLANCE_DBPASS@[keystone_authtoken] ... 
auth_uri = http://controller:5000/v2.0 -auth_host = controller -auth_port = 35357 -auth_protocol = http +identity_uri = http://controller:35357 admin_tenant_name = service admin_user = glance admin_password = GLANCE_PASS -... + [paste_deploy] ... flavor = keystone Replace GLANCE_PASS with the password you chose for the glance user in the Identity service. + + Comment out any auth_host, + auth_port, and + auth_protocol options because the + identity_uri option replaces them. + + + + (Optional) To assist with troubleshooting, + enable verbose logging in the [DEFAULT] + section: + [DEFAULT] +... +verbose = True - Populate the Image Service - database: + Populate the Image Service database: # su -s /bin/sh -c "glance-manage db_sync" glance @@ -185,16 +231,6 @@ flavor = keystone Install the packages: # apt-get install glance python-glanceclient - - Respond to prompts for - database management, - Identity service - credentials, - service endpoint - registration, and - message broker - credentials. - Select the keystone pipeline to configure the Image Service to use the Identity service: @@ -217,16 +253,22 @@ flavor = keystone Start the Image Service services and configure them to start when the system boots: - # service openstack-glance-api start + # systemctl enable openstack-glance-api.service openstack-glance-registry.service +# systemctl start openstack-glance-api.service openstack-glance-registry.service + On SLES: + # service openstack-glance-api start # service openstack-glance-registry start # chkconfig openstack-glance-api on # chkconfig openstack-glance-registry on + On openSUSE: + # systemctl enable openstack-glance-api.service openstack-glance-registry.service +# systemctl start openstack-glance-api.service openstack-glance-registry.service By default, the Ubuntu packages create an SQLite database. 
Because this configuration uses a SQL database server, you can remove the SQLite database file: - # rm /var/lib/glance/glance.sqlite + # rm -f /var/lib/glance/glance.sqlite diff --git a/doc/training-guides/basic-install-guide/section_glance-verify.xml b/doc/training-guides/basic-install-guide/section_glance-verify.xml index 68b01337..307fe8a0 100644 --- a/doc/training-guides/basic-install-guide/section_glance-verify.xml +++ b/doc/training-guides/basic-install-guide/section_glance-verify.xml @@ -24,8 +24,8 @@ $ cd /tmp/images - Download the image to the local directory: - $ wget http://cdn.download.cirros-cloud.net/0.3.2/cirros-0.3.2-x86_64-disk.img + Download the image to the temporary local directory: + $ wget http://cdn.download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img Source the admin credentials to gain access to @@ -34,14 +34,15 @@ Upload the image to the Image Service: - $ glance image-create --name "cirros-0.3.2-x86_64" --file cirros-0.3.2-x86_64-disk.img \ + $ glance image-create --name "cirros-0.3.3-x86_64" --file cirros-0.3.3-x86_64-disk.img \ --disk-format qcow2 --container-format bare --is-public True --progress -+------------------+--------------------------------------+ +[=============================>] 100% ++------------------+--------------------------------------+ | Property | Value | +------------------+--------------------------------------+ -| checksum | 64d7c1cd2b6f60c92c14662941cb7913 | +| checksum | 133eae9fb1c98f45894a4e60d8736619 | | container_format | bare | -| created_at | 2014-04-08T18:59:18 | +| created_at | 2014-10-10T13:14:42 | | deleted | False | | deleted_at | None | | disk_format | qcow2 | @@ -49,12 +50,13 @@ | is_public | True | | min_disk | 0 | | min_ram | 0 | -| name | cirros-0.3.2-x86_64 | -| owner | efa984b0a914450e9a47788ad330699d | +| name | cirros-0.3.3-x86_64 | +| owner | ea8c352d253443118041c9c8b8416040 | | protected | False | -| size | 13167616 | +| size | 13200896 | | status | active | -| updated_at | 
2014-01-08T18:59:18 | +| updated_at | 2014-10-10T13:14:43 | +| virtual_size | None | +------------------+--------------------------------------+ For information about the parameters for the glance image-create command, see +--------------------------------------+---------------------+-------------+------------------+----------+--------+ | ID | Name | Disk Format | Container Format | Size | Status | +--------------------------------------+---------------------+-------------+------------------+----------+--------+ -| acafc7c0-40aa-4026-9673-b879898e1fc2 | cirros-0.3.2-x86_64 | qcow2 | bare | 13167616 | active | +| acafc7c0-40aa-4026-9673-b879898e1fc2 | cirros-0.3.3-x86_64 | qcow2 | bare | 13200896 | active | +--------------------------------------+---------------------+-------------+------------------+----------+--------+ diff --git a/doc/training-guides/basic-install-guide/section_heat-install.xml b/doc/training-guides/basic-install-guide/section_heat-install.xml index 672258c0..45bdce31 100644 --- a/doc/training-guides/basic-install-guide/section_heat-install.xml +++ b/doc/training-guides/basic-install-guide/section_heat-install.xml @@ -3,83 +3,158 @@ xmlns:xi="http://www.w3.org/2001/XInclude" xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" - xml:id="heat-install"> + xml:id="heat-install-controller-node"> Install and configure Orchestration This section describes how to install and configure the - Orchestration module (heat) on the controller node. + Orchestration module, code-named heat, on the controller node. To configure prerequisites Before you install and configure Orchestration, you must create a database and Identity service credentials including endpoints. 
- Connect to the database server as the root user: - $ mysql -u root -p - Create the heat database: - mysql> CREATE DATABASE heat; - Grant the - proper access to the database: - mysql> GRANT ALL PRIVILEGES ON heat.* TO 'heat'@'localhost' IDENTIFIED BY 'HEAT_DBPASS'; -mysql> GRANT ALL PRIVILEGES ON heat.* TO 'heat'@'%' IDENTIFIED BY 'HEAT_DBPASS'; -mysql> exit - Replace HEAT_DBPASS with a suitable - password. + To create the database, complete these steps: + + + Use the database access client to connect to the database + server as the root user: + $ mysql -u root -p + + + Create the heat database: + CREATE DATABASE heat; + + + Grant proper access to the heat + database: + GRANT ALL PRIVILEGES ON heat.* TO 'heat'@'localhost' \ + IDENTIFIED BY 'HEAT_DBPASS'; +GRANT ALL PRIVILEGES ON heat.* TO 'heat'@'%' \ + IDENTIFIED BY 'HEAT_DBPASS'; + Replace HEAT_DBPASS with a suitable + password. + + + Exit the database access client. + + - Create Identity service credentials: + Source the admin credentials to gain access to + admin-only CLI commands: + $ source admin-openrc.sh + + + To create the Identity service credentials, complete these + steps: Create the heat user: - $ keystone user-create --name heat --pass HEAT_PASS --email EMAIL_ADDRESS + $ keystone user-create --name heat --pass HEAT_PASS ++----------+----------------------------------+ +| Property | Value | ++----------+----------------------------------+ +| email | | +| enabled | True | +| id | 7fd67878dcd04d0393469ef825a7e005 | +| name | heat | +| username | heat | ++----------+----------------------------------+ Replace HEAT_PASS with a suitable - password and EMAIL_ADDRESS with - a suitable e-mail address. + password. Link the heat user to the service tenant and admin role: $ keystone user-role-add --user heat --tenant service --role admin + + This command provides no output. 
+ + + + Create the heat_stack_user and heat_stack_owner roles: + $ keystone role-create --name heat_stack_user +$ keystone role-create --name heat_stack_owner + By default, users created by Orchestration use the + heat_stack_user role. Create the heat and heat-cfn services: - $ keystone service-create --name=heat --type=orchestration \ - --description="Orchestration" -$ keystone service-create --name=heat-cfn --type=cloudformation \ - --description="Orchestration CloudFormation" + $ keystone service-create --name heat --type orchestration \ + --description "Orchestration" ++-------------+----------------------------------+ +| Property | Value | ++-------------+----------------------------------+ +| description | Orchestration | +| enabled | True | +| id | 031112165cad4c2bb23e84603957de29 | +| name | heat | +| type | orchestration | ++-------------+----------------------------------+ +$ keystone service-create --name heat-cfn --type cloudformation \ + --description "Orchestration" ++-------------+----------------------------------+ +| Property | Value | ++-------------+----------------------------------+ +| description | Orchestration | +| enabled | True | +| id | 297740d74c0a446bbff867acdccb33fa | +| name | heat-cfn | +| type | cloudformation | ++-------------+----------------------------------+ - Create the heat_stack_user and heat_stack_owner roles: - By default, users created by Orchestration use the role heat_stack_user. 
- $ keystone role-create --name heat_stack_user -$ keystone role-create --name heat_stack_owner + Create the Identity service endpoints: + $ keystone endpoint-create \ + --service-id $(keystone service-list | awk '/ orchestration / {print $2}') \ + --publicurl http://controller:8004/v1/%\(tenant_id\)s \ + --internalurl http://controller:8004/v1/%\(tenant_id\)s \ + --adminurl http://controller:8004/v1/%\(tenant_id\)s \ + --region regionOne ++-------------+-----------------------------------------+ +| Property | Value | ++-------------+-----------------------------------------+ +| adminurl | http://controller:8004/v1/%(tenant_id)s | +| id | f41225f665694b95a46448e8676b0dc2 | +| internalurl | http://controller:8004/v1/%(tenant_id)s | +| publicurl | http://controller:8004/v1/%(tenant_id)s | +| region | regionOne | +| service_id | 031112165cad4c2bb23e84603957de29 | ++-------------+-----------------------------------------+ +$ keystone endpoint-create \ + --service-id $(keystone service-list | awk '/ cloudformation / {print $2}') \ + --publicurl http://controller:8000/v1 \ + --internalurl http://controller:8000/v1 \ + --adminurl http://controller:8000/v1 \ + --region regionOne ++-------------+----------------------------------+ +| Property | Value | ++-------------+----------------------------------+ +| adminurl | http://controller:8000/v1 | +| id | f41225f665694b95a46448e8676b0dc2 | +| internalurl | http://controller:8000/v1 | +| publicurl | http://controller:8000/v1 | +| region | regionOne | +| service_id | 297740d74c0a446bbff867acdccb33fa | ++-------------+----------------------------------+ - - Create the Identity service endpoints: - $ keystone endpoint-create \ - --service-id=$(keystone service-list | awk '/ orchestration / {print $2}') \ - --publicurl=http://controller:8004/v1/%\(tenant_id\)s \ - --internalurl=http://controller:8004/v1/%\(tenant_id\)s \ - --adminurl=http://controller:8004/v1/%\(tenant_id\)s -$ keystone endpoint-create \ - --service-id=$(keystone 
service-list | awk '/ cloudformation / {print $2}') \ - --publicurl=http://controller:8000/v1 \ - --internalurl=http://controller:8000/v1 \ - --adminurl=http://controller:8000/v1 - To install and configure the Orchestration components Run the following commands to install the packages: - # apt-get install heat-api heat-api-cfn heat-engine - # yum install openstack-heat-api openstack-heat-engine openstack-heat-api-cfn - # zypper install openstack-heat-api openstack-heat-engine openstack-heat-api-cfn + # apt-get install heat-api heat-api-cfn heat-engine python-heatclient + # yum install openstack-heat-api openstack-heat-api-cfn openstack-heat-engine \ + python-heatclient + # zypper install openstack-heat-api openstack-heat-api-cfn openstack-heat-engine \ + python-heatclient - Edit the /etc/heat/heat.conf file. + Edit the /etc/heat/heat.conf file and + complete the following actions: In the [database] section, configure @@ -87,19 +162,19 @@ [database] ... connection = mysql://heat:HEAT_DBPASS@controller/heat - Replace HEAT_DBPASS with the password - you chose for the Orchestration database. + Replace HEAT_DBPASS with the + password you chose for the Orchestration database. In the [DEFAULT] section, configure RabbitMQ message broker access: [DEFAULT] ... -rpc_backend = heat.openstack.common.rpc.impl_kombu +rpc_backend = rabbit rabbit_host = controller rabbit_password = RABBIT_PASS - Replace RABBIT_PASS with the password - you chose for the guest account in + Replace RABBIT_PASS with the + password you chose for the guest account in RabbitMQ. @@ -109,9 +184,7 @@ rabbit_password = RABBIT_PASS [keystone_authtoken] ... 
auth_uri = http://controller:5000/v2.0 -auth_host = controller -auth_port = 35357 -auth_protocol = http +identity_uri = http://controller:35357 admin_tenant_name = service admin_user = heat admin_password = HEAT_PASS @@ -122,6 +195,12 @@ auth_uri = http://controller:5000/v2.0Replace HEAT_PASS with the password you chose for the heat user in the Identity service. + + Comment out any auth_host, + auth_port, and + auth_protocol options because the + identity_uri option replaces them. + In the [DEFAULT] section, configure @@ -131,18 +210,17 @@ auth_uri = http://controller:5000/v2.0controller:8000 heat_waitcondition_server_url = http://controller:8000/v1/waitcondition - - Configure the log directory in the [DEFAULT] - section: - [DEFAULT] + + (Optional) To assist with troubleshooting, enable verbose + logging in the [DEFAULT] section: + [DEFAULT] ... -log_dir = /var/log/heat +verbose = True - Run the following command to populate the Orchestration - database: + Populate the Orchestration database: # su -s /bin/sh -c "heat-manage db_sync" heat @@ -150,7 +228,7 @@ log_dir = /var/log/heat To install and configure the Orchestration components Run the following commands to install the packages: - # apt-get install heat-api heat-api-cfn heat-engine + # apt-get install heat-api heat-api-cfn heat-engine python-heatclient Respond to prompts for @@ -163,16 +241,13 @@ log_dir = /var/log/heat credentials. - Respond to the debconf configuration - tool prompts. - - - Edit the /etc/heat/heat.conf file. + Edit the /etc/heat/heat.conf file and + complete the following actions: In the [ec2authtoken] section, configure - Identity service access for EC2 operations: - [ec2authtoken] + Identity service access: + [ec2authtoken] ... 
auth_uri = http://controller:5000/v2.0 @@ -187,21 +262,31 @@ auth_uri = http://controller:5000/v2.0# service heat-api-cfn restart # service heat-engine restart - + Start the Orchestration services and configure them to start when the system boots: - # service openstack-heat-api start + # systemctl enable openstack-heat-api.service openstack-heat-api-cfn.service \ + openstack-heat-engine.service +# systemctl start openstack-heat-api.service openstack-heat-api-cfn.service \ + openstack-heat-engine.service + On SLES: + # service openstack-heat-api start # service openstack-heat-api-cfn start # service openstack-heat-engine start # chkconfig openstack-heat-api on # chkconfig openstack-heat-api-cfn on # chkconfig openstack-heat-engine on + On openSUSE: + # systemctl enable openstack-heat-api.service openstack-heat-api-cfn.service \ + openstack-heat-engine.service +# systemctl start openstack-heat-api.service openstack-heat-api-cfn.service \ + openstack-heat-engine.service By default, the Ubuntu packages create a SQLite database. 
Because this configuration uses a SQL database server, you can remove the SQLite database file: - # rm /var/lib/heat/heat.sqlite + # rm -f /var/lib/heat/heat.sqlite diff --git a/doc/training-guides/basic-install-guide/section_heat-verify.xml b/doc/training-guides/basic-install-guide/section_heat-verify.xml index 864c47bc..3f6aac02 100644 --- a/doc/training-guides/basic-install-guide/section_heat-verify.xml +++ b/doc/training-guides/basic-install-guide/section_heat-verify.xml @@ -28,7 +28,7 @@ stack from the template: $ NET_ID=$(nova net-list | awk '/ demo-net / { print $2 }') $ heat stack-create -f test-stack.yml \ - -P "ImageID=cirros-0.3.2-x86_64;NetID=$NET_ID" testStack + -P "ImageID=cirros-0.3.3-x86_64;NetID=$NET_ID" testStack +--------------------------------------+------------+--------------------+----------------------+ | id | stack_name | stack_status | creation_time | +--------------------------------------+------------+--------------------+----------------------+ diff --git a/doc/training-guides/basic-install-guide/section_keystone-install.xml b/doc/training-guides/basic-install-guide/section_keystone-install.xml index 6cf3fd80..12c4edb0 100644 --- a/doc/training-guides/basic-install-guide/section_keystone-install.xml +++ b/doc/training-guides/basic-install-guide/section_keystone-install.xml @@ -5,29 +5,41 @@ version="5.0" xml:id="keystone-install"> Install and configure - This section describes how to install and configure the - OpenStack Identity service on the controller node. + This section describes how to install and configure the OpenStack + Identity service on the controller node. To configure prerequisites - Before you configure the OpenStack Identity service, you - must create a database and an administration token. + Before you configure the OpenStack Identity service, you must create + a database and an administration token. 
- As the root user, connect to the - database to create the keystone database - and grant the proper access to it: - $ mysql -u root -p -mysql> CREATE DATABASE keystone; -mysql> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' \ + To create the database, complete these steps: + + + Use the database access client to connect to the database + server as the root user: + $ mysql -u root -p + + + Create the keystone database: + CREATE DATABASE keystone; + + + Grant proper access to the keystone + database: + GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' \ IDENTIFIED BY 'KEYSTONE_DBPASS'; -mysql> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' \ - IDENTIFIED BY 'KEYSTONE_DBPASS'; -mysql> exit - Replace KEYSTONE_DBPASS with a - suitable password. +GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' \ + IDENTIFIED BY 'KEYSTONE_DBPASS'; + Replace KEYSTONE_DBPASS with a suitable password. + + + Exit the database access client. + + - Generate a random value to use as the administration token - during initial configuration: + Generate a random value to use as the administration token during + initial configuration: # openssl rand -hex 10 # openssl rand 10 | hexdump -e '1/1 "%.2x"' @@ -35,8 +47,8 @@ To configure prerequisites - Generate a random value to use as the administration token - during initial configuration: + Generate a random value to use as the administration token during + initial configuration: # openssl rand -hex 10 @@ -46,51 +58,57 @@ Run the following command to install the packages: # apt-get install keystone python-keystoneclient # yum install openstack-keystone python-keystoneclient - # zypper install openstack-keystone python-keystoneclient + # zypper install openstack-keystone python-keystoneclient - - Edit the /etc/keystone/keystone.conf - file. 
+ + Edit the /etc/keystone/keystone.conf file and + complete the following actions: - + In the [DEFAULT] section, define the value of the initial administration token: [DEFAULT] ... admin_token = ADMIN_TOKEN - Replace ADMIN_TOKEN with the - random value that you generated in a previous step. + Replace ADMIN_TOKEN with the random + value that you generated in a previous step. - + In the [database] section, configure database access: [database] ... connection = mysql://keystone:KEYSTONE_DBPASS@controller/keystone - Replace KEYSTONE_DBPASS with - the password you chose for the database. + Replace KEYSTONE_DBPASS with the + password you chose for the database. - - In the [DEFAULT] section, configure the - log directory: + + In the [token] section, configure the UUID + token provider and SQL driver: + [token] +... +provider = keystone.token.providers.uuid.Provider +driver = keystone.token.persistence.backends.sql.Token + + + (Optional) To assist with troubleshooting, + enable verbose logging in the [DEFAULT] section: [DEFAULT] ... -log_dir = /var/log/keystone +verbose = True - By default, the Identity service uses public key - infrastructure (PKI). - Create generic certificates and keys and restrict access - to the associated files: + Create generic certificates and keys and restrict access to the + associated files: # keystone-manage pki_setup --keystone-user keystone --keystone-group keystone +# chown -R keystone:keystone /var/log/keystone # chown -R keystone:keystone /etc/keystone/ssl # chmod -R o-rwx /etc/keystone/ssl - Run the following command to populate the Identity service - database: + Populate the Identity service database: # su -s /bin/sh -c "keystone-manage db_sync" keystone @@ -101,34 +119,28 @@ log_dir = /var/log/keystone # apt-get install keystone python-keystoneclient - Respond to prompts for database - management. + Respond to prompts for Configure the initial administration token: - + - Use the random value that you generated in a previous step. 
If - you install using non-interactive mode or you do not specify this - token, the configuration tool generates a random value. + Use the random value that you generated in a previous step. If you + install using non-interactive mode or you do not specify this token, + the configuration tool generates a random value. - Create the admin tenant and - user: + Create the admin tenant and user: + fileref="figures/debconf-screenshots/keystone_2_register_admin_tenant_yes_no.png"/> @@ -136,8 +148,7 @@ log_dir = /var/log/keystone + fileref="figures/debconf-screenshots/keystone_3_admin_user_name.png"/> @@ -145,8 +156,7 @@ log_dir = /var/log/keystone + fileref="figures/debconf-screenshots/keystone_4_admin_user_email.png"/> @@ -154,8 +164,7 @@ log_dir = /var/log/keystone + fileref="figures/debconf-screenshots/keystone_5_admin_user_pass.png"/> @@ -163,20 +172,18 @@ log_dir = /var/log/keystone + fileref="figures/debconf-screenshots/keystone_6_admin_user_pass_confirm.png"/> - Register the Identity service in the catalog: + Create the Identity service endpoints: + fileref="figures/debconf-screenshots/keystone_7_register_endpoint.png"/> @@ -189,40 +196,40 @@ log_dir = /var/log/keystone # service keystone restart - Start the Identity service and configure it to start when - the system boots: - # service openstack-keystone start + Start the Identity service and configure it to start when the + system boots: + # systemctl enable openstack-keystone.service +# systemctl start openstack-keystone.service + On SLES: + # service openstack-keystone start # chkconfig openstack-keystone on + On openSUSE: + # systemctl enable openstack-keystone.service +# systemctl start openstack-keystone.service - By default, the Ubuntu packages create a SQLite - database. - Because this configuration uses a SQL database server, you - can remove the SQLite database file: - # rm /var/lib/keystone/keystone.db + By default, the Ubuntu packages create a SQLite database. 
+ Because this configuration uses a SQL database server, you can + remove the SQLite database file: + # rm -f /var/lib/keystone/keystone.db - By default, the Identity service stores expired tokens in - the database indefinitely. The accumulation of expired tokens - considerably increases the database size and might degrade - service performance, particularly in test environments with - limited resources. - We recommend that you use cron to configure a periodic task that purges - expired tokens hourly. - Run the following command to purge expired tokens every - hour and log the output to the - /var/log/keystone/keystone-tokenflush.log - file: + By default, the Identity service stores expired tokens in the + database indefinitely. The accumulation of expired tokens considerably + increases the database size and might degrade service performance, + particularly in environments with limited resources. + We recommend that you use + cron to configure a periodic + task that purges expired tokens hourly: # (crontab -l -u keystone 2>&1 | grep -q token_flush) || \ -echo '@hourly /usr/bin/keystone-manage token_flush >/var/log/keystone/keystone-tokenflush.log 2>&1' \ ->> /var/spool/cron/crontabs/keystone + echo '@hourly /usr/bin/keystone-manage token_flush >/var/log/keystone/keystone-tokenflush.log 2>&1' \ + >> /var/spool/cron/crontabs/keystone # (crontab -l -u keystone 2>&1 | grep -q token_flush) || \ -echo '@hourly /usr/bin/keystone-manage token_flush >/var/log/keystone/keystone-tokenflush.log 2>&1' \ ->> /var/spool/cron/keystone + echo '@hourly /usr/bin/keystone-manage token_flush >/var/log/keystone/keystone-tokenflush.log 2>&1' \ + >> /var/spool/cron/keystone # (crontab -l -u keystone 2>&1 | grep -q token_flush) || \ -echo '@hourly /usr/bin/keystone-manage token_flush >/var/log/keystone/keystone-tokenflush.log 2>&1' \ ->> /var/spool/cron/tabs/keystone + echo '@hourly /usr/bin/keystone-manage token_flush >/var/log/keystone/keystone-tokenflush.log 2>&1' \ + >> 
/var/spool/cron/tabs/keystone diff --git a/doc/training-guides/basic-install-guide/section_keystone-openrc.xml b/doc/training-guides/basic-install-guide/section_keystone-openrc.xml new file mode 100644 index 00000000..7c2b67cd --- /dev/null +++ b/doc/training-guides/basic-install-guide/section_keystone-openrc.xml @@ -0,0 +1,52 @@ + +
+ Create OpenStack client environment scripts + The previous section used a combination of environment variables and + command options to interact with the Identity service via the + keystone client. To increase efficiency of client + operations, OpenStack supports simple client environment scripts also + known as OpenRC files. These scripts typically contain common options for + all clients, but also support unique options. For more information, see the + OpenStack User Guide. + + To create the scripts + Create client environment scripts for the admin + and demo tenants and users. Future portions of this + guide reference these scripts to load appropriate credentials for client + operations. + + Edit the admin-openrc.sh file and add the + following content: + export OS_TENANT_NAME=admin +export OS_USERNAME=admin +export OS_PASSWORD=ADMIN_PASS +export OS_AUTH_URL=http://controller:35357/v2.0 + Replace ADMIN_PASS with the password you chose + for the admin user in the Identity service. + + + Edit the demo-openrc.sh file and add the + following content: + export OS_TENANT_NAME=demo +export OS_USERNAME=demo +export OS_PASSWORD=DEMO_PASS +export OS_AUTH_URL=http://controller:5000/v2.0 + Replace DEMO_PASS with the password you chose + for the demo user in the Identity service. + + + + To load client environment scripts + + To run clients as a certain tenant and user, you can simply load + the associated client environment script prior to running them. For + example, to load the location of the Identity service and + admin tenant and user credentials: + $ source admin-openrc.sh + + +
diff --git a/doc/training-guides/basic-install-guide/section_keystone-services.xml b/doc/training-guides/basic-install-guide/section_keystone-services.xml index 01f83db1..0b26041a 100644 --- a/doc/training-guides/basic-install-guide/section_keystone-services.xml +++ b/doc/training-guides/basic-install-guide/section_keystone-services.xml @@ -25,14 +25,15 @@ services in your environment. Create the service entity for the Identity service: $ keystone service-create --name keystone --type identity \ - --description="OpenStack Identity" + --description "OpenStack Identity" +-------------+----------------------------------+ | Property | Value | +-------------+----------------------------------+ | description | OpenStack Identity | -| id | 15c11a23667e427e91bc31335b45f4bd | -| name | keystone | -| type | identity | +| enabled | True | +| id | 15c11a23667e427e91bc31335b45f4bd | +| name | keystone | +| type | identity | +-------------+----------------------------------+ Because OpenStack generates IDs dynamically, you will see @@ -47,23 +48,26 @@ OpenStack provides three API endpoint variations for each service: admin, internal, and public. In a production environment, the variants might reside on separate networks that service different types of users - for security reasons. For simplicity, this configuration uses the - management network for all variations. + for security reasons. Also, OpenStack supports multiple regions for + scalability. For simplicity, this configuration uses the management + network for all endpoint variations and the + regionOne region. 
Create the API endpoint for the Identity service: $ keystone endpoint-create \ - --service-id=$(keystone service-list | awk '/ identity / {print $2}') \ - --publicurl=http://controller:5000/v2.0 \ - --internalurl=http://controller:5000/v2.0 \ - --adminurl=http://controller:35357/v2.0 + --service-id $(keystone service-list | awk '/ identity / {print $2}') \ + --publicurl http://controller:5000/v2.0 \ + --internalurl http://controller:5000/v2.0 \ + --adminurl http://controller:35357/v2.0 \ + --region regionOne +-------------+----------------------------------+ | Property | Value | +-------------+----------------------------------+ -| adminurl | http://controller:35357/v2.0 | -| id | 11f9c625a3b94a3f8e66bf4e5de2679f | +| adminurl | http://controller:35357/v2.0 | +| id | 11f9c625a3b94a3f8e66bf4e5de2679f | | internalurl | http://controller:5000/v2.0 | -| publicurl | http://controller:5000/v2.0 | -| region | regionOne | -| service_id | 15c11a23667e427e91bc31335b45f4bd | +| publicurl | http://controller:5000/v2.0 | +| region | regionOne | +| service_id | 15c11a23667e427e91bc31335b45f4bd | +-------------+----------------------------------+ This command references the ID of the service that you created diff --git a/doc/training-guides/basic-install-guide/section_keystone-users.xml b/doc/training-guides/basic-install-guide/section_keystone-users.xml index 8d97b8cf..0238df3f 100644 --- a/doc/training-guides/basic-install-guide/section_keystone-users.xml +++ b/doc/training-guides/basic-install-guide/section_keystone-users.xml @@ -15,11 +15,11 @@ (endpoint) of the Identity service before you run keystone commands. You can pass the value of the administration token to the - keystone command with the + keystone command with the --os-token option or set the temporary OS_SERVICE_TOKEN environment variable. 
Similarly, you can pass the location of the Identity service to the keystone command with the - option or set the temporary + --os-endpoint option or set the temporary OS_SERVICE_ENDPOINT environment variable. This guide uses environment variables to reduce command length. For more information, see the @@ -96,12 +96,18 @@ - By default, the Identity service creates a special - _member_ role. The OpenStack dashboard - automatically grants access to users with this role. You must - give the admin user access to this role in - addition to the admin role. - + By default, the dashboard limits access to users with the + _member_ role. + Create the _member_ role: + $ keystone role-create --name _member_ ++----------+----------------------------------+ +| Property | Value | ++----------+----------------------------------+ +| id | 0f198e94ffce416cbcbe344e1843eac8 | +| name | _member_ | ++----------+----------------------------------+ + + Add the admin tenant and user to the _member_ role: $ keystone user-role-add --tenant admin --user admin --role _member_ diff --git a/doc/training-guides/basic-install-guide/section_keystone-verify.xml b/doc/training-guides/basic-install-guide/section_keystone-verify.xml index 91596440..21273803 100644 --- a/doc/training-guides/basic-install-guide/section_keystone-verify.xml +++ b/doc/training-guides/basic-install-guide/section_keystone-verify.xml @@ -87,12 +87,18 @@ As the demo tenant and user, request an authentication token: $ keystone --os-tenant-name demo --os-username demo --os-password DEMO_PASS \ - --os-auth-url http://controller:35357/v2.0 token-get + --os-auth-url http://controller:35357/v2.0 token-get ++-----------+----------------------------------+ +| Property | Value | ++-----------+----------------------------------+ +| expires | 2014-10-10T12:51:33Z | +| id | 1b87ceae9e08411ba4a16e4dada04802 | +| tenant_id | 4aa51bb942be4dd0ac0555d7591f80a6 | +| user_id | 7004dfa0dda84d63aef81cf7f100af01 | 
++-----------+----------------------------------+ Replace DEMO_PASS with the password you chose for the demo user in the Identity service. - Lengthy output that includes a token value verifies operation - for the demo tenant and user. As the demo tenant and user, attempt to list diff --git a/doc/training-guides/basic-install-guide/section_launch-instance-neutron.xml b/doc/training-guides/basic-install-guide/section_launch-instance-neutron.xml index be59c28c..39711b51 100644 --- a/doc/training-guides/basic-install-guide/section_launch-instance-neutron.xml +++ b/doc/training-guides/basic-install-guide/section_launch-instance-neutron.xml @@ -6,18 +6,18 @@ xml:id="launch-instance-neutron"> Launch an instance with OpenStack Networking (neutron) - To generate a keypair + To generate a key pair Most cloud images support public key authentication rather than conventional user name/password authentication. Before launching an instance, you must - generate a public/private keypair using ssh-keygen + generate a public/private key pair using ssh-keygen and add the public key to your OpenStack environment. Source the demo tenant credentials: $ source demo-openrc.sh - Generate a keypair: + Generate a key pair: $ ssh-keygen @@ -67,10 +67,10 @@ +--------------------------------------+---------------------+--------+--------+ | ID | Name | Status | Server | +--------------------------------------+---------------------+--------+--------+ -| acafc7c0-40aa-4026-9673-b879898e1fc2 | cirros-0.3.2-x86_64 | ACTIVE | | +| acafc7c0-40aa-4026-9673-b879898e1fc2 | cirros-0.3.3-x86_64 | ACTIVE | | +--------------------------------------+---------------------+--------+--------+ Your first instance uses the - cirros-0.3.2-x86_64 image. + cirros-0.3.3-x86_64 image. List available networks: @@ -97,14 +97,13 @@ group. By default, this security group implements a firewall that blocks remote access to instances. 
If you would like to permit remote access to your instance, launch it and then - - configure remote access. + configure remote access. Launch the instance: Replace DEMO_NET_ID with the ID of the demo-net tenant network. - $ nova boot --flavor m1.tiny --image cirros-0.3.2-x86_64 --nic net-id=DEMO_NET_ID \ + $ nova boot --flavor m1.tiny --image cirros-0.3.3-x86_64 --nic net-id=DEMO_NET_ID \ --security-group default --key-name demo-key demo-instance1 +--------------------------------------+------------------------------------------------------------+ | Property | Value | @@ -124,7 +123,7 @@ | flavor | m1.tiny (1) | | hostId | | | id | 05682b91-81a1-464c-8f40-8b3da7ee92c5 | -| image | cirros-0.3.2-x86_64 (acafc7c0-40aa-4026-9673-b879898e1fc2) | +| image | cirros-0.3.3-x86_64 (acafc7c0-40aa-4026-9673-b879898e1fc2) | | key_name | demo-key | | metadata | {} | | name | demo-instance1 | @@ -279,16 +278,90 @@ Are you sure you want to continue connecting (yes/no)? yes Warning: Permanently added '203.0.113.102' (RSA) to the list of known hosts. $ - If your host does not contain the public/private keypair created + If your host does not contain the public/private key pair created in an earlier step, SSH prompts for the default password associated with the cirros user. + + To attach a Block Storage volume to your instance + If your environment includes the Block Storage service, you can + attach a volume to the instance. 
+ + Source the demo tenant credentials: + $ source demo-openrc.sh + + + List volumes: + $ nova volume-list ++--------------------------------------+-----------+--------------+------+-------------+-------------+ +| ID | Status | Display Name | Size | Volume Type | Attached to | ++--------------------------------------+-----------+--------------+------+-------------+-------------+ +| 158bea89-07db-4ac2-8115-66c0d6a4bb48 | available | demo-volume1 | 1 | None | | ++--------------------------------------+-----------+--------------+------+-------------+-------------+ + + + Attach the demo-volume1 volume to + the demo-instance1 instance: + $ nova volume-attach demo-instance1 158bea89-07db-4ac2-8115-66c0d6a4bb48 ++----------+--------------------------------------+ +| Property | Value | ++----------+--------------------------------------+ +| device | /dev/vdb | +| id | 158bea89-07db-4ac2-8115-66c0d6a4bb48 | +| serverId | 05682b91-81a1-464c-8f40-8b3da7ee92c5 | +| volumeId | 158bea89-07db-4ac2-8115-66c0d6a4bb48 | ++----------+--------------------------------------+ + + You must reference volumes using the IDs instead of + names. + + + + List volumes: + $ nova volume-list ++--------------------------------------+-----------+--------------+------+-------------+--------------------------------------+ +| ID | Status | Display Name | Size | Volume Type | Attached to | ++--------------------------------------+-----------+--------------+------+-------------+--------------------------------------+ +| 158bea89-07db-4ac2-8115-66c0d6a4bb48 | in-use | demo-volume1 | 1 | None | 05682b91-81a1-464c-8f40-8b3da7ee92c5 | ++--------------------------------------+-----------+--------------+------+-------------+--------------------------------------+ + The demo-volume1 volume status should indicate + in-use by the ID of the + demo-instance1 instance. 
+ + + Access your instance using SSH from the controller node or any + host on the external network and use the fdisk + command to verify presence of the volume as the + /dev/vdb block storage device: + $ ssh cirros@203.0.113.102 +$ sudo fdisk -l + +Disk /dev/vda: 1073 MB, 1073741824 bytes +255 heads, 63 sectors/track, 130 cylinders, total 2097152 sectors +Units = sectors of 1 * 512 = 512 bytes +Sector size (logical/physical): 512 bytes / 512 bytes +I/O size (minimum/optimal): 512 bytes / 512 bytes +Disk identifier: 0x00000000 + + Device Boot Start End Blocks Id System +/dev/vda1 * 16065 2088449 1036192+ 83 Linux + +Disk /dev/vdb: 1073 MB, 1073741824 bytes +16 heads, 63 sectors/track, 2080 cylinders, total 2097152 sectors +Units = sectors of 1 * 512 = 512 bytes +Sector size (logical/physical): 512 bytes / 512 bytes +I/O size (minimum/optimal): 512 bytes / 512 bytes +Disk identifier: 0x00000000 + +Disk /dev/vdb doesn't contain a valid partition table + + You must create a partition table and file system to use + the volume. + + + If your instance does not launch or seem to work as you expect, see the - - OpenStack Operations Guide for more - information or use one of the - many other options to seek assistance. We want your environment to work! diff --git a/doc/training-guides/basic-install-guide/section_launch-instance-nova.xml b/doc/training-guides/basic-install-guide/section_launch-instance-nova.xml index e1ead991..9ea79c22 100644 --- a/doc/training-guides/basic-install-guide/section_launch-instance-nova.xml +++ b/doc/training-guides/basic-install-guide/section_launch-instance-nova.xml @@ -6,18 +6,18 @@ xml:id="launch-instance-nova"> Launch an instance with legacy networking (nova-network) - To generate a keypair + To generate a key pair Most cloud images support public key authentication rather than conventional user name/password authentication. 
Before launching an instance, you must - generate a public/private keypair using ssh-keygen + generate a public/private key pair using ssh-keygen and add the public key to your OpenStack environment. Source the demo tenant credentials: $ source demo-openrc.sh - Generate a keypair: + Generate a key pair: $ ssh-keygen @@ -67,10 +67,10 @@ +--------------------------------------+---------------------+--------+--------+ | ID | Name | Status | Server | +--------------------------------------+---------------------+--------+--------+ -| acafc7c0-40aa-4026-9673-b879898e1fc2 | cirros-0.3.2-x86_64 | ACTIVE | | +| acafc7c0-40aa-4026-9673-b879898e1fc2 | cirros-0.3.3-x86_64 | ACTIVE | | +--------------------------------------+---------------------+--------+--------+ Your first instance uses the - cirros-0.3.2-x86_64 image. + cirros-0.3.3-x86_64 image. List available networks: @@ -102,14 +102,13 @@ group. By default, this security group implements a firewall that blocks remote access to instances. If you would like to permit remote access to your instance, launch it and then - - configure remote access. + configure remote access. Launch the instance: Replace DEMO_NET_ID with the ID of the demo-net tenant network. - $ nova boot --flavor m1.tiny --image cirros-0.3.2-x86_64 --nic net-id=DEMO_NET_ID \ + $ nova boot --flavor m1.tiny --image cirros-0.3.3-x86_64 --nic net-id=DEMO_NET_ID \ --security-group default --key-name demo-key demo-instance1 +--------------------------------------+------------------------------------------------------------+ | Property | Value | @@ -129,7 +128,7 @@ | flavor | m1.tiny (1) | | hostId | | | id | 45ea195c-c469-43eb-83db-1a663bbad2fc | -| image | cirros-0.3.2-x86_64 (acafc7c0-40aa-4026-9673-b879898e1fc2) | +| image | cirros-0.3.3-x86_64 (acafc7c0-40aa-4026-9673-b879898e1fc2) | | key_name | demo-key | | metadata | {} | | name | demo-instance1 | @@ -238,16 +237,92 @@ Are you sure you want to continue connecting (yes/no)? 
yes Warning: Permanently added '203.0.113.26' (RSA) to the list of known hosts. $ - If your host does not contain the public/private keypair created + If your host does not contain the public/private key pair created in an earlier step, SSH prompts for the default password associated with the cirros user. + + To attach a Block Storage volume to your instance + If your environment includes the Block Storage service, you can + attach a volume to the instance. + + Source the demo tenant credentials: + $ source demo-openrc.sh + + + List volumes: + $ nova volume-list ++--------------------------------------+-----------+--------------+------+-------------+-------------+ +| ID | Status | Display Name | Size | Volume Type | Attached to | ++--------------------------------------+-----------+--------------+------+-------------+-------------+ +| 158bea89-07db-4ac2-8115-66c0d6a4bb48 | available | demo-volume1 | 1 | None | | ++--------------------------------------+-----------+--------------+------+-------------+-------------+ + + + Attach the demo-volume1 volume to + the demo-instance1 instance: + $ nova volume-attach demo-instance1 158bea89-07db-4ac2-8115-66c0d6a4bb48 ++----------+--------------------------------------+ +| Property | Value | ++----------+--------------------------------------+ +| device | /dev/vdb | +| id | 158bea89-07db-4ac2-8115-66c0d6a4bb48 | +| serverId | 45ea195c-c469-43eb-83db-1a663bbad2fc | +| volumeId | 158bea89-07db-4ac2-8115-66c0d6a4bb48 | ++----------+--------------------------------------+ + + You must reference volumes using the IDs instead of + names. 
+ + + + List volumes: + $ nova volume-list ++--------------------------------------+-----------+--------------+------+-------------+--------------------------------------+ +| ID | Status | Display Name | Size | Volume Type | Attached to | ++--------------------------------------+-----------+--------------+------+-------------+--------------------------------------+ +| 158bea89-07db-4ac2-8115-66c0d6a4bb48 | in-use | demo-volume1 | 1 | None | 45ea195c-c469-43eb-83db-1a663bbad2fc | ++--------------------------------------+-----------+--------------+------+-------------+--------------------------------------+ + The demo-volume1 volume status should indicate + in-use by the ID of the + demo-instance1 instance. + + + Access your instance using SSH from the controller node or any + host on the external network and use the fdisk + command to verify presence of the volume as the + /dev/vdb block storage device: + $ ssh cirros@203.0.113.102 +$ sudo fdisk -l + +Disk /dev/vda: 1073 MB, 1073741824 bytes +255 heads, 63 sectors/track, 130 cylinders, total 2097152 sectors +Units = sectors of 1 * 512 = 512 bytes +Sector size (logical/physical): 512 bytes / 512 bytes +I/O size (minimum/optimal): 512 bytes / 512 bytes +Disk identifier: 0x00000000 + + Device Boot Start End Blocks Id System +/dev/vda1 * 16065 2088449 1036192+ 83 Linux + +Disk /dev/vdb: 1073 MB, 1073741824 bytes +16 heads, 63 sectors/track, 2080 cylinders, total 2097152 sectors +Units = sectors of 1 * 512 = 512 bytes +Sector size (logical/physical): 512 bytes / 512 bytes +I/O size (minimum/optimal): 512 bytes / 512 bytes +Disk identifier: 0x00000000 + +Disk /dev/vdb doesn't contain a valid partition table + + You must create a partition table and file system to use + the volume. + + + If your instance does not launch or seem to work as you expect, see the - - OpenStack Operations Guide for more + OpenStack Operations Guide for more information or use one of the - many other options to seek assistance. 
We want your environment to work! diff --git a/doc/training-guides/basic-install-guide/section_neutron-compute-node.xml b/doc/training-guides/basic-install-guide/section_neutron-compute-node.xml new file mode 100644 index 00000000..94bdb359 --- /dev/null +++ b/doc/training-guides/basic-install-guide/section_neutron-compute-node.xml @@ -0,0 +1,334 @@ + +
+ Install and configure compute node + The compute node handles connectivity and + security groups + for instances. + + To configure prerequisites + Before you install and configure OpenStack Networking, you + must configure certain kernel networking parameters. + + Edit the /etc/sysctl.conf file to + contain the following parameters: + net.ipv4.conf.all.rp_filter=0 +net.ipv4.conf.default.rp_filter=0 + + + Implement the changes: + # sysctl -p + + + + To install the Networking components + + # apt-get install neutron-plugin-ml2 neutron-plugin-openvswitch-agent + # yum install openstack-neutron-ml2 openstack-neutron-openvswitch + # zypper install --no-recommends openstack-neutron-openvswitch-agent ipset + + SUSE does not use a separate ML2 plug-in package. + + + + + To install and configure the Networking components + + # apt-get install neutron-plugin-openvswitch-agent openvswitch-datapath-dkms + + Debian does not use a separate ML2 plug-in package. + + + + Select the ML2 plug-in: + + + + + + + + + Selecting the ML2 plug-in also populates the + and + options in the + /etc/neutron/neutron.conf file with the + appropriate values. + + + + + To configure the Networking common components + The Networking common component configuration includes the + authentication mechanism, message broker, and plug-in. + + Edit the /etc/neutron/neutron.conf file + and complete the following actions: + + + In the [database] section, comment out + any connection options because compute nodes + do not directly access the database. + + + In the [DEFAULT] section, configure + RabbitMQ message broker access: + [DEFAULT] +... +rpc_backend = rabbit +rabbit_host = controller +rabbit_password = RABBIT_PASS + Replace RABBIT_PASS with the + password you chose for the guest account in + RabbitMQ. + + + In the [DEFAULT] and + [keystone_authtoken] sections, + configure Identity service access: + [DEFAULT] +... +auth_strategy = keystone + +[keystone_authtoken] +... 
+auth_uri = http://controller:5000/v2.0 +identity_uri = http://controller:35357 +admin_tenant_name = service +admin_user = neutron +admin_password = NEUTRON_PASS + Replace NEUTRON_PASS with the + password you chose or the neutron user in the + Identity service. + + Comment out any auth_host, + auth_port, and + auth_protocol options because the + identity_uri option replaces them. + + + + In the [DEFAULT] section, enable the + Modular Layer 2 (ML2) plug-in, router service, and overlapping + IP addresses: + [DEFAULT] +... +core_plugin = ml2 +service_plugins = router +allow_overlapping_ips = True + + + (Optional) To assist with troubleshooting, + enable verbose logging in the [DEFAULT] + section: + [DEFAULT] +... +verbose = True + + + + + + To configure the Modular Layer 2 (ML2) plug-in + The ML2 plug-in uses the Open vSwitch (OVS) mechanism (agent) to + build the virtual networking framework for instances. + + Edit the + /etc/neutron/plugins/ml2/ml2_conf.ini + file and complete the following actions: + + + In the [ml2] section, enable the + flat and + generic routing encapsulation (GRE) + network type drivers, GRE tenant networks, and the OVS + mechanism driver: + [ml2] +... +type_drivers = flat,gre +tenant_network_types = gre +mechanism_drivers = openvswitch + + + In the [ml2_type_gre] section, configure + the tunnel identifier (id) range: + [ml2_type_gre] +... +tunnel_id_ranges = 1:1000 + + + In the [securitygroup] section, enable + security groups, enable ipset, and + configure the OVS iptables firewall + driver: + [securitygroup] +... +enable_security_group = True +enable_ipset = True +firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver + + + In the [ovs] section, configure the + Open vSwitch (OVS) agent: + [ovs] +... 
+local_ip = INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS +tunnel_type = gre +enable_tunneling = True + Replace + INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS + with the IP address of the instance tunnels network interface + on your compute node. + + + + + + To configure the Open vSwitch (OVS) service + The OVS service provides the underlying virtual networking framework + for instances. + + Start the OVS service and configure it to start when the + system boots: + # systemctl enable openvswitch.service +# systemctl start openvswitch.service + On SLES: + # service openvswitch-switch start +# chkconfig openvswitch-switch on + On openSUSE: + # systemctl enable openvswitch.service +# systemctl start openvswitch.service + + + Restart the OVS service: + # service openvswitch-switch restart + + + + To configure Compute to use Networking + By default, distribution packages configure Compute to use + legacy networking. You must reconfigure Compute to manage + networks through Networking. + + Edit the /etc/nova/nova.conf file and + complete the following actions: + + + In the [DEFAULT] section, configure + the APIs and drivers: + [DEFAULT] +... +network_api_class = nova.network.neutronv2.api.API +security_group_api = neutron +linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver +firewall_driver = nova.virt.firewall.NoopFirewallDriver + + By default, Compute uses an internal firewall service. + Since Networking includes a firewall service, you must + disable the Compute firewall service by using the + nova.virt.firewall.NoopFirewallDriver + firewall driver. + + + + In the [neutron] section, configure + access parameters: + [neutron] +... +url = http://controller:9696 +auth_strategy = keystone +admin_auth_url = http://controller:35357/v2.0 +admin_tenant_name = service +admin_username = neutron +admin_password = NEUTRON_PASS + Replace NEUTRON_PASS with the + password you chose for the neutron user + in the Identity service. 
+ + + + + + To finalize the installation + + The Networking service initialization scripts expect a + symbolic link /etc/neutron/plugin.ini + pointing to the ML2 plug-in configuration file, + /etc/neutron/plugins/ml2/ml2_conf.ini. + If this symbolic link does not exist, create it using the + following command: + # ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini + Due to a packaging bug, the Open vSwitch agent initialization + script explicitly looks for the Open vSwitch plug-in configuration + file rather than a symbolic link + /etc/neutron/plugin.ini pointing to the ML2 + plug-in configuration file. Run the following commands to resolve this + issue: + # cp /usr/lib/systemd/system/neutron-openvswitch-agent.service \ + /usr/lib/systemd/system/neutron-openvswitch-agent.service.orig +# sed -i 's,plugins/openvswitch/ovs_neutron_plugin.ini,plugin.ini,g' \ + /usr/lib/systemd/system/neutron-openvswitch-agent.service + + + The Networking service initialization scripts expect the + variable NEUTRON_PLUGIN_CONF in the + /etc/sysconfig/neutron file to + reference the ML2 plug-in configuration file. 
Edit the + /etc/sysconfig/neutron file and add the + following: + NEUTRON_PLUGIN_CONF="/etc/neutron/plugins/ml2/ml2_conf.ini" + + + Restart the Compute service: + # systemctl restart openstack-nova-compute.service + On SLES: + # service openstack-nova-compute restart + On openSUSE: + # systemctl restart openstack-nova-compute.service + # service nova-compute restart + + + Start the Open vSwitch (OVS) agent and configure it to + start when the system boots: + # systemctl enable neutron-openvswitch-agent.service +# systemctl start neutron-openvswitch-agent.service + On SLES: + # service openstack-neutron-openvswitch-agent start +# chkconfig openstack-neutron-openvswitch-agent on + On openSUSE: + # systemctl enable openstack-neutron-openvswitch-agent.service +# systemctl start openstack-neutron-openvswitch-agent.service + + + Restart the Open vSwitch (OVS) agent: + # service neutron-plugin-openvswitch-agent restart + + + + Verify operation + + Perform these commands on the controller node. + + + Source the admin credentials to gain access to + admin-only CLI commands: + $ source admin-openrc.sh + + + List agents to verify successful launch of the + neutron agents: + $ neutron agent-list ++--------------------------------------+--------------------+---------+-------+----------------+---------------------------+ +| id | agent_type | host | alive | admin_state_up | binary | ++--------------------------------------+--------------------+---------+-------+----------------+---------------------------+ +... +| a5a49051-05eb-4b4f-bfc7-d36235fe9131 | Open vSwitch agent | compute1 | :-) | True | neutron-openvswitch-agent | ++--------------------------------------+--------------------+---------+-------+----------------+---------------------------+ + + +
diff --git a/doc/training-guides/basic-install-guide/section_neutron-concepts.xml b/doc/training-guides/basic-install-guide/section_neutron-concepts.xml index 01f1f9e7..828f094b 100644 --- a/doc/training-guides/basic-install-guide/section_neutron-concepts.xml +++ b/doc/training-guides/basic-install-guide/section_neutron-concepts.xml @@ -22,13 +22,13 @@ many interfaces connected to subnets. Subnets can access machines on other subnets connected to the same router. Any given Networking set up has at least one external network. - This network, unlike the other networks, is not merely a virtually - defined network. Instead, it represents the view into a slice of - the external network that is accessible outside the OpenStack - installation. IP addresses on the Networking external network are + Unlike the other networks, the external network is not merely a + virtually defined network. Instead, it represents a view into a + slice of the physical, external network accessible outside the + OpenStack installation. IP addresses on the external network are accessible by anybody physically on the outside network. Because - this network merely represents a slice of the outside network, - DHCP is disabled on this network. + the external network merely represents a view into the outside + network, DHCP is disabled on this network. In addition to external networks, any Networking set up has one or more internal networks. These software-defined networks connect directly to the VMs. Only the VMs on any given internal @@ -54,10 +54,10 @@ security groups to block or unblock ports, port ranges, or traffic types for that VM. Each plug-in that Networking uses has its own concepts. While - not vital to operating Networking, understanding these concepts - can help you set up Networking. All Networking installations use a - core plug-in and a security group plug-in (or just the No-Op - security group plug-in). 
Additionally, Firewall-as-a-Service - (FWaaS) and Load-Balancer-as-a-Service (LBaaS) plug-ins are - available. + not vital to operating the VNI and OpenStack environment, + understanding these concepts can help you set up Networking. + All Networking installations use a core plug-in and a security group + plug-in (or just the No-Op security group plug-in). Additionally, + Firewall-as-a-Service (FWaaS) and Load-Balancer-as-a-Service (LBaaS) + plug-ins are available. diff --git a/doc/training-guides/basic-install-guide/section_neutron-controller-node.xml b/doc/training-guides/basic-install-guide/section_neutron-controller-node.xml new file mode 100644 index 00000000..b9baf208 --- /dev/null +++ b/doc/training-guides/basic-install-guide/section_neutron-controller-node.xml @@ -0,0 +1,448 @@ + +
+ Install and configure controller node + + To configure prerequisites + Before you configure OpenStack Networking (neutron), you must create + a database and Identity service credentials including endpoints. + + To create the database, complete these steps: + + + Use the database access client to connect to the database + server as the root user: + $ mysql -u root -p + + + Create the neutron database: + CREATE DATABASE neutron; + + + Grant proper access to the neutron + database: + GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' \ + IDENTIFIED BY 'NEUTRON_DBPASS'; +GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' \ + IDENTIFIED BY 'NEUTRON_DBPASS'; + Replace NEUTRON_DBPASS with a + suitable password. + + + Exit the database access client. + + + + + Source the admin credentials to gain access to + admin-only CLI commands: + $ source admin-openrc.sh + + + To create the Identity service credentials, complete these + steps: + + + Create the neutron user: + $ keystone user-create --name neutron --pass NEUTRON_PASS ++----------+----------------------------------+ +| Property | Value | ++----------+----------------------------------+ +| email | | +| enabled | True | +| id | 7fd67878dcd04d0393469ef825a7e005 | +| name | neutron | +| username | neutron | ++----------+----------------------------------+ + Replace NEUTRON_PASS with a suitable + password. + + + Link the neutron user to the + service tenant and admin + role: + $ keystone user-role-add --user neutron --tenant service --role admin + + This command provides no output. 
+ + + + Create the neutron service: + $ keystone service-create --name neutron --type network \ + --description "OpenStack Networking" ++-------------+----------------------------------+ +| Property | Value | ++-------------+----------------------------------+ +| description | OpenStack Networking | +| enabled | True | +| id | 6369ddaf99a447f3a0d41dac5e342161 | +| name | neutron | +| type | network | ++-------------+----------------------------------+ + + + Create the Identity service endpoints: + $ keystone endpoint-create \ + --service-id $(keystone service-list | awk '/ network / {print $2}') \ + --publicurl http://controller:9696 \ + --adminurl http://controller:9696 \ + --internalurl http://controller:9696 \ + --region regionOne ++-------------+----------------------------------+ +| Property | Value | ++-------------+----------------------------------+ +| adminurl | http://controller:9696 | +| id | fa18b41938a94bf6b35e2c152063ee21 | +| internalurl | http://controller:9696 | +| publicurl | http://controller:9696 | +| region | regionOne | +| service_id | 6369ddaf99a447f3a0d41dac5e342161 | ++-------------+----------------------------------+ + + + + + + To install the Networking components + + # apt-get install neutron-server neutron-plugin-ml2 python-neutronclient + # yum install openstack-neutron openstack-neutron-ml2 python-neutronclient which + # zypper install openstack-neutron openstack-neutron-server + + SUSE does not use a separate ML2 plug-in package. + + + + + To install and configure the Networking components + + # apt-get install neutron-server + + Debian does not use a separate ML2 plug-in package. + + + + Select the ML2 plug-in: + + + + + + + + + Selecting the ML2 plug-in also populates the + and + options in the + /etc/neutron/neutron.conf file with the + appropriate values. 
+ + + + + To configure the Networking server component + The Networking server component configuration includes the database, + authentication mechanism, message broker, topology change notifications, + and plug-in. + + Edit the /etc/neutron/neutron.conf file + and complete the following actions: + + + In the [database] section, configure + database access: + [database] +... +connection = mysql://neutron:NEUTRON_DBPASS@controller/neutron + Replace NEUTRON_DBPASS with the + password you chose for the database. + + + In the [DEFAULT] section, configure + RabbitMQ message broker access: + [DEFAULT] +... +rpc_backend = rabbit +rabbit_host = controller +rabbit_password = RABBIT_PASS + Replace RABBIT_PASS with the + password you chose for the guest account in + RabbitMQ. + + + In the [DEFAULT] and + [keystone_authtoken] sections, + configure Identity service access: + [DEFAULT] +... +auth_strategy = keystone + +[keystone_authtoken] +... +auth_uri = http://controller:5000/v2.0 +identity_uri = http://controller:35357 +admin_tenant_name = service +admin_user = neutron +admin_password = NEUTRON_PASS + Replace NEUTRON_PASS with the + password you chose or the neutron user in the + Identity service. + + Comment out any auth_host, + auth_port, and + auth_protocol options because the + identity_uri option replaces them. + + + + In the [DEFAULT] section, enable the + Modular Layer 2 (ML2) plug-in, router service, and overlapping + IP addresses: + [DEFAULT] +... +core_plugin = ml2 +service_plugins = router +allow_overlapping_ips = True + + + In the [DEFAULT] section, configure + Networking to notify Compute of network topology changes: + [DEFAULT] +... 
+notify_nova_on_port_status_changes = True +notify_nova_on_port_data_changes = True +nova_url = http://controller:8774/v2 +nova_admin_auth_url = http://controller:35357/v2.0 +nova_region_name = regionOne +nova_admin_username = nova +nova_admin_tenant_id = SERVICE_TENANT_ID +nova_admin_password = NOVA_PASS + Replace SERVICE_TENANT_ID with the + service tenant identifier (id) in the Identity + service and NOVA_PASS with the password + you chose for the nova user in the Identity + service. + + To obtain the service tenant + identifier (id): + $ source admin-openrc.sh +$ keystone tenant-get service ++-------------+----------------------------------+ +| Property | Value | ++-------------+----------------------------------+ +| description | Service Tenant | +| enabled | True | +| id | f727b5ec2ceb4d71bad86dfc414449bf | +| name | service | ++-------------+----------------------------------+ + + + + (Optional) To assist with troubleshooting, + enable verbose logging in the [DEFAULT] + section: + [DEFAULT] +... +verbose = True + + + + + + To configure the Modular Layer 2 (ML2) plug-in + The ML2 plug-in uses the + Open vSwitch (OVS) + mechanism (agent) to build the virtual networking framework for + instances. However, the controller node does not need the OVS + components because it does not handle instance network traffic. + + Edit the + /etc/neutron/plugins/ml2/ml2_conf.ini + file and complete the following actions: + + + In the [ml2] section, enable the + flat and + generic routing encapsulation (GRE) + network type drivers, GRE tenant networks, and the OVS + mechanism driver: + [ml2] +... +type_drivers = flat,gre +tenant_network_types = gre +mechanism_drivers = openvswitch + + Once you configure the ML2 plug-in, be aware that disabling + a network type driver and re-enabling it later can lead to + database inconsistency. + + + + In the [ml2_type_gre] section, configure + the tunnel identifier (id) range: + [ml2_type_gre] +... 
+tunnel_id_ranges = 1:1000 + + + In the [securitygroup] section, enable + security groups, enable ipset, and + configure the OVS iptables firewall + driver: + [securitygroup] +... +enable_security_group = True +enable_ipset = True +firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver + + + + + + To configure Compute to use Networking + By default, distribution packages configure Compute to use legacy + networking. You must reconfigure Compute to manage networks through + Networking. + + Edit the /etc/nova/nova.conf file and + complete the following actions: + + + In the [DEFAULT] section, configure + the APIs and drivers: + [DEFAULT] +... +network_api_class = nova.network.neutronv2.api.API +security_group_api = neutron +linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver +firewall_driver = nova.virt.firewall.NoopFirewallDriver + + By default, Compute uses an internal firewall service. + Since Networking includes a firewall service, you must + disable the Compute firewall service by using the + nova.virt.firewall.NoopFirewallDriver + firewall driver. + + + + In the [neutron] section, configure + access parameters: + [neutron] +... +url = http://controller:9696 +auth_strategy = keystone +admin_auth_url = http://controller:35357/v2.0 +admin_tenant_name = service +admin_username = neutron +admin_password = NEUTRON_PASS + Replace NEUTRON_PASS with the + password you chose for the neutron user + in the Identity service. + + + + + + To finalize installation + + The Networking service initialization scripts expect a + symbolic link /etc/neutron/plugin.ini + pointing to the ML2 plug-in configuration file, + /etc/neutron/plugins/ml2/ml2_conf.ini. 
+ If this symbolic link does not exist, create it using the + following command: + # ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini + + + The Networking service initialization scripts expect the + variable NEUTRON_PLUGIN_CONF in the + /etc/sysconfig/neutron file to + reference the ML2 plug-in configuration file. Edit the + /etc/sysconfig/neutron file and add the + following: + NEUTRON_PLUGIN_CONF="/etc/neutron/plugins/ml2/ml2_conf.ini" + + + Populate the database: + # su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \ + --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade juno" neutron + + Database population occurs later for Networking because the + script requires complete server and plug-in configuration + files. + + + + Restart the Compute services: + # systemctl restart openstack-nova-api.service openstack-nova-scheduler.service \ + openstack-nova-conductor.service + On SLES: + # service openstack-nova-api restart +# service openstack-nova-scheduler restart +# service openstack-nova-conductor restart + On openSUSE: + # systemctl restart openstack-nova-api.service openstack-nova-scheduler.service \ + openstack-nova-conductor.service + # service nova-api restart +# service nova-scheduler restart +# service nova-conductor restart + + + Start the Networking service and configure it to start when the + system boots: + # systemctl enable neutron-server.service +# systemctl start neutron-server.service + On SLES: + # service openstack-neutron start +# chkconfig openstack-neutron on + On openSUSE: + # systemctl enable openstack-neutron.service +# systemctl start openstack-neutron.service + + + Restart the Networking service: + # service neutron-server restart + + + + Verify operation + + Perform these commands on the controller node. 
+ + + Source the admin credentials to gain access to + admin-only CLI commands: + $ source admin-openrc.sh + + + List loaded extensions to verify successful launch of the + neutron-server process: + $ neutron ext-list ++-----------------------+-----------------------------------------------+ +| alias | name | ++-----------------------+-----------------------------------------------+ +| security-group | security-group | +| l3_agent_scheduler | L3 Agent Scheduler | +| ext-gw-mode | Neutron L3 Configurable external gateway mode | +| binding | Port Binding | +| provider | Provider Network | +| agent | agent | +| quotas | Quota management support | +| dhcp_agent_scheduler | DHCP Agent Scheduler | +| l3-ha | HA Router extension | +| multi-provider | Multi Provider Network | +| external-net | Neutron external network | +| router | Neutron L3 Router | +| allowed-address-pairs | Allowed Address Pairs | +| extraroute | Neutron Extra Route | +| extra_dhcp_opt | Neutron Extra DHCP opts | +| dvr | Distributed Virtual Router | ++-----------------------+-----------------------------------------------+ + + +
diff --git a/doc/training-guides/basic-install-guide/section_neutron-initial-networks.xml b/doc/training-guides/basic-install-guide/section_neutron-initial-networks.xml index ba0031bc..fd48f1de 100644 --- a/doc/training-guides/basic-install-guide/section_neutron-initial-networks.xml +++ b/doc/training-guides/basic-install-guide/section_neutron-initial-networks.xml @@ -20,18 +20,18 @@ Initial networks -
External network - The external network typically provides internet access for - your instances. By default, this network only allows internet + The external network typically provides Internet access for + your instances. By default, this network only allows Internet access from instances using Network Address Translation (NAT). You can - enable internet access to individual instances + enable Internet access to individual instances using a floating IP address and suitable security group rules. The admin tenant owns this network because it provides external network @@ -43,12 +43,14 @@ To create the external network - Source the admin tenant credentials: + Source the admin credentials to gain access to + admin-only CLI commands: $ source admin-openrc.sh Create the network: - $ neutron net-create ext-net --shared --router:external=True + $ neutron net-create ext-net --shared --router:external True \ + --provider:physical_network external --provider:network_type flat Created a new network: +---------------------------+--------------------------------------+ | Field | Value | @@ -56,9 +58,9 @@ | admin_state_up | True | | id | 893aebb9-1c1e-48be-8908-6b947f3237b3 | | name | ext-net | -| provider:network_type | gre | -| provider:physical_network | | -| provider:segmentation_id | 1 | +| provider:network_type | flat | +| provider:physical_network | external | +| provider:segmentation_id | | | router:external | True | | shared | True | | status | ACTIVE | @@ -74,16 +76,6 @@ network node. You should specify an exclusive slice of this subnet for router and floating IP addresses to prevent interference with other devices on the external network. - Replace FLOATING_IP_START and - FLOATING_IP_END with the first and last - IP addresses of the range that you want to allocate for floating IP - addresses. Replace EXTERNAL_NETWORK_CIDR - with the subnet associated with the physical network. 
Replace - EXTERNAL_NETWORK_GATEWAY with the gateway - associated with the physical network, typically the ".1" IP address. - You should disable DHCP on this subnet because - instances do not connect directly to the external network and floating - IP addresses require manual assignment. To create a subnet on the external network @@ -91,6 +83,16 @@ $ neutron subnet-create ext-net --name ext-subnet \ --allocation-pool start=FLOATING_IP_START,end=FLOATING_IP_END \ --disable-dhcp --gateway EXTERNAL_NETWORK_GATEWAY EXTERNAL_NETWORK_CIDR + Replace FLOATING_IP_START and + FLOATING_IP_END with the first and last + IP addresses of the range that you want to allocate for floating IP + addresses. Replace EXTERNAL_NETWORK_CIDR + with the subnet associated with the physical network. Replace + EXTERNAL_NETWORK_GATEWAY with the gateway + associated with the physical network, typically the ".1" IP address. + You should disable DHCP on this subnet because + instances do not connect directly to the external network and + floating IP addresses require manual assignment. 
For example, using 203.0.113.0/24 with floating IP address range 203.0.113.101 to 203.0.113.200: @@ -130,41 +132,42 @@ To create the tenant network - Source the demo tenant credentials: + Source the demo credentials to gain access to + user-only CLI commands: $ source demo-openrc.sh Create the network: $ neutron net-create demo-net Created a new network: -+----------------+--------------------------------------+ -| Field | Value | -+----------------+--------------------------------------+ -| admin_state_up | True | -| id | ac108952-6096-4243-adf4-bb6615b3de28 | -| name | demo-net | -| shared | False | -| status | ACTIVE | -| subnets | | -| tenant_id | cdef0071a0194d19ac6bb63802dc9bae | -+----------------+--------------------------------------+ ++-----------------+--------------------------------------+ +| Field | Value | ++-----------------+--------------------------------------+ +| admin_state_up | True | +| id | ac108952-6096-4243-adf4-bb6615b3de28 | +| name | demo-net | +| router:external | False | +| shared | False | +| status | ACTIVE | +| subnets | | +| tenant_id | cdef0071a0194d19ac6bb63802dc9bae | ++-----------------+--------------------------------------+ Like the external network, your tenant network also requires a subnet attached to it. You can specify any valid subnet because the - architecture isolates tenant networks. Replace - TENANT_NETWORK_CIDR with the subnet - you want to associate with the tenant network. Replace - TENANT_NETWORK_GATEWAY with the gateway you - want to associate with this network, typically the ".1" IP address. By - default, this subnet will use DHCP so your instances can obtain IP - addresses. + architecture isolates tenant networks. By default, this subnet will + use DHCP so your instances can obtain IP addresses. 
To create a subnet on the tenant network Create the subnet: $ neutron subnet-create demo-net --name demo-subnet \ --gateway TENANT_NETWORK_GATEWAY TENANT_NETWORK_CIDR + Replace TENANT_NETWORK_CIDR with the + subnet you want to associate with the tenant network and + TENANT_NETWORK_GATEWAY with the gateway + you want to associate with it, typically the ".1" IP address. Example using 192.168.1.0/24: $ neutron subnet-create demo-net --name demo-subnet \ --gateway 192.168.1.1 192.168.1.0/24 @@ -207,6 +210,7 @@ | external_gateway_info | | | id | 635660ae-a254-4feb-8993-295aa9ec6418 | | name | demo-router | +| routes | | | status | ACTIVE | | tenant_id | cdef0071a0194d19ac6bb63802dc9bae | +-----------------------+--------------------------------------+ diff --git a/doc/training-guides/basic-install-guide/section_neutron-ml2-compute-node.xml b/doc/training-guides/basic-install-guide/section_neutron-ml2-compute-node.xml index bb18bb31..6b8cd34f 100644 --- a/doc/training-guides/basic-install-guide/section_neutron-ml2-compute-node.xml +++ b/doc/training-guides/basic-install-guide/section_neutron-ml2-compute-node.xml @@ -42,15 +42,6 @@ net.ipv4.conf.default.rp_filter=0 To configure the Networking common components The Networking common component configuration includes the authentication mechanism, message broker, and plug-in. - - Respond to prompts for database - management, Identity service credentials, service endpoint - registration, and message broker credentials. 
- Configure Networking to use the Identity service for authentication: diff --git a/doc/training-guides/basic-install-guide/section_neutron-ml2-controller-node.xml b/doc/training-guides/basic-install-guide/section_neutron-ml2-controller-node.xml index f6a562d8..65de0bea 100644 --- a/doc/training-guides/basic-install-guide/section_neutron-ml2-controller-node.xml +++ b/doc/training-guides/basic-install-guide/section_neutron-ml2-controller-node.xml @@ -74,16 +74,6 @@ IDENTIFIED BY 'NEUTRON_DBPASS'; The Networking server component configuration includes the database, authentication mechanism, message broker, topology change notifier, and plug-in. - - Respond to prompts for - database management, - Identity service - credentials, - service endpoint - registration, and - message broker - credentials. - During the installation, you will also be prompted for which Networking plug-in to use. This will automatically fill the diff --git a/doc/training-guides/basic-install-guide/section_neutron-ml2-network-node.xml b/doc/training-guides/basic-install-guide/section_neutron-ml2-network-node.xml index 84df468a..f8fd94f7 100644 --- a/doc/training-guides/basic-install-guide/section_neutron-ml2-network-node.xml +++ b/doc/training-guides/basic-install-guide/section_neutron-ml2-network-node.xml @@ -45,15 +45,6 @@ net.ipv4.conf.default.rp_filter=0 To configure the Networking common components The Networking common component configuration includes the authentication mechanism, message broker, and plug-in. - - Respond to prompts for database - management, Identity service credentials, service endpoint - registration, and message broker credentials. 
- Configure Networking to use the Identity service for authentication: diff --git a/doc/training-guides/basic-install-guide/section_neutron-network-node.xml b/doc/training-guides/basic-install-guide/section_neutron-network-node.xml new file mode 100644 index 00000000..1fd4d000 --- /dev/null +++ b/doc/training-guides/basic-install-guide/section_neutron-network-node.xml @@ -0,0 +1,550 @@ + +
+ Install and configure network node + The network node primarily handles internal and external routing + and DHCP services for virtual networks. + + To configure prerequisites + Before you install and configure OpenStack Networking, you + must configure certain kernel networking parameters. + + Edit the /etc/sysctl.conf file to + contain the following parameters: + net.ipv4.ip_forward=1 +net.ipv4.conf.all.rp_filter=0 +net.ipv4.conf.default.rp_filter=0 + + + Implement the changes: + # sysctl -p + + + + To install the Networking components + + # apt-get install neutron-plugin-ml2 neutron-plugin-openvswitch-agent \ + neutron-l3-agent neutron-dhcp-agent + # yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch + # zypper install --no-recommends openstack-neutron-openvswitch-agent openstack-neutron-l3-agent \ + openstack-neutron-dhcp-agent openstack-neutron-metadata-agent ipset + + SUSE does not use a separate ML2 plug-in package. + + + + + To install and configure the Networking components + + # apt-get install neutron-plugin-openvswitch-agent openvswitch-datapath-dkms \ + neutron-l3-agent neutron-dhcp-agent + + Debian does not use a separate ML2 plug-in package. + + + + Select the ML2 plug-in: + + + + + + + + + Selecting the ML2 plug-in also populates the + and + options in the + /etc/neutron/neutron.conf file with the + appropriate values. + + + + + To configure the Networking common components + The Networking common component configuration includes the + authentication mechanism, message broker, and plug-in. + + Edit the /etc/neutron/neutron.conf file + and complete the following actions: + + + In the [database] section, comment out + any connection options because network nodes + do not directly access the database. + + + In the [DEFAULT] section, configure + RabbitMQ message broker access: + [DEFAULT] +... 
+rpc_backend = rabbit +rabbit_host = controller +rabbit_password = RABBIT_PASS + Replace RABBIT_PASS with the + password you chose for the guest account in + RabbitMQ. + + + In the [DEFAULT] and + [keystone_authtoken] sections, + configure Identity service access: + [DEFAULT] +... +auth_strategy = keystone + +[keystone_authtoken] +... +auth_uri = http://controller:5000/v2.0 +identity_uri = http://controller:35357 +admin_tenant_name = service +admin_user = neutron +admin_password = NEUTRON_PASS + Replace NEUTRON_PASS with the + password you chose for the neutron user in the + Identity service. + + Comment out any auth_host, + auth_port, and + auth_protocol options because the + identity_uri option replaces them. + + + + In the [DEFAULT] section, enable the + Modular Layer 2 (ML2) plug-in, router service, and overlapping + IP addresses: + [DEFAULT] +... +core_plugin = ml2 +service_plugins = router +allow_overlapping_ips = True + + + (Optional) To assist with troubleshooting, + enable verbose logging in the [DEFAULT] + section: + [DEFAULT] +... +verbose = True + + + + + + To configure the Modular Layer 2 (ML2) plug-in + The ML2 plug-in uses the + Open vSwitch (OVS) + mechanism (agent) to build the virtual networking framework for + instances. + + Edit the + /etc/neutron/plugins/ml2/ml2_conf.ini + file and complete the following actions: + + + In the [ml2] section, enable the + flat and + generic routing encapsulation (GRE) + network type drivers, GRE tenant networks, and the OVS + mechanism driver: + [ml2] +... +type_drivers = flat,gre +tenant_network_types = gre +mechanism_drivers = openvswitch + + + In the [ml2_type_flat] section, configure + the external network: + [ml2_type_flat] +... +flat_networks = external + + + In the [ml2_type_gre] section, configure + the tunnel identifier (id) range: + [ml2_type_gre] +... 
+tunnel_id_ranges = 1:1000 + + + In the [securitygroup] section, enable + security groups, enable ipset, and + configure the OVS iptables firewall + driver: + [securitygroup] +... +enable_security_group = True +enable_ipset = True +firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver + + + In the [ovs] section, configure the + Open vSwitch (OVS) agent: + [ovs] +... +local_ip = INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS +tunnel_type = gre +enable_tunneling = True +bridge_mappings = external:br-ex + Replace + INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS + with the IP address of the instance tunnels network interface + on your network node. + + + + + + To configure the Layer-3 (L3) agent + The Layer-3 (L3) agent provides + routing services for virtual networks. + + Edit the /etc/neutron/l3_agent.ini file + and complete the following actions: + + + In the [DEFAULT] section, configure + the driver, enable + network + namespaces, and configure the external + network bridge: + [DEFAULT] +... +interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver +use_namespaces = True +external_network_bridge = br-ex + + + (Optional) To assist with troubleshooting, + enable verbose logging in the [DEFAULT] + section: + [DEFAULT] +... +verbose = True + + + + + + To configure the DHCP agent + The DHCP agent provides DHCP + services for virtual networks. + + Edit the /etc/neutron/dhcp_agent.ini file + and complete the following actions: + + + In the [DEFAULT] section, configure + the drivers and enable namespaces: + [DEFAULT] +... +interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver +dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq +use_namespaces = True + + + (Optional) To assist with troubleshooting, + enable verbose logging in the [DEFAULT] + section: + [DEFAULT] +... 
+verbose = True + + + + + (Optional) + Tunneling protocols such as GRE include additional packet + headers that increase overhead and decrease space available for the + payload or user data. Without knowledge of the virtual network + infrastructure, instances attempt to send packets using the default + Ethernet maximum transmission unit (MTU) of + 1500 bytes. Internet protocol (IP) networks + contain the path MTU discovery (PMTUD) + mechanism to detect end-to-end MTU and adjust packet size + accordingly. However, some operating systems and networks block or + otherwise lack support for PMTUD causing performance degradation + or connectivity failure. + Ideally, you can prevent these problems by enabling + jumbo frames on the + physical network that contains your tenant virtual networks. + Jumbo frames support MTUs up to approximately 9000 bytes which + negates the impact of GRE overhead on virtual networks. However, + many network devices lack support for jumbo frames and OpenStack + administrators often lack control over network infrastructure. + Given the latter complications, you can also prevent MTU problems + by reducing the instance MTU to account for GRE overhead. + Determining the proper MTU value often takes experimentation, + but 1454 bytes works in most environments. You can configure the + DHCP server that assigns IP addresses to your instances to also + adjust the MTU. + + Some cloud images ignore the DHCP MTU option in which case + you should configure it using metadata, script, or other suitable + method. + + + + Edit the /etc/neutron/dhcp_agent.ini + file and complete the following action: + + + In the [DEFAULT] section, enable the + dnsmasq configuration file: + [DEFAULT] +... 
+dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf + + + + + Create and edit the + /etc/neutron/dnsmasq-neutron.conf file and + complete the following action: + + + Enable the DHCP MTU option (26) and configure it to + 1454 bytes: + dhcp-option-force=26,1454 + + + + + Kill any existing + dnsmasq processes: + # pkill dnsmasq + + + + + + To configure the metadata agent + The metadata agent + provides configuration information such as credentials to + instances. + + Edit the /etc/neutron/metadata_agent.ini + file and complete the following actions: + + + In the [DEFAULT] section, configure + access parameters: + [DEFAULT] +... +auth_url = http://controller:5000/v2.0 +auth_region = regionOne +admin_tenant_name = service +admin_user = neutron +admin_password = NEUTRON_PASS + Replace NEUTRON_PASS with the + password you chose for the neutron user in + the Identity service. + + + In the [DEFAULT] section, configure the + metadata host: + [DEFAULT] +... +nova_metadata_ip = controller + + + In the [DEFAULT] section, configure the + metadata proxy shared secret: + [DEFAULT] +... +metadata_proxy_shared_secret = METADATA_SECRET + Replace METADATA_SECRET with a + suitable secret for the metadata proxy. + + + (Optional) To assist with troubleshooting, + enable verbose logging in the [DEFAULT] + section: + [DEFAULT] +... +verbose = True + + + + + On the controller node, edit the + /etc/nova/nova.conf file and complete the + following action: + + + In the [neutron] section, enable the + metadata proxy and configure the secret: + [neutron] +... +service_metadata_proxy = True +metadata_proxy_shared_secret = METADATA_SECRET + Replace METADATA_SECRET with + the secret you chose for the metadata proxy. 
+ + + + + On the controller node, restart the + Compute API service: + # systemctl restart openstack-nova-api.service + On SLES: + # service openstack-nova-api restart + On openSUSE: + # systemctl restart openstack-nova-api.service + # service nova-api restart + + + + To configure the Open vSwitch (OVS) service + The OVS service provides the underlying virtual networking + framework for instances. The integration bridge + br-int handles internal instance network + traffic within OVS. The external bridge br-ex + handles external instance network traffic within OVS. The + external bridge requires a port on the physical external network + interface to provide instances with external network access. In + essence, this port connects the virtual and physical external + networks in your environment. + + Start the OVS service and configure it to start when the + system boots: + # systemctl enable openvswitch.service +# systemctl start openvswitch.service + On SLES: + # service openvswitch-switch start +# chkconfig openvswitch-switch on + On openSUSE: + # systemctl enable openvswitch.service +# systemctl start openvswitch.service + + + Restart the OVS service: + # service openvswitch-switch restart + + + Add the external bridge: + # ovs-vsctl add-br br-ex + + + Add a port to the external bridge that connects to the + physical external network interface: + Replace INTERFACE_NAME with the + actual interface name. For example, eth2 + or ens256. + # ovs-vsctl add-port br-ex INTERFACE_NAME + + Depending on your network interface driver, you may need + to disable generic receive offload + (GRO) to achieve suitable throughput between + your instances and the external network. 
+ To temporarily disable GRO on the external network + interface while testing your environment: + # ethtool -K INTERFACE_NAME gro off + + + + + To finalize the installation + + The Networking service initialization scripts expect a + symbolic link /etc/neutron/plugin.ini + pointing to the ML2 plug-in configuration file, + /etc/neutron/plugins/ml2/ml2_conf.ini. + If this symbolic link does not exist, create it using the + following command: + # ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini + Due to a packaging bug, the Open vSwitch agent initialization + script explicitly looks for the Open vSwitch plug-in configuration + file rather than a symbolic link + /etc/neutron/plugin.ini pointing to the ML2 + plug-in configuration file. Run the following commands to resolve this + issue: + # cp /usr/lib/systemd/system/neutron-openvswitch-agent.service \ + /usr/lib/systemd/system/neutron-openvswitch-agent.service.orig +# sed -i 's,plugins/openvswitch/ovs_neutron_plugin.ini,plugin.ini,g' \ + /usr/lib/systemd/system/neutron-openvswitch-agent.service + + + The Networking service initialization scripts expect the + variable NEUTRON_PLUGIN_CONF in the + /etc/sysconfig/neutron file to + reference the ML2 plug-in configuration file. Edit the + /etc/sysconfig/neutron file and add the + following: + NEUTRON_PLUGIN_CONF="/etc/neutron/plugins/ml2/ml2_conf.ini" + + + Start the Networking services and configure them to start + when the system boots: + # systemctl enable neutron-openvswitch-agent.service neutron-l3-agent.service \ + neutron-dhcp-agent.service neutron-metadata-agent.service \ + neutron-ovs-cleanup.service +# systemctl start neutron-openvswitch-agent.service neutron-l3-agent.service \ + neutron-dhcp-agent.service neutron-metadata-agent.service + + Do not explicitly start the + neutron-ovs-cleanup + service. 
+ + On SLES: + # service openstack-neutron-openvswitch-agent start +# service openstack-neutron-l3-agent start +# service openstack-neutron-dhcp-agent start +# service openstack-neutron-metadata-agent start +# chkconfig openstack-neutron-openvswitch-agent on +# chkconfig openstack-neutron-l3-agent on +# chkconfig openstack-neutron-dhcp-agent on +# chkconfig openstack-neutron-metadata-agent on +# chkconfig openstack-neutron-ovs-cleanup on + On openSUSE: + # systemctl enable openstack-neutron-openvswitch-agent.service openstack-neutron-l3-agent.service \ + openstack-neutron-dhcp-agent.service openstack-neutron-metadata-agent.service \ + openstack-neutron-ovs-cleanup.service +# systemctl start openstack-neutron-openvswitch-agent.service openstack-neutron-l3-agent.service \ + openstack-neutron-dhcp-agent.service openstack-neutron-metadata-agent.service + + Do not explicitly start the + openstack-neutron-ovs-cleanup + service. + + + + Restart the Networking services: + # service neutron-plugin-openvswitch-agent restart +# service neutron-l3-agent restart +# service neutron-dhcp-agent restart +# service neutron-metadata-agent restart + + + + Verify operation + + Perform these commands on the controller node. 
+ + + Source the admin credentials to gain access to + admin-only CLI commands: + $ source admin-openrc.sh + + + List agents to verify successful launch of the + neutron agents: + $ neutron agent-list ++--------------------------------------+--------------------+---------+-------+----------------+---------------------------+ +| id | agent_type | host | alive | admin_state_up | binary | ++--------------------------------------+--------------------+---------+-------+----------------+---------------------------+ +| 30275801-e17a-41e4-8f53-9db63544f689 | Metadata agent | network | :-) | True | neutron-metadata-agent | +| 4bd8c50e-7bad-4f3b-955d-67658a491a15 | Open vSwitch agent | network | :-) | True | neutron-openvswitch-agent | +| 756e5bba-b70f-4715-b80e-e37f59803d20 | L3 agent | network | :-) | True | neutron-l3-agent | +| 9c45473c-6d6d-4f94-8df1-ebd0b6838d5f | DHCP agent | network | :-) | True | neutron-dhcp-agent | ++--------------------------------------+--------------------+---------+-------+----------------+---------------------------+ + + +
diff --git a/doc/training-guides/basic-install-guide/section_neutron-ovs-network-node.xml b/doc/training-guides/basic-install-guide/section_neutron-ovs-network-node.xml index b8580b55..a20daee4 100644 --- a/doc/training-guides/basic-install-guide/section_neutron-ovs-network-node.xml +++ b/doc/training-guides/basic-install-guide/section_neutron-ovs-network-node.xml @@ -42,17 +42,6 @@ # zypper install openstack-neutron openstack-neutron-l3-agent \ openstack-neutron-dhcp-agent openstack-neutron-metadata-agent
- - Respond to prompts for database - management, [keystone_authtoken] - settings, RabbitMQ - credentials and API endpoint - registration. - Configure Networking agents to start at boot time: # for s in neutron-{dhcp,metadata,l3}-agent; do chkconfig $s on; done diff --git a/doc/training-guides/basic-install-guide/section_nova-compute-install.xml b/doc/training-guides/basic-install-guide/section_nova-compute-install.xml index 169eddc8..a7f445aa 100644 --- a/doc/training-guides/basic-install-guide/section_nova-compute-install.xml +++ b/doc/training-guides/basic-install-guide/section_nova-compute-install.xml @@ -22,23 +22,14 @@ To install and configure the Compute hypervisor components Install the packages: - # apt-get install nova-compute - # yum install openstack-nova-compute - # zypper install openstack-nova-compute genisoimage + # apt-get install nova-compute sysfsutils + # yum install openstack-nova-compute sysfsutils + # zypper install openstack-nova-compute genisoimage kvm Edit the /etc/nova/nova.conf file and complete the following actions: - - In the [database] section, configure - database access: - [database] -... -connection = mysql://nova:NOVA_DBPASS@controller/nova - Replace NOVA_DBPASS with the password - you chose for the Compute database. - In the [DEFAULT] section, configure RabbitMQ message broker access: @@ -52,32 +43,40 @@ rabbit_password = RABBIT_PASS RabbitMQ.
- In the [keystone_authtoken] section, + In the [DEFAULT] and + [keystone_authtoken] sections, configure Identity service access: - + [DEFAULT] +... +auth_strategy = keystone + [keystone_authtoken] ... auth_uri = http://controller:5000/v2.0 -auth_host = controller -auth_port = 35357 -auth_protocol = http +identity_uri = http://controller:35357 admin_tenant_name = service admin_user = nova admin_password = NOVA_PASS Replace NOVA_PASS with the password you chose for the nova user in the Identity service. + + Comment out any auth_host, + auth_port, and + auth_protocol options because the + identity_uri option replaces them. + In the [DEFAULT] section, configure the - my_ip key: + my_ip option: [DEFAULT] ... my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS Replace MANAGEMENT_INTERFACE_IP_ADDRESS with the IP address of the management network interface on your - first compute node, typically 10.0.0.31 in the + compute node, typically 10.0.0.31 for the first node in the example architecture. @@ -98,7 +97,7 @@ novncproxy_base_url = http://controller:6080/vnc_auto Replace MANAGEMENT_INTERFACE_IP_ADDRESS with the IP address of the management network interface on your - first compute node, typically 10.0.0.31 in the + compute node, typically 10.0.0.31 for the first node in the example architecture. @@ -110,11 +109,35 @@ novncproxy_base_url = http://controller:6080/vnc_auto
- In the [DEFAULT] section, configure the + In the [glance] section, configure the location of the Image Service: + [glance] +... +host = controller + + + (Optional) To assist with troubleshooting, + enable verbose logging in the [DEFAULT] section: [DEFAULT] ... -glance_host = controller +verbose = True + + +
+ + + + Ensure the kernel module nbd is + loaded. + # modprobe nbd + + + Ensure the module will be loaded on every boot. + On openSUSE by adding nbd in the + /etc/modules-load.d/nbd.conf file. + On SLES by adding or modifying the following line in the + /etc/sysconfig/kernel file. + MODULES_LOADED_ON_BOOT = "nbd" @@ -125,16 +148,6 @@ glance_host = controller Install the packages: # apt-get install nova-compute - - Respond to the prompts for - database management, - Identity service - credentials, - service endpoint - registration, and - message broker - credentials.. -
To finalize installation @@ -159,21 +172,6 @@ glance_host = controller [libvirt] ... virt_type = qemu - - On Ubuntu 12.04, kernels backported from newer releases may - not automatically load the KVM modules for hardware acceleration - when the compute node boots. In this case, launching an instance - will fail with the following message in the - /var/log/nova/nova-compute.log file: - libvirtError: internal error: no supported architecture for os type 'hvm' - As a workaround for this issue, you must add the appropriate - module for your compute node to the - /etc/modules file. - For systems with Intel processors: - # echo 'kvm_intel' >> /etc/modules - For systems with AMD processors: - # echo 'kvm_amd' >> /etc/modules - @@ -184,36 +182,24 @@ virt_type = qemu Start the Compute service including its dependencies and configure them to start automatically when the system boots: - - - For RHEL, CentOS, and compatible derivatives: - # service libvirtd start -# service messagebus start -# service openstack-nova-compute start -# chkconfig libvirtd on -# chkconfig messagebus on -# chkconfig openstack-nova-compute on - - - For Fedora: - # service libvirtd start -# service dbus start -# service openstack-nova-compute start -# chkconfig libvirtd on -# chkconfig dbus on -# chkconfig openstack-nova-compute on - - - # service libvirtd start + # systemctl enable libvirtd.service openstack-nova-compute.service +# systemctl start libvirtd.service +# systemctl start openstack-nova-compute.service + On SLES: + # service libvirtd start # chkconfig libvirtd on # service openstack-nova-compute start # chkconfig openstack-nova-compute on + On openSUSE: + # systemctl enable libvirtd.service openstack-nova-compute.service +# systemctl start libvirtd.service +# systemctl start openstack-nova-compute.service By default, the Ubuntu packages create an SQLite database. 
Because this configuration uses a SQL database server, you can remove the SQLite database file: - # rm /var/lib/nova/nova.sqlite + # rm -f /var/lib/nova/nova.sqlite
diff --git a/doc/training-guides/basic-install-guide/section_nova-controller-install.xml b/doc/training-guides/basic-install-guide/section_nova-controller-install.xml index 649913dc..9f4b34c5 100644 --- a/doc/training-guides/basic-install-guide/section_nova-controller-install.xml +++ b/doc/training-guides/basic-install-guide/section_nova-controller-install.xml @@ -21,19 +21,20 @@ Create the nova database: - mysql> CREATE DATABASE nova; + CREATE DATABASE nova; Grant proper access to the nova database: - mysql> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS'; -mysql> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS'; + GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' \ + IDENTIFIED BY 'NOVA_DBPASS'; +GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \ + IDENTIFIED BY 'NOVA_DBPASS'; Replace NOVA_DBPASS with a suitable password. - Exit the database access client: - mysql> exit + Exit the database access client. @@ -48,30 +49,62 @@ Create the nova user: - $ keystone user-create --name=nova --pass=NOVA_PASS --email=EMAIL_ADDRESS + $ keystone user-create --name nova --pass NOVA_PASS ++----------+----------------------------------+ +| Property | Value | ++----------+----------------------------------+ +| email | | +| enabled | True | +| id | 387dd4f7e46d4f72965ee99c76ae748c | +| name | nova | +| username | nova | ++----------+----------------------------------+ Replace NOVA_PASS with a suitable - password and EMAIL_ADDRESS with - a suitable e-mail address. + password. Link the nova user to the service tenant and admin role: - $ keystone user-role-add --user=nova --tenant=service --role=admin + $ keystone user-role-add --user nova --tenant service --role admin + + This command provides no output. 
+ Create the nova service: - $ keystone service-create --name=nova --type=compute --description="OpenStack Compute" + $ keystone service-create --name nova --type compute \ + --description "OpenStack Compute" ++-------------+----------------------------------+ +| Property | Value | ++-------------+----------------------------------+ +| description | OpenStack Compute | +| enabled | True | +| id | 6c7854f52ce84db795557ebc0373f6b9 | +| name | nova | +| type | compute | ++-------------+----------------------------------+ Create the Compute service endpoints: $ keystone endpoint-create \ - --service-id=$(keystone service-list | awk '/ compute / {print $2}') \ - --publicurl=http://controller:8774/v2/%\(tenant_id\)s \ - --internalurl=http://controller:8774/v2/%\(tenant_id\)s \ - --adminurl=http://controller:8774/v2/%\(tenant_id\)s + --service-id $(keystone service-list | awk '/ compute / {print $2}') \ + --publicurl http://controller:8774/v2/%\(tenant_id\)s \ + --internalurl http://controller:8774/v2/%\(tenant_id\)s \ + --adminurl http://controller:8774/v2/%\(tenant_id\)s \ + --region regionOne ++-------------+-----------------------------------------+ +| Property | Value | ++-------------+-----------------------------------------+ +| adminurl | http://controller:8774/v2/%(tenant_id)s | +| id | c397438bd82c41198ec1a9d85cb7cc74 | +| internalurl | http://controller:8774/v2/%(tenant_id)s | +| publicurl | http://controller:8774/v2/%(tenant_id)s | +| region | regionOne | +| service_id | 6c7854f52ce84db795557ebc0373f6b9 | ++-------------+-----------------------------------------+ @@ -83,9 +116,9 @@ # yum install openstack-nova-api openstack-nova-cert openstack-nova-conductor \ openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler \ python-novaclient - # zypper install openstack-nova-api openstack-nova-scheduler \ - openstack-nova-cert openstack-nova-conductor openstack-nova-console \ - openstack-nova-consoleauth openstack-nova-novncproxy python-novaclient + 
# zypper install openstack-nova-api openstack-nova-scheduler openstack-nova-cert \ + openstack-nova-conductor openstack-nova-consoleauth openstack-nova-novncproxy \ + python-novaclient iptables Edit the /etc/nova/nova.conf file and @@ -113,25 +146,33 @@ rabbit_password = RABBIT_PASS RabbitMQ. - In the [keystone_authtoken] section, + In the [DEFAULT] and + [keystone_authtoken] sections, configure Identity service access: - + [DEFAULT] +... +auth_strategy = keystone + [keystone_authtoken] ... auth_uri = http://controller:5000/v2.0 -auth_host = controller -auth_port = 35357 -auth_protocol = http +identity_uri = http://controller:35357 admin_tenant_name = service admin_user = nova admin_password = NOVA_PASS Replace NOVA_PASS with the password you chose for the nova user in the Identity service. + + Comment out any auth_host, + auth_port, and + auth_protocol options because the + identity_uri option replaces them. + In the [DEFAULT] section, configure the - my_ip key to use the management interface IP + my_ip option to use the management interface IP address of the controller node: [DEFAULT] ... @@ -147,11 +188,18 @@ vncserver_listen = 10.0.0.11 vncserver_proxyclient_address = 10.0.0.11 - In the [DEFAULT] section, configure the + In the [glance] section, configure the location of the Image Service: + [glance] +... +host = controller + + + (Optional) To assist with troubleshooting, + enable verbose logging in the [DEFAULT] section: [DEFAULT] ... -glance_host = controller +verbose = True @@ -167,16 +215,6 @@ glance_host = controller # apt-get install nova-api nova-cert nova-conductor nova-consoleauth \ nova-novncproxy nova-scheduler python-novaclient - - Respond to prompts for - database management, - Identity service - credentials, - service endpoint - registration, and - message broker - credentials. 
- Edit the /etc/nova/nova.conf file and complete the following actions: @@ -207,7 +245,14 @@ vncserver_proxyclient_address = 10.0.0.11 Start the Compute services and configure them to start when the system boots: - # service openstack-nova-api start + # systemctl enable openstack-nova-api.service openstack-nova-cert.service \ + openstack-nova-consoleauth.service openstack-nova-scheduler.service \ + openstack-nova-conductor.service openstack-nova-novncproxy.service +# systemctl start openstack-nova-api.service openstack-nova-cert.service \ + openstack-nova-consoleauth.service openstack-nova-scheduler.service \ + openstack-nova-conductor.service openstack-nova-novncproxy.service + On SLES: + # service openstack-nova-api start # service openstack-nova-cert start # service openstack-nova-consoleauth start # service openstack-nova-scheduler start @@ -219,12 +264,19 @@ vncserver_proxyclient_address = 10.0.0.11 # chkconfig openstack-nova-scheduler on # chkconfig openstack-nova-conductor on # chkconfig openstack-nova-novncproxy on + On openSUSE: + # systemctl enable openstack-nova-api.service openstack-nova-cert.service \ + openstack-nova-consoleauth.service openstack-nova-scheduler.service \ + openstack-nova-conductor.service openstack-nova-novncproxy.service +# systemctl start openstack-nova-api.service openstack-nova-cert.service \ + openstack-nova-consoleauth.service openstack-nova-scheduler.service \ + openstack-nova-conductor.service openstack-nova-novncproxy.service By default, the Ubuntu packages create an SQLite database. 
Because this configuration uses a SQL database server, you can remove the SQLite database file: - # rm /var/lib/nova/nova.sqlite + # rm -f /var/lib/nova/nova.sqlite diff --git a/doc/training-guides/basic-install-guide/section_nova-networking-compute-node.xml b/doc/training-guides/basic-install-guide/section_nova-networking-compute-node.xml index cdcea247..dc9c87b1 100644 --- a/doc/training-guides/basic-install-guide/section_nova-networking-compute-node.xml +++ b/doc/training-guides/basic-install-guide/section_nova-networking-compute-node.xml @@ -14,52 +14,22 @@ To install legacy networking components - # apt-get install nova-network nova-api-metadata + # apt-get install nova-network nova-api-metadata + # apt-get install nova-network nova-api # yum install openstack-nova-network openstack-nova-api # zypper install openstack-nova-network openstack-nova-api To configure legacy networking - - Run the following commands: - Replace INTERFACE_NAME with the - actual interface name for the external network. For example, - eth1 or ens224. 
- # openstack-config --set /etc/nova/nova.conf DEFAULT \ - network_api_class nova.network.api.API -# openstack-config --set /etc/nova/nova.conf DEFAULT \ - security_group_api nova -# openstack-config --set /etc/nova/nova.conf DEFAULT \ - network_manager nova.network.manager.FlatDHCPManager -# openstack-config --set /etc/nova/nova.conf DEFAULT \ - firewall_driver nova.virt.libvirt.firewall.IptablesFirewallDriver -# openstack-config --set /etc/nova/nova.conf DEFAULT \ - network_size 254 -# openstack-config --set /etc/nova/nova.conf DEFAULT \ - allow_same_net_traffic False -# openstack-config --set /etc/nova/nova.conf DEFAULT \ - multi_host True -# openstack-config --set /etc/nova/nova.conf DEFAULT \ - send_arp_for_ha True -# openstack-config --set /etc/nova/nova.conf DEFAULT \ - share_dhcp_address True -# openstack-config --set /etc/nova/nova.conf DEFAULT \ - force_dhcp_release True -# openstack-config --set /etc/nova/nova.conf DEFAULT \ - flat_network_bridge br100 -# openstack-config --set /etc/nova/nova.conf DEFAULT \ - flat_interface INTERFACE_NAME -# openstack-config --set /etc/nova/nova.conf DEFAULT \ - public_interface INTERFACE_NAME - - - Edit the /etc/nova/nova.conf file and add the - following keys to the [DEFAULT] section: - Replace INTERFACE_NAME with the - actual interface name for the external network. For example, - eth1 or ens224. - [DEFAULT] + + Edit the /etc/nova/nova.conf file and + complete the following actions: + + + In the [DEFAULT] section, configure + the network parameters: + [DEFAULT] ... network_api_class = nova.network.api.API security_group_api = nova @@ -74,6 +44,11 @@ force_dhcp_release = True flat_network_bridge = br100 flat_interface = INTERFACE_NAME public_interface = INTERFACE_NAME + Replace INTERFACE_NAME with the + actual interface name for the external network. For example, + eth1 or ens224. 
+ + Restart the services: @@ -81,14 +56,16 @@ public_interface = INTERFACE_NAME # service nova-api-metadata restart Start the services and configure them to start when the system boots: - # service openstack-nova-network start -# service openstack-nova-metadata-api start -# chkconfig openstack-nova-network on -# chkconfig openstack-nova-metadata-api on - # service openstack-nova-network start + # systemctl enable openstack-nova-network.service openstack-nova-metadata-api.service +# systemctl start openstack-nova-network.service openstack-nova-metadata-api.service + On SLES: + # service openstack-nova-network start # service openstack-nova-api-metadata start # chkconfig openstack-nova-network on # chkconfig openstack-nova-api-metadata on + On openSUSE: + # systemctl enable openstack-nova-network.service openstack-nova-metadata-api.service +# systemctl start openstack-nova-network.service openstack-nova-metadata-api.service diff --git a/doc/training-guides/basic-install-guide/section_nova-networking-controller-node.xml b/doc/training-guides/basic-install-guide/section_nova-networking-controller-node.xml index 30dd821d..94c9a092 100644 --- a/doc/training-guides/basic-install-guide/section_nova-networking-controller-node.xml +++ b/doc/training-guides/basic-install-guide/section_nova-networking-controller-node.xml @@ -5,30 +5,36 @@ version="5.0" xml:id="nova-networking-controller-node"> Configure controller node - Legacy networking primarily involves compute nodes. However, you must - configure the controller node to use it. + Legacy networking primarily involves compute nodes. However, + you must configure the controller node to use legacy + networking. 
To configure legacy networking - - Run the following commands: - # openstack-config --set /etc/nova/nova.conf DEFAULT \ - network_api_class nova.network.api.API -# openstack-config --set /etc/nova/nova.conf DEFAULT \ - security_group_api nova - - - Edit the /etc/nova/nova.conf file and add the - following keys to the [DEFAULT] section: - [DEFAULT] + + Edit the /etc/nova/nova.conf file and + complete the following actions: + + + In the [DEFAULT] section, configure + the network and security group APIs: + [DEFAULT] ... network_api_class = nova.network.api.API security_group_api = nova + + Restart the Compute services: - # service openstack-nova-api restart + # systemctl restart openstack-nova-api.service openstack-nova-scheduler.service \ + openstack-nova-conductor.service + On SLES: + # service openstack-nova-api restart # service openstack-nova-scheduler restart # service openstack-nova-conductor restart + On openSUSE: + # systemctl restart openstack-nova-api.service openstack-nova-scheduler.service \ + openstack-nova-conductor.service # service nova-api restart # service nova-scheduler restart # service nova-conductor restart diff --git a/doc/training-guides/basic-install-guide/section_nova-networking-initial-network.xml b/doc/training-guides/basic-install-guide/section_nova-networking-initial-network.xml index 23c7702b..8657553e 100644 --- a/doc/training-guides/basic-install-guide/section_nova-networking-initial-network.xml +++ b/doc/training-guides/basic-install-guide/section_nova-networking-initial-network.xml @@ -7,8 +7,8 @@ Create initial network Before launching your first instance, you must create the necessary virtual network infrastructure to which the instance will connect. - This network typically provides internet access - from instances. You can enable internet access + This network typically provides Internet access + from instances. You can enable Internet access to individual instances using a floating IP address and suitable security group rules. 
The admin diff --git a/doc/training-guides/basic-install-guide/section_nova-verify.xml b/doc/training-guides/basic-install-guide/section_nova-verify.xml index 1c6ed7c6..2c36b826 100644 --- a/doc/training-guides/basic-install-guide/section_nova-verify.xml +++ b/doc/training-guides/basic-install-guide/section_nova-verify.xml @@ -20,15 +20,15 @@ List service components to verify successful launch of each process: $ nova service-list -+------------------+-------------+----------+---------+-------+----------------------------+-----------------+ -| Binary | Host | Zone | Status | State | Updated_at | Disabled Reason | -+------------------+-------------+----------+---------+-------+----------------------------+-----------------+ -| nova-cert | controller | internal | enabled | up | 2014-06-29T22:23:16.000000 | - | -| nova-consoleauth | controller | internal | enabled | up | 2014-06-29T22:23:10.000000 | - | -| nova-scheduler | controller | internal | enabled | up | 2014-06-29T22:23:14.000000 | - | -| nova-conductor | controller | internal | enabled | up | 2014-06-29T22:23:11.000000 | - | -| nova-compute | compute1 | nova | enabled | up | 2014-06-29T22:23:11.000000 | - | -+------------------+-------------+----------+---------+-------+----------------------------+-----------------+ ++----+------------------+------------+----------+---------+-------+----------------------------+-----------------+ +| Id | Binary | Host | Zone | Status | State | Updated_at | Disabled Reason | ++----+------------------+------------+----------+---------+-------+----------------------------+-----------------+ +| 1 | nova-conductor | controller | internal | enabled | up | 2014-09-16T23:54:02.000000 | - | +| 2 | nova-consoleauth | controller | internal | enabled | up | 2014-09-16T23:54:04.000000 | - | +| 3 | nova-scheduler | controller | internal | enabled | up | 2014-09-16T23:54:07.000000 | - | +| 4 | nova-cert | controller | internal | enabled | up | 2014-09-16T23:54:00.000000 | - | +| 5 | 
nova-compute | compute1 | nova | enabled | up | 2014-09-16T23:54:06.000000 | - | ++----+------------------+------------+----------+---------+-------+----------------------------+-----------------+ This output should indicate four components enabled on the controller node and one component enabled on the compute node. @@ -41,7 +41,7 @@ +--------------------------------------+---------------------+--------+--------+ | ID | Name | Status | Server | +--------------------------------------+---------------------+--------+--------+ -| acafc7c0-40aa-4026-9673-b879898e1fc2 | cirros-0.3.2-x86_64 | ACTIVE | | +| acafc7c0-40aa-4026-9673-b879898e1fc2 | cirros-0.3.3-x86_64 | ACTIVE | | +--------------------------------------+---------------------+--------+--------+ diff --git a/doc/training-guides/basic-install-guide/section_sahara-install.xml b/doc/training-guides/basic-install-guide/section_sahara-install.xml new file mode 100644 index 00000000..a1f88f11 --- /dev/null +++ b/doc/training-guides/basic-install-guide/section_sahara-install.xml @@ -0,0 +1,97 @@ + +
+ Install the Data processing service + This procedure installs the Data processing service (sahara) on the + controller node. + To install the Data processing service on the controller: + + + Install required packages: + # yum install openstack-sahara python-saharaclient + # zypper install openstack-sahara python-saharaclient + + + You need to install required packages. For now, sahara + doesn't have packages for Ubuntu and Debian. + Documentation will be updated once packages are available. The rest + of this document assumes that you have sahara service packages + installed on the system. + + + Edit /etc/sahara/sahara.conf configuration file + + First, edit parameter in + the [database] section. The URL provided here + should point to an empty database. For instance, connection + string for MySQL database will be: + connection = mysql://sahara:SAHARA_DBPASS@controller/sahara + + Switch to the [keystone_authtoken] + section. The parameter should point to + the public Identity API endpoint. + should point to the admin Identity API endpoint. For example: + auth_uri = http://controller:5000/v2.0 +identity_uri = http://controller:35357 + + Next specify admin_user, + admin_password and + admin_tenant_name. These parameters must specify + a keystone user which has the admin role in the + given tenant. These credentials allow sahara to authenticate and + authorize its users. + + Switch to the [DEFAULT] section. + Proceed to the networking parameters. If you are using Neutron + for networking, then set use_neutron=true. + Otherwise if you are using nova-network set + the given parameter to false. + + That should be enough for the first run. If you want to + increase logging level for troubleshooting, there are two parameters + in the config: verbose and + debug. If the former is set to + true, sahara will + start to write logs of INFO level and above. If + debug is set to + true, sahara will write all the logs, including + the DEBUG ones. 
+ + + + If you use the Data processing service with MySQL database, + then for storing big job binaries in sahara internal database you must + configure size of max allowed packet. Edit my.cnf + file and change parameter: + [mysqld] +max_allowed_packet = 256M + and restart MySQL server. + + Create database schema: + # sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head + + You must register the Data processing service with the Identity + service so that other OpenStack services can locate it. Register the + service and specify the endpoint: + $ keystone service-create --name sahara --type data_processing \ + --description "Data processing service" +$ keystone endpoint-create \ + --service-id $(keystone service-list | awk '/ sahara / {print $2}') \ + --publicurl http://controller:8386/v1.1/%\(tenant_id\)s \ + --internalurl http://controller:8386/v1.1/%\(tenant_id\)s \ + --adminurl http://controller:8386/v1.1/%\(tenant_id\)s \ + --region regionOne + + Start the sahara service: + # systemctl start openstack-sahara-all + # service openstack-sahara-all start + + (Optional) Enable the Data processing service to start on boot + # systemctl enable openstack-sahara-all + # chkconfig openstack-sahara-all on + + +
diff --git a/doc/training-guides/basic-install-guide/section_sahara-verify.xml b/doc/training-guides/basic-install-guide/section_sahara-verify.xml new file mode 100644 index 00000000..03440c43 --- /dev/null +++ b/doc/training-guides/basic-install-guide/section_sahara-verify.xml @@ -0,0 +1,26 @@ + +
+ Verify the Data processing service installation + To verify that the Data processing service (sahara) is installed and + configured correctly, try requesting clusters list using sahara + client. + + + Source the demo tenant credentials: + $ source demo-openrc.sh + + + Retrieve sahara clusters list: + $ sahara cluster-list + You should see output similar to this: + +------+----+--------+------------+ +| name | id | status | node_count | ++------+----+--------+------------+ ++------+----+--------+------------+ + + +
diff --git a/doc/training-guides/basic-install-guide/section_trove-install.xml b/doc/training-guides/basic-install-guide/section_trove-install.xml index d174c684..2a462b56 100644 --- a/doc/training-guides/basic-install-guide/section_trove-install.xml +++ b/doc/training-guides/basic-install-guide/section_trove-install.xml @@ -13,26 +13,25 @@ OpenStack environment with at least the following components installed: Compute, Image Service, Identity. - - Ubuntu 14.04 Only - The Database module is only available under Ubuntu 14.04. - Packages are not available for 12.04, or via the Ubuntu Cloud - Archive. - + + + If you want to do backup and restore, you also need Object Storage. + + + If you want to provision datastores on block-storage volumes, you also need Block Storage. + + To install the Database module on the controller: Install required packages: - # apt-get install python-trove python-troveclient python-glanceclient \ - trove-common trove-api trove-taskmanager + # apt-get install python-trove python-troveclient \ + trove-common trove-api trove-taskmanager trove-conductor # yum install openstack-trove python-troveclient # zypper install openstack-trove python-troveclient - Respond to the prompts for database management and - [keystone_authtoken] settings, + Respond to the prompts for database management and [keystone_authtoken] settings, and API endpoint registration. The trove-manage db_sync command runs automatically. @@ -51,27 +50,38 @@ service tenant and give the user the admin role: - $ keystone user-create --name=trove --pass=TROVE_PASS \ - --email=trove@example.com -$ keystone user-role-add --user=trove --tenant=service --role=admin + $ keystone user-create --name trove --pass TROVE_PASS +$ keystone user-role-add --user trove --tenant service --role admin + Replace TROVE_PASS with a + suitable password. - Edit the following configuration files, taking the below + All configuration files should be placed at /etc/trove directory. 
+ Edit the following configuration files, taking the below actions for each file: + api-paste.ini trove.conf trove-taskmanager.conf trove-conductor.conf - + + You need to take upstream api-paste.ini and change content below in it: + [composite:trove] +auth_uri = http://controller:5000/v2.0 +identity_uri = http://controller:35357 +auth_host = controller +admin_tenant_name = service +admin_user = trove +admin_password = TROVE_PASS Edit the [DEFAULT] section of - each file and set appropriate values for the OpenStack service - URLs, logging and messaging configuration, and SQL + each file (except api-paste.ini) and set appropriate values for the OpenStack service + URLs (can be handled by Keystone service catalog), logging and messaging configuration, and SQL connections: [DEFAULT] log_dir = /var/log/trove @@ -83,74 +93,38 @@ sql_connection = mysql://trove:TROVE_DBPASS@cont notifier_queue_hostname = controller - + Configure the Database module to use the RabbitMQ message broker by - setting the rabbit_password in the [DEFAULT] + setting the following options in the [DEFAULT] configuration group of each file: [DEFAULT] -... +control_exchange = trove +rabbit_host = controller +rabbit_userid = guest rabbit_password = RABBIT_PASS -... 
+rabbit_virtual_host= / +rpc_backend = trove.openstack.common.rpc.impl_kombu - - Set these configuration keys to configure the Database module to use - the RabbitMQ message broker: - # openstack-config --set /etc/trove/trove.conf \ - DEFAULT rpc_backend rabbit -# openstack-config --set /etc/trove/trove-taskmanager.conf \ - DEFAULT rpc_backend rabbit -# openstack-config --set /etc/trove/trove-conductor.conf \ - DEFAULT rpc_backend rabbit -# openstack-config --set /etc/trove/trove.conf DEFAULT \ - rabbit_host controller -# openstack-config --set /etc/trove/trove-taskmanager.conf DEFAULT \ - rabbit_host controller -# openstack-config --set /etc/trove/trove-conductor.conf DEFAULT \ - rabbit_host controller -# openstack-config --set /etc/trove/trove.conf DEFAULT \ - rabbit_password RABBIT_PASS -# openstack-config --set /etc/trove/trove-taskmanager.conf DEFAULT \ - rabbit_password RABBIT_PASS -# openstack-config --set /etc/trove/trove-conductor.conf DEFAULT \ - rabbit_password RABBIT_PASS - - - Edit the [filter:authtoken] section - of the api-paste.ini file so it matches the - listing shown below: - [filter:authtoken] -auth_host = controller -auth_port = 35357 -auth_protocol = http -admin_user = trove -admin_password = ADMIN_PASS -admin_token = ADMIN_TOKEN -admin_tenant_name = service -signing_dir = /var/cache/trove - - Edit the trove.conf file so it includes appropriate values for the default datastore and network label regex as shown below: [DEFAULT] -default_datastore = mysql -.... # Config option for showing the IP address that nova doles out add_addresses = True network_label_regex = ^NETWORK_LABEL$ -.... +control_exchange = trove + Edit the trove-taskmanager.conf file - so it includes the appropriate service credentials required to + so it includes the required settings to connect to the OpenStack Compute service as shown below: [DEFAULT] -.... # Configuration options for talking to nova via the novaclient. 
# These options are for an admin user in your keystone config. # It proxy's the token received from the user to send to nova via this admin users creds, @@ -158,15 +132,19 @@ network_label_regex = ^NETWORK_LABEL$ nova_proxy_admin_user = admin nova_proxy_admin_pass = ADMIN_PASS nova_proxy_admin_tenant_name = service -... +taskmanager_manager = trove.taskmanager.manager.Manager +log_file=trove-taskmanager.log + Prepare the trove admin database: $ mysql -u root -p mysql> CREATE DATABASE trove; -mysql> GRANT ALL PRIVILEGES ON trove.* TO trove@'localhost' IDENTIFIED BY 'TROVE_DBPASS'; -mysql> GRANT ALL PRIVILEGES ON trove.* TO trove@'%' IDENTIFIED BY 'TROVE_DBPASS'; +mysql> GRANT ALL PRIVILEGES ON trove.* TO trove@'localhost' \ +IDENTIFIED BY 'TROVE_DBPASS'; +mysql> GRANT ALL PRIVILEGES ON trove.* TO trove@'%' \ +IDENTIFIED BY 'TROVE_DBPASS'; @@ -174,7 +152,7 @@ nova_proxy_admin_tenant_name = service Initialize the database: - # su -s /bin/sh -c "trove-manage db_sync" trove + # trove-manage db_sync Create a datastore. You need to create a separate datastore for @@ -184,12 +162,6 @@ nova_proxy_admin_tenant_name = service - - Create a datastore. You need to create a separate datastore for - each type of database you want to use, for example, MySQL, MongoDB, Cassandra. - This example shows you how to create a datastore for a MySQL database: - # su -s /bin/sh -c "trove-manage datastore_update mysql ''" trove - Create a trove image. Create an image for the type of database you want to use, @@ -209,48 +181,70 @@ rabbit_password = RABBIT_PASS nova_proxy_admin_user = admin nova_proxy_admin_pass = ADMIN_PASS nova_proxy_admin_tenant_name = service -trove_auth_url = http://controller:35357/v2.0 +trove_auth_url = http://controller:35357/v2.0 +log_file = trove-guestagent.log - - Update the datastore to use the new image, using the - trove-manage command. 
- This example shows you how to create a MySQL 5.5 datastore: - # trove-manage --config-file=/etc/trove/trove.conf datastore_version_update \ - mysql mysql-5.5 mysql glance_image_ID mysql-server-5.5 1 + Update the datastore and version to use the specific image with the trove-manage command. + #trove-manage datastore_update datastore_name datastore_version + #trove-manage datastore_version_update datastore_name version_name \ + datastore_manager glance_image_id packages active + This example shows you how to create a MySQL datastore with version 5.5: + #trove-manage datastore_update mysql '' + #trove-manage datastore_version_update mysql 5.5 mysql glance_image_ID mysql-server-5.5 1 + #trove-manage datastore_update mysql 5.5 + + + Upload post-provisioning configuration validation rules: + + #trove-manage db_load_datastore_config_parameters datastore_name version_name \ + /etc/datastore_name/validation-rules.json + Example for uploading rules for MySQL datastore: + # trove-manage db_load_datastore_config_parameters \ + mysql 5.5 "$PYBASEDIR"/trove/templates/mysql/validation-rules.json + + You must register the Database module with the Identity service so that other OpenStack services can locate it. 
Register the service and specify the endpoint: - $ keystone service-create --name=trove --type=database \ - --description="OpenStack Database Service" + $ keystone service-create --name trove --type database \ + --description "OpenStack Database Service" $ keystone endpoint-create \ - --service-id=$(keystone service-list | awk '/ trove / {print $2}') \ - --publicurl=http://controller:8779/v1.0/%\(tenant_id\)s \ - --internalurl=http://controller:8779/v1.0/%\(tenant_id\)s \ - --adminurl=http://controller:8779/v1.0/%\(tenant_id\)s + --service-id $(keystone service-list | awk '/ trove / {print $2}') \ + --publicurl http://controller:8779/v1.0/%\(tenant_id\)s \ + --internalurl http://controller:8779/v1.0/%\(tenant_id\)s \ + --adminurl http://controller:8779/v1.0/%\(tenant_id\)s \ + --region regionOne - - - Start Database - services and configure them to start when the system - boots: - Restart Database services: - - # service trove-api restart + + Restart the Database services: + # service trove-api restart # service trove-taskmanager restart # service trove-conductor restart - - # service openstack-trove-api start + + + Start the Database services and configure them to start when the + system boots: + # systemctl enable openstack-trove-api.service openstack-trove-taskmanager.service \ + openstack-trove-conductor.service +# systemctl start openstack-trove-api.service openstack-trove-taskmanager.service \ + openstack-trove-conductor.service + On SLES: + # service openstack-trove-api start # service openstack-trove-taskmanager start # service openstack-trove-conductor start # chkconfig openstack-trove-api on # chkconfig openstack-trove-taskmanager on # chkconfig openstack-trove-conductor on + On openSUSE: + # systemctl enable openstack-trove-api.service openstack-trove-taskmanager.service \ + openstack-trove-conductor.service +# systemctl start openstack-trove-api.service openstack-trove-taskmanager.service \ + openstack-trove-conductor.service diff --git 
a/doc/training-guides/basic-install-guide/section_trove-verify.xml b/doc/training-guides/basic-install-guide/section_trove-verify.xml index 7c40f016..1cfe1169 100644 --- a/doc/training-guides/basic-install-guide/section_trove-verify.xml +++ b/doc/training-guides/basic-install-guide/section_trove-verify.xml @@ -31,7 +31,7 @@ This example shows you how to create a MySQL 5.5 database: - $ trove create name 2 --size=2 --databases=DBNAME \ + $ trove create name 2 --size=2 --databases DBNAME \ --users USER:PASSWORD --datastore_version mysql-5.5 \ --datastore mysql diff --git a/doc/training-guides/bk_preface.xml b/doc/training-guides/bk_preface.xml index f596478a..28a9a3ab 100644 --- a/doc/training-guides/bk_preface.xml +++ b/doc/training-guides/bk_preface.xml @@ -10,7 +10,7 @@ - + OpenStack Training Guides Are Under Construction We need your help! This is a community driven project to provide the user group community access to OpenStack training materials. We cannot make this work without your help. diff --git a/doc/training-guides/st-training-guides.xml b/doc/training-guides/st-training-guides.xml index 861e2687..74d7c270 100644 --- a/doc/training-guides/st-training-guides.xml +++ b/doc/training-guides/st-training-guides.xml @@ -99,12 +99,10 @@ - + - -