Merge "Remove old install scripts"
commit 3804090879
@@ -1,48 +0,0 @@
#!/bin/sh
#
# About: Set up dependencies for VirtualBox sandbox meant for OpenStack Labs.
#
# Contact: pranav@aptira.com
# Copyright: Aptira (@aptira, aptira.com)
# License: Apache Software License (ASL) 2.0
###############################################################################
#                                                                             #
# This script will install Cinder related packages and, after installation,  #
# it will configure Cinder and populate the database.                        #
#                                                                             #
###############################################################################

# Note: No Internet access required -- packages downloaded by PreInstall.sh
echo "Internet connection is not required for this script to run"
SCRIPT_DIR=$(cd $(dirname "$0") && pwd)

install_cinder() {

# 1. Install Cinder
apt-get install -y cinder-api cinder-scheduler cinder-volume iscsitarget open-iscsi iscsitarget-dkms

# 2. Configure iscsi services
sed -i 's/false/true/g' /etc/default/iscsitarget

# 3. Restart the services
service iscsitarget start
service open-iscsi start

# 4. Install the templates
cp --no-preserve=mode,ownership "$SCRIPT_DIR/Templates/api-paste.ini" /etc/cinder/api-paste.ini
cp --no-preserve=mode,ownership "$SCRIPT_DIR/Templates/cinder.conf" /etc/cinder/cinder.conf

# 5. MySQL database
cinder-manage db sync

# 6. Format the disks -- see if something else is available instead of
# fdisk
bash format_volumes # Need expert advice on this ....

pvcreate /dev/sdb
vgcreate cinder-volumes /dev/sdb

# 7. Restart Cinder related services
for i in /etc/init.d/cinder-*; do "$i" restart; done
}
install_cinder
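The pvcreate/vgcreate steps above hand /dev/sdb to LVM as the backing store for cinder-volume. A quick sanity check of the result -- a hypothetical verification step, not part of the removed script -- could be:

    # Confirm the physical volume and the cinder-volumes group exist;
    # cinder-volume carves logical volumes out of this group.
    pvdisplay /dev/sdb
    vgs cinder-volumes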
@@ -1,62 +0,0 @@
#############
# OpenStack #
#############

[composite:osapi_volume]
use = call:cinder.api:root_app_factory
/: apiversions
/v1: openstack_volume_api_v1
/v2: openstack_volume_api_v2

[composite:openstack_volume_api_v1]
use = call:cinder.api.middleware.auth:pipeline_factory
noauth = faultwrap sizelimit noauth apiv1
keystone = faultwrap sizelimit authtoken keystonecontext apiv1
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext apiv1

[composite:openstack_volume_api_v2]
use = call:cinder.api.middleware.auth:pipeline_factory
noauth = faultwrap sizelimit noauth apiv2
keystone = faultwrap sizelimit authtoken keystonecontext apiv2
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext apiv2

[filter:faultwrap]
paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory

[filter:noauth]
paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory

[filter:sizelimit]
paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory

[app:apiv1]
paste.app_factory = cinder.api.v1.router:APIRouter.factory

[app:apiv2]
paste.app_factory = cinder.api.v2.router:APIRouter.factory

[pipeline:apiversions]
pipeline = faultwrap osvolumeversionapp

[app:osvolumeversionapp]
paste.app_factory = cinder.api.versions:Versions.factory

##########
# Shared #
##########

[filter:keystonecontext]
paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory

[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
service_protocol = http
service_host = 192.168.100.51
service_port = 5000
auth_host = 10.10.10.51
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = cinder
admin_password = service_pass
@@ -1,13 +0,0 @@
[DEFAULT]
rootwrap_config = /etc/cinder/rootwrap.conf
sql_connection = mysql://cinderUser:cinderPass@10.10.10.51/cinder
api_paste_config = /etc/cinder/api-paste.ini
iscsi_helper = ietadm
volume_name_template = volume-%s
volume_group = cinder-volumes
verbose = True
auth_strategy = keystone
#osapi_volume_listen_port = 5900
state_path = /var/lib/cinder
lock_path = /var/lock/cinder
volumes_dir = /var/lib/cinder/volumes
@@ -1,27 +0,0 @@
#!/bin/bash
#
# About: Set up dependencies for VirtualBox sandbox meant for OpenStack Labs.
#
# Contact: pranav@aptira.com
# Copyright: Aptira (@aptira, aptira.com)
# License: Apache Software License (ASL) 2.0
###############################################################################
#                                                                             #
# This script will configure Cinder volumes                                   #
#                                                                             #
###############################################################################

echo -n "Enter location of disk to be used for formatting: "
read disk
# Assuming /dev/sdb for now

cat <<EOF | fdisk /dev/sdb
n
p
1

t
8e
w
EOF
partprobe
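The comment above explicitly asks for an alternative to feeding fdisk through a heredoc. One candidate -- a sketch, not something the removed script used -- is sfdisk, which is designed for non-interactive partitioning; this assumes the same layout of a single Linux LVM (type 8e) partition spanning /dev/sdb, and the input syntax varies between util-linux versions:

    # Create one partition covering the whole disk with partition id 8e (Linux LVM),
    # then re-read the partition table.
    echo ',,8e' | sfdisk /dev/sdb
    partprobe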
@@ -1,47 +0,0 @@
#!/bin/bash
#
# About: Set up dependencies for VirtualBox sandbox meant for OpenStack Labs.
#
# Contact: pranav@aptira.com
# Copyright: Aptira (@aptira, aptira.com)
# License: Apache Software License (ASL) 2.0
###############################################################################
#                                                                             #
# This script will install Glance related packages and, after installation,  #
# it will configure Glance.                                                  #
#                                                                             #
###############################################################################

# Note: No Internet access required -- packages downloaded by PreInstall.sh
echo "Internet connection is not required for this script to run"
SCRIPT_DIR=$(cd $(dirname "$0") && pwd)

glance_conf() {

# 1. Install Glance
apt-get install -y glance

# 2. Install the config files
cp --no-preserve=mode,ownership "$SCRIPT_DIR/Templates/glance-api.conf" /etc/glance/glance-api.conf
cp --no-preserve=mode,ownership "$SCRIPT_DIR/Templates/glance-api-paste.ini" /etc/glance/glance-api-paste.ini
cp --no-preserve=mode,ownership "$SCRIPT_DIR/Templates/glance-registry-paste.ini" /etc/glance/glance-registry-paste.ini
cp --no-preserve=mode,ownership "$SCRIPT_DIR/Templates/glance-registry.conf" /etc/glance/glance-registry.conf

# 3. Restart Glance services
service glance-api restart
service glance-registry restart

# 4. Sync Glance database
glance-manage db_sync

# 5. Upload CirrOS image to Glance
source "$SCRIPT_DIR/../Keystone/Scripts/Credentials.sh"
# CirrOS image downloaded in PreInstall.sh
glance image-create --name myFirstImage --is-public true --container-format bare --disk-format qcow2 < "$SCRIPT_DIR"/cirros-*-x86_64-disk.img

# 6. Check the image
glance image-list
}

echo "Running Glance configuration"
glance_conf
@@ -1,63 +0,0 @@
# Use this pipeline for no auth or image caching - DEFAULT
[pipeline:glance-api]
pipeline = versionnegotiation unauthenticated-context rootapp

# Use this pipeline for image caching and no auth
[pipeline:glance-api-caching]
pipeline = versionnegotiation unauthenticated-context cache rootapp

# Use this pipeline for caching w/ management interface but no auth
[pipeline:glance-api-cachemanagement]
pipeline = versionnegotiation unauthenticated-context cache cachemanage rootapp

# Use this pipeline for keystone auth
[pipeline:glance-api-keystone]
pipeline = versionnegotiation authtoken context rootapp

# Use this pipeline for keystone auth with image caching
[pipeline:glance-api-keystone+caching]
pipeline = versionnegotiation authtoken context cache rootapp

# Use this pipeline for keystone auth with caching and cache management
[pipeline:glance-api-keystone+cachemanagement]
pipeline = versionnegotiation authtoken context cache cachemanage rootapp

[composite:rootapp]
paste.composite_factory = glance.api:root_app_factory
/: apiversions
/v1: apiv1app
/v2: apiv2app

[app:apiversions]
paste.app_factory = glance.api.versions:create_resource

[app:apiv1app]
paste.app_factory = glance.api.v1.router:API.factory

[app:apiv2app]
paste.app_factory = glance.api.v2.router:API.factory

[filter:versionnegotiation]
paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory

[filter:cache]
paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory

[filter:cachemanage]
paste.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter.factory

[filter:context]
paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory

[filter:unauthenticated-context]
paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory

[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
delay_auth_decision = true
auth_host = 10.10.10.51
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = glance
admin_password = service_pass
@@ -1,356 +0,0 @@
[DEFAULT]
# Show more verbose log output (sets INFO log level output)
#verbose = False

# Show debugging output in logs (sets DEBUG log level output)
#debug = False

# Which backend scheme should Glance use by default if not specified
# in a request to add a new image to Glance? Known schemes are determined
# by the known_stores option below.
# Default: 'file'
default_store = file

# List of which store classes and store class locations are
# currently known to glance at startup.
#known_stores = glance.store.filesystem.Store,
#               glance.store.http.Store,
#               glance.store.rbd.Store,
#               glance.store.s3.Store,
#               glance.store.swift.Store,

# Maximum image size (in bytes) that may be uploaded through the
# Glance API server. Defaults to 1 TB.
# WARNING: this value should only be increased after careful consideration
# and must be set to a value under 8 EB (9223372036854775808).
#image_size_cap = 1099511627776

# Address to bind the API server
bind_host = 0.0.0.0

# Port to bind the API server to
bind_port = 9292

# Log to this file. Make sure you do not set the same log
# file for both the API and registry servers!
log_file = /var/log/glance/api.log

# Backlog requests when creating socket
backlog = 4096

# TCP_KEEPIDLE value in seconds when creating socket.
# Not supported on OS X.
#tcp_keepidle = 600

# SQLAlchemy connection string for the reference implementation
# registry server. Any valid SQLAlchemy connection string is fine.
# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
sql_connection = mysql://glanceUser:glancePass@10.10.10.51/glance

# Period in seconds after which SQLAlchemy should reestablish its connection
# to the database.
#
# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
# notice this, you can lower this value to ensure that SQLAlchemy reconnects
# before MySQL can drop the connection.
sql_idle_timeout = 3600

# Number of Glance API worker processes to start.
# On machines with more than one CPU increasing this value
# may improve performance (especially if using SSL with
# compression turned on). It is typically recommended to set
# this value to the number of CPUs present on your machine.
workers = 1

# Role used to identify an authenticated user as administrator
#admin_role = admin

# Allow unauthenticated users to access the API with read-only
# privileges. This only applies when using ContextMiddleware.
#allow_anonymous_access = False

# Allow access to version 1 of glance api
#enable_v1_api = True

# Allow access to version 2 of glance api
#enable_v2_api = True

# Return the URL that references where the data is stored on
# the backend storage system. For example, if using the
# file system store a URL of 'file:///path/to/image' will
# be returned to the user in the 'direct_url' meta-data field.
# The default value is false.
#show_image_direct_url = False

# ================= Syslog Options ============================

# Send logs to syslog (/dev/log) instead of to file specified
# by `log_file`
#use_syslog = False

# Facility to use. If unset defaults to LOG_USER.
#syslog_log_facility = LOG_LOCAL0

# ================= SSL Options ===============================

# Certificate file to use when starting API server securely
#cert_file = /path/to/certfile

# Private key file to use when starting API server securely
#key_file = /path/to/keyfile

# CA certificate file to use to verify connecting clients
#ca_file = /path/to/cafile

# ================= Security Options ==========================

# AES key for encrypting store 'location' metadata, including
# -- if used -- Swift or S3 credentials
# Should be set to a random string of length 16, 24 or 32 bytes
#metadata_encryption_key = <16, 24 or 32 char registry metadata key>

# ============ Registry Options ===============================

# Address to find the registry server
registry_host = 0.0.0.0

# Port the registry server is listening on
registry_port = 9191

# What protocol to use when connecting to the registry server?
# Set to https for secure HTTP communication
registry_client_protocol = http

# The path to the key file to use in SSL connections to the
# registry server, if any. Alternately, you may set the
# GLANCE_CLIENT_KEY_FILE environ variable to a filepath of the key file
#registry_client_key_file = /path/to/key/file

# The path to the cert file to use in SSL connections to the
# registry server, if any. Alternately, you may set the
# GLANCE_CLIENT_CERT_FILE environ variable to a filepath of the cert file
#registry_client_cert_file = /path/to/cert/file

# The path to the certifying authority cert file to use in SSL connections
# to the registry server, if any. Alternately, you may set the
# GLANCE_CLIENT_CA_FILE environ variable to a filepath of the CA cert file
#registry_client_ca_file = /path/to/ca/file

# When using SSL in connections to the registry server, do not require
# validation via a certifying authority. This is the registry's equivalent of
# specifying --insecure on the command line using glanceclient for the API
# Default: False
#registry_client_insecure = False

# The period of time, in seconds, that the API server will wait for a registry
# request to complete. A value of '0' implies no timeout.
# Default: 600
#registry_client_timeout = 600

# Whether to automatically create the database tables.
# Default: False
#db_auto_create = False

# ============ Notification System Options =====================

# Notifications can be sent when images are created, updated or deleted.
# There are four methods of sending notifications: logging (via the
# log_file directive), rabbit (via a rabbitmq queue), qpid (via a Qpid
# message queue), or noop (no notifications sent, the default)
notifier_strategy = noop

# Configuration options if sending notifications via rabbitmq (these are
# the defaults)
rabbit_host = localhost
rabbit_port = 5672
rabbit_use_ssl = false
rabbit_userid = guest
rabbit_password = guest
rabbit_virtual_host = /
rabbit_notification_exchange = glance
rabbit_notification_topic = notifications
rabbit_durable_queues = False

# Configuration options if sending notifications via Qpid (these are
# the defaults)
qpid_notification_exchange = glance
qpid_notification_topic = notifications
qpid_hostname = localhost
qpid_port = 5672
qpid_username =
qpid_password =
qpid_sasl_mechanisms =
qpid_reconnect_timeout = 0
qpid_reconnect_limit = 0
qpid_reconnect_interval_min = 0
qpid_reconnect_interval_max = 0
qpid_reconnect_interval = 0
qpid_heartbeat = 5
# Set to 'ssl' to enable SSL
qpid_protocol = tcp
qpid_tcp_nodelay = True

# ============ Filesystem Store Options ========================

# Directory that the Filesystem backend store
# writes image data to
filesystem_store_datadir = /var/lib/glance/images/

# ============ Swift Store Options =============================

# Version of the authentication service to use
# Valid versions are '2' for keystone and '1' for swauth and rackspace
swift_store_auth_version = 2

# Address where the Swift authentication service lives
# Valid schemes are 'http://' and 'https://'
# If no scheme specified, default to 'https://'
# For swauth, use something like '127.0.0.1:8080/v1.0/'
swift_store_auth_address = 127.0.0.1:5000/v2.0/

# User to authenticate against the Swift authentication service
# If you use Swift authentication service, set it to 'account':'user'
# where 'account' is a Swift storage account and 'user'
# is a user in that account
swift_store_user = jdoe:jdoe

# Auth key for the user authenticating against the
# Swift authentication service
swift_store_key = a86850deb2742ec3cb41518e26aa2d89

# Container within the account that the account should use
# for storing images in Swift
swift_store_container = glance

# Do we create the container if it does not exist?
swift_store_create_container_on_put = False

# What size, in MB, should Glance start chunking image files
# and do a large object manifest in Swift? By default, this is
# the maximum object size in Swift, which is 5GB
swift_store_large_object_size = 5120

# When doing a large object manifest, what size, in MB, should
# Glance write chunks to Swift? This amount of data is written
# to a temporary disk buffer during the process of chunking
# the image file, and the default is 200MB
swift_store_large_object_chunk_size = 200

# Whether to use ServiceNET to communicate with the Swift storage servers.
# (If you aren't RACKSPACE, leave this False!)
#
# To use ServiceNET for authentication, prefix hostname of
# `swift_store_auth_address` with 'snet-'.
# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
swift_enable_snet = False

# If set to True enables multi-tenant storage mode which causes Glance images
# to be stored in tenant specific Swift accounts.
#swift_store_multi_tenant = False

# A list of swift ACL strings that will be applied as both read and
# write ACLs to the containers created by Glance in multi-tenant
# mode. This grants the specified tenants/users read and write access
# to all newly created image objects. The standard swift ACL string
# formats are allowed, including:
# <tenant_id>:<username>
# <tenant_name>:<username>
# *:<username>
# Multiple ACLs can be combined using a comma separated list, for
# example: swift_store_admin_tenants = service:glance,*:admin
#swift_store_admin_tenants =

# The region of the swift endpoint to be used for single tenant. This setting
# is only necessary if the tenant has multiple swift endpoints.
#swift_store_region =

# ============ S3 Store Options =============================

# Address where the S3 authentication service lives
# Valid schemes are 'http://' and 'https://'
# If no scheme specified, default to 'http://'
s3_store_host = 127.0.0.1:8080/v1.0/

# User to authenticate against the S3 authentication service
s3_store_access_key = <20-char AWS access key>

# Auth key for the user authenticating against the
# S3 authentication service
s3_store_secret_key = <40-char AWS secret key>

# Container within the account that the account should use
# for storing images in S3. Note that S3 has a flat namespace,
# so you need a unique bucket name for your glance images. An
# easy way to do this is append your AWS access key to "glance".
# S3 buckets in AWS *must* be lowercased, so remember to lowercase
# your AWS access key if you use it in your bucket name below!
s3_store_bucket = <lowercased 20-char aws access key>glance

# Do we create the bucket if it does not exist?
s3_store_create_bucket_on_put = False

# When sending images to S3, the data will first be written to a
# temporary buffer on disk. By default the platform's temporary directory
# will be used. If required, an alternative directory can be specified here.
#s3_store_object_buffer_dir = /path/to/dir

# When forming a bucket URL, boto will either set the bucket name as the
# subdomain or as the first token of the path. Amazon's S3 service will
# accept it as the subdomain, but Swift's S3 middleware requires it be
# in the path. Set this to 'path' or 'subdomain' - defaults to 'subdomain'.
#s3_store_bucket_url_format = subdomain

# ============ RBD Store Options =============================

# Ceph configuration file path
# If using cephx authentication, this file should
# include a reference to the right keyring
# in a client.<USER> section
rbd_store_ceph_conf = /etc/ceph/ceph.conf

# RADOS user to authenticate as (only applicable if using cephx)
rbd_store_user = glance

# RADOS pool in which images are stored
rbd_store_pool = images

# Images will be chunked into objects of this size (in megabytes).
# For best performance, this should be a power of two
rbd_store_chunk_size = 8

# ============ Delayed Delete Options =============================

# Turn on/off delayed delete
delayed_delete = False

# Delayed delete time in seconds
scrub_time = 43200

# Directory that the scrubber will use to remind itself of what to delete
# Make sure this is also set in glance-scrubber.conf
scrubber_datadir = /var/lib/glance/scrubber

# =============== Image Cache Options =============================

# Base directory that the Image Cache uses
image_cache_dir = /var/lib/glance/image-cache/

[keystone_authtoken]
auth_host = 127.0.0.1
auth_port = 35357
auth_protocol = http
admin_tenant_name = %SERVICE_TENANT_NAME%
admin_user = %SERVICE_USER%
admin_password = %SERVICE_PASSWORD%

[paste_deploy]
# Name of the paste configuration file that defines the available pipelines
#config_file = glance-api-paste.ini

# Partial name of a pipeline in your paste configuration file with the
# service name removed. For example, if your paste section name is
# [pipeline:glance-api-keystone], you would configure the flavor below
# as 'keystone'.
flavor = keystone
@@ -1,25 +0,0 @@
# Use this pipeline for no auth - DEFAULT
[pipeline:glance-registry]
pipeline = unauthenticated-context registryapp

# Use this pipeline for keystone auth
[pipeline:glance-registry-keystone]
pipeline = authtoken context registryapp

[app:registryapp]
paste.app_factory = glance.registry.api.v1:API.factory

[filter:context]
paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory

[filter:unauthenticated-context]
paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory

[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
auth_host = 10.10.10.51
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = glance
admin_password = service_pass
@@ -1,90 +0,0 @@
[DEFAULT]
# Show more verbose log output (sets INFO log level output)
#verbose = False

# Show debugging output in logs (sets DEBUG log level output)
#debug = False

# Address to bind the registry server
bind_host = 0.0.0.0

# Port to bind the registry server to
bind_port = 9191

# Log to this file. Make sure you do not set the same log
# file for both the API and registry servers!
log_file = /var/log/glance/registry.log

# Backlog requests when creating socket
backlog = 4096

# TCP_KEEPIDLE value in seconds when creating socket.
# Not supported on OS X.
#tcp_keepidle = 600

# SQLAlchemy connection string for the reference implementation
# registry server. Any valid SQLAlchemy connection string is fine.
# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
sql_connection = mysql://glanceUser:glancePass@10.10.10.51/glance

# Period in seconds after which SQLAlchemy should reestablish its connection
# to the database.
#
# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
# notice this, you can lower this value to ensure that SQLAlchemy reconnects
# before MySQL can drop the connection.
sql_idle_timeout = 3600

# Limit the api to return `param_limit_max` items in a call to a container. If
# a larger `limit` query param is provided, it will be reduced to this value.
api_limit_max = 1000

# If a `limit` query param is not provided in an api request, it will
# default to `limit_param_default`
limit_param_default = 25

# Role used to identify an authenticated user as administrator
#admin_role = admin

# Whether to automatically create the database tables.
# Default: False
#db_auto_create = False

# ================= Syslog Options ============================

# Send logs to syslog (/dev/log) instead of to file specified
# by `log_file`
#use_syslog = False

# Facility to use. If unset defaults to LOG_USER.
#syslog_log_facility = LOG_LOCAL1

# ================= SSL Options ===============================

# Certificate file to use when starting registry server securely
#cert_file = /path/to/certfile

# Private key file to use when starting registry server securely
#key_file = /path/to/keyfile

# CA certificate file to use to verify connecting clients
#ca_file = /path/to/cafile

[keystone_authtoken]
auth_host = 127.0.0.1
auth_port = 35357
auth_protocol = http
admin_tenant_name = %SERVICE_TENANT_NAME%
admin_user = %SERVICE_USER%
admin_password = %SERVICE_PASSWORD%

[paste_deploy]
# Name of the paste configuration file that defines the available pipelines
#config_file = glance-registry-paste.ini

# Partial name of a pipeline in your paste configuration file with the
# service name removed. For example, if your paste section name is
# [pipeline:glance-registry-keystone], you would configure the flavor below
# as 'keystone'.
flavor = keystone
@@ -1,29 +0,0 @@
#!/bin/sh
#
# About: Set up dependencies for VirtualBox sandbox meant for OpenStack Labs.
#
# Contact: pranav@aptira.com
# Copyright: Aptira (@aptira, aptira.com)
# License: Apache Software License (ASL) 2.0
###############################################################################
#                                                                             #
# This script will install Horizon related packages.                          #
#                                                                             #
###############################################################################

# Note: No Internet access required -- packages downloaded by PreInstall.sh
echo "Internet connection is not required for this script to run"
SCRIPT_DIR=$(cd $(dirname "$0") && pwd)

install_horizon() {

# 1. Install Horizon
apt-get install -y openstack-dashboard memcached

# 2. Restart apache2 and memcached
service apache2 restart
service memcached restart

echo "You are done with the OpenStack installation"
}
install_horizon
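Once apache2 and memcached are back up, the dashboard should answer over HTTP. A quick check -- hypothetical, assuming the default /horizon path used by the Ubuntu openstack-dashboard package:

    # Expect an HTTP 200 or a redirect to the login page.
    curl -sI http://127.0.0.1/horizon | head -n 1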
@@ -1,91 +0,0 @@
#!/bin/bash
#
# About: Set up dependencies for VirtualBox sandbox meant for OpenStack Labs.
#
# Contact: pranav@aptira.com
# Copyright: Aptira (@aptira, aptira.com)
# License: Apache Software License (ASL) 2.0
###############################################################################
#                                                                             #
# This script will install Keystone related packages and, after installation, #
# it will configure Keystone and populate the database.                       #
#                                                                             #
###############################################################################

# Note: No Internet access required -- packages downloaded by PreInstall.sh
echo "Internet connection is not required for this script to run"
SCRIPT_DIR=$(cd $(dirname "$0") && pwd)

pre_keystone() {

# 1. Database - MySQL and Python MySQL DB Connector
debconf-set-selections <<< "mysql-server mysql-server/root_password password $MYSQL_PASS"
debconf-set-selections <<< "mysql-server mysql-server/root_password_again password $MYSQL_PASS"
apt-get install -y mysql-server python-mysqldb

# Configure MySQL to listen on all IP addresses
sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf

# Restart MySQL service
service mysql restart

# 2. Install RabbitMQ
apt-get install -y rabbitmq-server
apt-get install -y ntp
apt-get install -y vlan bridge-utils

# Enable IP Forwarding
sed -i 's/#net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/' /etc/sysctl.conf
sysctl net.ipv4.ip_forward=1
}

keystone_conf() {

# 1. Install Keystone
apt-get -y install keystone

# Create databases for Keystone, Glance, Quantum, Nova, and Cinder
mysql -u "root" -p"$MYSQL_PASS" -e "create database keystone"
mysql -u "root" -p"$MYSQL_PASS" -e "GRANT ALL ON keystone.* TO 'keystoneUser'@'%' IDENTIFIED BY 'keystonePass';"
mysql -u "root" -p"$MYSQL_PASS" -e "create database glance"
mysql -u "root" -p"$MYSQL_PASS" -e "GRANT ALL ON glance.* TO 'glanceUser'@'%' IDENTIFIED BY 'glancePass';"
mysql -u "root" -p"$MYSQL_PASS" -e "create database quantum"
mysql -u "root" -p"$MYSQL_PASS" -e "GRANT ALL ON quantum.* TO 'quantumUser'@'%' IDENTIFIED BY 'quantumPass';"
mysql -u "root" -p"$MYSQL_PASS" -e "create database nova"
mysql -u "root" -p"$MYSQL_PASS" -e "GRANT ALL ON nova.* TO 'novaUser'@'%' IDENTIFIED BY 'novaPass';"
mysql -u "root" -p"$MYSQL_PASS" -e "create database cinder"
mysql -u "root" -p"$MYSQL_PASS" -e "GRANT ALL ON cinder.* TO 'cinderUser'@'%' IDENTIFIED BY 'cinderPass';"

# 2. Configure Keystone scripts (copy the template file)
cp --no-preserve=mode,ownership "$SCRIPT_DIR/Templates/Keystone.conf" /etc/keystone/keystone.conf

# 3. Restart the Keystone services
service keystone restart

# 4. Populate the database using db_sync
keystone-manage db_sync

# Create user and grant access to the user
sh "$SCRIPT_DIR/Scripts/keystone_basic.sh"
sh "$SCRIPT_DIR/Scripts/keystone_endpoints_basic.sh"

# Load the authentication credentials
source "$SCRIPT_DIR/Scripts/Credentials.sh"

# List Keystone users
keystone user-list
}

if [ "$#" -ne 1 ]; then
    # Prompt for the MySQL root password if it was not passed as an argument
    echo -n "Enter MySQL root password: "
    read MYSQL_PASS
else
    MYSQL_PASS=$1
fi

echo "Running pre_keystone"
pre_keystone

echo "Running keystone_conf"
keystone_conf
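A way to confirm that the GRANT statements and keystone-manage db_sync above worked, using the credentials hard-coded in this script (a verification sketch, not part of the removed file):

    # Connect over the management network as the keystone service user;
    # a table listing proves both the grant and the schema migration.
    mysql -h 10.10.10.51 -u keystoneUser -pkeystonePass keystone -e 'SHOW TABLES;'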
@@ -1,4 +0,0 @@
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin_pass
export OS_AUTH_URL="http://192.168.100.51:5000/v2.0/"
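These four variables are what the keystone CLI reads for password authentication, so sourcing the file is enough to run authenticated commands, e.g.:

    source Credentials.sh
    keystone token-get    # should print a token scoped to the admin tenant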
@@ -1,58 +0,0 @@
#!/bin/bash
#
# About: Set up dependencies for VirtualBox sandbox meant for OpenStack Labs.
#
# Contact: pranav@aptira.com
# License: Apache Software License (ASL) 2.0
# Inspired by https://github.com/mseknibilel/OpenStack-Grizzly-Install-Guide
###############################################################################
#                                                                             #
# This script adds users and grants them roles                                #
#                                                                             #
###############################################################################

HOST_IP=10.10.10.51
ADMIN_PASSWORD=${ADMIN_PASSWORD:-admin_pass}
SERVICE_PASSWORD=${SERVICE_PASSWORD:-service_pass}
export SERVICE_TOKEN="ADMIN"
export SERVICE_ENDPOINT="http://${HOST_IP}:35357/v2.0"
SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}

# Run the given keystone command and print the id column of its output
get_id () {
"$@" | awk '/ id / { print $4 }'
}

# Tenants
ADMIN_TENANT=$(get_id keystone tenant-create --name=admin)
SERVICE_TENANT=$(get_id keystone tenant-create --name=$SERVICE_TENANT_NAME)


# Users
ADMIN_USER=$(get_id keystone user-create --name=admin --pass="$ADMIN_PASSWORD" --email=admin@domain.com)


# Roles
ADMIN_ROLE=$(get_id keystone role-create --name=admin)
KEYSTONEADMIN_ROLE=$(get_id keystone role-create --name=KeystoneAdmin)
KEYSTONESERVICE_ROLE=$(get_id keystone role-create --name=KeystoneServiceAdmin)

# Add roles to users in tenants
keystone user-role-add --user-id $ADMIN_USER --role-id $ADMIN_ROLE --tenant-id $ADMIN_TENANT
keystone user-role-add --user-id $ADMIN_USER --role-id $KEYSTONEADMIN_ROLE --tenant-id $ADMIN_TENANT
keystone user-role-add --user-id $ADMIN_USER --role-id $KEYSTONESERVICE_ROLE --tenant-id $ADMIN_TENANT

# The Member role is used by Horizon and Swift
MEMBER_ROLE=$(get_id keystone role-create --name=Member)

# Configure service users/roles
NOVA_USER=$(get_id keystone user-create --name=nova --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=nova@domain.com)
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $NOVA_USER --role-id $ADMIN_ROLE

GLANCE_USER=$(get_id keystone user-create --name=glance --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=glance@domain.com)
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $GLANCE_USER --role-id $ADMIN_ROLE

NEUTRON_USER=$(get_id keystone user-create --name=neutron --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=neutron@domain.com)
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $NEUTRON_USER --role-id $ADMIN_ROLE

CINDER_USER=$(get_id keystone user-create --name=cinder --pass="$SERVICE_PASSWORD" --tenant-id $SERVICE_TENANT --email=cinder@domain.com)
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $CINDER_USER --role-id $ADMIN_ROLE
@@ -1,136 +0,0 @@
#!/bin/bash
#
# About: Set up dependencies for VirtualBox sandbox meant for OpenStack Labs.
#
# Contact: pranav@aptira.com
# License: Apache Software License (ASL) 2.0
# Inspired by https://github.com/mseknibilel/OpenStack-Grizzly-Install-Guide
###############################################################################
#                                                                             #
# This script creates keystone services and endpoints                         #
#                                                                             #
###############################################################################

# Host address
HOST_IP=10.10.10.51
EXT_HOST_IP=192.168.100.51

# MySQL definitions
MYSQL_USER=keystoneUser
MYSQL_DATABASE=keystone
MYSQL_HOST=$HOST_IP
MYSQL_PASSWORD=keystonePass

# Keystone definitions
KEYSTONE_REGION=RegionOne
export SERVICE_TOKEN=ADMIN
export SERVICE_ENDPOINT="http://${HOST_IP}:35357/v2.0"

while getopts "u:D:p:m:K:R:E:T:vh" opt; do
    case $opt in
        u)
            MYSQL_USER=$OPTARG
            ;;
        D)
            MYSQL_DATABASE=$OPTARG
            ;;
        p)
            MYSQL_PASSWORD=$OPTARG
            ;;
        m)
            MYSQL_HOST=$OPTARG
            ;;
        K)
            MASTER=$OPTARG
            ;;
        R)
            KEYSTONE_REGION=$OPTARG
            ;;
        E)
            export SERVICE_ENDPOINT=$OPTARG
            ;;
        T)
            export SERVICE_TOKEN=$OPTARG
            ;;
        v)
            set -x
            ;;
        h)
            cat <<EOF
Usage: $0 [-m mysql_hostname] [-u mysql_username] [-D mysql_database] [-p mysql_password]
          [-K keystone_master] [-R keystone_region] [-E keystone_endpoint_url]
          [-T keystone_token]

Add -v for verbose mode, -h to display this message.
EOF
            exit 0
            ;;
        \?)
            echo "Unknown option -$OPTARG" >&2
            exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument" >&2
            exit 1
            ;;
    esac
done

if [ -z "$KEYSTONE_REGION" ]; then
    echo "Keystone region not set. Please set with -R option or set KEYSTONE_REGION variable." >&2
    missing_args="true"
fi

if [ -z "$SERVICE_TOKEN" ]; then
    echo "Keystone service token not set. Please set with -T option or set SERVICE_TOKEN variable." >&2
    missing_args="true"
fi

if [ -z "$SERVICE_ENDPOINT" ]; then
    echo "Keystone service endpoint not set. Please set with -E option or set SERVICE_ENDPOINT variable." >&2
    missing_args="true"
fi

if [ -z "$MYSQL_PASSWORD" ]; then
    echo "MySQL password not set. Please set with -p option or set MYSQL_PASSWORD variable." >&2
    missing_args="true"
fi

if [ -n "$missing_args" ]; then
    exit 1
fi

keystone service-create --name nova --type compute --description 'OpenStack Compute Service'
keystone service-create --name cinder --type volume --description 'OpenStack Volume Service'
keystone service-create --name glance --type image --description 'OpenStack Image Service'
keystone service-create --name keystone --type identity --description 'OpenStack Identity'
keystone service-create --name ec2 --type ec2 --description 'OpenStack EC2 service'
keystone service-create --name neutron --type network --description 'OpenStack Networking service'

create_endpoint () {
    case $1 in
        compute)
            keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$EXT_HOST_IP"':8774/v2/$(tenant_id)s' --adminurl 'http://'"$HOST_IP"':8774/v2/$(tenant_id)s' --internalurl 'http://'"$HOST_IP"':8774/v2/$(tenant_id)s'
            ;;
        volume)
            keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$EXT_HOST_IP"':8776/v1/$(tenant_id)s' --adminurl 'http://'"$HOST_IP"':8776/v1/$(tenant_id)s' --internalurl 'http://'"$HOST_IP"':8776/v1/$(tenant_id)s'
            ;;
        image)
            keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$EXT_HOST_IP"':9292/' --adminurl 'http://'"$HOST_IP"':9292/' --internalurl 'http://'"$HOST_IP"':9292/'
            ;;
        identity)
            keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$EXT_HOST_IP"':5000/v2.0' --adminurl 'http://'"$HOST_IP"':35357/v2.0' --internalurl 'http://'"$HOST_IP"':5000/v2.0'
            ;;
        ec2)
            keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$EXT_HOST_IP"':8773/services/Cloud' --adminurl 'http://'"$HOST_IP"':8773/services/Admin' --internalurl 'http://'"$HOST_IP"':8773/services/Cloud'
            ;;
        network)
            keystone endpoint-create --region $KEYSTONE_REGION --service-id $2 --publicurl 'http://'"$EXT_HOST_IP"':9696/' --adminurl 'http://'"$HOST_IP"':9696/' --internalurl 'http://'"$HOST_IP"':9696/'
            ;;
    esac
}

# Note: object-store has no service-create above and no case in create_endpoint,
# so the loop silently skips it.
for i in compute volume image object-store identity ec2 network; do
    id=$(mysql -h "$MYSQL_HOST" -u "$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE" -ss -e "SELECT id FROM service WHERE type='$i';") || exit 1
    create_endpoint "$i" "$id"
done
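Note the single quotes around the URL fragments in create_endpoint: they keep the shell from treating $(tenant_id)s as a command substitution, so the literal placeholder is stored in the service catalog and Keystone substitutes the caller's tenant at request time. For the compute endpoint, for example:

    # Stored in the catalog:  http://192.168.100.51:8774/v2/$(tenant_id)s
    # Returned to a client:   http://192.168.100.51:8774/v2/<tenant UUID>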
@@ -1,303 +0,0 @@
[DEFAULT]
# A "shared secret" between keystone and other OpenStack services
# admin_token = ADMIN

# The IP address of the network interface to listen on
# public_bind_host = 0.0.0.0
# admin_bind_host = 0.0.0.0

# The port number which the public service listens on
# public_port = 5000

# The port number which the public admin listens on
# admin_port = 35357

# The base endpoint URLs for keystone that are advertised to clients
# (NOTE: this does NOT affect how keystone listens for connections)
# public_endpoint = http://localhost:%(public_port)d/
# admin_endpoint = http://localhost:%(admin_port)d/

# The port number which the OpenStack Compute service listens on
# compute_port = 8774

# Path to your policy definition containing identity actions
# policy_file = policy.json

# Rule to check if no matching policy definition is found
# FIXME(dolph): This should really be defined as [policy] default_rule
# policy_default_rule = admin_required

# Role for migrating membership relationships
# During a SQL upgrade, the following values will be used to create a new role
# that will replace records in the user_tenant_membership table with explicit
# role grants. After migration, the member_role_id will be used in the API
# add_user_to_project, and member_role_name will be ignored.
# member_role_id = 9fe2ff9ee4384b1894a90878d3e92bab
# member_role_name = _member_

# === Logging Options ===
# Print debugging output
# (includes plaintext request logging, potentially including passwords)
# debug = False

# Print more verbose output
# verbose = False

# Name of log file to output to. If not set, logging will go to stdout.
log_file = keystone.log

# The directory to keep log files in (will be prepended to --logfile)
log_dir = /var/log/keystone

# Use syslog for logging.
# use_syslog = False

# syslog facility to receive log lines
# syslog_log_facility = LOG_USER

# If this option is specified, the logging configuration file specified is
# used and overrides any other logging options specified. Please see the
# Python logging module documentation for details on logging configuration
# files.
# log_config = logging.conf

# A logging.Formatter log message format string which may use any of the
# available logging.LogRecord attributes.
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s

# Format string for %(asctime)s in log records.
# log_date_format = %Y-%m-%d %H:%M:%S

# onready allows you to send a notification when the process is ready to serve
# For example, to have it notify using systemd, one could set shell command:
# onready = systemd-notify --ready
# or a module with notify() method:
# onready = keystone.common.systemd

[sql]
# The SQLAlchemy connection string used to connect to the database
connection = mysql://keystoneUser:keystonePass@10.10.10.51/keystone

# the timeout before idle sql connections are reaped
# idle_timeout = 200

[identity]
driver = keystone.identity.backends.sql.Identity

# This references the domain to use for all Identity API v2 requests (which are
# not aware of domains). A domain with this ID will be created for you by
# keystone-manage db_sync in migration 008. The domain referenced by this ID
# cannot be deleted on the v3 API, to prevent accidentally breaking the v2 API.
# There is nothing special about this domain, other than the fact that it must
# exist in order to maintain support for your v2 clients.
# default_domain_id = default

[trust]
driver = keystone.trust.backends.sql.Trust

# delegation and impersonation features can be optionally disabled
# enabled = True

[catalog]
# dynamic, sql-based backend (supports API/CLI-based management commands)
driver = keystone.catalog.backends.sql.Catalog

# static, file-based backend (does *NOT* support any management commands)
# driver = keystone.catalog.backends.templated.TemplatedCatalog

# template_file = default_catalog.templates

[token]
driver = keystone.token.backends.sql.Token

# Amount of time a token should remain valid (in seconds)
# expiration = 86400

[policy]
driver = keystone.policy.backends.sql.Policy

[ec2]
driver = keystone.contrib.ec2.backends.sql.Ec2

[ssl]
#enable = True
#certfile = /etc/keystone/ssl/certs/keystone.pem
#keyfile = /etc/keystone/ssl/private/keystonekey.pem
#ca_certs = /etc/keystone/ssl/certs/ca.pem
#cert_required = True

[signing]
#token_format = PKI
#certfile = /etc/keystone/ssl/certs/signing_cert.pem
#keyfile = /etc/keystone/ssl/private/signing_key.pem
#ca_certs = /etc/keystone/ssl/certs/ca.pem
#key_size = 1024
#valid_days = 3650
#ca_password = None

[ldap]
# url = ldap://localhost
# user = dc=Manager,dc=example,dc=com
# password = None
# suffix = cn=example,cn=com
# use_dumb_member = False
# allow_subtree_delete = False
# dumb_member = cn=dumb,dc=example,dc=com

# Maximum results per page; a value of zero ('0') disables paging (default)
# page_size = 0

# The LDAP dereferencing option for queries. This can be either 'never',
# 'searching', 'always', 'finding' or 'default'. The 'default' option falls
# back to using default dereferencing configured by your ldap.conf.
# alias_dereferencing = default

# The LDAP scope for queries, this can be either 'one'
# (onelevel/singleLevel) or 'sub' (subtree/wholeSubtree)
# query_scope = one

# user_tree_dn = ou=Users,dc=example,dc=com
# user_filter =
# user_objectclass = inetOrgPerson
# user_domain_id_attribute = businessCategory
# user_id_attribute = cn
# user_name_attribute = sn
# user_mail_attribute = email
# user_pass_attribute = userPassword
# user_enabled_attribute = enabled
# user_enabled_mask = 0
# user_enabled_default = True
# user_attribute_ignore = tenant_id,tenants
# user_allow_create = True
# user_allow_update = True
# user_allow_delete = True
# user_enabled_emulation = False
# user_enabled_emulation_dn =

# tenant_tree_dn = ou=Groups,dc=example,dc=com
# tenant_filter =
# tenant_objectclass = groupOfNames
# tenant_domain_id_attribute = businessCategory
# tenant_id_attribute = cn
# tenant_member_attribute = member
# tenant_name_attribute = ou
# tenant_desc_attribute = desc
# tenant_enabled_attribute = enabled
# tenant_attribute_ignore =
# tenant_allow_create = True
# tenant_allow_update = True
# tenant_allow_delete = True
# tenant_enabled_emulation = False
# tenant_enabled_emulation_dn =

# role_tree_dn = ou=Roles,dc=example,dc=com
# role_filter =
# role_objectclass = organizationalRole
# role_id_attribute = cn
# role_name_attribute = ou
# role_member_attribute = roleOccupant
# role_attribute_ignore =
# role_allow_create = True
# role_allow_update = True
# role_allow_delete = True

# group_tree_dn =
# group_filter =
# group_objectclass = groupOfNames
# group_id_attribute = cn
# group_name_attribute = ou
# group_member_attribute = member
# group_desc_attribute = desc
# group_attribute_ignore =
# group_allow_create = True
# group_allow_update = True
# group_allow_delete = True

[auth]
methods = password,token
password = keystone.auth.plugins.password.Password
token = keystone.auth.plugins.token.Token

[filter:debug]
paste.filter_factory = keystone.common.wsgi:Debug.factory

[filter:token_auth]
paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory

[filter:admin_token_auth]
paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory

[filter:xml_body]
paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory

[filter:json_body]
paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory

[filter:user_crud_extension]
paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory

[filter:crud_extension]
paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory

[filter:ec2_extension]
paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory

[filter:s3_extension]
paste.filter_factory = keystone.contrib.s3:S3Extension.factory

[filter:url_normalize]
paste.filter_factory = keystone.middleware:NormalizingFilter.factory

[filter:sizelimit]
paste.filter_factory = keystone.middleware:RequestBodySizeLimiter.factory

[filter:stats_monitoring]
paste.filter_factory = keystone.contrib.stats:StatsMiddleware.factory

[filter:stats_reporting]
paste.filter_factory = keystone.contrib.stats:StatsExtension.factory

[filter:access_log]
paste.filter_factory = keystone.contrib.access:AccessLogMiddleware.factory

[app:public_service]
paste.app_factory = keystone.service:public_app_factory

[app:service_v3]
paste.app_factory = keystone.service:v3_app_factory

[app:admin_service]
paste.app_factory = keystone.service:admin_app_factory

[pipeline:public_api]
pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug ec2_extension user_crud_extension public_service

[pipeline:admin_api]
pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension crud_extension admin_service

[pipeline:api_v3]
pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension service_v3

[app:public_version_service]
paste.app_factory = keystone.service:public_version_app_factory

[app:admin_version_service]
paste.app_factory = keystone.service:admin_version_app_factory

[pipeline:public_version_api]
pipeline = access_log sizelimit stats_monitoring url_normalize xml_body public_version_service

[pipeline:admin_version_api]
pipeline = access_log sizelimit stats_monitoring url_normalize xml_body admin_version_service

[composite:main]
use = egg:Paste#urlmap
/v2.0 = public_api
/v3 = api_v3
/ = public_version_api

[composite:admin]
use = egg:Paste#urlmap
/v2.0 = admin_api
/v3 = api_v3
/ = admin_version_api
@ -1,57 +0,0 @@
#!/bin/bash
#
# About: Set up dependencies for VirtualBox sandbox meant for OpenStack Labs.
#
# Contact: pranav@aptira.com
# Copyright: Aptira @aptira,aptira.com
# License: Apache Software License (ASL) 2.0
###############################################################################
#                                                                             #
# This script will install Quantum related packages and, after installation, #
# it will configure Quantum and populate the database.                        #
#                                                                             #
###############################################################################

# Note: No Internet access required -- packages downloaded by PreInstall.sh
echo "Internet connection is not required for this script to run"
SCRIPT_DIR=$(cd "$(dirname "$0")" && pwd)

quantum_singlenode() {

# 1. Install Quantum, OVS etc.
apt-get install -y quantum-server openvswitch-switch openvswitch-datapath-dkms quantum-plugin-openvswitch quantum-plugin-openvswitch-agent dnsmasq quantum-dhcp-agent quantum-l3-agent

# br-int will be used for VM integration
ovs-vsctl add-br br-int
# br-ex is used for Internet access (not covered in this guide)
ovs-vsctl add-br br-ex
cp --no-preserve=mode,ownership "$SCRIPT_DIR/Templates/SingleNode/interfaces-single" /etc/network/interfaces
ovs-vsctl add-port br-ex eth1
# May need to do this ...
#iptables --table nat --append POSTROUTING --out-interface eth2 -j MASQUERADE
#iptables --append FORWARD --in-interface br-ex -j ACCEPT

# 2. Install Quantum configuration files
cp --no-preserve=mode,ownership "$SCRIPT_DIR/Templates/SingleNode/ovs_quantum_plugin.ini" /etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini
cp --no-preserve=mode,ownership "$SCRIPT_DIR/Templates/SingleNode/api-paste.ini" /etc/quantum/api-paste.ini
cp --no-preserve=mode,ownership "$SCRIPT_DIR/Templates/SingleNode/quantum.conf" /etc/quantum/quantum.conf
cp --no-preserve=mode,ownership "$SCRIPT_DIR/Templates/SingleNode/metadata_agent.ini" /etc/quantum/metadata_agent.ini

# 3. Restart Quantum server
for i in /etc/init.d/quantum-*; do sudo "$i" restart; done
service dnsmasq restart
}

quantum_multinode() {

# Single node for now.
quantum_singlenode
}

# For now it is just single node

if [ "$1" == "Single" ]; then
quantum_singlenode
else
quantum_multinode
fi
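# Completion sketch for the commented MASQUERADE rules above (illustrative,
# assuming eth2 is the VirtualBox NAT interface that provides Internet
# access, as in the accompanying interfaces file):
#
#   iptables --table nat --append POSTROUTING --out-interface eth2 -j MASQUERADE
#   iptables --append FORWARD --in-interface br-ex -j ACCEPT
#
# Invocation sketch (the script's filename is assumed):
#
#   bash Quantum.sh Single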
@ -1,30 +0,0 @@
[composite:quantum]
use = egg:Paste#urlmap
/: quantumversions
/v2.0: quantumapi_v2_0

[composite:quantumapi_v2_0]
use = call:quantum.auth:pipeline_factory
noauth = extensions quantumapiapp_v2_0
keystone = authtoken keystonecontext extensions quantumapiapp_v2_0

[filter:keystonecontext]
paste.filter_factory = quantum.auth:QuantumKeystoneContext.factory

[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
auth_host = 10.10.10.51
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = quantum
admin_password = service_pass

[filter:extensions]
paste.filter_factory = quantum.api.extensions:plugin_aware_extension_middleware_factory

[app:quantumversions]
paste.app_factory = quantum.api.versions:Versions.factory

[app:quantumapiapp_v2_0]
paste.app_factory = quantum.api.v2.router:APIRouter.factory
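# Quick check sketch (illustrative): once quantum-server is running with the
# keystone pipeline above, the API answers on its bind_port (9696 per
# quantum.conf), given a valid token in $TOKEN:
#
#   curl -H "X-Auth-Token: $TOKEN" http://10.10.10.51:9696/v2.0/networks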
@ -1,34 +0,0 @@
# interfaces(5) file used by ifup(8) and ifdown(8)

# local loopback
auto lo
iface lo inet loopback

# VirtualBox NAT -- for Internet access to VM
auto eth2
iface eth2 inet static

# OpenStack Single Node.

# OpenStack Management Network
auto eth0
iface eth0 inet static
address 10.10.10.51
netmask 255.255.255.0
gateway 10.10.10.1

# Expose OpenStack API to External Network/Internet
auto eth1
iface eth1 inet manual
up ifconfig $IFACE 0.0.0.0 up
up ip link set $IFACE promisc on
down ip link set $IFACE promisc off
down ifconfig $IFACE down

# VM Internet Access
auto br-ex
iface br-ex inet static
address 192.168.100.51
netmask 255.255.255.0
gateway 192.168.100.1
dns-nameservers 8.8.8.8
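# Applying this file (a sketch; run from the console since connectivity is
# interrupted, and note that br-ex must already exist -- it is created with
# "ovs-vsctl add-br br-ex" in the install script):
#
#   ifdown -a && ifup -a
#   ip addr show br-ex   # expect 192.168.100.51/24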
@ -1,26 +0,0 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = True
auth_url = http://10.10.10.51:35357/v2.0
auth_region = RegionOne
admin_tenant_name = service
admin_user = quantum
admin_password = service_pass

metadata_proxy_shared_secret = OpenStack_Training

# The Quantum user information for accessing the Quantum API.
# The stock default is kept commented out here so it does not override the
# auth_url already set for this host above.
# auth_url = http://localhost:35357/v2.0

# IP address used by Nova metadata server
nova_metadata_ip = 127.0.0.1

# TCP Port used by Nova metadata server
nova_metadata_port = 8775

# When proxying metadata requests, Quantum signs the Instance-ID header with a
# shared secret to prevent spoofing. You may select any string for a secret,
# but it must match here and in the configuration used by the Nova Metadata
# Server. NOTE: Nova uses a different key: quantum_metadata_proxy_shared_secret
# metadata_proxy_shared_secret =
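# Matching Nova side (a sketch based on the NOTE above): the same secret must
# appear in nova.conf under Nova's key name, and Nova must be told to expect
# the proxy (the second flag name is assumed):
#
#   quantum_metadata_proxy_shared_secret = OpenStack_Training
#   service_quantum_metadata_proxy = True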
@ -1,140 +0,0 @@
[DATABASE]
# This line MUST be changed to actually run the plugin.
# Example:
# sql_connection = mysql://root:nova@127.0.0.1:3306/ovs_quantum
# Replace 127.0.0.1 above with the IP address of the database used by the
# main quantum server. (Leave it as is if the database runs on this host.)
sql_connection = mysql://quantumUser:quantumPass@10.10.10.51/quantum
# Database reconnection retry times - in the event connectivity is lost;
# set to -1 implies an infinite retry count
# sql_max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
reconnect_interval = 2
# Enable the use of eventlets db_pool for MySQL. The flags sql_min_pool_size,
# sql_max_pool_size and sql_idle_timeout are relevant only if this is enabled.
# sql_dbpool_enable = False
# Minimum number of SQL connections to keep open in a pool
# sql_min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
# sql_max_pool_size = 5
# Timeout in seconds before idle sql connections are reaped
# sql_idle_timeout = 3600
# Maximum number of SQL connections to keep open in a QueuePool in SQLAlchemy
# Example sqlalchemy_pool_size = 5
# Maximum number of overflow connections in a QueuePool in SQLAlchemy
# Example sqlalchemy_max_overflow = 10
# Timeout of the open connections QueuePool in SQLAlchemy
# Example sqlalchemy_pool_timeout = 30

[OVS]
tenant_network_type = gre
tunnel_id_ranges = 1:1000
integration_bridge = br-int
tunnel_bridge = br-tun
local_ip = 10.10.10.51
enable_tunneling = True

# (StrOpt) Type of network to allocate for tenant networks. The
# default value 'local' is useful only for single-box testing and
# provides no connectivity between hosts. You MUST either change this
# to 'vlan' and configure network_vlan_ranges below or change this to
# 'gre' and configure tunnel_id_ranges below in order for tenant
# networks to provide connectivity between hosts. Set to 'none' to
# disable creation of tenant networks.
#
# Default: tenant_network_type = local
# Example: tenant_network_type = gre

# (ListOpt) Comma-separated list of
# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges
# of VLAN IDs on named physical networks that are available for
# allocation. All physical networks listed are available for flat and
# VLAN provider network creation. Specified ranges of VLAN IDs are
# available for tenant network allocation if tenant_network_type is
# 'vlan'. If empty, only gre and local networks may be created.
#
# Default: network_vlan_ranges =
# Example: network_vlan_ranges = physnet1:1000:2999

# (BoolOpt) Set to True in the server and the agents to enable support
# for GRE networks. Requires kernel support for OVS patch ports and
# GRE tunneling.
#
# Default: enable_tunneling = False

# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples
# enumerating ranges of GRE tunnel IDs that are available for tenant
# network allocation if tenant_network_type is 'gre'.
#
# Default: tunnel_id_ranges =
# Example: tunnel_id_ranges = 1:1000

# Do not change this parameter unless you have a good reason to.
# This is the name of the OVS integration bridge. There is one per hypervisor.
# The integration bridge acts as a virtual "patch bay". All VM VIFs are
# attached to this bridge and then "patched" according to their network
# connectivity.
#
# Default: integration_bridge = br-int

# Only used for the agent if tunnel_id_ranges (above) is not empty for
# the server. In most cases, the default value should be fine.
#
# Default: tunnel_bridge = br-tun

# Peer patch port in integration bridge for tunnel bridge
# int_peer_patch_port = patch-tun

# Peer patch port in tunnel bridge for integration bridge
# tun_peer_patch_port = patch-int

# Uncomment this line for the agent if tunnel_id_ranges (above) is not
# empty for the server. Set local-ip to be the local IP address of
# this hypervisor.
#
# Default: local_ip =

# (ListOpt) Comma-separated list of <physical_network>:<bridge> tuples
# mapping physical network names to the agent's node-specific OVS
# bridge names to be used for flat and VLAN networks. The length of
# bridge names should be no more than 11 characters. Each bridge must
# exist, and should have a physical network interface configured as a
# port. All physical networks listed in network_vlan_ranges on the
# server should have mappings to appropriate bridges on each agent.
#
# Default: bridge_mappings =
# Example: bridge_mappings = physnet1:br-eth1

[AGENT]
# Agent's polling interval in seconds
polling_interval = 2

[SECURITYGROUP]
# Firewall driver for realizing quantum security group function
# firewall_driver = quantum.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
firewall_driver = quantum.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
#-----------------------------------------------------------------------------
# Sample Configurations.
#-----------------------------------------------------------------------------
#
# 1. With VLANs on eth1.
# [DATABASE]
# sql_connection = mysql://root:nova@127.0.0.1:3306/ovs_quantum
# [OVS]
# network_vlan_ranges = default:2000:3999
# tunnel_id_ranges =
# integration_bridge = br-int
# bridge_mappings = default:br-eth1
# [AGENT]
# Add the following setting, if you want to log to a file
#
# 2. With tunneling.
# [DATABASE]
# sql_connection = mysql://root:nova@127.0.0.1:3306/ovs_quantum
# [OVS]
# network_vlan_ranges =
# tunnel_id_ranges = 1:1000
# integration_bridge = br-int
# tunnel_bridge = br-tun
# local_ip = 10.0.0.3
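# Verification sketch (illustrative, assuming the GRE settings above): after
# the openvswitch agent starts, both bridges and the peer patch ports should
# be visible on the node:
#
#   ovs-vsctl show                 # expect bridges br-int and br-tun
#   ovs-vsctl list-ports br-tun    # expect patch-int among the ports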
@ -1,289 +0,0 @@
[DEFAULT]
# Default log level is INFO
# verbose and debug have the same result.
# Either one will set DEBUG log level output
# debug = False
# verbose = False

# Where to store Quantum state files. This directory must be writable by the
# user executing the agent.
# state_path = /var/lib/quantum

# Where to store lock files
lock_path = $state_path/lock

# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# log_date_format = %Y-%m-%d %H:%M:%S

# use_syslog -> syslog
# log_file and log_dir -> log_dir/log_file
# (not log_file) and log_dir -> log_dir/{binary_name}.log
# use_stderr -> stderr
# (not use_stderr) and (not log_file) -> stdout
# publish_errors -> notification system

# use_syslog = False
# syslog_log_facility = LOG_USER

# use_stderr = True
# log_file =
# log_dir =

# publish_errors = False

# Address to bind the API server
bind_host = 0.0.0.0

# Port to bind the API server to
bind_port = 9696

# Path to the extensions. Note that this can be a colon-separated list of
# paths. For example:
# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
# The __path__ of quantum.extensions is appended to this, so if your
# extensions are in there you don't need to specify them here
# api_extensions_path =

# Quantum plugin provider module
# core_plugin =
core_plugin = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2

# Advanced service modules
# service_plugins =

# Paste configuration file
api_paste_config = /etc/quantum/api-paste.ini

# The strategy to be used for auth.
# Supported values are 'keystone' (default) and 'noauth'.
# auth_strategy = keystone

# Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be
# randomly generated.
# 3 octet
# base_mac = fa:16:3e:00:00:00
# 4 octet
# base_mac = fa:16:3e:4f:00:00

# Maximum amount of retries to generate a unique MAC address
# mac_generation_retries = 16

# DHCP Lease duration (in seconds)
# dhcp_lease_duration = 120

# Allow sending resource operation notification to DHCP agent
# dhcp_agent_notification = True

# Enable or disable bulk create/update/delete operations
# allow_bulk = True
# Enable or disable pagination
# allow_pagination = False
# Enable or disable sorting
# allow_sorting = False
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Quantum is
# being used in conjunction with nova security groups and/or metadata service.
# allow_overlapping_ips = False
# Ensure that configured gateway is on subnet
# force_gateway_on_subnet = False

# RPC configuration options. Defined in rpc __init__
# The messaging module to use, defaults to kombu.
# rpc_backend = quantum.openstack.common.rpc.impl_kombu
# Size of RPC thread pool
# rpc_thread_pool_size = 64
# Size of RPC connection pool
# rpc_conn_pool_size = 30
# Seconds to wait for a response from call or multicall
# rpc_response_timeout = 60
# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
# rpc_cast_timeout = 30
# Modules of exceptions that are permitted to be recreated
# upon receiving exception data from an rpc call.
# allowed_rpc_exception_modules = quantum.openstack.common.exception, nova.exception
# AMQP exchange to connect to if using RabbitMQ or QPID
control_exchange = quantum

# If passed, use a fake RabbitMQ provider
# fake_rabbit = False

# Configuration options if sending notifications via kombu rpc (these are
# the defaults)
# SSL version to use (valid only if SSL enabled)
# kombu_ssl_version =
# SSL key file (valid only if SSL enabled)
# kombu_ssl_keyfile =
# SSL cert file (valid only if SSL enabled)
# kombu_ssl_certfile =
# SSL certification authority file (valid only if SSL enabled)
# kombu_ssl_ca_certs =
# IP address of the RabbitMQ installation
# rabbit_host = localhost
# Password of the RabbitMQ server
# rabbit_password = guest
# Port where RabbitMQ server is running/listening
# rabbit_port = 5672
# RabbitMQ single or HA cluster (host:port pairs, e.g.: host1:5672, host2:5672)
# rabbit_hosts defaults to '$rabbit_host:$rabbit_port'
# rabbit_hosts = localhost:5672
# User ID used for RabbitMQ connections
# rabbit_userid = guest
# Location of a virtual RabbitMQ installation.
# rabbit_virtual_host = /
# Maximum retries when trying to connect to RabbitMQ
# (the default of 0 implies an infinite retry count)
# rabbit_max_retries = 0
# RabbitMQ connection retry interval
# rabbit_retry_interval = 1
# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
# wipe the RabbitMQ database when changing this option. (boolean value)
# rabbit_ha_queues = false

# QPID
# rpc_backend=quantum.openstack.common.rpc.impl_qpid
# Qpid broker hostname
# qpid_hostname = localhost
# Qpid broker port
# qpid_port = 5672
# Qpid single or HA cluster (host:port pairs, e.g.: host1:5672, host2:5672)
# qpid_hosts defaults to '$qpid_hostname:$qpid_port'
# qpid_hosts = localhost:5672
# Username for qpid connection
# qpid_username = ''
# Password for qpid connection
# qpid_password = ''
# Space separated list of SASL mechanisms to use for auth
# qpid_sasl_mechanisms = ''
# Seconds between connection keepalive heartbeats
# qpid_heartbeat = 60
# Transport to use, either 'tcp' or 'ssl'
# qpid_protocol = tcp
# Disable Nagle algorithm
# qpid_tcp_nodelay = True

# ZMQ
# rpc_backend=quantum.openstack.common.rpc.impl_zmq
# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
# The "host" option should point or resolve to this address.
# rpc_zmq_bind_address = *

# ============ Notification System Options =====================

# Notifications can be sent when a network/subnet/port is created, updated or deleted.
# There are three methods of sending notifications: logging (via the
# log_file directive), rpc (via a message queue) and
# noop (no notifications sent, the default)

# notification_driver can be defined multiple times
# Do nothing driver
# notification_driver = quantum.openstack.common.notifier.no_op_notifier
# Logging driver
# notification_driver = quantum.openstack.common.notifier.log_notifier
# RPC driver. DHCP agents need it.
notification_driver = quantum.openstack.common.notifier.rpc_notifier

# default_notification_level is used to form actual topic name(s) or to set logging level
default_notification_level = INFO

# default_publisher_id is a part of the notification payload
# host = myhost.com
# default_publisher_id = $host

# Defined in rpc_notifier, can be comma separated values.
# The actual topic names will be %s.%(default_notification_level)s
notification_topics = notifications
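# Worked example (illustrative): with notification_topics = notifications and
# default_notification_level = INFO above, the rpc_notifier publishes to the
# AMQP topic "notifications.info" (the level is lowercased into the topic
# name, per the %s.%(default_notification_level)s pattern).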

# Default maximum number of items returned in a single response.
# The value 'infinite' or a negative integer means no limit; otherwise the
# value must be greater than 0. If the number of items requested is greater
# than pagination_max_limit, the server returns at most pagination_max_limit
# items.
# pagination_max_limit = -1

# Maximum number of DNS nameservers per subnet
# max_dns_nameservers = 5

# Maximum number of host routes per subnet
# max_subnet_host_routes = 20

# Maximum number of fixed ips per port
# max_fixed_ips_per_port = 5

# =========== items for agent management extension =============
# Seconds to regard the agent as down.
# agent_down_time = 5
# =========== end of items for agent management extension =====

# =========== items for agent scheduler extension =============
# Driver to use for scheduling network to DHCP agent
# network_scheduler_driver = quantum.scheduler.dhcp_agent_scheduler.ChanceScheduler
# Driver to use for scheduling router to a default L3 agent
# router_scheduler_driver = quantum.scheduler.l3_agent_scheduler.ChanceScheduler

# Allow auto scheduling of networks to the DHCP agent. Non-hosted networks
# will be scheduled to the first DHCP agent that sends a get_active_networks
# message to the quantum server
# network_auto_schedule = True

# Allow auto scheduling of routers to the L3 agent. Non-hosted routers will
# be scheduled to the first L3 agent that sends a sync_routers message to
# the quantum server
# router_auto_schedule = True
# =========== end of items for agent scheduler extension =====

[QUOTAS]
# resource name(s) that are supported in quota features
# quota_items = network,subnet,port

# default number of resources allowed per tenant; a negative value means unlimited
# default_quota = -1

# number of networks allowed per tenant; a negative value means unlimited
# quota_network = 10

# number of subnets allowed per tenant; a negative value means unlimited
# quota_subnet = 10

# number of ports allowed per tenant; a negative value means unlimited
# quota_port = 50

# number of security groups allowed per tenant; a negative value means unlimited
# quota_security_group = 10

# number of security group rules allowed per tenant; a negative value means unlimited
# quota_security_group_rule = 100

# default driver to use for quota checks
# quota_driver = quantum.quota.ConfDriver

[DEFAULT_SERVICETYPE]
# Description of the default service type (optional)
# description = "default service type"
# Enter a service definition line for each advanced service provided
# by the default service type.
# Each service definition should be in the following format:
# <service>:<plugin>[:driver]

[AGENT]
# Use "sudo quantum-rootwrap /etc/quantum/rootwrap.conf" to use the real
# root filter facility.
# Change to "sudo" to skip the filtering and just run the command directly
# root_helper = sudo
root_helper = sudo quantum-rootwrap /etc/quantum/rootwrap.conf

# =========== items for agent management extension =============
# seconds between nodes reporting state to server; should be less than
# agent_down_time
# report_interval = 4

# =========== end of items for agent management extension =====

[keystone_authtoken]
auth_host = 10.10.10.51
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = quantum
admin_password = service_pass
signing_dir = /var/lib/quantum/keystone-signing
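# Credential check sketch (illustrative): the [keystone_authtoken] values
# above can be exercised against Keystone's v2.0 tokens API:
#
#   curl -s http://10.10.10.51:35357/v2.0/tokens \
#     -H 'Content-Type: application/json' \
#     -d '{"auth": {"tenantName": "service", "passwordCredentials": {"username": "quantum", "password": "service_pass"}}}'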
@ -1,61 +0,0 @@
#!/bin/bash
#
# About: Set up dependencies for VirtualBox sandbox meant for OpenStack Labs.
#
# Contact: pranav@aptira.com
# Copyright: Aptira @aptira,aptira.com
# License: Apache Software License (ASL) 2.0
###############################################################################
#                                                                             #
# This script will install Nova related packages and, after installation, it #
# will configure Nova and populate the database.                              #
#                                                                             #
###############################################################################

# Note: No Internet access required -- packages downloaded by PreInstall.sh
echo "Internet connection is not required for this script to run"
SCRIPT_DIR=$(cd "$(dirname "$0")" && pwd)

nova_singlenode() {

# 1. Install Nova, OVS etc.
apt-get install -y kvm libvirt-bin pm-utils nova-api nova-cert novnc nova-consoleauth nova-scheduler nova-novncproxy nova-doc nova-conductor nova-compute-kvm

# 2. Install Nova configuration files
cp --no-preserve=mode,ownership "$SCRIPT_DIR/Templates/SingleNode/nova/api-paste.ini" /etc/nova/api-paste.ini
cp --no-preserve=mode,ownership "$SCRIPT_DIR/Templates/SingleNode/nova/nova.conf" /etc/nova/nova.conf
cp --no-preserve=mode,ownership "$SCRIPT_DIR/Templates/SingleNode/libvirt/qemu.conf" /etc/libvirt/qemu.conf
cp --no-preserve=mode,ownership "$SCRIPT_DIR/Templates/SingleNode/libvirt/libvirtd.conf" /etc/libvirt/libvirtd.conf
cp --no-preserve=mode,ownership "$SCRIPT_DIR/Templates/SingleNode/libvirt/libvirt-bin.conf" /etc/init/libvirt-bin.conf
cp --no-preserve=mode,ownership "$SCRIPT_DIR/Templates/SingleNode/libvirt/libvirt-bin" /etc/default/libvirt-bin
cp --no-preserve=mode,ownership "$SCRIPT_DIR/Templates/SingleNode/nova/nova-compute.conf" /etc/nova/nova-compute.conf

# Destroy default virtual bridges
virsh net-destroy default
virsh net-undefine default

service dbus restart && service libvirt-bin restart

# 3. Sync database
nova-manage db sync

# 4. Restart Nova services
for i in /etc/init.d/nova-*; do sudo "$i" restart; done

# 5. This is just because I like to see the smiley faces :)
nova-manage service list
}

nova_multinode() {

# Single node for now.
nova_singlenode
}

# For now it is just single node

if [ "$1" == "Single" ]; then
nova_singlenode
else
nova_multinode
fi
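# Invocation and verification sketch (the script's filename is assumed):
#
#   bash Nova.sh Single
#   nova-manage service list   # healthy services show ":-)", dead ones "XXX"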
@ -1,11 +0,0 @@
# Defaults for libvirt-bin initscript (/etc/init.d/libvirt-bin)
# This is a POSIX shell fragment

# Start libvirtd to handle qemu/kvm:
start_libvirtd="yes"

# options passed to libvirtd, add "-l" to listen on tcp
libvirtd_opts="-d -l"

# pass in location of kerberos keytab
#export KRB5_KTNAME=/etc/libvirt/libvirt.keytab
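# Note on the flags above (a sketch of how they interact with libvirtd.conf):
# "-d" daemonizes libvirtd, and "-l" is the --listen flag that the
# listen_tls/listen_tcp settings in /etc/libvirt/libvirtd.conf require in
# order to have any effect. Without "-l" here, listen_tcp = 1 below is
# silently ignored.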
@ -1,88 +0,0 @@
description "libvirt daemon"
author "Dustin Kirkland <kirkland@canonical.com>"

start on runlevel [2345]
stop on starting rc RUNLEVEL=[016]

expect daemon
respawn

# daemonize
env libvirtd_opts="-d -l"
# whether libvirtd should run at boot/shutdown
env start_libvirtd="yes"
# by default wait 30 seconds for vms to shut down
env libvirtd_shutdown_timeout=30
# uris for which to shut down vms
env libvirt_uris='qemu:///system lxc:///'

pre-start script
    [ -r /etc/default/libvirt-bin ] && . /etc/default/libvirt-bin
    [ ! "x$start_libvirtd" = "xyes" ] && { stop; exit 0; }
    mkdir -p /var/run/libvirt
    # Clean up a pidfile that might be left around
    rm -f /var/run/libvirtd.pid
end script

pre-stop script
    [ -r /etc/default/libvirt-bin ] && . /etc/default/libvirt-bin

    log_msg()
    {
        logf="/var/log/libvirt/shutdownlog.log"
        logger -p daemon.debug -s -t libvirt -- "$@" >> $logf 2>&1
    }

    run_virsh()
    {
        # We parse the output for things like domain state;
        # make sure the output is in the language we expect.
        LANG=C virsh "$@"
    }

    if [ -z "$RUNLEVEL" ]; then
        exit 0
    fi

    if [ "$RUNLEVEL" -ne 0 ] && [ "$RUNLEVEL" -ne 1 ] && [ "$RUNLEVEL" -ne 6 ]; then
        exit 0
    fi
    log_msg "libvirt-bin: entering pre-stop at $(date)"

    for uri in $libvirt_uris; do
        for domain in $(run_virsh -c "$uri" list | awk '$3 == "running" {print $2}'); do
            log_msg "libvirt-bin: attempting clean shutdown of $domain at $(date)"
            run_virsh -c "$uri" shutdown "$domain" >/dev/null
        done
    done

    delay=$libvirtd_shutdown_timeout
    while [ $delay -gt 0 ]; do
        for uri in $libvirt_uris; do
            if ! run_virsh -c "$uri" list | awk '$3 == "running" {exit 1}'; then
                # VMs at this URI are still running. Wait, then
                # start at the beginning looking for running VMs.
                sleep 1
                delay=$(($delay - 1))
                continue 2
            fi
        done
        break
    done

    for uri in $libvirt_uris; do
        for domain in $(run_virsh -c "$uri" list | awk '$3 == "running" {print $2}'); do
            log_msg "destroying $domain"
            run_virsh -c "$uri" destroy "$domain" >/dev/null
        done
    done
    log_msg "libvirt-bin: exiting pre-stop at $(date)"
end script

# /etc/default/libvirt-bin will be deprecated soon.
# If you used to set $libvirtd_opts in /etc/default/libvirt-bin,
# change the 'exec' line here instead.
script
    [ -r /etc/default/libvirt-bin ] && . /etc/default/libvirt-bin
    exec /usr/sbin/libvirtd $libvirtd_opts
end script
@ -1,395 +0,0 @@
# Master libvirt daemon configuration file
#
# For further information consult http://libvirt.org/format.html
#
# NOTE: the tests/daemon-conf regression test script requires
# that each "PARAMETER = VALUE" line in this file have the parameter
# name just after a leading "#".

#################################################################
#
# Network connectivity controls
#

# Flag listening for secure TLS connections on the public TCP/IP port.
# NB, must pass the --listen flag to the libvirtd process for this to
# have any effect.
#
# It is necessary to setup a CA and issue server certificates before
# using this capability.
#
# This is enabled by default, uncomment this to disable it
listen_tls = 0

# Listen for unencrypted TCP connections on the public TCP/IP port.
# NB, must pass the --listen flag to the libvirtd process for this to
# have any effect.
#
# Using the TCP socket requires SASL authentication by default. Only
# SASL mechanisms which support data encryption are allowed. This is
# DIGEST-MD5 and GSSAPI (Kerberos5)
#
# This is disabled by default, uncomment this to enable it.
listen_tcp = 1

# Override the port for accepting secure TLS connections
# This can be a port number, or service name
#
#tls_port = "16514"

# Override the port for accepting insecure TCP connections
# This can be a port number, or service name
#
#tcp_port = "16509"

# Override the default configuration which binds to all network
# interfaces. This can be a numeric IPv4/6 address, or hostname
#
#listen_addr = "192.168.0.1"

# Flag toggling mDNS advertisement of the libvirt service.
#
# Alternatively can disable for all services on a host by
# stopping the Avahi daemon
#
# This is disabled by default, uncomment this to enable it
#mdns_adv = 1

# Override the default mDNS advertisement name. This must be
# unique on the immediate broadcast network.
#
# The default is "Virtualization Host HOSTNAME", where HOSTNAME
# is substituted for the short hostname of the machine (without domain)
#
#mdns_name = "Virtualization Host Joe Demo"

#################################################################
#
# UNIX socket access controls
#

# Set the UNIX domain socket group ownership. This can be used to
# allow a 'trusted' set of users access to management capabilities
# without becoming root.
#
# This is restricted to 'root' by default.
unix_sock_group = "libvirtd"

# Set the UNIX socket permissions for the R/O socket. This is used
# for monitoring VM status only
#
# Default allows any user. If setting group ownership, you may want to
# restrict this to:
#unix_sock_ro_perms = "0777"

# Set the UNIX socket permissions for the R/W socket. This is used
# for full management of VMs
#
# Default allows only root. If PolicyKit is enabled on the socket,
# the default will change to allow everyone (eg, 0777)
#
# If not using PolicyKit and setting group ownership for access
# control then you may want to relax this to:
unix_sock_rw_perms = "0770"

# Set the name of the directory in which sockets will be found/created.
#unix_sock_dir = "/var/run/libvirt"

#################################################################
#
# Authentication.
#
# - none: do not perform auth checks. If you can connect to the
#         socket you are allowed. This is suitable if there are
#         restrictions on connecting to the socket (eg, UNIX
#         socket permissions), or if there is a lower layer in
#         the network providing auth (eg, TLS/x509 certificates)
#
# - sasl: use SASL infrastructure. The actual auth scheme is then
#         controlled from /etc/sasl2/libvirt.conf. For the TCP
#         socket only GSSAPI & DIGEST-MD5 mechanisms will be used.
#         For non-TCP or TLS sockets, any scheme is allowed.
#
# - polkit: use PolicyKit to authenticate. This is only suitable
#           for use on the UNIX sockets. The default policy will
#           require a user to supply their own password to gain
#           full read/write access (aka sudo like), while anyone
#           is allowed read/only access.
#
# Set an authentication scheme for UNIX read-only sockets
# By default socket permissions allow anyone to connect
#
# To restrict monitoring of domains you may wish to enable
# an authentication mechanism here
auth_unix_ro = "none"

# Set an authentication scheme for UNIX read-write sockets
# By default socket permissions only allow root. If PolicyKit
# support was compiled into libvirt, the default will be to
# use 'polkit' auth.
#
# If the unix_sock_rw_perms are changed you may wish to enable
# an authentication mechanism here
auth_unix_rw = "none"

# Change the authentication scheme for TCP sockets.
#
# If you don't enable SASL, then all TCP traffic is cleartext.
# Don't do this outside of a dev/test scenario. For real world
# use, always enable SASL and use the GSSAPI or DIGEST-MD5
# mechanism in /etc/sasl2/libvirt.conf
auth_tcp = "none"
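# Remote access sketch (illustrative, given listen_tcp = 1 and
# auth_tcp = "none" above; this is cleartext and unauthenticated, so it is
# only reasonable on the isolated lab network):
#
#   virsh -c qemu+tcp://10.10.10.51/system list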

# Change the authentication scheme for TLS sockets.
#
# TLS sockets already have encryption provided by the TLS
# layer, and limited authentication is done by certificates
#
# It is possible to make use of any SASL authentication
# mechanism as well, by using 'sasl' for this option
#auth_tls = "none"

#################################################################
#
# TLS x509 certificate configuration
#

# Override the default server key file path
#
#key_file = "/etc/pki/libvirt/private/serverkey.pem"

# Override the default server certificate file path
#
#cert_file = "/etc/pki/libvirt/servercert.pem"

# Override the default CA certificate path
#
#ca_file = "/etc/pki/CA/cacert.pem"

# Specify a certificate revocation list.
#
# Defaults to not using a CRL, uncomment to enable it
#crl_file = "/etc/pki/CA/crl.pem"

#################################################################
#
# Authorization controls
#

# Flag to disable verification of our own server certificates
#
# When libvirtd starts it performs some sanity checks against
# its own certificates.
#
# Default is to always run sanity checks. Uncommenting this
# will disable sanity checks, which is not a good idea
#tls_no_sanity_certificate = 1

# Flag to disable verification of client certificates
#
# Client certificate verification is the primary authentication mechanism.
# Any client which does not present a certificate signed by the CA
# will be rejected.
#
# Default is to always verify. Uncommenting this will disable
# verification - make sure an IP whitelist is set
#tls_no_verify_certificate = 1

# A whitelist of allowed x509 Distinguished Names
# This list may contain wildcards such as
#
#    "C=GB,ST=London,L=London,O=Red Hat,CN=*"
#
# See the POSIX fnmatch function for the format of the wildcards.
#
# NB If this is an empty list, no client can connect, so comment out
# entirely rather than using an empty list to disable these checks
#
# By default, no DNs are checked
#tls_allowed_dn_list = ["DN1", "DN2"]

# A whitelist of allowed SASL usernames. The format for usernames
# depends on the SASL authentication mechanism. Kerberos usernames
# look like username@REALM
#
# This list may contain wildcards such as
#
#    "*@EXAMPLE.COM"
#
# See the POSIX fnmatch function for the format of the wildcards.
#
# NB If this is an empty list, no client can connect, so comment out
# entirely rather than using an empty list to disable these checks
#
# By default, no usernames are checked
#sasl_allowed_username_list = ["joe@EXAMPLE.COM", "fred@EXAMPLE.COM" ]

#################################################################
#
# Processing controls
#

# The maximum number of concurrent client connections to allow
# over all sockets combined.
#max_clients = 20

# The minimum limit sets the number of workers to start up
# initially. If the number of active clients exceeds this,
# then more threads are spawned, up to the max_workers limit.
# Typically you'd want max_workers to equal the maximum number
# of clients allowed
#min_workers = 5
#max_workers = 20

# The number of priority workers. If all workers from the above
# pool are stuck, calls marked as high priority
# (notably domainDestroy) can be executed in this pool.
#prio_workers = 5

# Total global limit on concurrent RPC calls. Should be
# at least as large as max_workers. Beyond this, RPC requests
# will be read into memory and queued. This directly impacts
# memory usage; currently each request requires 256 KB of
# memory, so by default up to 5 MB of memory is used
#
# XXX this isn't actually enforced yet, only the per-client
# limit is used so far
#max_requests = 20

# Limit on concurrent requests from a single client
# connection. To avoid one client monopolizing the server
# this should be a small fraction of the global max_requests
# and max_workers parameters
#max_client_requests = 5

#################################################################
#
# Logging controls
#

# Logging level: 4 errors, 3 warnings, 2 information, 1 debug
# basically 1 will log everything possible
#log_level = 3

# Logging filters:
# A filter allows selecting a different logging level for a given category
# of logs
# The format for a filter is one of:
#    x:name
#    x:+name
# where name is a string which is matched against source file name,
# e.g., "remote", "qemu", or "util/json", the optional "+" prefix
# tells libvirt to log stack trace for each message matching name,
# and x is the minimal level where matching messages should be logged:
#    1: DEBUG
#    2: INFO
#    3: WARNING
#    4: ERROR
#
# Multiple filters can be defined in a single log_filters; they just need
# to be separated by spaces.
#
# e.g. to only get warnings or errors from the remote layer and only errors
# from the event layer:
#log_filters="3:remote 4:event"

# Logging outputs:
# An output is one of the places to save logging information
# The format for an output can be:
#    x:stderr
#      output goes to stderr
#    x:syslog:name
#      use syslog for the output and use the given name as the ident
#    x:file:file_path
#      output to a file, with the given filepath
# In all cases the x prefix is the minimal level, acting as a filter
#    1: DEBUG
#    2: INFO
#    3: WARNING
#    4: ERROR
#
# Multiple outputs can be defined; they just need to be separated by spaces.
# e.g. to log all warnings and errors to syslog under the libvirtd ident:
#log_outputs="3:syslog:libvirtd"
#

# Log debug buffer size: default 64
# The daemon keeps an internal debug log buffer which will be dumped in case
# of crash or upon receiving a SIGUSR2 signal. This setting allows overriding
# the default buffer size in kilobytes.
# If the value is 0 or less the debug log buffer is deactivated
#log_buffer_size = 64

##################################################################
#
# Auditing
#
# This setting allows usage of the auditing subsystem to be altered:
#
#   audit_level == 0 -> disable all auditing
#   audit_level == 1 -> enable auditing, only if enabled on host (default)
#   audit_level == 2 -> enable auditing, and exit if disabled on host
#
#audit_level = 2
#
# If set to 1, then audit messages will also be sent
# via the libvirt logging infrastructure. Defaults to 0
#
#audit_logging = 1

###################################################################
# UUID of the host:
# Provide the UUID of the host here in case the command
# 'dmidecode -s system-uuid' does not provide a valid uuid. In case
# 'dmidecode' does not provide a valid UUID and none is provided here, a
# temporary UUID will be generated.
# Keep the format of the example UUID below. The UUID must not have all
# digits be the same.

# NB This default all-zeros UUID will not work. Replace
# it with the output of the 'uuidgen' command and then
# uncomment this entry
#host_uuid = "00000000-0000-0000-0000-000000000000"

###################################################################
# Keepalive protocol:
# This allows libvirtd to detect broken client connections or even
# dead clients. A keepalive message is sent to a client after
# keepalive_interval seconds of inactivity to check if the client is
# still responding; keepalive_count is the maximum number of keepalive
# messages that are allowed to be sent to the client without getting
# any response before the connection is considered broken. In other
# words, the connection is automatically closed approximately after
# keepalive_interval * (keepalive_count + 1) seconds since the last
# message received from the client. If keepalive_interval is set to
# -1, libvirtd will never send keepalive requests; however clients
# can still send them and the daemon will send responses. When
# keepalive_count is set to 0, connections will be automatically
# closed after keepalive_interval seconds of inactivity without
# sending any keepalive messages.
#
#keepalive_interval = 5
#keepalive_count = 5
#
# If set to 1, libvirtd will refuse to talk to clients that do not
# support the keepalive protocol. Defaults to 0.
#
#keepalive_required = 1
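# Worked example (illustrative): with the defaults keepalive_interval = 5 and
# keepalive_count = 5 shown above, an unresponsive client is disconnected
# after approximately 5 * (5 + 1) = 30 seconds of silence.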
@ -1,410 +0,0 @@
# Master configuration file for the QEMU driver.
# All settings described here are optional - if omitted, sensible
# defaults are used.

# VNC is configured to listen on 127.0.0.1 by default.
# To make it listen on all public interfaces, uncomment
# this next option.
#
# NB, strong recommendation to enable TLS + x509 certificate
# verification when allowing public access
#
#vnc_listen = "0.0.0.0"

# Enable this option to have VNC served over an automatically created
# unix socket. This prevents unprivileged access from users on the
# host machine, though most VNC clients do not support it.
#
# This will only be enabled for VNC configurations that do not have
# a hardcoded 'listen' or 'socket' value. This setting takes preference
# over vnc_listen.
#
#vnc_auto_unix_socket = 1

# Enable use of TLS encryption on the VNC server. This requires
# a VNC client which supports the VeNCrypt protocol extension.
# Examples include vinagre, virt-viewer, virt-manager and vencrypt
# itself. UltraVNC, RealVNC, TightVNC do not support this
#
# It is necessary to setup a CA and issue a server certificate
# before enabling this.
#
#vnc_tls = 1

# Use of TLS requires that x509 certificates be issued. The
# default is to keep them in /etc/pki/libvirt-vnc. This directory
# must contain
#
#  ca-cert.pem - the CA master certificate
#  server-cert.pem - the server certificate signed with ca-cert.pem
#  server-key.pem - the server private key
#
# This option allows the certificate directory to be changed
#
#vnc_tls_x509_cert_dir = "/etc/pki/libvirt-vnc"
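# Illustrative layout (not part of the original file): with vnc_tls = 1 and
# the default directory, libvirt expects
#
#   /etc/pki/libvirt-vnc/ca-cert.pem
#   /etc/pki/libvirt-vnc/server-cert.pem
#   /etc/pki/libvirt-vnc/server-key.pem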
|
||||
|
||||
# The default TLS configuration only uses certificates for the server
|
||||
# allowing the client to verify the server's identity and establish
|
||||
# an encrypted channel.
|
||||
#
|
||||
# It is possible to use x509 certificates for authentication too, by
|
||||
# issuing a x509 certificate to every client who needs to connect.
|
||||
#
|
||||
# Enabling this option will reject any client who does not have a
|
||||
# certificate signed by the CA in /etc/pki/libvirt-vnc/ca-cert.pem
|
||||
#
|
||||
#vnc_tls_x509_verify = 1
|
||||
|
||||
|
||||
# The default VNC password. Only 8 letters are significant for
|
||||
# VNC passwords. This parameter is only used if the per-domain
|
||||
# XML config does not already provide a password. To allow
|
||||
# access without passwords, leave this commented out. An empty
|
||||
# string will still enable passwords, but be rejected by QEMU,
|
||||
# effectively preventing any use of VNC. Obviously change this
|
||||
# example here before you set this.
|
||||
#
|
||||
#vnc_password = "XYZ12345"
|
||||
|
||||
|
||||
# Enable use of SASL encryption on the VNC server. This requires
|
||||
# a VNC client which supports the SASL protocol extension.
|
||||
# Examples include vinagre, virt-viewer and virt-manager
|
||||
# itself. UltraVNC, RealVNC, TightVNC do not support this
|
||||
#
|
||||
# It is necessary to configure /etc/sasl2/qemu.conf to choose
|
||||
# the desired SASL plugin (eg, GSSPI for Kerberos)
|
||||
#
|
||||
#vnc_sasl = 1
|
||||
|
||||
|
||||
# The default SASL configuration file is located in /etc/sasl2/
|
||||
# When running libvirtd unprivileged, it may be desirable to
|
||||
# override the configs in this location. Set this parameter to
|
||||
# point to the directory, and create a qemu.conf in that location
|
||||
#
|
||||
#vnc_sasl_dir = "/some/directory/sasl2"
|
||||
|
||||
|
||||
# QEMU implements an extension for providing audio over a VNC connection,
|
||||
# though if your VNC client does not support it, your only chance for getting
|
||||
# sound output is through regular audio backends. By default, libvirt will
|
||||
# disable all QEMU sound backends if using VNC, since they can cause
|
||||
# permissions issues. Enabling this option will make libvirtd honor the
|
||||
# QEMU_AUDIO_DRV environment variable when using VNC.
|
||||
#
|
||||
#vnc_allow_host_audio = 0
|
||||
|
||||
|
||||
|
||||
# SPICE is configured to listen on 127.0.0.1 by default.
|
||||
# To make it listen on all public interfaces, uncomment
|
||||
# this next option.
|
||||
#
|
||||
# NB, strong recommendation to enable TLS + x509 certificate
|
||||
# verification when allowing public access
|
||||
#
|
||||
#spice_listen = "0.0.0.0"
|
||||
|
||||
|
||||
# Enable use of TLS encryption on the SPICE server.
|
||||
#
|
||||
# It is necessary to setup CA and issue a server certificate
|
||||
# before enabling this.
|
||||
#
|
||||
#spice_tls = 1
|
||||
|
||||
|
||||
# Use of TLS requires that x509 certificates be issued. The
|
||||
# default it to keep them in /etc/pki/libvirt-spice. This directory
|
||||
# must contain
|
||||
#
|
||||
# ca-cert.pem - the CA master certificate
|
||||
# server-cert.pem - the server certificate signed with ca-cert.pem
|
||||
# server-key.pem - the server private key
|
||||
#
|
||||
# This option allows the certificate directory to be changed.
|
||||
#
|
||||
#spice_tls_x509_cert_dir = "/etc/pki/libvirt-spice"
|
||||
|
||||
|
||||
# The default SPICE password. This parameter is only used if the
|
||||
# per-domain XML config does not already provide a password. To
|
||||
# allow access without passwords, leave this commented out. An
|
||||
# empty string will still enable passwords, but be rejected by
|
||||
# QEMU, effectively preventing any use of SPICE. Obviously change
|
||||
# this example here before you set this.
|
||||
#
|
||||
#spice_password = "XYZ12345"
|
||||
|
||||
|
||||
# Override the port for creating both VNC and SPICE sessions (min).
|
||||
# This defaults to 5900 and increases for consecutive sessions
|
||||
# or when ports are occupied, until it hits the maximum.
|
||||
#
|
||||
# Minimum must be greater than or equal to 5900 as lower number would
|
||||
# result into negative vnc display number.
|
||||
#
|
||||
# Maximum must be less than 65536, because higher numbers do not make
|
||||
# sense as a port number.
|
||||
#
|
||||
#remote_display_port_min = 5900
|
||||
#remote_display_port_max = 65535
|
||||
|
||||
|
||||
# The default security driver is SELinux. If SELinux is disabled
|
||||
# on the host, then the security driver will automatically disable
|
||||
# itself. If you wish to disable QEMU SELinux security driver while
|
||||
# leaving SELinux enabled for the host in general, then set this
|
||||
# to 'none' instead. It's also possible to use more than one security
|
||||
# driver at the same time, for this use a list of names separated by
|
||||
# comma and delimited by square brackets. For example:
|
||||
#
|
||||
# security_driver = [ "selinux", "apparmor" ]
|
||||
#
|
||||
# Notes: The DAC security driver is always enabled; as a result, the
|
||||
# value of security_driver cannot contain "dac". The value "none" is
|
||||
# a special value; security_driver can be set to that value in
|
||||
# isolation, but it cannot appear in a list of drivers.
|
||||
#
|
||||
#security_driver = "selinux"
|
||||
|
||||
# If set to non-zero, then the default security labeling
|
||||
# will make guests confined. If set to zero, then guests
|
||||
# will be unconfined by default. Defaults to 1.
|
||||
#security_default_confined = 1
|
||||
|
||||
# If set to non-zero, then attempts to create unconfined
|
||||
# guests will be blocked. Defaults to 0.
|
||||
#security_require_confined = 1
|
||||
|
||||
# The user for QEMU processes run by the system instance. It can be
|
||||
# specified as a user name or as a user id. The qemu driver will try to
|
||||
# parse this value first as a name and then, if the name doesn't exist,
|
||||
# as a user id.
|
||||
#
|
||||
# Since a sequence of digits is a valid user name, a leading plus sign
|
||||
# can be used to ensure that a user id will not be interpreted as a user
|
||||
# name.
|
||||
#
|
||||
# Some examples of valid values are:
|
||||
#
|
||||
# user = "qemu" # A user named "qemu"
|
||||
# user = "+0" # Super user (uid=0)
|
||||
# user = "100" # A user named "100" or a user with uid=100
|
||||
#
|
||||
#user = "root"
|
||||
|
||||
# The group for QEMU processes run by the system instance. It can be
|
||||
# specified in a similar way to user.
|
||||
#group = "root"
|
||||
|
||||
# Whether libvirt should dynamically change file ownership
|
||||
# to match the configured user/group above. Defaults to 1.
|
||||
# Set to 0 to disable file ownership changes.
|
||||
#dynamic_ownership = 1
|
||||
|
||||
|
||||
# What cgroup controllers to make use of with QEMU guests
|
||||
#
|
||||
# - 'cpu' - use for schedular tunables
|
||||
# - 'devices' - use for device whitelisting
|
||||
# - 'memory' - use for memory tunables
|
||||
# - 'blkio' - use for block devices I/O tunables
|
||||
# - 'cpuset' - use for CPUs and memory nodes
|
||||
# - 'cpuacct' - use for CPUs statistics.
|
||||
#
|
||||
# NB, even if configured here, they won't be used unless
|
||||
# the administrator has mounted cgroups, e.g.:
|
||||
#
|
||||
# mkdir /dev/cgroup
|
||||
# mount -t cgroup -o devices,cpu,memory,blkio,cpuset none /dev/cgroup
|
||||
#
|
||||
# They can be mounted anywhere, and different controllers
|
||||
# can be mounted in different locations. libvirt will detect
|
||||
# where they are located.
|
||||
#
|
||||
#cgroup_controllers = [ "cpu", "devices", "memory", "blkio", "cpuset", "cpuacct" ]
|
||||
|
||||
# This is the basic set of devices allowed / required by
|
||||
# all virtual machines.
|
||||
#
|
||||
# As well as this, any configured block backed disks,
|
||||
# all sound device, and all PTY devices are allowed.
|
||||
#
|
||||
# This will only need setting if newer QEMU suddenly
|
||||
# wants some device we don't already know about.
|
||||
#
|
||||
#cgroup_device_acl = [
|
||||
# "/dev/null", "/dev/full", "/dev/zero",
|
||||
# "/dev/random", "/dev/urandom",
|
||||
# "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
|
||||
# "/dev/rtc","/dev/hpet"
|
||||
#]
|
||||
|
||||
|
||||
# The default format for Qemu/KVM guest save images is raw; that is, the
|
||||
# memory from the domain is dumped out directly to a file. If you have
|
||||
# guests with a large amount of memory, however, this can take up quite
|
||||
# a bit of space. If you would like to compress the images while they
|
||||
# are being saved to disk, you can also set "lzop", "gzip", "bzip2", or "xz"
|
||||
# for save_image_format. Note that this means you slow down the process of
|
||||
# saving a domain in order to save disk space; the list above is in descending
|
||||
# order by performance and ascending order by compression ratio.
|
||||
#
|
||||
# save_image_format is used when you use 'virsh save' at scheduled
|
||||
# saving, and it is an error if the specified save_image_format is
|
||||
# not valid, or the requested compression program can't be found.
|
||||
#
|
||||
# dump_image_format is used when you use 'virsh dump' at emergency
|
||||
# crashdump, and if the specified dump_image_format is not valid, or
|
||||
# the requested compression program can't be found, this falls
|
||||
# back to "raw" compression.
|
||||
#
|
||||
#save_image_format = "raw"
|
||||
#dump_image_format = "raw"
|
||||
|
||||
# When a domain is configured to be auto-dumped when libvirtd receives a
|
||||
# watchdog event from qemu guest, libvirtd will save dump files in directory
|
||||
# specified by auto_dump_path. Default value is /var/lib/libvirt/qemu/dump
|
||||
#
|
||||
#auto_dump_path = "/var/lib/libvirt/qemu/dump"
|
||||
|
||||
# When a domain is configured to be auto-dumped, enabling this flag
|
||||
# has the same effect as using the VIR_DUMP_BYPASS_CACHE flag with the
|
||||
# virDomainCoreDump API. That is, the system will avoid using the
|
||||
# file system cache while writing the dump file, but may cause
|
||||
# slower operation.
|
||||
#
|
||||
#auto_dump_bypass_cache = 0
|
||||
|
||||
# When a domain is configured to be auto-started, enabling this flag
|
||||
# has the same effect as using the VIR_DOMAIN_START_BYPASS_CACHE flag
|
||||
# with the virDomainCreateWithFlags API. That is, the system will
|
||||
# avoid using the file system cache when restoring any managed state
|
||||
# file, but may cause slower operation.
|
||||
#
|
||||
#auto_start_bypass_cache = 0
|
||||
|
||||
# When a domain is configured to be auto-started, enabling this flag
# has the same effect as using the VIR_DOMAIN_START_BYPASS_CACHE flag
# with the virDomainCreateWithFlags API. That is, the system will
# avoid using the file system cache when restoring any managed state
# file, but this may cause slower operation.
#
#auto_start_bypass_cache = 0

# If provided by the host and a hugetlbfs mount point is configured,
# a guest may request huge page backing. When this mount point is
# unspecified here, determination of a host mount point in /proc/mounts
# will be attempted. Specifying an explicit mount overrides detection
# of the same in /proc/mounts. Setting the mount point to "" will
# disable guest hugepage backing.
#
# NB, within this mount point, guests will create memory backing files
# in a location of $MOUNTPOINT/libvirt/qemu
#
#hugetlbfs_mount = "/dev/hugepages"

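# If the host does not already mount hugetlbfs, a sketch of preparing
# the mount point by hand (assuming the /dev/hugepages path above):
#
#   mkdir -p /dev/hugepages
#   mount -t hugetlbfs hugetlbfs /dev/hugepages
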
# If clear_emulator_capabilities is enabled, libvirt will drop all
# privileged capabilities of the QEMU/KVM emulator. This is enabled by
# default.
#
# Warning: Disabling this option means that a compromised guest can
# exploit the privileges and possibly do damage to the host.
#
#clear_emulator_capabilities = 1

# If enabled, libvirt will have QEMU set its process name to
# "qemu:VM_NAME", where VM_NAME is the name of the VM. The QEMU
# process will appear as "qemu:VM_NAME" in process listings and
# other system monitoring tools. By default, QEMU does not set
# its process title, so the complete QEMU command (emulator and
# its arguments) appears in process listings.
#
#set_process_name = 1

# If max_processes is set to a positive integer, libvirt will use
# it to set the maximum number of processes that can be run by the
# qemu user. This can be used to override the default value set by
# the host OS. The same applies to max_files, which sets the limit
# on the maximum number of open files.
#
#max_processes = 0
#max_files = 0

# mac_filter enables MAC address based filtering on bridge ports.
# This currently requires ebtables to be installed.
#
#mac_filter = 1

# By default, PCI devices below a non-ACS switch are not allowed to be
# assigned to guests. Setting relaxed_acs_check to 1 allows such devices
# to be assigned to guests.
#
#relaxed_acs_check = 1

# If allow_disk_format_probing is enabled, libvirt will probe disk
# images to attempt to identify their format, when not otherwise
# specified in the XML. This is disabled by default.
#
# WARNING: Enabling probing is a security hole in almost all
# deployments. It is strongly recommended that users update their
# guest XML <disk> elements to include <driver type='XXXX'/>
# elements instead of enabling this option.
#
#allow_disk_format_probing = 1

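# For reference, the recommended <driver> element looks roughly like
# this in the guest XML (an illustrative snippet; the type must match
# the actual image format):
#
#   <disk type='file' device='disk'>
#     <driver name='qemu' type='qcow2'/>
#     <source file='/var/lib/libvirt/images/guest.qcow2'/>
#     <target dev='vda' bus='virtio'/>
#   </disk>
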
# To enable the 'Sanlock' project's locking of file content (to
# prevent two VMs from writing to the same disk), uncomment this
#
#lock_manager = "sanlock"

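# Note (added): the sanlock driver reads its own settings, such as
# host_id and the disk lease directory, from a separate file
# (/etc/libvirt/qemu-sanlock.conf on most distributions).
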
# Set the limit on the maximum number of APIs queued on one domain.
# All other APIs over this threshold will fail to acquire the job
# lock. In particular, setting it to zero turns this feature off.
# Note that the job lock is per domain.
#
#max_queued = 0

###################################################################
# Keepalive protocol:
# This allows the qemu driver to detect broken connections to a
# remote libvirtd during peer-to-peer migration. A keepalive message
# is sent to the daemon after keepalive_interval seconds of inactivity
# to check if the daemon is still responding; keepalive_count is the
# maximum number of keepalive messages that are allowed to be sent
# to the daemon without getting any response before the connection
# is considered broken. In other words, the connection is
# automatically closed approximately
# keepalive_interval * (keepalive_count + 1) seconds after the last
# message received from the daemon. If keepalive_interval is set to
# -1, the qemu driver will not send keepalive requests during
# peer-to-peer migration; however, the remote libvirtd can still
# send them and the source libvirtd will send responses. When
# keepalive_count is set to 0, connections will be automatically
# closed after keepalive_interval seconds of inactivity without
# any keepalive messages being sent.
#
#keepalive_interval = 5
#keepalive_count = 5
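# Worked example (added): with the default values above, a dead peer is
# detected roughly 5 * (5 + 1) = 30 seconds after the last message
# received from the daemon.
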
# Use seccomp syscall whitelisting in QEMU.
# 1 = on, 0 = off, -1 = use QEMU default
# Defaults to -1.
#
#seccomp_sandbox = 1

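# Device ACL actually applied by this sandbox. Compared with the default
# list documented above, it adds /dev/net/tun, which QEMU needs in order
# to attach guests to tap devices for Quantum/Neutron networking.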
cgroup_device_acl = [
    "/dev/null", "/dev/full", "/dev/zero",
    "/dev/random", "/dev/urandom",
    "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
    "/dev/rtc", "/dev/hpet", "/dev/net/tun"
]
@ -1,109 +0,0 @@
############
# Metadata #
############
[composite:metadata]
use = egg:Paste#urlmap
/: meta

[pipeline:meta]
pipeline = ec2faultwrap logrequest metaapp

[app:metaapp]
paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory

#######
# EC2 #
#######

[composite:ec2]
use = egg:Paste#urlmap
/services/Cloud: ec2cloud

[composite:ec2cloud]
use = call:nova.api.auth:pipeline_factory
noauth = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor
keystone = ec2faultwrap logrequest ec2keystoneauth cloudrequest validator ec2executor

[filter:ec2faultwrap]
paste.filter_factory = nova.api.ec2:FaultWrapper.factory

[filter:logrequest]
paste.filter_factory = nova.api.ec2:RequestLogging.factory

[filter:ec2lockout]
paste.filter_factory = nova.api.ec2:Lockout.factory

[filter:ec2keystoneauth]
paste.filter_factory = nova.api.ec2:EC2KeystoneAuth.factory

[filter:ec2noauth]
paste.filter_factory = nova.api.ec2:NoAuth.factory

[filter:cloudrequest]
controller = nova.api.ec2.cloud.CloudController
paste.filter_factory = nova.api.ec2:Requestify.factory

[filter:authorizer]
paste.filter_factory = nova.api.ec2:Authorizer.factory

[filter:validator]
paste.filter_factory = nova.api.ec2:Validator.factory

[app:ec2executor]
paste.app_factory = nova.api.ec2:Executor.factory

#############
# OpenStack #
#############

[composite:osapi_compute]
use = call:nova.api.openstack.urlmap:urlmap_factory
/: oscomputeversions
/v1.1: openstack_compute_api_v2
/v2: openstack_compute_api_v2

[composite:openstack_compute_api_v2]
use = call:nova.api.auth:pipeline_factory
noauth = faultwrap sizelimit noauth ratelimit osapi_compute_app_v2
keystone = faultwrap sizelimit authtoken keystonecontext ratelimit osapi_compute_app_v2
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v2
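
# Note (added): the pipeline used at runtime is not chosen in this file;
# nova.api.auth:pipeline_factory selects it from the auth_strategy option
# in nova.conf (set to "keystone" later in this commit).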
[filter:faultwrap]
paste.filter_factory = nova.api.openstack:FaultWrapper.factory

[filter:noauth]
paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory

[filter:ratelimit]
paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory

[filter:sizelimit]
paste.filter_factory = nova.api.sizelimit:RequestBodySizeLimiter.factory

[app:osapi_compute_app_v2]
paste.app_factory = nova.api.openstack.compute:APIRouter.factory

[pipeline:oscomputeversions]
pipeline = faultwrap oscomputeversionapp

[app:oscomputeversionapp]
paste.app_factory = nova.api.openstack.compute.versions:Versions.factory

##########
# Shared #
##########

[filter:keystonecontext]
paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory

[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
auth_host = 10.10.10.51
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = nova
admin_password = service_pass
signing_dirname = /tmp/keystone-signing-nova
# Workaround for https://bugs.launchpad.net/nova/+bug/1154809
auth_version = v2.0
@ -1,3 +0,0 @@
[DEFAULT]
libvirt_type=qemu
compute_driver=libvirt.LibvirtDriver
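# Note (added): libvirt_type=qemu selects unaccelerated QEMU emulation;
# KVM is normally unavailable inside a VirtualBox guest. "kvm-ok" (from
# the cpu-checker package downloaded by PreInstall.sh) confirms this.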
@ -1,66 +0,0 @@
[DEFAULT]
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/run/lock/nova
force_dhcp_release=True
iscsi_helper=tgtadm
libvirt_use_virtio_for_bridges=True
connection_type=libvirt
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
verbose=True
ec2_private_dns_show_ip=True
api_paste_config=/etc/nova/api-paste.ini
volumes_path=/var/lib/nova/volumes
enabled_apis=ec2,osapi_compute,metadata

compute_scheduler_driver=nova.scheduler.simple.SimpleScheduler
rabbit_host=10.10.10.51
nova_url=http://10.10.10.51:8774/v1.1/
sql_connection=mysql://novaUser:novaPass@10.10.10.51/nova

# Auth
use_deprecated_auth=false
auth_strategy=keystone

# Imaging service
glance_api_servers=10.10.10.51:9292
image_service=nova.image.glance.GlanceImageService

# Vnc configuration
novnc_enabled=true
novncproxy_base_url=http://10.10.10.51:6080/vnc_auto.html
novncproxy_port=6080
vncserver_proxyclient_address=10.10.10.51
vncserver_listen=0.0.0.0

# Network settings
network_api_class=nova.network.quantumv2.api.API
neutron_url=http://10.10.10.51:9696
neutron_auth_strategy=keystone
neutron_admin_tenant_name=service
neutron_admin_username=quantum
neutron_admin_password=service_pass
neutron_admin_auth_url=http://10.10.10.51:35357/v2.0
linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver
# If you want Quantum + Nova security groups
firewall_driver=nova.virt.firewall.NoopFirewallDriver
security_group_api=quantum
# If you want Nova security groups only, comment the two lines above and uncomment line -1-.
#-1-firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver

# Metadata
service_neutron_metadata_proxy = True
neutron_metadata_proxy_shared_secret = helloOpenStack
metadata_host = 10.10.10.51
metadata_listen = 127.0.0.1
metadata_listen_port = 8775

# Compute #
compute_driver=libvirt.LibvirtDriver

# Cinder #
volume_api_class=nova.volume.cinder.API
osapi_volume_listen_port=5900
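# Note (added): this moves nova's built-in volume API off its default
# port (8776), presumably so that cinder-api can listen there instead.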
@ -1,54 +0,0 @@
#!/bin/bash
#
# About: Set up dependencies for VirtualBox sandbox meant for OpenStack Labs.
#
# Contact: pranav@aptira.com
# Copyright: Aptira @aptira,aptira.com
# License: Apache Software License (ASL) 2.0
################################################################################
#                                                                              #
# This script will carry out a few tasks after installing OpenStack.          #
#                                                                              #
################################################################################
SCRIPT_DIR=$(cd $(dirname "$0") && pwd)

# Create tenant
keystone tenant-create --name Os_Training

echo -n "Enter tenant id: "
read TENANT_ID
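
# (Sketch, added:) the manual prompt above could be avoided by parsing
# the table that the keystone client prints, e.g.
#   TENANT_ID=$(keystone tenant-list | awk '/ Os_Training / {print $2}')
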
# Create user and assign required role
keystone user-create --name=trainee --pass=cloud --tenant-id "$TENANT_ID" --email=user_one@domain.com
echo -n "Enter user id: "
read USER_ID
keystone role-list
echo -n "Enter role id: "
read ROLE_ID
keystone user-role-add --tenant-id "$TENANT_ID" --user-id "$USER_ID" --role-id "$ROLE_ID"

# Create network
quantum net-create --tenant-id "$TENANT_ID" training_network

# Add subnet
quantum subnet-create --tenant-id "$TENANT_ID" training_network 25.25.25.0/24
echo -n "Enter subnet id: "
read SUBNET_ID

# Create router
quantum router-create --tenant-id "$TENANT_ID" training_router
echo -n "Enter router id: "
read ROUTER_ID

# Add router to L3 agent
quantum agent-list # to get the L3 agent ID
echo -n "Enter L3 agent id: "
read L3_AGENT_ID
quantum l3-agent-router-add "$L3_AGENT_ID" "$ROUTER_ID"

# Add router to subnet
quantum router-interface-add "$ROUTER_ID" "$SUBNET_ID"

echo "To log into your cloud via the Dashboard, use the following credentials:"
echo "User name: trainee"
echo "Password: cloud"
@ -1,114 +0,0 @@
#!/bin/bash
#
# About: Set up dependencies for VirtualBox sandbox meant for OpenStack Labs.
#
# Contact: pranav@aptira.com
# Copyright: Aptira @aptira,aptira.com
# License: Apache Software License (ASL) 2.0
################################################################################
#                                                                              #
# This script downloads the required packages and configures the network      #
# interfaces.                                                                  #
# Downloading all packages here allows the remaining scripts to run without   #
# Internet access, which may be useful for training in unusual locations.     #
#                                                                              #
################################################################################

# Note: Internet access is required for this script to run.
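
# Usage (sketch, added): run as root with the node layout as arguments, e.g.
#   bash PreInstall.sh single-node
#   bash PreInstall.sh multi-node control
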
SCRIPT_DIR=$(cd $(dirname "$0") && pwd)

# Add a few required packages
apt-get install -y ubuntu-cloud-keyring python-software-properties software-properties-common python-keyring
# Update the package index
apt-get -y update

# Add the OpenStack Grizzly repo
echo deb http://ubuntu-cloud.archive.canonical.com/ubuntu precise-updates/grizzly main >> /etc/apt/sources.list.d/grizzly.list

# Refresh the package index, then download (but do not install) package
# and kernel upgrades so they can be applied later without Internet access
apt-get -y update
apt-get -y --download-only upgrade
apt-get -y --download-only dist-upgrade
apt-get install -y --download-only ubuntu-cloud-keyring python-software-properties software-properties-common python-keyring

# Download the CirrOS image for use in Glance.sh
wget --directory-prefix="$SCRIPT_DIR/../Glance" http://download.cirros-cloud.net/0.3.2/cirros-0.3.2-x86_64-disk.img

# Configure the network interfaces by using the templates in the Templates
# directory

# TO BE ADDED LATER ON.

#service networking restart
configure_networks() {
    # Check if it is single node or multi node
    if [ "$1" == "single-node" ]; then
        # Copy the single node interfaces file to /etc/network/
        echo "Configuring network for single node"
        cp --no-preserve=mode,ownership "$SCRIPT_DIR/Templates/interfaces-single" /etc/network/interfaces

    elif [ "$1" == "multi-node" ]; then
        # If it is multi node, check which type of node it is
        case "$2" in
            "control")
                # Configure network for the control node
                echo "Configuring network for control node"
                cp --no-preserve=mode,ownership "$SCRIPT_DIR/Templates/interfaces-control" /etc/network/interfaces
                ;;

            "compute")
                # Configure network for the compute node
                echo "Configuring network for compute node"
                cp --no-preserve=mode,ownership "$SCRIPT_DIR/Templates/interfaces-compute" /etc/network/interfaces
                ;;
            "network")
                # Configure network for the network node
                echo "Configuring network for network node"
                cp --no-preserve=mode,ownership "$SCRIPT_DIR/Templates/interfaces-network" /etc/network/interfaces
                ;;
            *)
                echo "Invalid input, cannot figure out which node this is. Error!"
                ;;
        esac
    fi

    service networking restart
}

single_node() {
    # Download all packages needed on the single virtual machine
    apt-get install -y --download-only mysql-server python-mysqldb rabbitmq-server ntp vlan bridge-utils \
        keystone glance openvswitch-switch openvswitch-datapath-dkms quantum-server quantum-plugin-openvswitch \
        quantum-plugin-openvswitch-agent dnsmasq quantum-dhcp-agent quantum-l3-agent cpu-checker kvm libvirt-bin \
        pm-utils nova-api nova-cert novnc nova-consoleauth nova-scheduler nova-novncproxy nova-doc nova-conductor \
        nova-compute-kvm cinder-api cinder-scheduler cinder-volume iscsitarget open-iscsi iscsitarget-dkms openstack-dashboard memcached

    configure_networks "$1" "$2"
}

multi_node() {
    # $2 will be the node definition -- control node, compute node or
    # network node.
    configure_networks "$1" "$2"
    # Install packages as per the node definition ...

    # TO BE DONE.

    # Also need to decide whether to download all the packages even if
    # they play no role on the given node, to keep the scripts simpler.
}

if [ "$1" == "single-node" ]; then
    single_node "$1" "$2"
elif [ "$1" == "multi-node" ]; then
    multi_node "$1" "$2"
else
    echo "Invalid option ... cannot proceed"
fi

echo -e "Your VM is ready for installing OpenStack\nYou don't need Internet access from now on."
@ -1,24 +0,0 @@
# interfaces(5) file used by ifup(8) and ifdown(8)

# local loopback
auto lo
iface lo inet loopback

# VirtualBox NAT -- for Internet access to VM
auto eth2
iface eth2 inet dhcp

# OpenStack multi node
# Compute node

# OpenStack management network (internal)
auto eth0
iface eth0 inet static
    address 10.10.10.53
    netmask 255.255.255.0

# OpenStack data network (internal)
auto eth1
iface eth1 inet static
    address 10.20.20.53
    netmask 255.255.255.0
@ -1,24 +0,0 @@
# interfaces(5) file used by ifup(8) and ifdown(8)

# local loopback
auto lo
iface lo inet loopback

# VirtualBox NAT -- for Internet access to VM
auto eth2
iface eth2 inet dhcp

# OpenStack multi node
# Control node

# OpenStack management network (internal)
auto eth0
iface eth0 inet static
    address 10.10.10.51
    netmask 255.255.255.0

# OpenStack API network (external)
auto eth1
iface eth1 inet static
    address 192.168.100.51
    netmask 255.255.255.0
@ -1,30 +0,0 @@
# interfaces(5) file used by ifup(8) and ifdown(8)

# local loopback
auto lo
iface lo inet loopback

# VirtualBox NAT -- for Internet access to VM
auto eth3
iface eth3 inet dhcp

# OpenStack multi node
# Network node

# OpenStack management network (internal)
auto eth0
iface eth0 inet static
    address 10.10.10.52
    netmask 255.255.255.0

# OpenStack data network (internal)
auto eth1
iface eth1 inet static
    address 10.20.20.52
    netmask 255.255.255.0

# OpenStack API network (external)
auto eth2
iface eth2 inet static
    address 192.168.100.52
    netmask 255.255.255.0
@ -1,23 +0,0 @@
# interfaces(5) file used by ifup(8) and ifdown(8)

# local loopback
auto lo
iface lo inet loopback

# VirtualBox NAT -- for Internet access to VM
auto eth2
iface eth2 inet dhcp

# OpenStack single node

# OpenStack management network (internal)
auto eth0
iface eth0 inet static
    address 10.10.10.51
    netmask 255.255.255.0

# OpenStack API network (external)
auto eth1
iface eth1 inet static
    address 192.168.100.51
    netmask 255.255.255.0
@ -1,24 +0,0 @@
# interfaces(5) file used by ifup(8) and ifdown(8)

# local loopback
auto lo
iface lo inet loopback

# VirtualBox NAT -- for Internet access to VM
auto eth0
iface eth0 inet dhcp

# OpenStack multi node
# Compute node

# Management network
auto eth1
iface eth1 inet static
    address 10.10.10.53
    netmask 255.255.255.0

# VM internal communication network
auto eth2
iface eth2 inet static
    address 10.20.20.53
    netmask 255.255.255.0
@ -1,24 +0,0 @@
# interfaces(5) file used by ifup(8) and ifdown(8)

# local loopback
auto lo
iface lo inet loopback

# VirtualBox NAT -- for Internet access to VM
auto eth0
iface eth0 inet dhcp

# OpenStack multi node
# Control node

# OpenStack management network
auto eth1
iface eth1 inet static
    address 10.10.10.51
    netmask 255.255.255.0

# Expose OpenStack API to external network/Internet
auto eth2
iface eth2 inet static
    address 192.168.100.51
    netmask 255.255.255.0
@ -1,30 +0,0 @@
# interfaces(5) file used by ifup(8) and ifdown(8)

# local loopback
auto lo
iface lo inet loopback

# VirtualBox NAT -- for Internet access to VM
auto eth0
iface eth0 inet dhcp

# OpenStack multi node
# Network node

# OpenStack management network
auto eth1
iface eth1 inet static
    address 10.10.10.52
    netmask 255.255.255.0

# VM internal communication network
auto eth2
iface eth2 inet static
    address 10.20.20.52
    netmask 255.255.255.0

# VM Internet/external network access
auto eth3
iface eth3 inet static
    address 192.168.100.52
    netmask 255.255.255.0
@ -1,23 +0,0 @@
# interfaces(5) file used by ifup(8) and ifdown(8)

# local loopback
auto lo
iface lo inet loopback

# VirtualBox NAT -- for Internet access to VM
auto eth0
iface eth0 inet dhcp

# OpenStack single node

# OpenStack management network
auto eth1
iface eth1 inet static
    address 10.10.10.51
    netmask 255.255.255.0

# Expose OpenStack API to external network/Internet
auto eth2
iface eth2 inet static
    address 192.168.100.51
    netmask 255.255.255.0
@ -1,60 +0,0 @@
#!/bin/bash
#
# This script is for testing the scripts inside this folder.
#
# Contact: pranav@aptira.com
# Copyright: Aptira @aptira,aptira.com
# License: Apache Software License (ASL) 2.0
###############################################################################
#                                                                             #
#                            Testing the scripts                              #
#                                                                             #
###############################################################################

source Keystone/Scripts/Credentials.sh
echo "
Run this script from inside your virtual machine or test machine.
This script is meant for testing the scripts related to OpenStack,
not those related to VirtualBox.

The sole aim of this script is to test all of the OpenStack scripts in
the subfolders that deploy OpenStack. It is very important that the
scripts install and configure OpenStack reliably; otherwise one might
as well use DevStack ;).
"

echo "Warning!!! This may break your operating system."

echo -n "Do you want to continue (y/N)? "
read cont

if [ "$cont" == "Y" -o "$cont" == "y" ]; then

    # Missing exception handlers :((, they would have been very handy here
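    # (Sketch, added:) bash has no exceptions, but aborting on the first
    # failure gives a similar effect:
    #   set -e                                        # exit on any failed command
    #   trap 'echo "FAILED: $BASH_COMMAND" >&2' ERR   # report what failed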
echo "You pressed Yes."
|
||||
echo "Testing PreInstall.sh"
|
||||
bash PreInstall/PreInstall.sh "single-node" > Logs/PreInstall.log
|
||||
|
||||
echo "Testing Keystone.sh"
|
||||
bash Keystone/Keystone.sh > Logs/Keystone.log
|
||||
|
||||
echo "Testing Glance.sh"
|
||||
bash Glance/Glance.sh > Logs/Glance.log
|
||||
|
||||
echo "Testing Cinder.sh"
|
||||
bash Cinder/Cinder.sh > Logs/Cinder.log
|
||||
|
||||
echo "Testing Neutron.sh"
|
||||
bash Neutron/Neutron.sh > Logs/Neutron.log
|
||||
|
||||
echo "Testing Nova.sh"
|
||||
bash Nova/Nova.sh > Logs/Nova.log
|
||||
|
||||
echo "Testing Horizon.sh"
|
||||
bash Horizon/Horizon.sh > Logs/Horizon.log
|
||||
|
||||
echo "Testing PostInstall.sh"
|
||||
bash PostInstall/PostInstall.sh > Logs/PostInstall.log
|
||||
fi
|
||||
echo "Mostly the tests run fine ... although I'm not sure."
|
||||
echo "Please read the terminal messages carefully."
|