HTTPS support.

parent a78da151bd
commit 2b7972d49c
config.yaml — 10 additions

@@ -128,3 +128,13 @@ options:
     description: |
       Default multicast port number that will be used to communicate between
       HA Cluster nodes.
+  ssl_cert:
+    type: string
+    description: |
+      SSL certificate to install and use for API ports. Setting this value
+      and ssl_key will enable reverse proxying, point Glance's entry in the
+      Keystone catalog to use https, and override any certficiate and key
+      issued by Keystone (if it is configured to do so).
+  ssl_key:
+    type: string
+    description: SSL key to use with certificate specified as ssl_cert.
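For reference, a minimal sketch of how these two new options might be set on a deployed service with the juju 1.x-style CLI of this era; the service name and file paths are assumptions, and the values are passed as raw PEM text since enable_https() below writes the config values to disk verbatim:

    # Hypothetical example: supplying both options enables reverse proxying.
    juju set nova-cloud-controller \
        ssl_cert="$(cat /path/to/cert.pem)" \
        ssl_key="$(cat /path/to/key.pem)"

    # Clearing either value makes https() fail again, so setup_https() falls
    # back to disable_https on the next config-changed run.
    juju set nova-cloud-controller ssl_cert="" ssl_key=""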
hooks/cluster-relation-changed — new symbolic link, 1 addition

@@ -0,0 +1 @@
+nova-cloud-controller-relations

hooks/cluster-relation-departed — new symbolic link, 1 addition

@@ -0,0 +1 @@
+nova-cloud-controller-relations
@@ -317,18 +317,16 @@ function get_block_device() {
 
 HAPROXY_CFG=/etc/haproxy/haproxy.cfg
 HAPROXY_DEFAULT=/etc/default/haproxy
 
 ##########################################################################
 # Description: Configures HAProxy services for Openstack API's
 # Parameters:
-#   Space delimited list of service:port combinations for which
+#   Space delimited list of service:ext_port:int_port combinations for which
 #   haproxy service configuration should be generated for. The function
 #   assumes the name of the peer relation is 'cluster' and that every
 #   service unit in the peer relation is running the same services.
 #
-#   The HAProxy service will listen on port + 10000.
-#   Example:
-#     configure_haproxy cinder_api:12345 nova_api:9999
+#   Example
+#     configure_haproxy cinder_api:8776:8756i nova_api:8774:8764
 ##########################################################################
 configure_haproxy() {
   local address=`unit-get private-address`
@@ -337,7 +335,7 @@ configure_haproxy() {
 global
   log 127.0.0.1 local0
   log 127.0.0.1 local1 notice
-  maxconn 4096
+  maxconn 20000
   user haproxy
   group haproxy
   spread-checks 0
@@ -364,14 +362,18 @@ listen stats :8888
 EOF
   for service in $@; do
     local service_name=$(echo $service | cut -d : -f 1)
-    local api_listen_port=$(echo $service | cut -d : -f 2)
-    local haproxy_listen_port=$(($api_listen_port + 10000))
+    local haproxy_listen_port=$(echo $service | cut -d : -f 2)
+    local api_listen_port=$(echo $service | cut -d : -f 3)
+    juju-log "Adding haproxy configuration entry for $service "\
+             "($haproxy_listen_port -> $api_listen_port)"
     cat >> $HAPROXY_CFG << EOF
 listen $service_name 0.0.0.0:$haproxy_listen_port
   balance roundrobin
   option tcplog
   server $name $address:$api_listen_port check
 EOF
+    local r_id=""
+    local unit=""
     for r_id in `relation-ids cluster`; do
       for unit in `relation-list -r $r_id`; do
         local unit_name=${unit////-}
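A quick illustration of how the new three-field mappings are split by the cut calls above; the example mapping is hypothetical:

    service="nova_api:8764:8754"       # name:ext(haproxy frontend):int(API backend)
    echo $service | cut -d : -f 1      # -> nova_api
    echo $service | cut -d : -f 2      # -> 8764 (port haproxy listens on)
    echo $service | cut -d : -f 3      # -> 8754 (port the API server is bound to)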
@@ -384,6 +386,7 @@ EOF
     done
   done
   echo "ENABLED=1" > $HAPROXY_DEFAULT
+  service haproxy restart
 }
 
 ##########################################################################
@@ -391,14 +394,94 @@ EOF
 # Returns: 0 if configured, 1 if not configured
 ##########################################################################
 is_clustered() {
-  for r_id in `relation-ids ha`; do
-    for unit in `relation-list -r $r_id`; do
-      clustered=`relation-get -r $r_id clustered $unit`
-      if [ -n "$clustered" ]; then
-        return 0
-      fi
-    done
+  local r_id=""
+  local unit=""
+  for r_id in $(relation-ids ha); do
+    if [ -n "$r_id" ]; then
+      for unit in $(relation-list -r $r_id); do
+        clustered=$(relation-get -r $r_id clustered $unit)
+        if [ -n "$clustered" ]; then
+          juju-log "Unit is haclustered"
+          return 0
+        fi
+      done
+    fi
   done
+  echo "Unit is not haclustered"
+  return 1
+}
+
+##########################################################################
+# Description: Return a list of all peers in cluster relations
+##########################################################################
+peer_units() {
+  local peers=""
+  local r_id=""
+  for r_id in $(relation-ids cluster); do
+    peers="$peers $(relation-list -r $r_id)"
+  done
+  echo $peers
+}
+
+##########################################################################
+# Description: Determines whether the current unit is the oldest of all
+#              its peers - supports partial leader election
+# Returns: 0 if oldest, 1 if not
+##########################################################################
+oldest_peer() {
+  peers=$1
+  local l_unit_no=$(echo $JUJU_UNIT_NAME | cut -d / -f 2)
+  for peer in $peers; do
+    echo "Comparing $JUJU_UNIT_NAME with peers: $peers"
+    local r_unit_no=$(echo $peer | cut -d / -f 2)
+    if (($r_unit_no<$l_unit_no)); then
+      juju-log "Not oldest peer; deferring"
+      return 1
+    fi
+  done
+  juju-log "Oldest peer; might take charge?"
+  return 0
+}
+
+##########################################################################
+# Description: Determines whether the current service units is the
+#              leader within a) a cluster of its peers or b) across a
+#              set of unclustered peers.
+# Parameters: CRM resource to check ownership of if clustered
+# Returns: 0 if leader, 1 if not
+##########################################################################
+eligible_leader() {
+  if is_clustered; then
+    if ! is_leader $1; then
+      juju-log 'Deferring action to CRM leader'
+      return 1
+    fi
+  else
+    peers=$(peer_units)
+    for peer in $peers ; do
+      echo "$peer"
+    done
+    if [ -n "$peers" ] && ! oldest_peer "$peers"; then
+      juju-log 'Deferring action to oldest service unit.'
+      return 1
+    fi
+  fi
+  return 0
+}
+
+##########################################################################
+# Description: Query Cluster peer interface to see if peered
+# Returns: 0 if peered, 1 if not peered
+##########################################################################
+is_peered() {
+  local r_id=$(relation-ids cluster)
+  if [ -n "$r_id" ]; then
+    if [ -n "$(relation-list -r $r_id)" ]; then
+      juju-log "Unit peered"
+      return 0
+    fi
+  fi
+  juju-log "Unit not peered"
   return 1
 }
 
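To illustrate the partial leader election above, a hypothetical example of how oldest_peer() resolves leadership from juju unit numbers (service name and unit counts are assumptions):

    # JUJU_UNIT_NAME=nova-cloud-controller/0, peers="nova-cloud-controller/1 nova-cloud-controller/2"
    #   -> no peer has a lower unit number, oldest_peer returns 0 and this unit acts as leader
    # JUJU_UNIT_NAME=nova-cloud-controller/2, peers="nova-cloud-controller/0 nova-cloud-controller/1"
    #   -> unit 0 has a lower number, oldest_peer returns 1 and eligible_leader defers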
@@ -411,9 +494,192 @@ is_leader() {
   hostname=`hostname`
   if [ -x /usr/sbin/crm ]; then
     if crm resource show $1 | grep -q $hostname; then
+      juju-log "$hostname is cluster leader"
       return 0
     fi
   fi
+  juju-log "$hostname is not cluster leader"
   return 1
 }
 
+##########################################################################
+# Description: Determines whether enough data has been provided in
+#              configuration or relation data to configure HTTPS.
+# Parameters: None
+# Returns: 0 if HTTPS can be configured, 1 if not.
+##########################################################################
+https() {
+  local r_id=""
+  if [[ -n "$(config-get ssl_cert)" ]] &&
+     [[ -n "$(config-get ssl_key)" ]] ; then
+    return 0
+  fi
+  for r_id in $(relation-ids identity-service) ; do
+    for unit in $(relation-list -r $r_id) ; do
+      if [[ "$(relation-get -r $r_id https_keystone $unit)" == "True" ]] &&
+         [[ -n "$(relation-get -r $r_id ssl_cert $unit)" ]] &&
+         [[ -n "$(relation-get -r $r_id ssl_key $unit)" ]] &&
+         [[ -n "$(relation-get -r $r_id ca_cert $unit)" ]] ; then
+        return 0
+      fi
+    done
+  done
+  return 1
+}
+
+##########################################################################
+# Description: For a given number of port mappings, configures apache2
+# HTTPs local reverse proxying using certficates and keys provided in
+# either configuration data (preferred) or relation data.  Assumes ports
+# are not in use (calling charm should ensure that).
+# Parameters: Variable number of proxy port mappings as
+#             $internal:$external.
+# Returns: 0 if reverse proxy(s) have been configured, 0 if not.
+##########################################################################
+enable_https() {
+  local port_maps="$@"
+  local http_restart=""
+  juju-log "Enabling HTTPS for port mappings: $port_maps."
+
+  # allow overriding of keystone provided certs with those set manually
+  # in config.
+  cert=$(config-get ssl_cert)
+  key=$(config-get ssl_key)
+  if [[ -z "$cert" ]] || [[ -z "$key" ]] ; then
+    juju-log "Inspecting identity-service relations for SSL certificate."
+    local r_id=""
+    for r_id in $(relation-ids identity-service) ; do
+      for unit in $(relation-list -r $r_id) ; do
+        cert="$(relation-get -r $r_id ssl_cert $unit)"
+        key="$(relation-get -r $r_id ssl_key $unit)"
+        ca_cert="$(relation-get -r $r_id ca_cert $unit)"
+      done
+    done
+    [[ -n "$cert" ]] && cert=$(echo $cert | base64 -di)
+    [[ -n "$key" ]] && key=$(echo $key | base64 -di)
+    [[ -n "$ca_cert" ]] && ca_cert=$(echo $ca_cert | base64 -di)
+  else
+    juju-log "Using SSL certificate provided in service config."
+  fi
+
+  [[ -z "$cert" ]] || [[ -z "$key" ]] &&
+    juju-log "Expected but could not find SSL certificate data, not "\
+             "configuring HTTPS!" && return 1
+
+  apt-get -y install apache2
+  a2enmod ssl proxy proxy_http | grep -v "To activate the new configuration" &&
+    http_restart=1
+
+  mkdir -p /etc/apache2/ssl/$CHARM
+  echo "$cert" >/etc/apache2/ssl/$CHARM/cert
+  echo "$key" >/etc/apache2/ssl/$CHARM/key
+  if [[ -n "$ca_cert" ]] ; then
+    juju-log "Installing Keystone supplied CA cert."
+    echo "$ca_cert" >/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt
+    update-ca-certificates --fresh
+  fi
+  for port_map in $port_maps ; do
+    local ext_port=$(echo $port_map | cut -d: -f1)
+    local int_port=$(echo $port_map | cut -d: -f2)
+    juju-log "Creating apache2 reverse proxy vhost for $port_map."
+    cat >/etc/apache2/sites-available/${CHARM}_${ext_port} <<END
+Listen $ext_port
+NameVirtualHost *:$ext_port
+<VirtualHost *:$ext_port>
+    ServerName $(unit-get private-address)
+    SSLEngine on
+    SSLCertificateFile /etc/apache2/ssl/$CHARM/cert
+    SSLCertificateKeyFile /etc/apache2/ssl/$CHARM/key
+    ProxyPass / http://localhost:$int_port/
+    ProxyPassReverse / http://localhost:$int_port/
+    ProxyPreserveHost on
+</VirtualHost>
+<Proxy *>
+    Order deny,allow
+    Allow from all
+</Proxy>
+<Location />
+    Order allow,deny
+    Allow from all
+</Location>
+END
+    a2ensite ${CHARM}_${ext_port} | grep -v "To activate the new configuration" &&
+      http_restart=1
+  done
+  if [[ -n "$http_restart" ]] ; then
+    service apache2 restart
+  fi
+}
+
+##########################################################################
+# Description: Ensure HTTPS reverse proxying is disabled for given port
+#              mappings.
+# Parameters: Variable number of proxy port mappings as
+#             $internal:$external.
+# Returns: 0 if reverse proxy is not active for all portmaps, 1 on error.
+##########################################################################
+disable_https() {
+  local port_maps="$@"
+  local http_restart=""
+  juju-log "Ensuring HTTPS disabled for $port_maps."
+  ( [[ ! -d /etc/apache2 ]] || [[ ! -d /etc/apache2/ssl/$CHARM ]] ) && return 0
+  for port_map in $port_maps ; do
+    local ext_port=$(echo $port_map | cut -d: -f1)
+    local int_port=$(echo $port_map | cut -d: -f2)
+    if [[ -e /etc/apache2/sites-available/${CHARM}_${ext_port} ]] ; then
+      juju-log "Disabling HTTPS reverse proxy for $CHARM $port_map."
+      a2dissite ${CHARM}_${ext_port} | grep -v "To activate the new configuration" &&
+        http_restart=1
+    fi
+  done
+  if [[ -n "$http_restart" ]] ; then
+    service apache2 restart
+  fi
+}
+
+
+##########################################################################
+# Description: Ensures HTTPS is either enabled or disabled for given port
+#              mapping.
+# Parameters: Variable number of proxy port mappings as
+#             $internal:$external.
+# Returns: 0 if HTTPS reverse proxy is in place, 1 if it is not.
+##########################################################################
+setup_https() {
+  # configure https via apache reverse proxying either
+  # using certs provided by config or keystone.
+  [[ -z "$CHARM" ]] &&
+    error_out "setup_https(): CHARM not set."
+  if ! https ; then
+    disable_https $@
+  else
+    enable_https $@
+  fi
+}
+
+##########################################################################
+# Description: Determine correct API server listening port based on
+#              existence of HTTPS reverse proxy and/or haproxy.
+# Paremeters: The standard public port for given service.
+# Returns: The correct listening port for API service.
+##########################################################################
+determine_api_port() {
+  local public_port="$1"
+  local i=0
+  ( [[ -n "$(peer_units)" ]] || is_clustered >/dev/null 2>&1 ) && i=$[$i + 1]
+  https >/dev/null 2>&1 && i=$[$i + 1]
+  echo $[$public_port - $[$i * 10]]
+}
+
+##########################################################################
+# Description: Determine correct proxy listening port based on public IP +
+#              existence of HTTPS reverse proxy.
+# Paremeters: The standard public port for given service.
+# Returns: The correct listening port for haproxy service public address.
+##########################################################################
+determine_haproxy_port() {
+  local public_port="$1"
+  local i=0
+  https >/dev/null 2>&1 && i=$[$i + 1]
+  echo $[$public_port - $[$i * 10]]
+}
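To make the port arithmetic above concrete, a small sketch of the values these helpers emit for the nova-api-os-compute public port (8774); the deployment states are illustrative:

    # standalone, no SSL:  determine_api_port 8774      -> 8774 (API keeps the public port)
    # clustered, no SSL:   determine_api_port 8774      -> 8764 (haproxy takes 8774)
    # clustered + HTTPS:   determine_api_port 8774      -> 8754 (apache takes 8774, haproxy 8764)
    #                      determine_haproxy_port 8774  -> 8764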
@@ -219,3 +219,65 @@ function ssh_compute {
     known_hosts="$(base64 /etc/nova/compute_ssh/$sunit/known_hosts)" \
     authorized_keys="$(base64 /etc/nova/compute_ssh/$sunit/authorized_keys)"
 }
+
+configure_https() {
+  # setup https termination for all api services, depending on what is running
+  # and topology of current deployment.
+  local clustered=""
+  ( [[ -n "$(peer_units)" ]] || is_clustered ) && clustered="1"
+  local services=""
+  local ssl_port_maps=""
+  local haproxy_port_maps=""
+  local next_server=""
+  local api_port=""
+
+  # upstartService:defaultPort:configOption
+  local svcs="nova-api-ec2:8773:ec2_listen_port
+              nova-api-os-compute:8774:osapi_compute_listen_port
+              nova-objectstore:3333:s3_listen_port"
+  [[ "$NET_MANAGER" == "Quantum" ]] &&
+    svcs="$svcs quantum-server:9696:bind_port"
+
+  for s in $svcs ; do
+    local service=$(echo $s | cut -d: -f1)
+    local port=$(echo $s | cut -d: -f2)
+    local opt=$(echo $s | cut -d: -f3)
+    if [[ -n "$clustered" ]] ; then
+      next_server="$(determine_haproxy_port $port)"
+      api_port="$(determine_api_port $port)"
+      haproxy_port_maps="$haproxy_port_maps $service:$next_server:$api_port"
+    else
+      api_port="$(determine_api_port $port)"
+      next_server="$api_port"
+    fi
+    set_or_update "$opt" "$api_port"
+    ssl_port_maps="$ssl_port_maps $port:$next_server"
+  done
+
+  # make sure all backend api servers are bound to new backend port
+  # before setting up any frontends.
+  for s in $svcs ; do
+    local service=$(echo $s | cut -d: -f1)
+    service_ctl $service restart
+  done
+
+  [[ -n "$haproxy_port_maps" ]] && configure_haproxy $haproxy_port_maps
+  setup_https $ssl_port_maps
+
+  # another restart to ensure api servers are now bound to frontend ports
+  # that may have just been disabled.
+  for s in $svcs ; do
+    local service=$(echo $s | cut -d: -f1)
+    service_ctl $service restart
+  done
+
+  local r_id=""
+  # (re)configure ks endpoint accordingly
+  for r_id in "$(relation-ids identity-service)" ; do
+    keystone_joined "$r_id"
+  done
+  # pass on possibly updated quantum URL + ca_cert to compute nodes.
+  for r_id in "$(relation-ids cloud-compute)" ; do
+    compute_joined "$r_id"
+  done
+}
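As a worked example of what configure_https() wires up for one service in the clustered, SSL-enabled case (values derived from the helpers above; the addresses and deployment shape are illustrative):

    # nova-api-ec2, public port 8773, clustered deployment with ssl_cert/ssl_key set:
    #   determine_haproxy_port 8773 -> 8763,  determine_api_port 8773 -> 8753
    #
    #   client --> apache2 SSL vhost :8773          (ssl_port_maps entry "8773:8763")
    #                '--> haproxy frontend :8763    (haproxy_port_maps entry "nova-api-ec2:8763:8753")
    #                       '--> nova-api-ec2 :8753 (ec2_listen_port=8753 via set_or_update)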
@@ -44,6 +44,7 @@ function install_hook {
   cp files/create_tenant_net.py /usr/bin/quantum-tenant-net
 
   service_ctl all stop
+  configure_https
 }
 
 function upgrade_charm {
@@ -70,13 +71,14 @@ function config_changed {
   set_config_flags
 
   if [ "$NET_MANAGER" == "Quantum" ] && \
-     is_clustered && is_leader 'res_nova_vip' || \
+     eligible_leader 'res_nova_vip' || \
      ! is_clustered; then
     configure_quantum_networking
   fi
 
   determine_services
   service_ctl all restart
+  configure_https
 }
 
 function amqp_joined {
@@ -191,27 +193,15 @@ function keystone_joined {
   # we need to get two entries into keystone's catalog, nova + ec2
   # group, them by prepending $service_ to each setting. the keystone
   # charm will assemble settings into corresponding catalog entries
-  if is_clustered && is_leader 'res_nova_vip'; then
-    address=$(config-get vip)
-    nova_port=18774
-    ec2_port=18773
-    s3_port=13333
-    quantum_port=19696
-    vol_port=18776
-  elif ! is_clustered; then
-    address=$(unit-get private-address)
-    nova_port=8774
-    ec2_port=8773
-    s3_port=3333
-    quantum_port=9696
-    vol_port=8776
-  else
-    # Not the leader and clustered - no action required
-    return 0
-  fi
-  nova_url="http://$address:$nova_port/v1.1/\$(tenant_id)s"
-  ec2_url="http://$address:$ec2_port/services/Cloud"
-  s3_url="http://$address:$s3_port"
+  eligible_leader 'res_nova_vip' || return 0
+
+  is_clustered && local host=$(config-get vip) ||
+    local host=$(unit-get private-address)
+  https && local scheme="https" || local scheme="http"
+
+  nova_url="$scheme://$host:8774/v1.1/\$(tenant_id)s"
+  ec2_url="$scheme://$host:8773/services/Cloud"
+  s3_url="$scheme://$host:3333"
   region="$(config-get region)"
 
   # these are the default endpoints
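For illustration, in a clustered deployment with HTTPS enabled and a hypothetical VIP of 10.0.0.100, the catalog URLs built above come out as:

    # nova_url -> https://10.0.0.100:8774/v1.1/$(tenant_id)s
    # ec2_url  -> https://10.0.0.100:8773/services/Cloud
    # s3_url   -> https://10.0.0.100:3333
    # (unclustered and/or non-SSL deployments substitute the unit's
    #  private-address and/or the http scheme respectively)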
@@ -232,7 +222,7 @@ function keystone_joined {
     s3_internal_url="$s3_url"
 
   if [ "$(config-get network-manager)" == "Quantum" ]; then
-    quantum_url="http://$address:$quantum_port"
+    quantum_url="$scheme://$host:$quantum_port"
     relation-set quantum_service="quantum" \
       quantum_region="$region" \
       quantum_public_url="$quantum_url" \
@@ -242,7 +232,7 @@ function keystone_joined {
 
   # tack on an endpoint for nova-volume a relation exists.
   if [[ -n "$(relation-ids nova-volume-service)" ]] ; then
-    nova_vol_url="http://$address:$vol_port/v1/\$(tenant_id)s"
+    nova_vol_url="$scheme://$host:$vol_port/v1/\$(tenant_id)s"
     relation-set nova-volume_service="nova-volume" \
       nova-volume_region="$region" \
       nova-volume_public_url="$nova_vol_url" \
@@ -335,6 +325,7 @@ EOF
         region=$region
     done
   fi
+  configure_https
 }
 
 volume_joined() {
@@ -386,40 +377,52 @@ volume_joined() {
 }
 
 compute_joined() {
-  if is_clustered && ! is_leader 'res_nova_vip'; then
-    # Clustered and not current leader - do nothing
-    return 0
-  fi
-  relation-set network_manager=$(config-get network-manager)
-  relation-set ec2_host=$(unit-get private-address)
+  local r_id="$1"
+  [[ -n "$r_id" ]] && r_id="-r $r_id"
+  eligible_leader || return 0
+  relation-set $r_id network_manager=$(config-get network-manager)
+  relation-set $r_id ec2_host=$(unit-get private-address)
+
+  # Fish out keystone data to be passed onto compute, needed for
+  # quantum + https.
+  local rid=""
+  for rid in $(relation-ids identity-service) ; do
+    for unit in $(relation-list -r $rid); do
+      local keystone_host=$(relation-get -r $rid auth_host $unit)
+      local auth_port=$(relation-get -r $rid auth_port $unit)
+      local service_port=$(relation-get -r $rid service_port $unit)
+      local service_username=$(relation-get -r $rid service_username $unit)
+      local service_password=$(relation-get -r $rid service_password $unit)
+      local service_tenant=$(relation-get -r $rid service_tenant $unit)
+      local https_keystone=$(relation-get -r $rid https_keystone $unit)
+      local ca_cert=$(relation-get -r $rid ca_cert $unit)
+    done
+  done
+
   if [ "$NET_MANAGER" == "Quantum" ]; then
-    rids=$(relation-ids identity-service)
-    for rid in $rids; do
-      for unit in $(relation-list -r $rid); do
-        keystone_host=$(relation-get -r $rid auth_host $unit)
-        if [ -n "$keystone_host" ]; then
-          relation-set \
-            keystone_host=$keystone_host \
-            auth_port=$(relation-get -r $rid auth_port $unit) \
-            service_port=$(relation-get -r $rid service_port $unit) \
-            service_username=$(relation-get -r $rid service_username $unit) \
-            service_password=$(relation-get -r $rid service_password $unit) \
-            service_tenant=$(relation-get -r $rid service_tenant $unit)
-        fi
-      done
-    done
-    if is_clustered; then
-      quantum_host=$(config-get vip)
-      quantum_port=19696
-    else
-      quantum_host=$(unit-get private-address)
-      quantum_port=9696
+    if [[ -n "$keystone_host" ]]; then
+      relation-set $r_id \
+        keystone_host=$keystone_host \
+        auth_port=$auth_port \
+        service_port=$service_port \
+        service_username=$service_username \
+        service_password=$service_password \
+        service_tenant=$service_tenant
     fi
-    relation-set quantum_host=$quantum_host \
-                 quantum_port=$quantum_port \
+
+    is_clustered && local host=$(config-get vip) ||
+      local host=$(unit-get private-address)
+    https && local scheme="https" || local scheme="http"
+    local quantum_url="$scheme:$host:9696"
+    relation-set $r_id quantum_url=$quantum_url \
                  quantum_plugin=$(config-get quantum-plugin)
   fi
 
+  # must pass on the keystone CA certficiate, if it exists.
+  if [[ -n "$keystone_host" ]] && [[ -n "$https_keystone" ]] ; then
+    relation-set $r_id https_keystone="True" ca_cert="$ca_cert"
+  fi
+
   # volume driver is dependent on os version, or presence
   # of cinder (on folsom, at least)
   local cur_vers=$(get_os_codename_package "nova-common")
@@ -429,11 +432,10 @@ compute_joined() {
        vol_drv="nova-volume"
        ;;
     "folsom")
-       local r_ids=$(relation-ids cinder-volume-service)
-       [[ -z "$r_ids" ]] && vol_drv="nova-volume"
+       [[ -z "$(relation-ids cinder-volume-service)" ]] && vol_drv="nova-volume"
        ;;
   esac
-  relation-set volume_service="$vol_drv"
+  relation-set $r_id volume_service="$vol_drv"
 }
 
 compute_changed() {
@@ -453,10 +455,7 @@ compute_departed() {
 
 function quantum_joined() {
   # Tell quantum service about keystone
-  if is_clustered && ! is_leader 'res_nova_vip'; then
-    # Clustered and not current leader - do nothing
-    return 0
-  fi
+  eligible_leader || return 0
   rids=$(relation-ids identity-service)
   for rid in $rids; do
     for unit in $(relation-list -r $rid); do
@@ -486,9 +485,26 @@ function quantum_joined() {
 }
 
 function cluster_changed() {
-  configure_haproxy "quantum_api:9696" "nova_api:8774" \
-                    "ec2_api:8773" "s3_api:3333" \
-                    "volume_api:8776"
+  [[ -z "$(peer_units)" ]] &&
+    juju-log "cluster_changed() with no peers." && exit 0
+  # upstartService:defaultPort:configOption
+  local svcs="nova-api-ec2:8773:ec2_listen_port
+              nova-api-os-compute:8774:osapi_compute_listen_port
+              nova-objectstore:3333:s3_listen_port"
+  [[ "$NET_MANAGER" == "Quantum" ]] &&
+    svcs="$svcs quantum-server:9696:bind_port"
+
+  for s in $svcs ; do
+    local service=$(echo $s | cut -d: -f1)
+    local port=$(echo $s | cut -d: -f2)
+    local opt=$(echo $s | cut -d: -f3)
+    local next_server="$(determine_haproxy_port $port)"
+    local api_port="$(determine_api_port $port)"
+    local haproxy_port_maps="$haproxy_port_maps $service:$next_server:$api_port"
+    set_or_update "$opt" "$api_port"
+    service_ctl $service restart
+  done
+  configure_haproxy $haproxy_port_maps
 }
 
 function ha_relation_joined() {
@@ -513,13 +529,13 @@ function ha_relation_joined() {
     init_services="{
         'res_nova_haproxy':'haproxy'
     }"
-    groups="{
-        'grp_nova_haproxy':'res_nova_vip res_nova_haproxy'
+    clones="{
+        'cl_nova_haproxy':'res_nova_haproxy'
     }"
     relation-set corosync_bindiface=$corosync_bindiface \
       corosync_mcastport=$corosync_mcastport \
       resources="$resources" resource_params="$resource_params" \
-      init_services="$init_services" groups="$groups"
+      init_services="$init_services" clones="$groups"
   else
     juju-log "Insufficient configuration data to configure hacluster"
     exit 1
@@ -529,11 +545,12 @@ function ha_relation_joined() {
 function ha_relation_changed() {
   local clustered=`relation-get clustered`
   if [ -n "$clustered" ] && is_leader 'res_nova_vip'; then
+    https && local scheme="https" || local scheme="http"
     for r_id in `relation-ids identity-service`; do
       address=$(config-get vip)
-      nova_url="http://$address:18774/v1.1/\$(tenant_id)s"
-      ec2_url="http://$address:18773/services/Cloud"
-      s3_url="http://$address:13333"
+      nova_url="$scheme://$address:8774/v1.1/\$(tenant_id)s"
+      ec2_url="$scheme://$address:8773/services/Cloud"
+      s3_url="$scheme://$address:3333"
       relation-set -r $r_id \
         nova_public_url="$nova_url" \
         nova_admin_url="$nova_url" \
@@ -546,15 +563,15 @@ function ha_relation_changed() {
         s3_internal_url="$s3_url"
 
       if [ "$(config-get network-manager)" == "Quantum" ]; then
-        quantum_url="http://$address:19696"
+        quantum_url="$scheme://$address:9696"
         relation-set -r $r_id \
           quantum_public_url="$quantum_url" \
           quantum_admin_url="$quantum_url" \
          quantum_internal_url="$quantum_url"
       fi
 
      if [[ -n "$(relation-ids nova-volume-service)" ]] ; then
-        nova_vol_url="http://$address:18776/v1/\$(tenant_id)s"
+        nova_vol_url="$scheme://$address:8776/v1/\$(tenant_id)s"
         relation-set -r $r_id \
           nova-volume_public_url="$nova_vol_url" \
           nova-volume_admin_url="$nova_vol_url" \
@@ -567,7 +584,7 @@ function ha_relation_changed() {
     for r_id in `relation-ids quantum-network-service`; do
       relation-set -r $r_id \
         quantum_host=$address
-        quantum_port=19696
+        quantum_port=9696
     done
   fi
 fi