DNS HA

Implement DNS high availability. Pass the correct information to hacluster
to register a DNS entry with MAAS 2.0 or greater rather than using a
virtual IP.

Charm-helpers sync to bring in DNS HA helpers.

Change-Id: I073770d3900b4948c4cceb440f90407128558c8f

This commit is contained in:
parent 18e19ef50e
commit eba53c3d55
README.md | 33
@@ -14,6 +14,39 @@ console access for existing guests will stop working

    juju add-relation "nova-cloud-controller:pgsql-nova-db" "postgresql:db"
    juju add-relation "nova-cloud-controller:pgsql-neutron-db" "postgresql:db"

+## HA/Clustering
+
+There are two mutually exclusive high availability options: using virtual
+IP(s) or DNS. In both cases a relationship to hacluster is required, which
+provides the corosync back-end HA functionality.
+
+To use virtual IP(s), the clustered nodes must be on the same subnet, such
+that the VIP is a valid IP on the subnet for one of the node's interfaces and
+each node has an interface in said subnet. The VIP becomes a highly-available
+API endpoint.
+
+At a minimum, the config option 'vip' must be set in order to use virtual IP
+HA. If multiple networks are being used, a VIP should be provided for each
+network, separated by spaces. Optionally, 'vip_iface' or 'vip_cidr' may be
+specified.
+
+To use DNS high availability there are several prerequisites, although DNS HA
+does not require the clustered nodes to be on the same subnet. Currently the
+DNS HA feature is only available for MAAS 2.0 or greater environments, and
+MAAS 2.0 requires Juju 2.0 or greater. The clustered nodes must have static
+or "reserved" IP addresses registered in MAAS, and the DNS hostname(s) must
+be pre-registered in MAAS before use with DNS HA.
+
+At a minimum, the config option 'dns-ha' must be set to true and at least one
+of 'os-public-hostname', 'os-internal-hostname' or 'os-admin-hostname' must
+be set in order to use DNS HA. One or more of the above hostnames may be set.
+
+The charm will throw an exception in the following circumstances:
+
+* If neither 'vip' nor 'dns-ha' is set and the charm is related to hacluster
+* If both 'vip' and 'dns-ha' are set, as they are mutually exclusive
+* If 'dns-ha' is set and none of the os-{admin,internal,public}-hostname(s)
+  are set
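As a rough illustration of how these rules combine, the check amounts to
something like the following sketch (the function name and exception type are
illustrative; the real validation lives in the charm-helpers hacluster code):

```python
# Illustrative sketch only: config() stands in for the charm's config
# accessor and ValueError for the charm's own exception class.
def check_ha_config(config):
    vip = config('vip')
    dns = config('dns-ha')
    if not vip and not dns:
        raise ValueError("related to hacluster but neither 'vip' nor "
                         "'dns-ha' is set")
    if vip and dns:
        raise ValueError("'vip' and 'dns-ha' are mutually exclusive")
    hostnames = ('os-admin-hostname', 'os-internal-hostname',
                 'os-public-hostname')
    if dns and not any(config(h) for h in hostnames):
        raise ValueError("'dns-ha' is set but no os-*-hostname is configured")
```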

# Network Space support

This charm supports the use of Juju Network Spaces, allowing the charm to be
bound to network space configurations managed directly by Juju. This is only
supported with Juju 2.0 and above.
config.yaml | 30
@@ -141,6 +141,12 @@ options:
      internal communication between services. If set to True this option will
      configure services to use internal endpoints where possible.
+  # HA configuration settings
+  dns-ha:
+    type: boolean
+    default: False
+    description: |
+      Use DNS HA with MAAS 2.0. Note that if this is set, do not set the vip
+      settings below.
  vip:
    type: string
    default:
@@ -253,6 +259,30 @@ options:
      create public endpoints such as:

      https://ncc.example.com:8775/v2/$(tenant_id)s
+  os-internal-hostname:
+    type: string
+    default:
+    description: |
+      The hostname or address of the internal endpoints provided by the
+      nova-cloud-controller in the keystone identity provider.
+
+      This value will be used for internal endpoints. For example, an
+      os-internal-hostname set to 'ncc.internal.example.com' with ssl
+      enabled will create an internal endpoint such as:
+
+      https://ncc.internal.example.com:8775/v2/$(tenant_id)s
+  os-admin-hostname:
+    type: string
+    default:
+    description: |
+      The hostname or address of the admin endpoints provided by the
+      nova-cloud-controller in the keystone identity provider.
+
+      This value will be used for admin endpoints. For example, an
+      os-admin-hostname set to 'ncc.admin.example.com' with ssl enabled
+      will create an admin endpoint such as:
+
+      https://ncc.admin.example.com:8775/v2/$(tenant_id)s
  service-guard:
    type: boolean
    default: false
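To make the endpoint examples above concrete, here is a small illustrative
sketch of how a configured hostname and the ssl setting combine into such a
URL (the helper function below is not part of the charm):

```python
# Illustrative only: mirrors the endpoint examples in the option
# descriptions above; 8775 is simply the port used in those examples.
def endpoint_url(hostname, port=8775, ssl=True):
    scheme = 'https' if ssl else 'http'
    return '{}://{}:{}/v2/$(tenant_id)s'.format(scheme, hostname, port)

# endpoint_url('ncc.internal.example.com')
#   -> 'https://ncc.internal.example.com:8775/v2/$(tenant_id)s'
```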
@@ -280,14 +280,14 @@ def get_hacluster_config(exclude_keys=None):
    for initiating a relation to hacluster:

        ha-bindiface, ha-mcastport, vip, os-internal-hostname,
-       os-admin-hostname, os-public-hostname
+       os-admin-hostname, os-public-hostname, os-access-hostname

    param: exclude_keys: list of setting key(s) to be excluded.
    returns: dict: A dict containing settings keyed by setting name.
    raises: HAIncompleteConfig if settings are missing or incorrect.
    '''
    settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname',
-                'os-admin-hostname', 'os-public-hostname']
+                'os-admin-hostname', 'os-public-hostname', 'os-access-hostname']
    conf = {}
    for setting in settings:
        if exclude_keys and setting in exclude_keys:
@@ -324,7 +324,7 @@ def valid_hacluster_config():
    # If dns-ha then one of os-*-hostname must be set
    if dns:
        dns_settings = ['os-internal-hostname', 'os-admin-hostname',
-                        'os-public-hostname']
+                        'os-public-hostname', 'os-access-hostname']
        # At this point it is unknown if one or all of the possible
        # network spaces are in HA. Validate at least one is set which is
        # the minimum required.
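For orientation, the sketch below shows roughly how a charm hook consumes
this helper; only get_hacluster_config(), its exclude_keys parameter and the
hookenv config() accessor are real charm-helpers APIs, while the hook body
itself is illustrative:

```python
# Illustrative consumer of get_hacluster_config(); the branching mirrors
# the ha_joined hook further down in this change.
from charmhelpers.core.hookenv import config
from charmhelpers.contrib.hahelpers.cluster import get_hacluster_config


def ha_relation_joined():
    # Dict keyed by setting name: ha-bindiface, ha-mcastport, vip and the
    # os-*-hostname values, now including os-access-hostname.
    cluster_config = get_hacluster_config()

    if config('dns-ha'):
        # DNS HA: hand the os-*-hostname values to hacluster as
        # ocf:maas:dns resources (see the DNS HA helpers below).
        pass
    else:
        # Virtual IP HA: build VIP resources from the configured vip(s).
        for vip in cluster_config['vip'].split():
            pass
```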
@@ -36,6 +36,10 @@ from charmhelpers.core.hookenv import (
    DEBUG,
)

+from charmhelpers.core.host import (
+    lsb_release
+)
+
from charmhelpers.contrib.openstack.ip import (
    resolve_address,
)
@@ -63,8 +67,11 @@ def update_dns_ha_resource_params(resources, resource_params,
    DNS HA
    """

+    # Validate the charm environment for DNS HA
+    assert_charm_supports_dns_ha()
+
    settings = ['os-admin-hostname', 'os-internal-hostname',
-                'os-public-hostname']
+                'os-public-hostname', 'os-access-hostname']

    # Check which DNS settings are set and update dictionaries
    hostname_group = []
@@ -109,3 +116,15 @@ def update_dns_ha_resource_params(resources, resource_params,
        msg = 'DNS HA: Hostname group has no members.'
        status_set('blocked', msg)
        raise DNSHAException(msg)
+
+
+def assert_charm_supports_dns_ha():
+    """Validate prerequisites for DNS HA
+    The MAAS client is only available on Xenial or greater
+    """
+    if lsb_release().get('DISTRIB_RELEASE') < '16.04':
+        msg = ('DNS HA is only supported on 16.04 and greater '
+               'versions of Ubuntu.')
+        status_set('blocked', msg)
+        raise DNSHAException(msg)
+    return True
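The unit test added near the end of this change illustrates what these
helpers contribute: for each os-*-hostname that is set, an ocf:maas:dns
resource is added alongside the existing haproxy resource. A sketch of that
effect using the test's values follows (it assumes a hook execution context
where charm config and relation data are available):

```python
# Sketch of the effect of update_dns_ha_resource_params() when only
# os-public-hostname is set; hostname and address values are taken from
# the unit test added below, not from any default.
from charmhelpers.contrib.openstack.ha.utils import (
    update_dns_ha_resource_params,
)

resources = {'res_nova_haproxy': 'lsb:haproxy'}
resource_params = {'res_nova_haproxy': 'op monitor interval="5s"'}

update_dns_ha_resource_params(relation_id=None,
                              resources=resources,
                              resource_params=resource_params)

# With os-public-hostname=nova.maas resolving to 10.0.0.1, the dicts now
# also carry:
#   resources['res_nova_public_hostname'] == 'ocf:maas:dns'
#   resource_params['res_nova_public_hostname'] == (
#       'params fqdn="nova.maas" ip_address="10.0.0.1"')
```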
@@ -725,15 +725,14 @@ def git_install_requested():
    requirements_dir = None


-def git_default_repos(projects_yaml):
+def git_default_repos(projects):
    """
    Returns default repos if a default openstack-origin-git value is specified.
    """
    service = service_name()
-    core_project = service

    for default, branch in GIT_DEFAULT_BRANCHES.iteritems():
-        if projects_yaml == default:
+        if projects == default:

            # add the requirements repo first
            repo = {
@@ -743,41 +742,34 @@ def git_default_repos(projects_yaml):
            }
            repos = [repo]

-            # neutron-* and nova-* charms require some additional repos
-            if service in ['neutron-api', 'neutron-gateway',
-                           'neutron-openvswitch']:
-                core_project = 'neutron'
-                for project in ['neutron-fwaas', 'neutron-lbaas',
-                                'neutron-vpnaas']:
+            # neutron and nova charms require some additional repos
+            if service == 'neutron':
+                for svc in ['neutron-fwaas', 'neutron-lbaas', 'neutron-vpnaas']:
                    repo = {
-                        'name': project,
-                        'repository': GIT_DEFAULT_REPOS[project],
+                        'name': svc,
+                        'repository': GIT_DEFAULT_REPOS[svc],
                        'branch': branch,
                    }
                    repos.append(repo)

-            elif service in ['nova-cloud-controller', 'nova-compute']:
-                core_project = 'nova'
+            elif service == 'nova':
                repo = {
                    'name': 'neutron',
                    'repository': GIT_DEFAULT_REPOS['neutron'],
                    'branch': branch,
                }
                repos.append(repo)
-            elif service == 'openstack-dashboard':
-                core_project = 'horizon'

-            # finally add the current service's core project repo
+            # finally add the current service's repo
            repo = {
-                'name': core_project,
-                'repository': GIT_DEFAULT_REPOS[core_project],
+                'name': service,
+                'repository': GIT_DEFAULT_REPOS[service],
                'branch': branch,
            }
            repos.append(repo)

            return yaml.dump(dict(repositories=repos))

-    return projects_yaml
+    return projects


def _git_yaml_load(projects_yaml):
@@ -105,6 +105,10 @@ from charmhelpers.contrib.hahelpers.cluster import (
    https,
)

+from charmhelpers.contrib.openstack.ha.utils import (
+    update_dns_ha_resource_params,
+)
+
from charmhelpers.payload.execd import execd_preinstall

from charmhelpers.contrib.openstack.ip import (
@@ -706,7 +710,7 @@ def cluster_changed():


@hooks.hook('ha-relation-joined')
-def ha_joined():
+def ha_joined(relation_id=None):
    cluster_config = get_hacluster_config()
    resources = {
        'res_nova_haproxy': 'lsb:haproxy',
@@ -714,7 +718,19 @@ def ha_joined():
    resource_params = {
        'res_nova_haproxy': 'op monitor interval="5s"',
    }
+    init_services = {
+        'res_nova_haproxy': 'haproxy'
+    }
+    clones = {
+        'cl_nova_haproxy': 'res_nova_haproxy'
+    }
+    colocations = {}
+
+    if config('dns-ha'):
+        update_dns_ha_resource_params(relation_id=relation_id,
+                                      resources=resources,
+                                      resource_params=resource_params)
+    else:
        vip_group = []
        for vip in cluster_config['vip'].split():
            if is_ipv6(vip):
@@ -744,21 +760,15 @@ def ha_joined():
        if len(vip_group) >= 1:
            relation_set(groups={'grp_nova_vips': ' '.join(vip_group)})

-    init_services = {
-        'res_nova_haproxy': 'haproxy'
-    }
-    clones = {
-        'cl_nova_haproxy': 'res_nova_haproxy'
-    }
-    colocations = {}
-
-    if config('single-nova-consoleauth') and console_attributes('protocol'):
+    if (config('single-nova-consoleauth') and
+            console_attributes('protocol')):
        colocations['vip_consoleauth'] = COLO_CONSOLEAUTH
        init_services['res_nova_consoleauth'] = 'nova-consoleauth'
        resources['res_nova_consoleauth'] = AGENT_CONSOLEAUTH
        resource_params['res_nova_consoleauth'] = AGENT_CA_PARAMS

-    relation_set(init_services=init_services,
+    relation_set(relation_id=relation_id,
+                 init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
@@ -75,6 +75,7 @@ TO_PATCH = [
    'git_install_requested',
    'status_set',
    'network_get_primary_address',
+    'update_dns_ha_resource_params',
]

@@ -787,6 +788,7 @@ class NovaCCHooksTests(CharmTestCase):
        self.get_netmask_for_address.return_value = None
        hooks.ha_joined()
        args = {
+            'relation_id': None,
            'corosync_bindiface': 'em0',
            'corosync_mcastport': '8080',
            'init_services': {'res_nova_haproxy': 'haproxy'},
@@ -804,6 +806,42 @@ class NovaCCHooksTests(CharmTestCase):
            call(**args),
        ])

+    def test_ha_joined_dns_ha(self):
+        def _fake_update(resources, resource_params, relation_id=None):
+            resources.update({'res_nova_public_hostname': 'ocf:maas:dns'})
+            resource_params.update({'res_nova_public_hostname':
+                                    'params fqdn="nova.maas" '
+                                    'ip_address="10.0.0.1"'})
+
+        self.test_config.set('dns-ha', True)
+        self.get_hacluster_config.return_value = {
+            'vip': None,
+            'ha-bindiface': 'em0',
+            'ha-mcastport': '8080',
+            'os-admin-hostname': None,
+            'os-internal-hostname': None,
+            'os-public-hostname': 'nova.maas',
+        }
+        args = {
+            'relation_id': None,
+            'corosync_bindiface': 'em0',
+            'corosync_mcastport': '8080',
+            'init_services': {'res_nova_haproxy': 'haproxy'},
+            'resources': {'res_nova_public_hostname': 'ocf:maas:dns',
+                          'res_nova_haproxy': 'lsb:haproxy'},
+            'resource_params': {
+                'res_nova_public_hostname': 'params fqdn="nova.maas" '
+                                            'ip_address="10.0.0.1"',
+                'res_nova_haproxy': 'op monitor interval="5s"'},
+            'clones': {'cl_nova_haproxy': 'res_nova_haproxy'},
+            'colocations': {},
+        }
+        self.update_dns_ha_resource_params.side_effect = _fake_update
+
+        hooks.ha_joined()
+        self.assertTrue(self.update_dns_ha_resource_params.called)
+        self.relation_set.assert_called_with(**args)
+
    @patch('nova_cc_utils.config')
    def test_ha_relation_multi_consoleauth(self, config):
        self.get_hacluster_config.return_value = {
@@ -819,6 +857,7 @@ class NovaCCHooksTests(CharmTestCase):
        self.get_netmask_for_address.return_value = None
        hooks.ha_joined()
        args = {
+            'relation_id': None,
            'corosync_bindiface': 'em0',
            'corosync_mcastport': '8080',
            'init_services': {'res_nova_haproxy': 'haproxy'},
@@ -850,6 +889,7 @@ class NovaCCHooksTests(CharmTestCase):
        self.get_netmask_for_address.return_value = None
        hooks.ha_joined()
        args = {
+            'relation_id': None,
            'corosync_bindiface': 'em0',
            'corosync_mcastport': '8080',
            'init_services': {'res_nova_haproxy': 'haproxy',