
# Copyright 2017 VMware Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import time

from neutron_lib import constants as nl_constants

from tempest.common.utils.linux import remote_client
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions as lib_exc

from tempest.lib.services.network import quotas_client as net_quota

from vmware_nsx_tempest_plugin._i18n import _
from vmware_nsx_tempest_plugin.common import constants
from vmware_nsx_tempest_plugin.lib import traffic_manager
from vmware_nsx_tempest_plugin.services import designate_base
from vmware_nsx_tempest_plugin.services import fwaas_client as FWAASC
from vmware_nsx_tempest_plugin.services.lbaas import health_monitors_client
from vmware_nsx_tempest_plugin.services.lbaas import listeners_client
from vmware_nsx_tempest_plugin.services.lbaas import load_balancers_client
from vmware_nsx_tempest_plugin.services.lbaas import members_client
from vmware_nsx_tempest_plugin.services.lbaas import pools_client
from vmware_nsx_tempest_plugin.services import nsx_client
from vmware_nsx_tempest_plugin.services import openstack_network_clients


LOG = constants.log.getLogger(__name__)

CONF = config.CONF

RULE_TYPE_BANDWIDTH_LIMIT = "bandwidth_limit"
RULE_TYPE_DSCP_MARK = "dscp_marking"


# Includes feature-related functions such as CRUD for MD Proxy, L2GW and QoS
class FeatureManager(traffic_manager.IperfManager,
                     designate_base.DnsClientBase):

    @classmethod
    def setup_clients(cls):
        """Create various client connections (NSXv3, L2 Gateway, etc.)."""
        super(FeatureManager, cls).setup_clients()
        try:
            manager = getattr(cls.os_admin, "manager", cls.os_admin)
            net_client = getattr(manager, "networks_client")
            _params = manager.default_params_withy_timeout_values.copy()
        except AttributeError as attribute_err:
            LOG.warning(
                "Failed to locate the attribute, Error: %(err_msg)s",
                {"err_msg": attribute_err.__str__()})
            _params = {}
        cls.l2gw_client = openstack_network_clients.L2GatewayClient(
            net_client.auth_provider,
            net_client.service,
            net_client.region,
            net_client.endpoint_type,
            **_params)
        cls.network_quotas_client = net_quota.QuotasClient(
            net_client.auth_provider,
            net_client.service,
            'nova',
            'internal',
            **_params)
        cls.nsx_client = nsx_client.NSXClient(
            CONF.network.backend,
            CONF.nsxv3.nsx_manager,
            CONF.nsxv3.nsx_user,
            CONF.nsxv3.nsx_password)
        cls.l2gwc_client = \
            openstack_network_clients.L2GatewayConnectionClient(
                net_client.auth_provider,
                net_client.service,
                net_client.region,
                net_client.endpoint_type,
                **_params)
        cls.load_balancers_client = \
            load_balancers_client.get_client(cls.os_primary)
        cls.listeners_client = listeners_client.get_client(cls.os_primary)
        cls.pools_client = pools_client.get_client(cls.os_primary)
        cls.members_client = members_client.get_client(cls.os_primary)
        cls.health_monitors_client = \
            health_monitors_client.get_client(cls.os_primary)
        cls.load_balancers_admin_client = \
            load_balancers_client.get_client(cls.os_admin)
        cls.listeners_admin_client = listeners_client.get_client(cls.os_admin)
        cls.pools_admin_client = pools_client.get_client(cls.os_admin)
        cls.members_admin_client = members_client.get_client(cls.os_admin)
        cls.health_monitors_admin_client = \
            health_monitors_client.get_client(cls.os_admin)
        cls.fwaas_v2_client = openstack_network_clients.FwaasV2Client(
            net_client.auth_provider,
            net_client.service,
            net_client.region,
            net_client.endpoint_type,
            **_params)
        cls.fwaasv1_client = FWAASC.get_client(cls.manager)
        cls.vpnaas_client = openstack_network_clients.VPNClient(
            net_client.auth_provider,
            net_client.service,
            net_client.region,
            net_client.endpoint_type,
            **_params)
        cls.qos_policy_client = openstack_network_clients.QosPoliciesClient(
            net_client.auth_provider,
            net_client.service,
            net_client.region,
            net_client.endpoint_type,
            **_params)
        cls.qos_bw_client = openstack_network_clients.QosBWLimitClient(
            net_client.auth_provider,
            net_client.service,
            net_client.region,
            net_client.endpoint_type,
            **_params)
        cls.qos_dscp_client = openstack_network_clients.QosDscpClient(
            net_client.auth_provider,
            net_client.service,
            net_client.region,
            net_client.endpoint_type,
            **_params)
        cls.rbac_client = openstack_network_clients.RBAC_Client(
            net_client.auth_provider,
            net_client.service,
            net_client.region,
            net_client.endpoint_type,
            **_params)
        net_client.service = 'load-balancer'
        cls.octavia_admin_client = openstack_network_clients.\
            OctaviaLB_Client(
                net_client.auth_provider,
                net_client.service,
                net_client.region,
                net_client.endpoint_type,
                **_params)
        cls.octavia_admin_listener_client = openstack_network_clients.\
            OctaviaListenersClient(
                net_client.auth_provider,
                net_client.service,
                net_client.region,
                net_client.endpoint_type,
                **_params)
        cls.octavia_admin_pools_client = openstack_network_clients.\
            OctaviaPoolsClient(
                net_client.auth_provider,
                net_client.service,
                net_client.region,
                net_client.endpoint_type,
                **_params)
        cls.octavia_hm_client = openstack_network_clients.\
            OctaviaHealthMonitorClient(
                net_client.auth_provider,
                net_client.service,
                net_client.region,
                net_client.endpoint_type,
                **_params)
        cls.octavia_admin_members_client = openstack_network_clients.\
            OctaviaMembersClient(
                net_client.auth_provider,
                net_client.service,
                net_client.region,
                net_client.endpoint_type,
                **_params)
        cls.octavia_admin_l7policies_client = openstack_network_clients.\
            OctaviaL7PolicyClient(
                net_client.auth_provider,
                net_client.service,
                net_client.region,
                net_client.endpoint_type,
                **_params)
        cls.octavia_admin_l7rules_client = openstack_network_clients.\
            OctaviaL7RulesClient(
                net_client.auth_provider,
                net_client.service,
                net_client.region,
                net_client.endpoint_type,
                **_params)
        cls.octavia_admin_quota_client = openstack_network_clients.\
            OctaviaQuotaClient(
                net_client.auth_provider,
                net_client.service,
                net_client.region,
                net_client.endpoint_type,
                **_params)
        net_client.service = 'dns'
        cls.zones_v2_client = openstack_network_clients.ZonesV2Client(
            net_client.auth_provider,
            net_client.service,
            net_client.region,
            net_client.endpoint_type,
            **_params)
        cls.ptr_client = openstack_network_clients.DesignatePtrClient(
            net_client.auth_provider,
            net_client.service,
            net_client.region,
            net_client.endpoint_type,
            **_params)
        net_client.service = 'key-manager'
        cls.secret_client = openstack_network_clients.SecretClient(
            net_client.auth_provider,
            net_client.service,
            net_client.region,
            net_client.endpoint_type,
            **_params)
        cls.container_client = openstack_network_clients.ContainerClient(
            net_client.auth_provider,
            net_client.service,
            net_client.region,
            net_client.endpoint_type,
            **_params)

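    # Illustrative only: a minimal sketch (with a hypothetical test name) of
    # how a scenario test consumes the clients wired up above.
    #
    #   class TestL2GatewayOps(FeatureManager):
    #       def test_l2gw_create(self):
    #           l2gw_name = data_utils.rand_name("tempest-l2gw")
    #           rsp, devices = self.create_l2gw(l2gw_name, l2gw_param)
    #           self.assertEqual(l2gw_name, rsp[constants.L2GW]["name"])
    #
    # where l2gw_param is a list of device dicts; see the note after
    # nsx_bridge_cluster_info() below for its expected shape.
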
    #
    # FwaasV2 base class
    #
    def create_firewall_rule(self, **kwargs):
        fw_rule = self.fwaas_v2_client.create_firewall_v2_rule(**kwargs)
        self.addCleanup(
            test_utils.call_and_ignore_notfound_exc,
            self.fwaas_v2_client.delete_firewall_v2_rule,
            fw_rule["firewall_rule"]["id"])
        return fw_rule

    def create_firewall_policy(self, **kwargs):
        fw_policy = self.fwaas_v2_client.create_firewall_v2_policy(**kwargs)
        self.addCleanup(
            test_utils.call_and_ignore_notfound_exc,
            self.fwaas_v2_client.delete_firewall_v2_policy,
            fw_policy["firewall_policy"]["id"])
        return fw_policy

    def create_firewall_group(self, **kwargs):
        fw_group = self.fwaas_v2_client.create_firewall_v2_group(**kwargs)
        self.addCleanup(
            test_utils.call_and_ignore_notfound_exc,
            self.fwaas_v2_client.delete_firewall_v2_group,
            fw_group["firewall_group"]["id"])
        return fw_group

    def update_firewall_group(self, group_id, **kwargs):
        fw_group = self.fwaas_v2_client.update_firewall_v2_group(
            group_id, **kwargs)
        return fw_group

    def update_firewall_policy(self, policy_id, **kwargs):
        return self.fwaas_v2_client.update_firewall_v2_policy(
            policy_id, **kwargs)

    def update_firewall_rule(self, rule_id, **kwargs):
        return self.fwaas_v2_client.update_firewall_v2_rule(
            rule_id, **kwargs)

    def show_firewall_group(self, group_id):
        fw_group = self.fwaas_v2_client.show_firewall_v2_group(group_id)
        return fw_group

    def show_firewall_rule(self, rule_id):
        fw_rule = self.fwaas_v2_client.show_firewall_v2_rule(rule_id)
        return fw_rule

    def show_firewall_policy(self, policy_id):
        fw_policy = self.fwaas_v2_client.show_firewall_v2_policy(policy_id)
        return fw_policy

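    # Illustrative only: a typical FWaaS v2 chain built from the helpers
    # above; router_port_id is a placeholder for a router interface port
    # (e.g. the value returned by get_router_port() further down).
    #
    #   rule = self.create_firewall_rule(action="allow", protocol="icmp")
    #   policy = self.create_firewall_policy(
    #       firewall_rules=[rule['firewall_rule']['id']])
    #   group = self.create_firewall_group(
    #       ingress_firewall_policy_id=policy['firewall_policy']['id'],
    #       egress_firewall_policy_id=policy['firewall_policy']['id'],
    #       ports=[router_port_id])
    #   self._wait_firewall_ready(group['firewall_group']['id'])
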
    #
    # FwaasV1 base class
    #
    def _create_firewall_rule_name(self, body):
        firewall_rule_name = body['firewall_rule']['name']
        firewall_rule_name = "Fwaas-" + firewall_rule_name
        return firewall_rule_name

    def _delete_rule_if_exists(self, rule_id):
        # delete rule, if it exists
        try:
            self.fwaasv1_client.delete_firewall_rule(rule_id)
        # if rule is not found, this means it was deleted in the test
        except lib_exc.NotFound:
            pass

    def _delete_firewall_if_exists(self, fw_id):
        # delete firewall, if it exists
        try:
            self.fwaasv1_client.delete_firewall(fw_id)
        # if firewall is not found, this means it was deleted in the test
        except lib_exc.NotFound:
            pass
        self.fwaasv1_client.wait_for_resource_deletion(fw_id)

    def create_fw_v1_rule(self, **kwargs):
        body = self.fwaasv1_client.create_firewall_rule(
            name=data_utils.rand_name("fw-rule"),
            **kwargs)
        fw_rule = body['firewall_rule']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.fwaasv1_client.delete_firewall_rule,
                        fw_rule['id'])
        return fw_rule

    def create_fw_v1_policy(self, **kwargs):
        body = self.fwaasv1_client.create_firewall_policy(
            name=data_utils.rand_name("fw-policy"),
            **kwargs)
        fw_policy = body['firewall_policy']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.fwaasv1_client.delete_firewall_policy,
                        fw_policy['id'])
        return fw_policy

    def insert_fw_v1_rule_in_policy(self, firewall_policy_id,
                                    firewall_rule_id, insert_after,
                                    insert_before):
        self.fwaasv1_client.insert_firewall_rule_in_policy(
            firewall_policy_id, firewall_rule_id, insert_after, insert_before)

    def delete_fw_v1_and_wait(self, firewall_id):
        self.fwaasv1_client.delete_firewall(firewall_id)
        self._wait_firewall_while(firewall_id, [nl_constants.PENDING_DELETE],
                                  not_found_ok=True)

    def _delete_policy_if_exists(self, policy_id):
        # delete policy, if it exists
        try:
            self.fwaasv1_client.delete_firewall_policy(policy_id)
        # if policy is not found, this means it was deleted in the test
        except lib_exc.NotFound:
            pass

    def create_fw_v1(self, **kwargs):
        body = self.fwaasv1_client.create_firewall(
            name=data_utils.rand_name("fw"),
            **kwargs)
        fw = body['firewall']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.delete_fw_v1_and_wait,
                        fw['id'])
        return fw

    def update_fw_v1(self, firewall_id, **kwargs):
        body = self.fwaasv1_client.update_firewall(firewall_id, **kwargs)
        return body

    def show_fw_v1(self, firewall_id):
        body = self.fwaasv1_client.show_firewall(firewall_id)
        return body

    def _wait_fw_v1_while(self, firewall_id, statuses, not_found_ok=False):
        start = int(time.time())
        if not_found_ok:
            expected_exceptions = (lib_exc.NotFound)
        else:
            expected_exceptions = ()
        while True:
            try:
                fw = self.fwaasv1_client.show_firewall(firewall_id)
            except expected_exceptions:
                break
            status = fw['firewall']['status']
            if status not in statuses:
                break
            if int(time.time()) - start >= self.fwaasv1_client.build_timeout:
                msg = ("Firewall %(firewall)s failed to reach "
                       "non PENDING status (current %(status)s)") % {
                    "firewall": firewall_id,
                    "status": status,
                }
                raise lib_exc.TimeoutException(msg)
            time.sleep(constants.NSX_BACKEND_VERY_SMALL_TIME_INTERVAL)

    def _wait_fw_v1_ready(self, firewall_id):
        self._wait_firewall_while(firewall_id,
                                  [nl_constants.PENDING_CREATE,
                                   nl_constants.PENDING_UPDATE])

    def _wait_fw_v1_until_ready(self, fw_id):
        target_states = ('ACTIVE', 'CREATED')

        def _wait():
            firewall = self.fwaasv1_client.show_firewall(fw_id)
            firewall = firewall['firewall']
            return firewall['status'] in target_states
        if not test_utils.call_until_true(_wait, CONF.network.build_timeout,
                                          CONF.network.build_interval):
            m = ("Timed out waiting for firewall %s to reach %s state(s)" %
                 (fw_id, target_states))
            raise lib_exc.TimeoutException(m)

    def create_fw_v1_basic_topo(self, router_type, protocol_name,
                                policy=None):
        rtr_kwargs = {"router_type": "exclusive",
                      "admin_state_up": "True"}
        router = self.create_topology_router("fire-1", **rtr_kwargs)
        body = self.fwaasv1_client.create_fw_v1_rule(
            name=data_utils.rand_name("fw-rule"),
            action="allow",
            protocol=protocol_name)
        fw_rule_id1 = body['firewall_rule']['id']
        self._create_firewall_rule_name(body)
        self.addCleanup(self._delete_rule_if_exists, fw_rule_id1)
        # Create firewall policy
        if not policy:
            body = self.fwaasv1_client.create_fw_v1_policy(
                name=data_utils.rand_name("fw-policy"))
            fw_policy_id = body['firewall_policy']['id']
            self.addCleanup(self._delete_policy_if_exists, fw_policy_id)
            # Insert rule to firewall policy
            self.fwaasv1_client.insert_firewall_rule_in_policy(
                fw_policy_id, fw_rule_id1, '', '')
        else:
            fw_policy_id = policy
        # Create firewall
        firewall_1 = self.fwaasv1_client.create_fw_v1(
            name=data_utils.rand_name("firewall"),
            firewall_policy_id=fw_policy_id,
            router_ids=[router['id']])
        created_firewall = firewall_1['firewall']
        self.addCleanup(self._delete_firewall_if_exists,
                        created_firewall['id'])
        # Wait for the firewall resource to become ready
        self._wait_fw_v1_until_ready(created_firewall['id'])

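    # Illustrative only: a FWaaS v1 flow similar to what
    # create_fw_v1_basic_topo() does, built from the helpers above (router is
    # a placeholder for a router created via the topology helpers).
    #
    #   rule = self.create_fw_v1_rule(action="allow", protocol="icmp")
    #   policy = self.create_fw_v1_policy()
    #   self.insert_fw_v1_rule_in_policy(policy['id'], rule['id'], '', '')
    #   fw = self.create_fw_v1(firewall_policy_id=policy['id'],
    #                          router_ids=[router['id']])
    #   self._wait_fw_v1_until_ready(fw['id'])
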
    def ping_between_vms_different_router_uniscale(self, icmp_succeed=True):
        """Check NS and EW traffic for every server in the topology.

        Walks self.servers_details, verifies floating-ip (north-south)
        connectivity for each server, then checks east-west connectivity
        to every other server's fixed address.
        """
        for server in self.servers_details.values():
            ip_address = server[0]['floating_ips'][0]['floating_ip_address']
            ssh_source = self._get_remote_client(ip_address,
                                                 use_password=True)
            self.\
                test_fip_check_server_and_project_network_connectivity(
                    server,
                    should_connect=icmp_succeed)
            for remote_server in self.servers_details.values():
                if remote_server[0]['name'] != server[0]['name']:
                    remote_ip = list(remote_server[0][
                        'addresses'].values())[0][0]['addr']
                    self.check_remote_connectivity(ssh_source, remote_ip,
                                                   should_succeed=True)

#
|
|
# L2Gateway base class. To get basics of L2GW.
|
|
#
|
|
|
|
def create_l2gw(self, l2gw_name, l2gw_param):
|
|
"""Creates L2GW and returns the response.
|
|
|
|
:param l2gw_name: name of the L2GW
|
|
:param l2gw_param: L2GW parameters
|
|
|
|
:return: response of L2GW create API
|
|
|
|
"""
|
|
LOG.info("l2gw name: %(name)s, l2gw_param: %(devices)s ",
|
|
{"name": l2gw_name, "devices": l2gw_param})
|
|
devices = []
|
|
for device_dict in l2gw_param:
|
|
interface = [{"name": device_dict["iname"],
|
|
"segmentation_id": device_dict[
|
|
"vlans"]}] if "vlans" in device_dict else [
|
|
{"name": device_dict["iname"]}]
|
|
device = {"device_name": device_dict["dname"],
|
|
"interfaces": interface}
|
|
devices.append(device)
|
|
l2gw_request_body = {"devices": devices}
|
|
LOG.info(" l2gw_request_body: %s", l2gw_request_body)
|
|
rsp = self.l2gw_client.create_l2_gateway(
|
|
name=l2gw_name, **l2gw_request_body)
|
|
LOG.info(" l2gw response: %s", rsp)
|
|
self.addCleanup(
|
|
test_utils.call_and_ignore_notfound_exc,
|
|
self.l2gw_client.delete_l2_gateway, rsp[constants.L2GW]["id"])
|
|
return rsp, devices
|
|
|
|
def delete_l2gw(self, l2gw_id):
|
|
"""Delete L2gw.
|
|
|
|
:param l2gw_id: L2GW id to delete l2gw.
|
|
|
|
:return: response of the l2gw delete API.
|
|
|
|
"""
|
|
LOG.info("L2GW id: %(id)s to be deleted.", {"id": l2gw_id})
|
|
rsp = self.l2gw_client.delete_l2_gateway(l2gw_id)
|
|
LOG.info("response : %(rsp)s", {"rsp": rsp})
|
|
return rsp
|
|
|
|
def update_l2gw(self, l2gw_id, l2gw_new_name, devices):
|
|
"""Update existing L2GW.
|
|
|
|
:param l2gw_id: L2GW id to update its parameters.
|
|
:param l2gw_new_name: name of the L2GW.
|
|
:param devices: L2GW parameters.
|
|
|
|
:return: Response of the L2GW update API.
|
|
|
|
"""
|
|
rsp = self.l2gw_client.update_l2_gateway(l2gw_id,
|
|
name=l2gw_new_name, **devices)
|
|
return rsp
|
|
|
|
    def nsx_bridge_cluster_info(self):
        """Collect the device and interface name of the NSX bridge cluster.

        :return: list of (id, display_name) tuples for the bridge clusters.
        """
        response = self.nsx_client.get_bridge_cluster_info()
        if len(response) == 0:
            raise RuntimeError(_("NSX bridge cluster information is null"))
        return [(x.get("id"), x.get("display_name")) for x in response]

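    # Illustrative only: the l2gw_param accepted by create_l2gw() above is a
    # list of device dicts keyed by "dname"/"iname" (plus optional "vlans"),
    # typically built from nsx_bridge_cluster_info(); vlan_id here is a
    # placeholder for a configured segmentation id.
    #
    #   device_name, interface_name = self.nsx_bridge_cluster_info()[0]
    #   l2gw_param = [{"dname": device_name,
    #                  "iname": interface_name,
    #                  "vlans": [vlan_id]}]
    #   rsp, devices = self.create_l2gw(l2gw_name, l2gw_param)
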
    def nsx_bridge_profile_info(self):
        """Collect the device and interface name of the NSX bridge profile.

        :return: list of (id, display_name) tuples for the bridge profiles.
        """
        response = self.nsx_client.get_bridge_profile_info()
        if len(response) == 0:
            raise RuntimeError(_("NSX bridge profile information is null"))
        return [(x.get("id"), x.get("display_name")) for x in response]

    def nsx_transport_zone_info(self):
        """Collect the VLAN transport zone.

        :return: id of the NSX VLAN transport zone named 'transportzone2'.
        """
        response = self.nsx_client.get_transport_zones()
        if len(response) == 0:
            raise RuntimeError(_("NSX transport zone information is null"))
        vlan_zone = None
        for zone in response:
            if zone['transport_type'] == 'VLAN':
                if zone['display_name'] == 'transportzone2':
                    vlan_zone = zone['id']
        if vlan_zone:
            return vlan_zone
        else:
            raise RuntimeError(_("VLAN transport zone not found"))

def create_l2gw_connection(self, l2gwc_param):
|
|
"""Creates L2GWC and return the response.
|
|
|
|
:param l2gwc_param: L2GWC parameters.
|
|
|
|
:return: response of L2GWC create API.
|
|
|
|
"""
|
|
LOG.info("l2gwc param: %(param)s ", {"param": l2gwc_param})
|
|
l2gwc_request_body = {"l2_gateway_id": l2gwc_param["l2_gateway_id"],
|
|
"network_id": l2gwc_param["network_id"]}
|
|
if "segmentation_id" in l2gwc_param:
|
|
l2gwc_request_body["segmentation_id"] = l2gwc_param[
|
|
"segmentation_id"]
|
|
LOG.info("l2gwc_request_body: %s", l2gwc_request_body)
|
|
rsp = self.l2gwc_client.create_l2_gateway_connection(
|
|
**l2gwc_request_body)
|
|
LOG.info("l2gwc response: %s", rsp)
|
|
self.addCleanup(
|
|
test_utils.call_and_ignore_notfound_exc,
|
|
self.l2gwc_client.delete_l2_gateway_connection,
|
|
rsp[constants.L2GWC]["id"])
|
|
return rsp
|
|
|
|
def delete_l2gw_connection(self, l2gwc_id):
|
|
"""Delete L2GWC and returns the response.
|
|
|
|
:param l2gwc_id: L2GWC id to delete L2GWC.
|
|
|
|
:return: response of the l2gwc delete API.
|
|
|
|
"""
|
|
LOG.info("L2GW connection id: %(id)s to be deleted",
|
|
{"id": l2gwc_id})
|
|
rsp = self.l2gwc_client.delete_l2_gateway_connection(l2gwc_id)
|
|
LOG.info("response : %(rsp)s", {"rsp": rsp})
|
|
return rsp
|
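    # Illustrative only: attaching the gateway to a neutron network; the keys
    # match what create_l2gw_connection() above expects, and network/vlan_id
    # are placeholders.
    #
    #   l2gwc_param = {"l2_gateway_id": rsp[constants.L2GW]["id"],
    #                  "network_id": network["id"],
    #                  "segmentation_id": vlan_id}
    #   l2gwc_rsp = self.create_l2gw_connection(l2gwc_param)
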
|
|
|
#
|
|
# LBAAS section.
|
|
#
|
|
def delete_loadbalancer_resources(self, lb_id, admin=None):
|
|
"""Deletion of lbaas resources.
|
|
|
|
:param lb_id: Load Balancer ID.
|
|
|
|
"""
|
|
if admin:
|
|
lb_client = self.load_balancers_admin_client
|
|
else:
|
|
lb_client = self.load_balancers_client
|
|
statuses = lb_client.show_load_balancer_status_tree(lb_id)
|
|
statuses = statuses.get('statuses', statuses)
|
|
lb = statuses.get('loadbalancer')
|
|
for listener in lb.get('listeners', []):
|
|
for policy in listener.get('l7policies'):
|
|
test_utils.call_and_ignore_notfound_exc(
|
|
self.l7policies_client.delete_policy,
|
|
policy.get('id'))
|
|
for pool in listener.get('pools'):
|
|
if admin:
|
|
self.delete_lb_pool_resources(lb_id, pool, admin=admin)
|
|
else:
|
|
self.delete_lb_pool_resources(lb_id, pool)
|
|
if admin:
|
|
test_utils.call_and_ignore_notfound_exc(
|
|
self.listeners_admin_client.delete_listener,
|
|
listener.get('id'))
|
|
self.load_balancers_admin_client.\
|
|
wait_for_load_balancer_status(lb_id)
|
|
else:
|
|
test_utils.call_and_ignore_notfound_exc(
|
|
self.listeners_client.delete_listener,
|
|
listener.get('id'))
|
|
self.wait_for_load_balancer_status(lb_id)
|
|
# delete pools not attached to listener, but loadbalancer
|
|
for pool in lb.get('pools', []):
|
|
if admin:
|
|
self.delete_lb_pool_resources(lb_id, pool, admin=admin)
|
|
else:
|
|
self.delete_lb_pool_resources(lb_id, pool)
|
|
test_utils.call_and_ignore_notfound_exc(
|
|
lb_client.delete_load_balancer, lb_id)
|
|
lb_client.wait_for_load_balancer_status(
|
|
lb_id, is_delete_op=True)
|
|
lbs = lb_client.list_load_balancers()['loadbalancers']
|
|
self.assertEqual(0, len(lbs))
|
|
|
|
def delete_lb_pool_healthmonitor(self, pool, admin=None):
|
|
"""Deletion of lb health pool and monitor.
|
|
"""
|
|
if admin:
|
|
test_utils.call_and_ignore_notfound_exc(
|
|
self.health_monitors_admin_client.delete_health_monitor,
|
|
pool.get('pool')['healthmonitor_id'])
|
|
else:
|
|
test_utils.call_and_ignore_notfound_exc(
|
|
self.health_monitors_client.delete_health_monitor,
|
|
pool.get('pool')['healthmonitor_id'])
|
|
if admin:
|
|
test_utils.call_and_ignore_notfound_exc(
|
|
self.pools_admin_client.delete_pool, pool.get('pool')['id'])
|
|
else:
|
|
test_utils.call_and_ignore_notfound_exc(
|
|
self.pools_client.delete_pool, pool.get('pool')['id'])
|
|
|
|
def delete_lb_pool_resources(self, lb_id, pool, admin=None):
|
|
"""Deletion of lbaas pool resources.
|
|
|
|
:param lb_id: Load Balancer ID.
|
|
:param pool: pool information.
|
|
|
|
"""
|
|
pool_id = pool.get('id')
|
|
hm = pool.get('healthmonitor')
|
|
if hm:
|
|
if admin:
|
|
test_utils.call_and_ignore_notfound_exc(
|
|
self.health_monitors_admin_client.delete_health_monitor,
|
|
pool.get('healthmonitor').get('id'))
|
|
self.load_balancers_admin_client.wait_for_load_balancer_status(
|
|
lb_id)
|
|
else:
|
|
test_utils.call_and_ignore_notfound_exc(
|
|
self.health_monitors_client.delete_health_monitor,
|
|
pool.get('healthmonitor').get('id'))
|
|
self.wait_for_load_balancer_status(lb_id)
|
|
if admin:
|
|
test_utils.call_and_ignore_notfound_exc(
|
|
self.pools_admin_client.delete_pool, pool.get('id'))
|
|
self.load_balancers_admin_client.\
|
|
wait_for_load_balancer_status(lb_id)
|
|
else:
|
|
test_utils.call_and_ignore_notfound_exc(
|
|
self.pools_client.delete_pool, pool.get('id'))
|
|
self.wait_for_load_balancer_status(lb_id)
|
|
for member in pool.get('members', []):
|
|
if admin:
|
|
test_utils.call_and_ignore_notfound_exc(
|
|
self.members_admin_client.delete_member,
|
|
pool_id, member.get('id'))
|
|
self.load_balancers_admin_client.\
|
|
wait_for_load_balancer_status(lb_id)
|
|
else:
|
|
test_utils.call_and_ignore_notfound_exc(
|
|
self.members_client.delete_member,
|
|
pool_id, member.get('id'))
|
|
self.wait_for_load_balancer_status(lb_id)
|
|
|
|
def start_web_servers(self, protocol_port):
|
|
"""Start web server.
|
|
|
|
:param protocol_port: Port number.
|
|
|
|
"""
|
|
for server_name in self.topology_servers.keys():
|
|
server = self.servers_details[server_name].server
|
|
self.start_web_server(protocol_port, server, server_name)
|
|
|
|
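    # The two helpers below poll FWaaS v2 group state: _wait_firewall_while()
    # keeps calling show_firewall_group() until the status leaves the given
    # set (optionally treating NotFound as completion), and
    # _wait_firewall_ready() waits out PENDING_CREATE/PENDING_UPDATE after a
    # short NSX backend settle time.
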
def _wait_firewall_while(self, fw_group_id, statuses, not_found_ok=False):
|
|
if not_found_ok:
|
|
expected_exceptions = (lib_exc.NotFound)
|
|
else:
|
|
expected_exceptions = ()
|
|
while True:
|
|
try:
|
|
fw = self.show_firewall_group(fw_group_id)
|
|
except expected_exceptions:
|
|
break
|
|
status = fw['firewall_group']['status']
|
|
if status not in statuses:
|
|
break
|
|
|
|
def _wait_firewall_ready(self, fw_group_id):
|
|
time.sleep(constants.NSX_BACKEND_VERY_SMALL_TIME_INTERVAL)
|
|
self._wait_firewall_while(fw_group_id,
|
|
[nl_constants.PENDING_CREATE,
|
|
nl_constants.PENDING_UPDATE])
|
|
|
|
def wait_for_load_balancer_status(self, lb_id):
|
|
# Wait for the load balancer to become ONLINE and ACTIVE
|
|
self.load_balancers_client.wait_for_load_balancer_status(lb_id)
|
|
|
|
def create_addtional_lbaas_members(self, protocol_port):
|
|
"""Create Additional members in pool.
|
|
|
|
:param protocol_port: Port number.
|
|
|
|
"""
|
|
for server_name in self.topology_servers.keys():
|
|
if server_name in self.server_names:
|
|
continue
|
|
fip_data = self.servers_details[server_name].floating_ips[0]
|
|
fixed_ip_address = fip_data['fixed_ip_address']
|
|
self._disassociate_floating_ip(fip_data)
|
|
pool_id = self.pool['id']
|
|
vip_subnet_id = self.topology_subnets["subnet_lbaas_1"]['id']
|
|
lb_id = self.loadbalancer['id']
|
|
self.members_client.create_member(
|
|
pool_id, subnet_id=vip_subnet_id,
|
|
address=fixed_ip_address,
|
|
protocol_port=protocol_port)
|
|
self.wait_for_load_balancer_status(lb_id)
|
|
|
|
def check_lbaas_project_weight_values(self, count=2, HTTPS=None,
|
|
member_count=None,
|
|
barbican_http=None,
|
|
hash_persistence=False):
|
|
vip = self.vip_ip_address
|
|
time.sleep(constants.SLEEP_BETWEEN_VIRTUAL_SEREVRS_OPEARTIONS)
|
|
if HTTPS is None:
|
|
self.do_http_request(vip=vip, send_counts=self.poke_counters)
|
|
else:
|
|
self.do_https_request(vip=vip, send_counts=self.poke_counters)
|
|
# ROUND_ROBIN, so equal counts
|
|
if CONF.nsxv3.ens:
|
|
vms = len(self.topology_servers.keys())
|
|
if vms:
|
|
self.assertEqual(self.http_cnt["Welcome vm"] / 2, 3 * vms,
|
|
"LB fails with weighted values")
|
|
else:
|
|
pass
|
|
elif barbican_http:
|
|
no_of_vms = len(self.http_cnt)
|
|
# if source_ip persistence enabled then the count
|
|
# remains 1 as only one server will be actively responding
|
|
if hash_persistence:
|
|
self.assertEqual(no_of_vms, 1)
|
|
else:
|
|
if no_of_vms:
|
|
if (self.http_cnt['server_lbaas_2'] <
|
|
(self.poke_counters / no_of_vms)):
|
|
self.assertGreater(self.http_cnt['server_lbaas_3'],
|
|
self.poke_counters / no_of_vms)
|
|
elif (self.http_cnt['server_lbaas_3'] >
|
|
(self.poke_counters / no_of_vms)):
|
|
self.assertLess(self.http_cnt['server_lbaas_3'],
|
|
self.poke_counters / no_of_vms)
|
|
else:
|
|
self.assertEqual(self.http_cnt['server_lbaas_3'],
|
|
self.poke_counters / no_of_vms,
|
|
"LB fails with weighted values")
|
|
else:
|
|
no_of_vms = len(self.http_cnt)
|
|
# if source_ip persistence enabled then the count
|
|
# remains 1 as only one server will be actively responding
|
|
if hash_persistence:
|
|
self.assertEqual(no_of_vms, 1)
|
|
else:
|
|
if no_of_vms:
|
|
if (self.http_cnt['server_lbaas_0'] <
|
|
(self.poke_counters / no_of_vms)):
|
|
self.assertGreater(self.http_cnt['server_lbaas_1'],
|
|
self.poke_counters / no_of_vms)
|
|
elif (self.http_cnt['server_lbaas_0'] >
|
|
(self.poke_counters / no_of_vms)):
|
|
self.assertLess(self.http_cnt['server_lbaas_1'],
|
|
self.poke_counters / no_of_vms)
|
|
else:
|
|
self.assertEqual(self.http_cnt['server_lbaas_1'],
|
|
self.poke_counters / no_of_vms,
|
|
"LB fails with weighted values")
|
|
|
|
def check_project_lbaas(self, count=2, HTTPS=None,
|
|
no_resp=False, source_ip=False):
|
|
i = 0
|
|
vip = self.vip_ip_address
|
|
time.sleep(constants.WAIT_FOR_VIP)
|
|
if HTTPS is None:
|
|
self.do_http_request(vip=vip, send_counts=self.poke_counters,
|
|
no_resp=no_resp)
|
|
else:
|
|
self.do_https_request(vip=vip, send_counts=self.poke_counters,
|
|
no_resp=no_resp)
|
|
# ROUND_ROBIN, so equal counts
|
|
no_of_vms = len(self.http_cnt)
|
|
if source_ip:
|
|
[[key, value]] = list(self.http_cnt.items())
|
|
for srvr in self.topology_servers.keys():
|
|
if srvr != key:
|
|
self.topology_servers.pop(srvr)
|
|
break
|
|
if no_resp is False:
|
|
if CONF.nsxv3.ens:
|
|
vms = len(self.topology_servers.keys())
|
|
if self.http_cnt["Welcome vm"] == self.poke_counters:
|
|
self.assertEqual(self.http_cnt["Welcome vm"] / vms,
|
|
3 * vms)
|
|
else:
|
|
for server_name in self.topology_servers.keys():
|
|
if i < count:
|
|
i += 1
|
|
self.assertEqual(self.poke_counters / no_of_vms,
|
|
self.http_cnt[server_name])
|
|
else:
|
|
break
|
|
else:
|
|
self.assertNotIn('server_lbaas_0', self.http_cnt)
|
|
|
|
def count_response(self, response):
|
|
response = response.decode('utf-8')
|
|
if response in self.http_cnt:
|
|
self.http_cnt[response] += 1
|
|
else:
|
|
self.http_cnt[response] = 1
|
|
|
|
def update_members_weight(self, weight):
|
|
for server in self.members:
|
|
self.members_client.update_member(
|
|
self.pool['id'], server['member']['id'], weight=weight)
|
|
weight += weight
|
|
|
|
def update_pool_algorithm(self, algo):
|
|
self.pools_client.update_pool(self.pool['id'],
|
|
lb_algorithm=algo)
|
|
|
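    # Illustrative only: after rebalancing with the two helpers above, tests
    # typically re-drive traffic and re-check the distribution, e.g.:
    #
    #   self.update_pool_algorithm("LEAST_CONNECTIONS")
    #   self.update_members_weight(1)
    #   self.check_lbaas_project_weight_values(member_count=2)
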
|
def create_project_lbaas(self, protocol_type,
|
|
protocol_port, lb_algorithm,
|
|
hm_type, member_count=2,
|
|
max_vms=None, weight=None,
|
|
fip_disassociate=None, barbican=False,
|
|
pool_protocol=None, pool_port=None,
|
|
vip_subnet_id=None, barbican_container=None,
|
|
lb_id=None, count=None,
|
|
clean_up=None, persistence=False,
|
|
persistence_cookie_name=None,
|
|
persistence_type=None,
|
|
create_fip=True, external_subnet=None):
|
|
count = 0
|
|
lb_name = None
|
|
session_persistence = {}
|
|
if persistence:
|
|
if persistence_type:
|
|
session_persistence["type"] = persistence_type
|
|
if persistence_cookie_name:
|
|
session_persistence["cookie_name"] = persistence_cookie_name
|
|
if vip_subnet_id is None:
|
|
vip_subnet_id = self.topology_subnets["subnet_lbaas_1"]['id']
|
|
if lb_id is None:
|
|
lb_name = data_utils.rand_name(self.namestart)
|
|
if barbican:
|
|
self.loadbalancer = self.\
|
|
load_balancers_admin_client.\
|
|
create_load_balancer(name=lb_name,
|
|
vip_subnet_id=vip_subnet_id
|
|
)['loadbalancer']
|
|
lb_id = self.loadbalancer['id']
|
|
self.addCleanup(
|
|
self.load_balancers_admin_client.delete_load_balancer,
|
|
self.loadbalancer['id'])
|
|
self.load_balancers_admin_client.wait_for_load_balancer_status(
|
|
lb_id)
|
|
elif external_subnet:
|
|
self.loadbalancer = self.\
|
|
load_balancers_admin_client.\
|
|
create_load_balancer(name=lb_name,
|
|
vip_subnet_id=external_subnet
|
|
)['loadbalancer']
|
|
lb_id = self.loadbalancer['id']
|
|
if clean_up is None:
|
|
self.addCleanup(
|
|
self.load_balancers_admin_client.delete_load_balancer,
|
|
self.loadbalancer['id'])
|
|
self.load_balancers_admin_client.wait_for_load_balancer_status(
|
|
lb_id)
|
|
else:
|
|
self.loadbalancer = self.\
|
|
load_balancers_client.\
|
|
create_load_balancer(name=lb_name,
|
|
vip_subnet_id=vip_subnet_id
|
|
)['loadbalancer']
|
|
lb_id = self.loadbalancer['id']
|
|
self.wait_for_load_balancer_status(lb_id)
|
|
|
|
if barbican:
|
|
listener_name = data_utils.rand_name("tempest_lb")
|
|
self.listener = self.listeners_admin_client.\
|
|
create_listener(loadbalancer_id=lb_id, protocol=protocol_type,
|
|
protocol_port=protocol_port,
|
|
name=listener_name,
|
|
default_tls_container_ref=barbican_container
|
|
["container_ref"])['listener']
|
|
if clean_up is None:
|
|
self.addCleanup(
|
|
self.listeners_admin_client.delete_listener,
|
|
self.listener['id'])
|
|
self.load_balancers_admin_client.wait_for_load_balancer_status(
|
|
lb_id)
|
|
elif external_subnet:
|
|
listener_name = data_utils.rand_name("tempest_lb")
|
|
self.listener = self.listeners_admin_client.\
|
|
create_listener(loadbalancer_id=lb_id, protocol=protocol_type,
|
|
protocol_port=protocol_port,
|
|
name=listener_name)['listener']
|
|
if clean_up is None:
|
|
self.addCleanup(
|
|
self.listeners_admin_client.delete_listener,
|
|
self.listener['id'])
|
|
self.load_balancers_admin_client.wait_for_load_balancer_status(
|
|
lb_id)
|
|
else:
|
|
self.listener = self.listeners_client.create_listener(
|
|
loadbalancer_id=lb_id, protocol=protocol_type,
|
|
protocol_port=protocol_port, name=lb_name)['listener']
|
|
self.wait_for_load_balancer_status(lb_id)
|
|
if barbican:
|
|
if lb_name is not None:
|
|
self.pool = self.pools_admin_client.create_pool(
|
|
listener_id=self.listener['id'],
|
|
lb_algorithm=lb_algorithm, protocol=pool_protocol,
|
|
name=lb_name)['pool']
|
|
else:
|
|
self.pool = self.pools_admin_client.create_pool(
|
|
listener_id=self.listener['id'],
|
|
lb_algorithm=lb_algorithm, protocol=pool_protocol,
|
|
name=lb_id)['pool']
|
|
self.load_balancers_admin_client.wait_for_load_balancer_status(
|
|
lb_id)
|
|
pool_id = self.pool['id']
|
|
if clean_up is None:
|
|
self.addCleanup(self.pools_admin_client.delete_pool, pool_id)
|
|
self.load_balancers_admin_client.wait_for_load_balancer_status(
|
|
lb_id)
|
|
elif external_subnet:
|
|
self.pool = self.pools_admin_client.create_pool(
|
|
listener_id=self.listener['id'],
|
|
lb_algorithm=lb_algorithm, protocol=protocol_type,
|
|
name=lb_name,
|
|
session_persistence=session_persistence)['pool']
|
|
pool_id = self.pool['id']
|
|
if clean_up is None:
|
|
self.addCleanup(self.pools_admin_client.delete_pool, pool_id)
|
|
self.load_balancers_admin_client.wait_for_load_balancer_status(
|
|
lb_id)
|
|
else:
|
|
self.pool = self.pools_client.create_pool(
|
|
listener_id=self.listener['id'],
|
|
lb_algorithm=lb_algorithm, protocol=protocol_type,
|
|
name=lb_name,
|
|
session_persistence=session_persistence)['pool']
|
|
self.wait_for_load_balancer_status(lb_id)
|
|
pool_id = self.pool['id']
|
|
if barbican or external_subnet:
|
|
self.healthmonitor = (
|
|
self.health_monitors_admin_client.create_health_monitor(
|
|
pool_id=pool_id, type=hm_type,
|
|
delay=self.hm_delay, max_retries=self.hm_max_retries,
|
|
timeout=self.hm_timeout))['healthmonitor']
|
|
self.load_balancers_admin_client.wait_for_load_balancer_status(
|
|
lb_id)
|
|
if clean_up is None:
|
|
self.addCleanup(
|
|
self.health_monitors_admin_client.delete_health_monitor,
|
|
self.healthmonitor['id'])
|
|
else:
|
|
self.healthmonitor = (
|
|
self.health_monitors_client.create_health_monitor(
|
|
pool_id=pool_id, type=hm_type,
|
|
delay=self.hm_delay, max_retries=self.hm_max_retries,
|
|
timeout=self.hm_timeout))
|
|
self.wait_for_load_balancer_status(lb_id)
|
|
self.members = []
|
|
for server_name in self.topology_servers.keys():
|
|
if count < member_count:
|
|
if create_fip:
|
|
fip_data = self.servers_details[server_name].\
|
|
floating_ips[0]
|
|
fixed_ip_address = fip_data['fixed_ip_address']
|
|
if fip_disassociate is None:
|
|
if barbican or external_subnet:
|
|
kwargs = dict(port_id=None)
|
|
self.cmgr_adm.floating_ips_client.\
|
|
update_floatingip(fip_data['id'],
|
|
**kwargs)['floatingip']
|
|
else:
|
|
self._disassociate_floating_ip(fip_data)
|
|
else:
|
|
net_name = self.servers_details[server_name][2][0]['name']
|
|
fixed_ip_address = self.servers_details[
|
|
server_name][0]['addresses'][net_name][0]['addr']
|
|
if weight:
|
|
weight += count
|
|
if barbican:
|
|
member = self.members_admin_client.create_member(
|
|
pool_id, subnet_id=vip_subnet_id,
|
|
address=fixed_ip_address,
|
|
protocol_port=pool_port,
|
|
weight=weight)['member']
|
|
elif external_subnet:
|
|
member = self.members_admin_client.create_member(
|
|
pool_id, subnet_id=vip_subnet_id,
|
|
address=fixed_ip_address,
|
|
protocol_port=protocol_port,
|
|
weight=weight)['member']
|
|
else:
|
|
member = self.members_client.create_member(
|
|
pool_id, subnet_id=vip_subnet_id,
|
|
address=fixed_ip_address,
|
|
protocol_port=protocol_port,
|
|
weight=weight)
|
|
else:
|
|
if barbican:
|
|
member = self.members_admin_client.create_member(
|
|
pool_id, subnet_id=vip_subnet_id,
|
|
address=fixed_ip_address,
|
|
protocol_port=pool_port)['member']
|
|
elif external_subnet:
|
|
member = self.members_admin_client.create_member(
|
|
pool_id, subnet_id=vip_subnet_id,
|
|
address=fixed_ip_address,
|
|
protocol_port=protocol_port)
|
|
else:
|
|
member = self.members_client.create_member(
|
|
pool_id, subnet_id=vip_subnet_id,
|
|
address=fixed_ip_address,
|
|
protocol_port=protocol_port)
|
|
if barbican or external_subnet:
|
|
self.load_balancers_admin_client.\
|
|
wait_for_load_balancer_status(lb_id)
|
|
else:
|
|
self.wait_for_load_balancer_status(lb_id)
|
|
if barbican or external_subnet:
|
|
if clean_up is None:
|
|
self.addCleanup(
|
|
self.members_admin_client.delete_member,
|
|
pool_id,
|
|
member['id'])
|
|
self.members.append(member)
|
|
self.server_names.append(server_name)
|
|
count += 1
|
|
else:
|
|
break
|
|
if not CONF.nsxv3.ens:
|
|
if barbican or external_subnet:
|
|
self.cmgr_adm.ports_client.update_port(
|
|
self.loadbalancer['vip_port_id'],
|
|
security_groups=[self.sg['id']])
|
|
else:
|
|
self.ports_client.update_port(
|
|
self.loadbalancer['vip_port_id'],
|
|
security_groups=[self.sg['id']])
|
|
# create lbaas public interface
|
|
if barbican or external_subnet:
|
|
if not hasattr(self, 'vip_ip_address'):
|
|
self.cmgr_adm.ports_client.update_port(
|
|
self.loadbalancer['vip_port_id'],
|
|
security_groups=[
|
|
self.sg['id']])
|
|
if external_subnet is None:
|
|
vip_fip = self.create_floatingip(
|
|
self.loadbalancer,
|
|
client=self.cmgr_adm.floating_ips_client,
|
|
port_id=self.loadbalancer['vip_port_id'])
|
|
self.vip_ip_address = vip_fip['floating_ip_address']
|
|
else:
|
|
self.vip_ip_address = self.loadbalancer['vip_address']
|
|
return dict(lb_id=lb_id,
|
|
vip_address=self.vip_ip_address,
|
|
pool_id=pool_id,
|
|
healthmonitor_id=self.healthmonitor['id'],
|
|
members=self.members,
|
|
listener_id=self.listener['id'])
|
|
else:
|
|
if create_fip:
|
|
vip_fip = \
|
|
self.create_floatingip(self.loadbalancer,
|
|
port_id=self.loadbalancer[
|
|
'vip_port_id'])
|
|
self.vip_ip_address = vip_fip['floating_ip_address']
|
|
else:
|
|
self.vip_ip_address = self.loadbalancer['vip_address']
|
|
pools = self.pools_client.show_pool(
|
|
self.pool['id'])
|
|
return dict(lb_id=lb_id, pool=pools,
|
|
vip_port=self.loadbalancer['vip_port_id'],
|
|
vip_ip=self.vip_ip_address)
|
|
|
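    # Note on create_project_lbaas() above: the returned dict differs by
    # branch. For barbican/external_subnet runs it carries lb_id, vip_address,
    # pool_id, healthmonitor_id, members and listener_id; otherwise it carries
    # lb_id, pool, vip_port and vip_ip. Illustrative only:
    #
    #   lb_info = self.create_project_lbaas(protocol_type="HTTP",
    #                                       protocol_port="80",
    #                                       lb_algorithm="ROUND_ROBIN",
    #                                       hm_type="PING")
    #   vip = lb_info['vip_ip']
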
|
def get_router_port(self, client):
|
|
"""List ports using admin creds """
|
|
ports_list = client.list_ports()
|
|
for port in ports_list['ports']:
|
|
port_info = client.show_port(port['id'])
|
|
if port_info['port']['device_owner'] == "network:router_interface":
|
|
return port_info['port']['id']
|
|
return None
|
|
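    # Illustrative only: the VLAN-backed router checks below are usually fed
    # the port id of a router interface (router and subnet are placeholders
    # for the neutron router and subnet dicts under test):
    #
    #   port_id = self.get_router_port(self.cmgr_adm.ports_client)
    #   self.check_downlink_port_created(router, subnet, port_id)
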
|
|
# VLAN-backed Tier-1 router operations
|
|
def check_downlink_port_created(self, router_op, subnet, port_id):
|
|
tag_hit = 0
|
|
backend_rtr_id = ''
|
|
all_routers = self.nsx.get_logical_routers()
|
|
for router in all_routers:
|
|
if router_op['name'] in router.get('display_name'):
|
|
backend_rtr_id = router['id']
|
|
rtr = {'id': backend_rtr_id}
|
|
break
|
|
if backend_rtr_id:
|
|
logical_rtr_ports = self.nsx.get_logical_router_ports(rtr)
|
|
for ports in logical_rtr_ports:
|
|
for rtr_tag in ports.get('tags'):
|
|
if rtr_tag['scope'] == "os-neutron-rport-id" and \
|
|
rtr_tag['tag'] == port_id:
|
|
tag_hit += 1
|
|
continue
|
|
if rtr_tag['scope'] == "os-subnet-id" and subnet['id'] == \
|
|
rtr_tag['tag']:
|
|
tag_hit += 1
|
|
ports_info = ports
|
|
break
|
|
if tag_hit == 2:
|
|
if ports_info.get('resource_type') \
|
|
== 'LogicalRouterDownLinkPort':
|
|
ip_address = ports_info.get('subnets')[
|
|
0].get('ip_addresses')
|
|
if ip_address:
|
|
if not self.\
|
|
cmgr_adm.subnets_client.show_subnet(
|
|
subnet['id']).get('subnet')[
|
|
'gateway_ip'] == ip_address[0]:
|
|
raise RuntimeError(
|
|
"Router centralized port ip doesn't "
|
|
"match with openstack subnet "
|
|
"gatewayip")
|
|
else:
|
|
pass
|
|
else:
|
|
raise RuntimeError(
|
|
"Router_port_id and subnet_id doesn't match at "
|
|
"the backend")
|
|
else:
|
|
raise RuntimeError("Router not created at the backend properly")
|
|
|
|
def check_centralized_port_created(self, router_op, subnet, port_id):
|
|
tag_hit = 0
|
|
backend_rtr_id = ''
|
|
all_routers = self.nsx.get_logical_routers()
|
|
for router in all_routers:
|
|
if router_op['name'] in router.get('display_name'):
|
|
backend_rtr_id = router['id']
|
|
rtr = {'id': backend_rtr_id}
|
|
break
|
|
if backend_rtr_id:
|
|
logical_rtr_ports = self.nsx.get_logical_router_ports(rtr)
|
|
for ports in logical_rtr_ports:
|
|
for rtr_tag in ports.get('tags'):
|
|
if rtr_tag['scope'] == "os-neutron-rport-id" and \
|
|
rtr_tag['tag'] == port_id:
|
|
tag_hit += 1
|
|
continue
|
|
if rtr_tag['scope'] == "os-subnet-id" and subnet['id'] == \
|
|
rtr_tag['tag']:
|
|
tag_hit += 1
|
|
ports_info = ports
|
|
break
|
|
if tag_hit == 2:
|
|
if ports_info.get(
|
|
'resource_type') \
|
|
== 'LogicalRouterCentralizedServicePort':
|
|
ip_address = ports_info.get('subnets')[
|
|
0].get('ip_addresses')
|
|
if ip_address:
|
|
if not self.cmgr_adm.subnets_client.\
|
|
show_subnet(subnet['id']).\
|
|
get('subnet')[
|
|
'gateway_ip'] == ip_address[0]:
|
|
raise RuntimeError(
|
|
"Router centralized port ip doesn't "
|
|
"match with openstack subnet "
|
|
"gatewayip")
|
|
else:
|
|
pass
|
|
else:
|
|
raise RuntimeError(
|
|
"Router_port_id and subnet_id doesn't match at "
|
|
"the backend")
|
|
else:
|
|
raise RuntimeError("Router not created at the backend properly")
|
|
|
|
#
|
|
# QoS base class. To get basics of QoS.
|
|
#
|
|
def get_qos_policy_id(self, policy_id_or_name):
|
|
policies = self.qos_policy_client.list_policies(name=policy_id_or_name)
|
|
policy_list = policies['policies']
|
|
if len(policy_list) > 0:
|
|
return policy_list[0]['id']
|
|
return policy_id_or_name
|
|
|
|
def create_qos_policy(self, name, description, shared, **kwargs):
|
|
result = self.qos_policy_client.create_policy(
|
|
name=name,
|
|
description=description,
|
|
shared=shared,
|
|
**kwargs
|
|
)
|
|
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
|
self.qos_policy_client.delete_policy,
|
|
result['policy']['id'])
|
|
return result.get('policy', result)
|
|
|
|
def delete_qos_policy(self, policy_id):
|
|
result = self.qos_policy_client.delete_policy(policy_id)
|
|
return result.get('policy', result)
|
|
|
|
def list_qos_policies(self, **filters):
|
|
result = self.qos_policy_client.list_policies(**filters)
|
|
return result.get('policies', result)
|
|
|
|
def update_qos_policy(self, policy_id, **kwargs):
|
|
result = self.qos_policy_client.update_policy(policy_id, **kwargs)
|
|
return result.get('policy', result)
|
|
|
|
def show_qos_policy(self, policy_id, **fields):
|
|
result = self.qos_policy_client.show_policy(policy_id, **fields)
|
|
return result.get('policy', result)
|
|
|
|
#
|
|
# QoS bandwidth_limit
|
|
#
|
|
def create_bandwidth_limit_rule(self, policy_id,
|
|
max_kbps, max_burst_kbps,
|
|
**kwargs):
|
|
result = self.qos_bw_client.create_bandwidth_limit_rule(
|
|
policy_id,
|
|
max_kbps=max_kbps, max_burst_kbps=max_burst_kbps,
|
|
**kwargs)
|
|
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
|
self.qos_bw_client.delete_bandwidth_limit_rule,
|
|
result['bandwidth_limit_rule']['id'], policy_id)
|
|
return result.get('bandwidth_limit_rule', result)
|
|
|
|
def delete_bandwidth_limit_rule(self, rule_id, policy_id):
|
|
result = self.qos_bw_client.delete_bandwidth_limit_rule(
|
|
rule_id, policy_id)
|
|
return result.get('bandwidth_limit_rule', result)
|
|
|
|
def update_bandwidth_limit_rule(self, rule_id, policy_id_or_name,
|
|
**kwargs):
|
|
policy_id = self.get_qos_policy_id(policy_id_or_name)
|
|
result = self.qos_bw_client.update_bandwidth_limit_rule(
|
|
rule_id, policy_id, **kwargs)
|
|
return result.get('bandwidth_limit_rule', result)
|
|
|
|
def list_bandwidth_limit_rules(self, policy_id, **filters):
|
|
result = self.qos_bw_client.list_bandwidth_limit_rules(
|
|
policy_id, **filters)
|
|
return result.get('bandwidth_limit_rules', result)
|
|
|
|
def show_bandwidth_limit_rule(self, rule_id, policy_id,
|
|
**fields):
|
|
result = self.qos_bw_client.show_bandwidth_limit_rule(
|
|
rule_id, policy_id)
|
|
return result.get('bandwidth_limit_rule', result)
|
|
|
|
#
|
|
# QoS DSCP Marking Rule
|
|
#
|
|
def create_dscp_marking_rule(self, policy_id, dscp_mark,
|
|
**kwargs):
|
|
policy_id = self.get_qos_policy_id(policy_id)
|
|
kwargs['dscp_mark'] = dscp_mark
|
|
result = self.qos_dscp_client.create_dscp_marking_rule(
|
|
policy_id, **kwargs)
|
|
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
|
self.qos_dscp_client.delete_dscp_marking_rule,
|
|
result['dscp_marking_rule']['id'], policy_id)
|
|
return result.get('dscp_marking_rule', result)
|
|
|
|
def delete_dscp_marking_rule(self, rule_id, policy_id_or_name):
|
|
policy_id = self.get_qos_policy_id(policy_id_or_name)
|
|
result = self.qos_dscp_client.delete_dscp_marking_rule(rule_id,
|
|
policy_id)
|
|
return result.get('dscp_marking_rule', result)
|
|
|
|
def update_dscp_marking_rule(self, rule_id, policy_id_or_name,
|
|
**kwargs):
|
|
policy_id = self.get_qos_policy_id(policy_id_or_name)
|
|
result = self.qos_dscp_client.update_dscp_marking_rule(
|
|
rule_id, policy_id, **kwargs)
|
|
return result.get('dscp_marking_rule', result)
|
|
|
|
def list_dscp_marking_rules(self, policy_id_or_name, **filters):
|
|
policy_id = self.get_qos_policy_id(policy_id_or_name)
|
|
result = self.qos_dscp_client.list_dscp_marking_rules(
|
|
policy_id, **filters)
|
|
return result.get('dscp_marking_rules', result)
|
|
|
|
def show_dscp_marking_rule(self, rule_id, policy_id_or_name, **fields):
|
|
policy_id = self.get_qos_policy_id(policy_id_or_name)
|
|
result = self.qos_dscp_client.show_dscp_marking_rule(
|
|
rule_id, policy_id, **fields)
|
|
return result.get('dscp_marking_rule', result)
|
|
|
|
def list_rule_types(self):
|
|
result = self.types_client.list_rule_types()
|
|
return result.get('rule_types', result)
|
|
|
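    # Illustrative only: composing the QoS helpers above, a test might do:
    #
    #   policy = self.create_qos_policy(name="qos-1",
    #                                   description="tempest qos policy",
    #                                   shared=False)
    #   self.create_bandwidth_limit_rule(policy['id'],
    #                                    max_kbps=1024, max_burst_kbps=1024)
    #   self.create_dscp_marking_rule(policy['id'], dscp_mark=12)
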
|
#
|
|
# Designate Zone
|
|
#
|
|
def rand_zone_name(name='', prefix=None, suffix='.com.'):
|
|
"""Generate a random zone name
|
|
:param str name: The name that you want to include
|
|
:param prefix: the exact text to start the string. Defaults to "rand"
|
|
:param suffix: the exact text to end the string
|
|
:return: a random zone name e.g. example.org.
|
|
:rtype: string
|
|
"""
|
|
name = 'tempest'
|
|
name = data_utils.rand_name(name=name, prefix=prefix)
|
|
zone_name = name + suffix
|
|
return zone_name
|
|
|
|
def rand_email(self, zone_name):
|
|
"""Generate a random zone name
|
|
:return: a random zone name e.g. example.org.
|
|
:rtype: string
|
|
"""
|
|
email_id = 'example@%s' % str(zone_name).rstrip('.')
|
|
return email_id
|
|
|
|
def create_zone(self, name=None, email=None, description=None,
|
|
wait_until=False, tenant_id=None):
|
|
"""Create a zone with the specified parameters.
|
|
:param name: The name of the zone.
|
|
Default: Random Value
|
|
:param email: The email for the zone.
|
|
Default: Random Value
|
|
:param description: A description of the zone.
|
|
Default: Random Value
|
|
:param wait_until: Block until the zone reaches the desired status
|
|
:return: A tuple with the server response and the created zone.
|
|
"""
|
|
if name is None:
|
|
name = self.rand_zone_name()
|
|
zone = {
|
|
'name': name,
|
|
'email': email or self.rand_email(name),
|
|
'description': description or data_utils.rand_name('test-zone'),
|
|
}
|
|
_, body = self.zones_v2_client.create_zone(wait_until, **zone)
|
|
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
|
self.delete_zone, body['id'])
|
|
return body
|
|
|
|
def delete_zone(self, uuid):
|
|
"""Deletes a zone having the specified UUID.
|
|
:param uuid: The unique identifier of the zone.
|
|
:return: A tuple with the server response and the response body.
|
|
"""
|
|
_, body = self.zones_v2_client.delete_zone(uuid)
|
|
return body
|
|
|
|
def show_zone(self, uuid):
|
|
"""Gets a specific zone.
|
|
:param uuid: Unique identifier of the zone in UUID format.
|
|
:return: Serialized zone as a dictionary.
|
|
"""
|
|
return self.zones_v2_client.show_zone(uuid)
|
|
|
|
def list_zones(self):
|
|
"""Gets a list of zones.
|
|
:return: Serialized zones as a list.
|
|
"""
|
|
return self.zones_v2_client.list_zones()
|
|
|
|
def list_record_set_zone(self, uuid, user=None):
|
|
"""list recordsets of a zone.
|
|
:param uuid: The unique identifier of the zone.
|
|
"""
|
|
body = self.zones_v2_client.list_recordset_zone(uuid)
|
|
self.assertGreater(len(body), 0)
|
|
return body
|
|
|
|
def set_ptr_record(self, region, fip_id, ptrd_name):
|
|
"""list ptr recordsets associated with floating ip.
|
|
:param fip_id: Unique FloatingIP ID.
|
|
"""
|
|
ptr_id = region + ":" + fip_id
|
|
ptrd = {
|
|
"ptrdname": ptrd_name,
|
|
"description": "This is a floating ip PTR Domain Name",
|
|
"ttl": 600
|
|
}
|
|
_, body = self.ptr_client.set_fip_ptr_record(ptr_id, **ptrd)
|
|
return body
|
|
|
|
def show_ptr_record(self, region, fip_id, user=None):
|
|
"""list ptr recordsets associated with floating ip.
|
|
:param fip_id: Unique FloatingIP ID.
|
|
"""
|
|
ptr_id = region + ":" + fip_id
|
|
body = self.ptr_client.show_ptr_record(ptr_id)
|
|
return body
|
|
|
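    # Illustrative only: combining the Designate helpers above (region and
    # fip are placeholders for the configured region name and a floating-ip
    # dict created elsewhere in the test):
    #
    #   zone = self.create_zone()
    #   records = self.list_record_set_zone(zone['id'])
    #   self.set_ptr_record(region, fip['id'], ptrd_name=zone['name'])
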
|
def _get_uuid(self, href):
|
|
return href.split('/')[-1]
|
|
|
|
def create_barbican_secret(self, **kwargs):
|
|
"""
|
|
Create barbican secret
|
|
"""
|
|
result = self.secret_client.create_secret(**kwargs)
|
|
uuid = self._get_uuid(result['secret_ref'])
|
|
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
|
self.secret_client.delete_secret, uuid)
|
|
self.secret_client.add_acl_user_secret(
|
|
secret_id=uuid, user_id=CONF.barbican.barbican_user_id)
|
|
return result
|
|
|
|
def create_barbican_container(self, **kwargs):
|
|
"""
|
|
Create barbican secret container
|
|
"""
|
|
result = self.container_client.create_container(**kwargs)
|
|
uuid = self._get_uuid(result['container_ref'])
|
|
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
|
self.container_client.delete_container, uuid)
|
|
self.container_client.add_acl_user_containers(
|
|
secret_id=uuid, user_id=CONF.barbican.barbican_user_id)
|
|
return result
|
|
|
|
def create_barbican_secret_conatainer(self, cert_file, key_file):
|
|
"""
|
|
Create barbican secrets with provided
|
|
cert file and key file.
|
|
Create barbican secret container with
|
|
created secrets and return secrets and container
|
|
"""
|
|
cert_file = open(cert_file, "r")
|
|
cert_content = cert_file.read()
|
|
secret_name1 = data_utils.rand_name(name='tempest-cert-secret')
|
|
kwargs = {"secret_type": 'passphrase',
|
|
"algorithm": constants.ALGORITHM,
|
|
"payload_content_type": constants.PAYLOAD_CONTENT_TYPE,
|
|
"mode": constants.MODE,
|
|
"bit_length": constants.BIT_LENGTH,
|
|
"payload": cert_content,
|
|
"name": secret_name1}
|
|
barbican_secret1 = self.create_barbican_secret(**kwargs)
|
|
cert_file = open(key_file, "r")
|
|
cert_content = cert_file.read()
|
|
secret_name2 = data_utils.rand_name(name='tempest-key-secret')
|
|
kwargs = {"secret_type": 'passphrase',
|
|
"algorithm": constants.ALGORITHM,
|
|
"payload_content_type": constants.PAYLOAD_CONTENT_TYPE,
|
|
"mode": constants.MODE,
|
|
"bit_length": constants.BIT_LENGTH,
|
|
"payload": cert_content,
|
|
"name": secret_name2}
|
|
barbican_secret2 = self.create_barbican_secret(**kwargs)
|
|
container_name = data_utils.rand_name(name='tempest-container')
|
|
kwargs = {"type": constants.CONTAINER_TYPE,
|
|
"name": container_name,
|
|
"secret_refs": [{"secret_ref": barbican_secret1
|
|
['secret_ref'],
|
|
"name": 'certificate'},
|
|
{"secret_ref": barbican_secret2
|
|
['secret_ref'],
|
|
"name": 'private_key'}]}
|
|
barbican_container = self.create_barbican_container(**kwargs)
|
|
secret_container_dict = dict(secret_1=barbican_secret1,
|
|
secret_2=barbican_secret2,
|
|
secret_container=barbican_container)
|
|
return secret_container_dict
|
|
|
|
def check_certificate_at_backend(self, should_present=True,
|
|
cert_conent=None):
|
|
"""
|
|
Check barbican certificate at backend
|
|
"""
|
|
# check certificate at backend
|
|
time.sleep(constants.NSX_BACKEND_VERY_SMALL_TIME_INTERVAL)
|
|
# nsx api call to get certificates from backend
|
|
certs = self.nsx.get_certificates()
|
|
Present = "False"
|
|
for cert in certs:
|
|
if cert['pem_encoded'] == cert_conent:
|
|
Present = "True"
|
|
if should_present:
|
|
self.assertIn(Present, "True")
|
|
else:
|
|
self.assertIn(Present, "False")
|
|
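    # Illustrative only: the Barbican helpers above feed the TERMINATED_HTTPS
    # path of create_project_octavia() below; cert_file/key_file are
    # test-provided PEM paths and network is a placeholder.
    #
    #   secrets = self.create_barbican_secret_conatainer(cert_file, key_file)
    #   self.create_project_octavia(protocol_type="TERMINATED_HTTPS",
    #                               protocol_port="443",
    #                               lb_algorithm="ROUND_ROBIN",
    #                               vip_net_id=network['id'],
    #                               barbican=True,
    #                               barbican_container=secrets[
    #                                   'secret_container'])
    #   self.check_certificate_at_backend(
    #       cert_conent=open(cert_file).read())
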
|
|
def delete_octavia_lb_resources(self, lb_id):
|
|
"""Deletion of lbaas resources.
|
|
|
|
:param lb_id: Load Balancer ID.
|
|
|
|
"""
|
|
oc_client = self.octavia_admin_client
|
|
try:
|
|
statuses = oc_client.show_octavia_lb_status_tree(lb_id)
|
|
statuses = statuses.get('statuses', statuses)
|
|
lb = statuses.get('loadbalancer')
|
|
for listener in lb.get('listeners'):
|
|
for pool in listener.get('pools'):
|
|
self.delete_octavia_lb_pool_resources(lb_id, pool)
|
|
test_utils.call_and_ignore_notfound_exc(
|
|
self.octavia_admin_listener_client.delete_octavia_listener,
|
|
listener.get('id'))
|
|
self.wait_for_octavia_loadbalancer_status(lb_id)
|
|
# delete pools not attached to listener, but loadbalancer
|
|
tbdel = self.octavia_admin_pools_client.\
|
|
list_octavia_pools()['pools']
|
|
lb_pools = [pool for pool in tbdel
|
|
if pool['loadbalancers'][0]['id'] == lb_id]
|
|
for i in lb_pools:
|
|
pool_id = i['id']
|
|
self.octavia_admin_pools_client.delete_octavia_pool(pool_id)
|
|
self.wait_for_octavia_loadbalancer_status(lb_id)
|
|
test_utils.call_and_ignore_notfound_exc(
|
|
oc_client.delete_octavia_load_balancer, lb_id)
|
|
self.octavia_admin_client.\
|
|
wait_for_load_balancer_status(lb_id,
|
|
is_delete_op=True)
|
|
except lib_exc.NotFound:
|
|
pass
|
|
|
|
def delete_octavia_lb_pool_resources(self, lb_id, pool):
|
|
"""Deletion of lbaas pool resources.
|
|
|
|
:param lb_id: Load Balancer ID.
|
|
:param pool: pool information.
|
|
|
|
"""
|
|
pool_id = pool.get('id')
|
|
self.wait_for_octavia_loadbalancer_status(lb_id)
|
|
test_utils.call_and_ignore_notfound_exc(
|
|
self.octavia_admin_pools_client.delete_octavia_pool,
|
|
pool_id)
|
|
self.wait_for_octavia_loadbalancer_status(lb_id)
|
|
|
|
def wait_for_octavia_loadbalancer_status(self, lb_id):
|
|
self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
|
|
|
|

    def create_project_octavia(self, protocol_type,
                               protocol_port, lb_algorithm,
                               hm_type=None, member_count=2,
                               max_vms=None, weight=None,
                               fip_disassociate=None,
                               pool_protocol=None, pool_port=None,
                               vip_subnet_id=None,
                               lb_id=None, count=None,
                               clean_up=None, vip_net_id=None,
                               delay=None, max_retries=None,
                               timeout=None, default_pool=False,
                               vip_port_id=None, persistence=False,
                               persistence_type=None,
                               session_persistence=None,
                               persistence_cookie_name=None,
                               allowed_cidrs=None, l7policy=False, action=None,
                               redirect_url=None, l7rule=False,
                               compare_type=None,
                               type=None, value=None, barbican=False,
                               barbican_container=None, invert=None,
                               qos_policy_id=None, external=None,
                               external_subnet=None, create_fip=None,
                               no_cleanup=False, ipv6=False):
        count = 0
        lb_name = None
        if persistence:
            session_persistence = {}
            if persistence_type:
                session_persistence["type"] = persistence_type
            if persistence_cookie_name:
                session_persistence["cookie_name"] = persistence_cookie_name
        if lb_id is None:
            lb_name = data_utils.rand_name(self.namestart)
            if external:
                vip_subnet_id = external_subnet
                self.loadbalancer = self.\
                    octavia_admin_client.\
                    create_octavia_load_balancer(
                        name=lb_name,
                        vip_subnet_id=external_subnet)['loadbalancer']
            else:
                self.loadbalancer = self.\
                    octavia_admin_client.\
                    create_octavia_load_balancer(
                        name=lb_name,
                        vip_subnet_id=vip_subnet_id,
                        vip_network_id=vip_net_id,
                        vip_port_id=vip_port_id,
                        vip_qos_policy_id=qos_policy_id,
                        admin_state_up=True)['loadbalancer']
            lb_id = self.loadbalancer['id']
            if external:
                self.octavia_admin_client.\
                    wait_for_load_balancer_status(lb_id,
                                                  operating_status='OFFLINE')
            else:
                self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
            if not no_cleanup:
                self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                                self.octavia_admin_client.
                                delete_octavia_load_balancer, lb_id)
        if barbican:
            tls_id = barbican_container["container_ref"]
        else:
            tls_id = None
        self.listener = self.octavia_admin_listener_client.\
            create_octavia_listener(loadbalancer_id=lb_id,
                                    protocol=protocol_type,
                                    protocol_port=protocol_port,
                                    name=lb_name,
                                    allowed_cidrs=allowed_cidrs,
                                    default_tls_container_ref=tls_id
                                    )['listener']
        if not no_cleanup:
            self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                            self.octavia_admin_listener_client.
                            delete_octavia_listener,
                            self.listener['id'])
        if external:
            self.octavia_admin_client.\
                wait_for_load_balancer_status(lb_id,
                                              operating_status='OFFLINE')
        else:
            self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        if l7policy and action != 'REDIRECT_TO_POOL':
            l7p = self.octavia_admin_l7policies_client.\
                create_octavia_l7policies(listener_id=self.listener['id'],
                                          name='l7p', action=action,
                                          redirect_url=redirect_url)
            self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
            if l7rule:
                l7p_id = l7p['l7policy']['id']
                self.octavia_admin_l7rules_client.create_octavia_l7rules(
                    l7policy_id=l7p_id, compare_type=compare_type, value=value,
                    type=type, invert=invert)
                self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        if default_pool:
            self.pool = self.octavia_admin_pools_client.\
                create_octavia_pool(loadbalancer_id=lb_id,
                                    lb_algorithm=lb_algorithm,
                                    protocol=protocol_type,
                                    name=lb_name)
            pool_id = self.pool['pool']['id']
            if external:
                self.octavia_admin_client.\
                    wait_for_load_balancer_status(lb_id,
                                                  operating_status='OFFLINE')
            else:
                self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
            self.octavia_admin_listener_client.\
                update_octavia_listener(default_pool_id=pool_id,
                                        listener_id=self.listener['id'])
            if external:
                self.octavia_admin_client.\
                    wait_for_load_balancer_status(lb_id,
                                                  operating_status='OFFLINE')
            else:
                self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        else:
            if barbican:
                protocol_type = pool_protocol
            self.pool = self.octavia_admin_pools_client.\
                create_octavia_pool(listener_id=self.listener['id'],
                                    lb_algorithm=lb_algorithm,
                                    protocol=protocol_type,
                                    name=lb_name,
                                    session_persistence=session_persistence)
            if external:
                self.octavia_admin_client.\
                    wait_for_load_balancer_status(lb_id,
                                                  operating_status='OFFLINE')
            else:
                self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
            pool_id = self.pool['pool']['id']
        if not no_cleanup:
            self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                            self.octavia_admin_pools_client.
                            delete_octavia_pool,
                            pool_id)
        if hm_type:
            self.healthmonitor = self.octavia_hm_client.\
                create_octavia_hm(pool_id=pool_id, type=hm_type, delay=delay,
                                  timeout=timeout, max_retries=max_retries,
                                  name=lb_name)
            if external:
                self.octavia_admin_client.\
                    wait_for_load_balancer_status(lb_id,
                                                  operating_status='OFFLINE')
            else:
                self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
            if not no_cleanup:
                self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                                self.octavia_hm_client.
                                delete_octavia_hm,
                                self.healthmonitor['healthmonitor']['id'])
        self.members = []
        for server_name in self.topology_servers.keys():
            if count < member_count:
                if create_fip:
                    fip_data = self.servers_details[server_name].\
                        floating_ips[0]
                    if fip_disassociate is True:
                        x = str(
                            self.topology_servers[server_name]['addresses'].
                            keys()).split("'")[1]
                        m = self.topology_servers[server_name]
                        fixed_ip_address = m['addresses'][x][0]['addr']
                    else:
                        if external:
                            fixed_ip_address = fip_data['floating_ip_address']
                        else:
                            fixed_ip_address = fip_data['fixed_ip_address']
                    if fip_disassociate is None:
                        kwargs = dict(port_id=None)
                        self.cmgr_adm.floating_ips_client.\
                            update_floatingip(fip_data['id'],
                                              **kwargs)['floatingip']
                else:
                    net_name = self.servers_details[server_name][2][0]['name']
                    fixed_ip_address = self.servers_details[
                        server_name][0]['addresses'][net_name][0]['addr']
                    if ipv6:
                        if self.servers_details[server_name][0]['addresses'][
                                net_name][0]['version'] == 6:
                            fixed_ip_address = self.servers_details[
                                server_name][0]['addresses'][
                                net_name][0]['addr']
                        else:
                            fixed_ip_address = self.servers_details[
                                server_name][0]['addresses'][
                                net_name][1]['addr']
                if weight:
                    weight += count
                    if barbican:
                        member = self.octavia_admin_members_client.\
                            create_octavia_member(pool_id,
                                                  subnet_id=vip_subnet_id,
                                                  address=fixed_ip_address,
                                                  protocol_port=pool_port,
                                                  weight=weight)
                    else:
                        member = self.octavia_admin_members_client.\
                            create_octavia_member(pool_id,
                                                  subnet_id=vip_subnet_id,
                                                  address=fixed_ip_address,
                                                  protocol_port=protocol_port,
                                                  weight=weight)
                else:
                    member = self.octavia_admin_members_client.\
                        create_octavia_member(pool_id, subnet_id=vip_subnet_id,
                                              address=fixed_ip_address,
                                              protocol_port=protocol_port)
                self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
                self.members.append(member)
                self.server_names.append(server_name)
                count += 1
            else:
                break
        self.cmgr_adm.ports_client.\
            update_port(self.loadbalancer['vip_port_id'],
                        security_groups=[self.sg['id']])
        # create floatingip for public network
        if create_fip is False:
            self.vip_ip_address = self.loadbalancer['vip_port_id']
            vip_fip = self.vip_ip_address
        else:
            self.cmgr_adm.ports_client.update_port(
                self.loadbalancer['vip_port_id'],
                security_groups=[
                    self.sg['id']])
            if not external:
                vip_fip = self.create_floatingip(
                    self.loadbalancer,
                    client=self.cmgr_adm.floating_ips_client,
                    port_id=self.loadbalancer['vip_port_id'])
                self.vip_ip_address = vip_fip['floating_ip_address']
            else:
                self.vip_ip_address = self.loadbalancer['vip_address']
                vip_fip = []
        return dict(lb_id=lb_id,
                    vip_address=self.vip_ip_address,
                    pool_id=pool_id,
                    members=self.members,
                    listener_id=self.listener['id'],
                    vip_fip=vip_fip,
                    lb_vip=self.loadbalancer['vip_port_id'],
                    loadbalancer=self.loadbalancer)
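
    # Usage sketch (hypothetical; argument values are illustrative only):
    #
    #     topo = self.deploy_octavia_topology()
    #     result = self.create_project_octavia(
    #         protocol_type="HTTP", protocol_port="80",
    #         lb_algorithm="ROUND_ROBIN", hm_type="PING",
    #         delay=5, timeout=5, max_retries=3,
    #         vip_net_id=topo['network']['id'], create_fip=True)
    #     # `result` carries lb_id, pool_id, members, listener_id, vip_fip,
    #     # lb_vip and the load balancer body for later assertions.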

    def check_router_components_on_edge(self, router):
        edge_ips = CONF.nsx_edge.nsx_edge_ip
        nsx_dr_rtr_name = "DR-" + router['name']
        dr_present = False
        nsx_sr_rtr_name = "SR-" + router['name']
        sr_present = False
        for nsx_edge_ip in edge_ips:
            ssh_client = remote_client.RemoteClient(
                nsx_edge_ip, 'root', 'Admin!23Admin')
            command = "nsxcli -c get logical-router | awk {'print $4'}"
            data = ssh_client.exec_command(command)
            result = data.split('\n')
            present = False
            present = [True for el in result if nsx_sr_rtr_name in el]
            if present:
                sr_present = True
            present = False
            present = [True for el in result if nsx_dr_rtr_name in el]
            if present:
                dr_present = True
        return [{'dr_present': dr_present}, {'sr_present': sr_present}]
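
    # Usage sketch (hypothetical): after attaching a router to an external
    # network, assert that both the distributed (DR) and service (SR)
    # components were realized on the configured NSX edge nodes:
    #
    #     result = self.check_router_components_on_edge(router['router'])
    #     self.assertTrue(result[0]['dr_present'])
    #     self.assertTrue(result[1]['sr_present'])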

    def create_project_octavia_scale(self, protocol_type,
                                     protocol_port, lb_algorithm,
                                     hm_type=None, member_count=2,
                                     max_vms=None, weight=None,
                                     fip_disassociate=None,
                                     pool_protocol=None, pool_port=None,
                                     vip_subnet_id=None,
                                     lb_id=None, count=None,
                                     clean_up=None, vip_net_id=None,
                                     delay=None, max_retries=None,
                                     timeout=None, default_pool=False,
                                     vip_port_id=None, scale=None,
                                     listener_count=None, pool_count=None,
                                     lb_pool=False, allowed_cidrs=None):
        count = 0
        lb_name = None
        lb_name = data_utils.rand_name(self.namestart)
        if not listener_count:
            listener_count = 1
        if not pool_count:
            pool_count = 1
        self.pools = []
        for i in range(scale):
            self.loadbalancer = self.\
                octavia_admin_client.\
                create_octavia_load_balancer(name=lb_name,
                                             vip_subnet_id=vip_subnet_id,
                                             vip_network_id=vip_net_id,
                                             )['loadbalancer']
            lb_id = self.loadbalancer['id']
            self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
            for lc in range(listener_count):
                protocol_port = str(int(protocol_port) + 1)
                self.listener = self.octavia_admin_listener_client.\
                    create_octavia_listener(loadbalancer_id=lb_id,
                                            protocol=protocol_type,
                                            protocol_port=protocol_port,
                                            name=lb_name,
                                            allowed_cidrs=allowed_cidrs
                                            )['listener']
                self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
                l_id = self.listener['id']
                for x in range(pool_count):
                    if not lb_pool:
                        self.pool = self.octavia_admin_pools_client.\
                            create_octavia_pool(listener_id=l_id,
                                                lb_algorithm=lb_algorithm,
                                                protocol=protocol_type,
                                                name=lb_name)
                    else:
                        self.pool = self.octavia_admin_pools_client.\
                            create_octavia_pool(loadbalancer_id=lb_id,
                                                lb_algorithm=lb_algorithm,
                                                protocol=protocol_type,
                                                name=lb_name)
                    self.octavia_admin_client.\
                        wait_for_load_balancer_status(lb_id)
                    pool_id = self.pool['pool']['id']
                    self.octavia_admin_listener_client.\
                        update_octavia_listener(listener_id=l_id,
                                                default_pool_id=pool_id)
                    self.octavia_admin_client.\
                        wait_for_load_balancer_status(lb_id)
                    if hm_type:
                        self.healthmonitor = self.octavia_hm_client.\
                            create_octavia_hm(pool_id=pool_id,
                                              type=hm_type, delay=2,
                                              timeout=2, max_retries=2,
                                              name=lb_name)
                        self.octavia_admin_client.\
                            wait_for_load_balancer_status(lb_id)
                    self.members = []
                    count = 0
                    for server_name in self.topology_servers.keys():
                        if count < member_count:
                            fip_data = self.servers_details[server_name].\
                                floating_ips[0]
                            fixed_ip_address = fip_data['fixed_ip_address']
                            if fip_disassociate is None:
                                kwargs = dict(port_id=None)
                                self.cmgr_adm.floating_ips_client.\
                                    update_floatingip(fip_data['id'],
                                                      **kwargs)['floatingip']

                            if weight:
                                weight += count
                                member = self.octavia_admin_members_client.\
                                    create_octavia_member(
                                        pool_id, subnet_id=vip_subnet_id,
                                        address=fixed_ip_address,
                                        protocol_port=protocol_port,
                                        weight=weight)
                            else:
                                member = self.octavia_admin_members_client.\
                                    create_octavia_member(
                                        pool_id, subnet_id=vip_subnet_id,
                                        address=fixed_ip_address,
                                        protocol_port=protocol_port)
                            self.octavia_admin_client.\
                                wait_for_load_balancer_status(lb_id)
                            self.members.append(member)
                            self.server_names.append(server_name)
                            count += 1
                        else:
                            break
        self.cmgr_adm.ports_client.\
            update_port(self.loadbalancer['vip_port_id'],
                        security_groups=[self.sg['id']])
        # create floatingip for public network
        self.cmgr_adm.ports_client.update_port(
            self.loadbalancer['vip_port_id'],
            security_groups=[
                self.sg['id']])
        vip_fip = self.create_floatingip(
            self.loadbalancer,
            client=self.cmgr_adm.floating_ips_client,
            port_id=self.loadbalancer['vip_port_id'])
        self.vip_ip_address = vip_fip['floating_ip_address']
        return 1
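
    # Usage sketch (hypothetical; values are illustrative): create two load
    # balancers, each with two listeners and one pool per listener, with
    # members drawn from self.topology_servers:
    #
    #     self.create_project_octavia_scale(
    #         protocol_type="HTTP", protocol_port="80",
    #         lb_algorithm="ROUND_ROBIN", scale=2,
    #         listener_count=2, vip_subnet_id=subnet['id'])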

    def get_status_lb_resources(self, lb_id):
        status = self.octavia_admin_client.show_octavia_lb_status_tree(lb_id)
        statuses = status['statuses']
        status_dict = {}
        status_dict['lb_operating'] = \
            statuses['loadbalancer']['operating_status']
        status_dict['provisioning_status'] = \
            statuses['loadbalancer']['provisioning_status']
        listeners = status['statuses']['loadbalancer']['listeners']
        status_dict['lstnr_op_status'] = \
            listeners[0]['operating_status']
        status_dict['lstnr_pr_status'] = \
            listeners[0]['provisioning_status']
        pools, hms = listeners[0]['pools'][0], \
            listeners[0]['pools'][0]['health_monitor']
        status_dict['pool_pr_status'] = pools['provisioning_status']
        status_dict['pool_op_status'] = pools['operating_status']
        status_dict['hm_pr_status'] = hms['provisioning_status']
        status_dict['hm_op_status'] = hms['operating_status']
        noerr = True
        for stat in list(status_dict.values()):
            if stat not in ('ACTIVE', 'ONLINE'):
                noerr = False
                break
        return noerr, status_dict
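
    # Usage sketch (hypothetical): assert every provisioning/operating
    # status collected above is ACTIVE/ONLINE once the topology settles:
    #
    #     noerr, statuses = self.get_status_lb_resources(lb_id)
    #     self.assertTrue(noerr, statuses)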

    def deploy_octavia_topology(self, no_of_servers=2,
                                image_id=None, slaac=False):
        kwargs = {'name': "router_lbaas",
                  'external_gateway_info':
                  {"network_id": CONF.network.public_network_id}}
        router_lbaas = self.cmgr_adm.routers_client.create_router(**kwargs)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.cmgr_adm.routers_client.delete_router,
                        router_lbaas['router']['id'])
        networks_client = self.cmgr_adm.networks_client
        name = "network_lbaas_1"
        network_lbaas_1 = self.\
            create_topology_network(name,
                                    networks_client=networks_client)
        # verify realization state of network
        time.sleep(constants.SLEEP_BETWEEN_VIRTUAL_SEREVRS_OPEARTIONS)
        nsx_network = self.nsxp.get_logical_switch(network_lbaas_1['name'],
                                                   network_lbaas_1['id'])
        self.assertTrue(self.nsxp.verify_realized_state(nsx_network))
        sec_rule_client = self.cmgr_adm.security_group_rules_client
        sec_client = self.cmgr_adm.security_groups_client
        kwargs = dict(tenant_id=network_lbaas_1['tenant_id'],
                      security_group_rules_client=sec_rule_client,
                      security_groups_client=sec_client)
        self.sg = self.create_topology_security_group(**kwargs)
        lbaas_rules = [dict(direction='ingress', protocol='tcp',
                            port_range_min=constants.HTTP_PORT,
                            port_range_max=constants.HTTP_PORT, ),
                       dict(direction='ingress', protocol='tcp',
                            port_range_min=443, port_range_max=443, )]
        t_id = network_lbaas_1['tenant_id']
        for rule in lbaas_rules:
            self.add_security_group_rule(self.sg, rule,
                                         secclient=sec_client,
                                         ruleclient=sec_rule_client,
                                         tenant_id=t_id)
        if slaac:
            address_cidr = CONF.network.project_network_v6_cidr
            address_prefixlen = CONF.network.project_network_v6_mask_bits
            if address_prefixlen >= 126:
                msg = ("Subnet %s isn't large enough" % address_cidr)
                raise lib_exc.InvalidConfiguration(msg)
            body = {'ip_version': 6, 'ipv6_ra_mode': 'slaac',
                    'ipv6_address_mode': 'slaac', 'cidr': '2001:db8::/64',
                    "network_id": network_lbaas_1['id'],
                    'allocation_pools': [{
                        'start': str(address_cidr).split('/')[0] + '2',
                        'end': str(address_cidr).split('/')[0] + '70'}]}
            create_floating_ip = False
        else:
            body = {"network_id": network_lbaas_1['id'],
                    "allocation_pools":
                    [{"start": "2.0.0.2", "end": "2.0.0.254"}],
                    "ip_version": 4, "cidr": "2.0.0.0/24"}
            create_floating_ip = True
        # Verify realization state of sg after rule add
        time.sleep(constants.SLEEP_BETWEEN_VIRTUAL_SEREVRS_OPEARTIONS)
        sg = self.nsxp.get_firewall_section(self.sg['name'], self.sg['id'],
                                            os_tenant_id='default')
        self.assertTrue(self.nsxp.verify_realized_state(sg))
        subnet_client = self.cmgr_adm.subnets_client
        subnet_lbaas = subnet_client.create_subnet(**body)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        subnet_client.delete_subnet,
                        subnet_lbaas['subnet']['id'])
        self.cmgr_adm.routers_client.\
            add_router_interface(router_lbaas['router']['id'],
                                 subnet_id=subnet_lbaas['subnet']['id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.cmgr_adm.routers_client.remove_router_interface,
                        router_lbaas['router']['id'],
                        subnet_id=subnet_lbaas['subnet']['id'])
        for instance in range(0, no_of_servers):
            self.create_topology_instance(
                "server_lbaas_%s" % instance, [network_lbaas_1],
                security_groups=[{'name': self.sg['name']}],
                image_id=image_id, clients=self.cmgr_adm,
                create_floating_ip=create_floating_ip)
        # Verify realization status of network on vm creation
        time.sleep(constants.SLEEP_BETWEEN_VIRTUAL_SEREVRS_OPEARTIONS)
        nsx_network = self.nsxp.get_logical_switch(network_lbaas_1['name'],
                                                   network_lbaas_1['id'])
        self.assertTrue(self.nsxp.verify_realized_state(nsx_network))
        return dict(router=router_lbaas, subnet=subnet_lbaas,
                    network=network_lbaas_1)
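
    # Usage sketch (hypothetical): `slaac=True` switches the tenant subnet
    # to IPv6 SLAAC and skips floating IPs for the backend servers:
    #
    #     topo = self.deploy_octavia_topology(no_of_servers=2, slaac=True)
    #     subnet_id = topo['subnet']['subnet']['id']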

    def verifyBackendStatus_octavia_lb(self, status):
        activeStatus = ("UP", "ACTIVE")
        if status['service_status'] not in activeStatus:
            return False
        for vs in status['virtual_servers']:
            if vs['status'] not in activeStatus:
                return False
        for pool in status['pools']:
            if pool['status'] not in activeStatus:
                return False
            for member in pool['members']:
                if member['status'] not in activeStatus:
                    return False
        return True
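
    # Usage sketch (hypothetical; the backend fetch below is a placeholder,
    # not a real client call): `status` is expected to be an NSX LB service
    # status dict with 'service_status', 'virtual_servers' and 'pools' keys:
    #
    #     backend_status = ...  # fetched from the NSX backend
    #     ok = self.verifyBackendStatus_octavia_lb(backend_status)
    #     self.assertTrue(ok)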