Merge "Datera 2.3 driver update"

Jenkins 2017-01-24 00:38:12 +00:00 committed by Gerrit Code Review
commit b15e84f594
10 changed files with 3119 additions and 1220 deletions

View File

@@ -70,7 +70,8 @@
 from cinder.volume.drivers.coprhd import common as \
     cinder_volume_drivers_coprhd_common
 from cinder.volume.drivers.coprhd import scaleio as \
     cinder_volume_drivers_coprhd_scaleio
-from cinder.volume.drivers import datera as cinder_volume_drivers_datera
+from cinder.volume.drivers.datera import datera_iscsi as \
+    cinder_volume_drivers_datera_dateraiscsi
 from cinder.volume.drivers.dell import dell_storagecenter_common as \
     cinder_volume_drivers_dell_dellstoragecentercommon
 from cinder.volume.drivers.dell_emc.scaleio import driver as \
     cinder_volume_drivers_dell_emc_scaleio_driver
@@ -269,7 +270,7 @@ def list_opts():
             cinder_volume_drivers_coho.coho_opts,
             cinder_volume_drivers_coprhd_common.volume_opts,
             cinder_volume_drivers_coprhd_scaleio.scaleio_opts,
-            cinder_volume_drivers_datera.d_opts,
+            cinder_volume_drivers_datera_dateraiscsi.d_opts,
             cinder_volume_drivers_dell_dellstoragecentercommon.
             common_opts,
             cinder_volume_drivers_dell_emc_scaleio_driver.scaleio_opts,

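Review note: list_opts() discovers driver options by module attribute, so this rename only holds together because datera_iscsi keeps exporting d_opts. For readers outside the Cinder tree, a minimal sketch of how such an option list is declared with oslo.config (the option names come from values the new tests set; the defaults and help strings here are illustrative, not from this hunk):

    from oslo_config import cfg

    d_opts = [
        cfg.StrOpt('datera_tenant_id',
                   default=None,
                   help='Tenant to send with 2.1 API requests.'),
        cfg.StrOpt('datera_api_port',
                   default='7717',
                   help='Datera API port.'),
    ]

    cfg.CONF.register_opts(d_opts)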
View File

@@ -19,27 +19,27 @@
 from cinder import context
 from cinder import exception
 from cinder import test
 from cinder.volume import configuration as conf
-from cinder.volume.drivers import datera
+from cinder.volume.drivers.datera import datera_iscsi as datera
 from cinder.volume import volume_types

-datera.DEFAULT_SI_SLEEP = 0
-URL_TEMPLATES = datera.URL_TEMPLATES
-OS_PREFIX = datera.OS_PREFIX
-UNMANAGE_PREFIX = datera.UNMANAGE_PREFIX
+datera.datc.DEFAULT_SI_SLEEP = 0
+datera.datc.DEFAULT_SNAP_SLEEP = 0
+URL_TEMPLATES = datera.datc.URL_TEMPLATES
+OS_PREFIX = datera.datc.OS_PREFIX
+UNMANAGE_PREFIX = datera.datc.UNMANAGE_PREFIX


-class DateraVolumeTestCase(test.TestCase):
+class DateraVolumeTestCasev2(test.TestCase):

     def setUp(self):
-        super(DateraVolumeTestCase, self).setUp()
+        super(DateraVolumeTestCasev2, self).setUp()

         self.cfg = mock.Mock(spec=conf.Configuration)
         self.cfg.san_ip = '127.0.0.1'
         self.cfg.san_is_local = True
         self.cfg.datera_api_token = 'secret'
         self.cfg.datera_api_port = '7717'
-        self.cfg.datera_api_version = '1'
         self.cfg.datera_num_replicas = '2'
         self.cfg.datera_503_timeout = 0.01
         self.cfg.datera_503_interval = 0.001

@@ -47,6 +47,9 @@ class DateraVolumeTestCase(test.TestCase):
         self.cfg.datera_debug = False
         self.cfg.san_login = 'user'
         self.cfg.san_password = 'pass'
+        self.cfg.datera_tenant_id = 'test-tenant'
+        self.cfg.driver_client_cert = None
+        self.cfg.driver_client_cert_key = None

         mock_exec = mock.Mock()
         mock_exec.return_value = ('', '')

@@ -57,8 +60,15 @@
         self.driver.configuration.get = _config_getter
         self.volume = _stub_volume()
         self.api_patcher = mock.patch('cinder.volume.drivers.datera.'
-                                      'DateraDriver._issue_api_request')
+                                      'datera_iscsi.DateraDriver.'
+                                      '_issue_api_request')
+        self.driver._request = mock.Mock()
+        m = mock.Mock()
+        m.json.return_value = {'api_versions': ['v2']}
+        self.driver._request.return_value = m
         self.mock_api = self.api_patcher.start()
+        self._apiv = "2"
+        self._tenant = None
         self.addCleanup(self.api_patcher.stop)
@@ -124,16 +134,24 @@
         self.assertIsNone(self.driver.create_cloned_volume(self.volume,
                                                            source_volume))

-    @mock.patch.object(datera.DateraDriver, 'extend_volume')
-    def test_create_cloned_volume_success_larger(self, mock_extend):
+    def test_create_cloned_volume_success_larger(self):
         cloned_volume = _stub_volume(
             id='7f91abfa-7964-41ed-88fc-207c3a290b4f',
             display_name='foo',
             size=2
         )
-        self.driver.create_cloned_volume(cloned_volume, self.volume)
-        mock_extend.assert_called_once_with(cloned_volume,
-                                            cloned_volume['size'])
+        mock_extend = mock.Mock()
+        if self._apiv == '2':
+            self.driver._extend_volume_2 = mock_extend
+            self.driver.create_cloned_volume(cloned_volume, self.volume)
+            mock_extend.assert_called_with(
+                cloned_volume, cloned_volume['size'])
+        else:
+            self.driver._extend_volume_2_1 = mock_extend
+            self.driver.create_cloned_volume(cloned_volume, self.volume)
+            mock_extend.assert_called_with(
+                cloned_volume, cloned_volume['size'])

     def test_create_cloned_volume_fails(self):
         self.mock_api.side_effect = exception.DateraAPIException
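Review note: the test no longer patches the public extend_volume; it swaps in the per-version private method, because the driver now routes each operation to a _<operation>_<version> implementation. A sketch of that dispatch under stated assumptions (the helper name is illustrative; the real lookup lives in datera_common, not in this hunk):

    def _api_lookup(driver, op, api_version):
        # '2' -> '_extend_volume_2'; '2.1' -> '_extend_volume_2_1'
        suffix = api_version.replace('.', '_')
        return getattr(driver, '_{}_{}'.format(op, suffix))

    # _api_lookup(driver, 'extend_volume', '2.1')(volume, new_size)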
@@ -149,7 +167,22 @@
         self.assertIsNone(self.driver.delete_volume(self.volume))

     def test_delete_volume_not_found(self):
-        self.mock_api.side_effect = exception.NotFound
+        if self._apiv == '2':
+            self.mock_api.side_effect = exception.NotFound
+        else:
+            self.mock_api.side_effect = [
+                self._generate_fake_api_request("tenant"),
+                {},
+                exception.NotFound,
+                self._generate_fake_api_request()(
+                    "acl_policy", api_version=self._apiv, tenant=self._tenant),
+                self._generate_fake_api_request()(
+                    "ig_group", api_version=self._apiv, tenant=self._tenant),
+                {},
+                {},
+                {},
+                {},
+            ]
         self.assertIsNone(self.driver.delete_volume(self.volume))

     def test_delete_volume_fails(self):
@@ -196,10 +229,9 @@
             'data': {
                 'target_discovered': False,
                 'volume_id': self.volume['id'],
-                'target_iqn': ('iqn.2013-05.com.daterainc:c20aba21-6ef6-'
-                               '446b-b374-45733b4883ba--ST--storage-1:01:'
-                               'sn:34e5b20fbadd3abb'),
-                'target_portal': '172.28.94.11:3260',
+                'target_iqn': ('iqn.2013-05.com.daterainc:tc:01:sn:'
+                               '3bbb080aab7d9abc'),
+                'target_portal': '172.28.41.63:3260',
                 'target_lun': 0,
                 'discard': False}}
         self.assertEqual(expected,
@@ -215,16 +247,29 @@
                           connector)

     def test_detach_volume_success(self):
-        self.mock_api.side_effect = [
-            {},
-            self._generate_fake_api_request()(
-                "acl_policy"),
-            self._generate_fake_api_request()(
-                "ig_group"),
-            {},
-            {},
-            {},
-            {}]
+        if self._apiv == '2':
+            self.mock_api.side_effect = [
+                {},
+                self._generate_fake_api_request()(
+                    "acl_policy", api_version=self._apiv, tenant=self._tenant),
+                self._generate_fake_api_request()(
+                    "ig_group", api_version=self._apiv, tenant=self._tenant),
+                {},
+                {},
+                {},
+                {}]
+        else:
+            self.mock_api.side_effect = [
+                {},
+                {},
+                self._generate_fake_api_request()(
+                    "acl_policy", api_version=self._apiv, tenant=self._tenant),
+                self._generate_fake_api_request()(
+                    "ig_group", api_version=self._apiv, tenant=self._tenant),
+                {},
+                {},
+                {},
+                {}]
         ctxt = context.get_admin_context()
         volume = _stub_volume(status='in-use')
         self.assertIsNone(self.driver.detach_volume(ctxt, volume))

@@ -237,13 +282,27 @@
                           self.driver.detach_volume, ctxt, volume)

     def test_detach_volume_not_found(self):
-        self.mock_api.side_effect = exception.NotFound
+        if self._apiv == '2':
+            self.mock_api.side_effect = exception.NotFound
+        else:
+            self.mock_api.side_effect = [
+                self._generate_fake_api_request("tenant"),
+                exception.NotFound,
+                self._generate_fake_api_request()(
+                    "acl_policy", api_version=self._apiv, tenant=self._tenant),
+                self._generate_fake_api_request()(
+                    "ig_group", api_version=self._apiv, tenant=self._tenant),
+                {},
+                {},
+                {},
+                {}]
         ctxt = context.get_admin_context()
         volume = _stub_volume(status='in-use')
         self.assertIsNone(self.driver.detach_volume(ctxt, volume))

     def test_create_snapshot_success(self):
         snapshot = _stub_snapshot(volume_id=self.volume['id'])
+        self.mock_api.side_effect = self._generate_fake_api_request()
         self.assertIsNone(self.driver.create_snapshot(snapshot))

     def test_create_snapshot_fails(self):
@@ -257,7 +316,15 @@
         self.assertIsNone(self.driver.delete_snapshot(snapshot))

     def test_delete_snapshot_not_found(self):
-        self.mock_api.side_effect = [stub_return_snapshots, exception.NotFound]
+        if self._apiv == '2':
+            self.mock_api.side_effect = [
+                stub_return_snapshots,
+                exception.NotFound]
+        else:
+            self.mock_api.side_effect = [
+                self._generate_fake_api_request("tenant"),
+                stub_return_snapshots_21,
+                exception.NotFound]
         snapshot = _stub_snapshot(self.volume['id'], volume_id="test")
         self.assertIsNone(self.driver.delete_snapshot(snapshot))
@@ -269,7 +336,17 @@
     def test_create_volume_from_snapshot_success(self):
         snapshot = _stub_snapshot(volume_id=self.volume['id'])
-        self.mock_api.side_effect = [stub_return_snapshots, None]
+        if self._apiv == '2':
+            self.mock_api.side_effect = [
+                stub_return_snapshots,
+                list(stub_return_snapshots.values())[0],
+                None]
+        else:
+            self.mock_api.side_effect = [
+                self._generate_fake_api_request("tenant"),
+                stub_return_snapshots_21,
+                {'data': stub_return_snapshots_21['data'][0]},
+                None]
         self.assertIsNone(
             self.driver.create_volume_from_snapshot(self.volume, snapshot))
@@ -294,26 +371,43 @@
         self.mock_api.return_value = {
             'key': 'dd2469de081346c28ac100e071709403'
         }
-        self.assertIsNone(self.driver._login())
+        self.assertIsNone(self.driver.login())
         self.assertEqual(1, self.mock_api.call_count)

     def test_login_unsuccessful(self):
         self.mock_api.side_effect = exception.NotAuthorized
-        self.assertRaises(exception.NotAuthorized, self.driver._login)
+        self.assertRaises(exception.NotAuthorized, self.driver.login)
         self.assertEqual(1, self.mock_api.call_count)

     def test_manage_existing(self):
-        TEST_NAME = {"source-name": "test-app:test-si:test-vol"}
         self.mock_api.return_value = {}
-        self.assertIsNone(
-            self.driver.manage_existing(
-                _stub_volume(),
-                TEST_NAME))
-        self.mock_api.assert_called_once_with(
-            URL_TEMPLATES['ai_inst']().format(
-                TEST_NAME["source-name"].split(":")[0]),
-            method='put',
-            body={'name': OS_PREFIX + _stub_volume()['id']})
+        if self._apiv == '2':
+            test_name = {"source-name": "test-app:test-si:test-vol"}
+            self.assertIsNone(
+                self.driver.manage_existing(
+                    _stub_volume(),
+                    test_name))
+            self.mock_api.assert_called_with(
+                URL_TEMPLATES['ai_inst']().format(
+                    test_name["source-name"].split(":")[0]),
+                method='put',
+                body={'name': OS_PREFIX + _stub_volume()['id']},
+                api_version=self._apiv)
+        else:
+            tenant = 'tenant'
+            test_name = {"source-name": "{}:test-app:test-si:test-vol".format(
+                tenant)}
+            self.assertIsNone(
+                self.driver.manage_existing(
+                    _stub_volume(),
+                    test_name))
+            self.mock_api.assert_called_with(
+                URL_TEMPLATES['ai_inst']().format(
+                    test_name["source-name"].split(":")[1]),
+                method='put',
+                body={'name': OS_PREFIX + _stub_volume()['id']},
+                api_version=self._apiv,
+                tenant='tenant')

     def test_manage_existing_wrong_ref(self):
         TEST_NAME = {"source-name": "incorrect-reference"}
@@ -330,9 +424,17 @@
             self.driver.manage_existing_get_size(
                 _stub_volume(),
                 TEST_NAME), 500)
-        self.mock_api.assert_called_once_with(
-            URL_TEMPLATES['ai_inst']().format(
-                TEST_NAME["source-name"].split(":")[0]))
+        if self._apiv == '2':
+            self.mock_api.assert_called_with(
+                URL_TEMPLATES['ai_inst']().format(
+                    TEST_NAME["source-name"].split(":")[0]),
+                api_version=self._apiv)
+        else:
+            self.mock_api.assert_called_with(
+                URL_TEMPLATES['ai_inst']().format(
+                    TEST_NAME["source-name"].split(":")[0]),
+                api_version=self._apiv,
+                tenant=self._tenant)

     def test_manage_existing_get_size_wrong_ref(self):
         TEST_NAME = {"source-name": "incorrect-reference"}
@@ -343,51 +445,108 @@
                           TEST_NAME)

     def test_get_manageable_volumes(self):
-        self.mock_api.return_value = non_cinder_ais
-        self.assertEqual(
-            self.driver.get_manageable_volumes(
-                {}, "", 10, 0, "", ""),
-            [{'cinder_id': None,
-              'extra_info': None,
-              'reason_not_safe': None,
-              'reference': {"source-name": 'test-app-inst:storage-1:volume-1'},
-              'safe_to_manage': True,
-              'size': 50},
-             {'cinder_id': 'c20aba21-6ef6-446b-b374-45733b4883ba',
-              'extra_info': None,
-              'reason_not_safe': None,
-              'reference': None,
-              'safe_to_manage': False,
-              'size': None}])
+        if self._apiv == '2':
+            self.mock_api.return_value = non_cinder_ais
+            self.assertEqual(
+                self.driver.get_manageable_volumes(
+                    {}, "", 10, 0, "", ""),
+                [{'cinder_id': None,
+                  'extra_info': None,
+                  'reason_not_safe': None,
+                  'reference': {
+                      "source-name": 'test-app-inst:storage-1:volume-1'},
+                  'safe_to_manage': True,
+                  'size': 50},
+                 {'cinder_id': 'c20aba21-6ef6-446b-b374-45733b4883ba',
+                  'extra_info': None,
+                  'reason_not_safe': None,
+                  'reference': None,
+                  'safe_to_manage': False,
+                  'size': None}])
+        else:
+            self.mock_api.return_value = non_cinder_ais_21
+            self.assertEqual(
+                self.driver.get_manageable_volumes(
+                    {}, "", 10, 0, "", ""),
+                [{'cinder_id': None,
+                  'extra_info': None,
+                  'reason_not_safe': '',
+                  'reference': {
+                      "source-name": 'test-app-inst:storage-1:volume-1'},
+                  'safe_to_manage': True,
+                  'size': 50},
+                 {'cinder_id': 'c20aba21-6ef6-446b-b374-45733b4883ba',
+                  'extra_info': None,
+                  'reason_not_safe': '',
+                  'reference': None,
+                  'safe_to_manage': False,
+                  'size': None}])

     def test_unmanage(self):
         self.mock_api.return_value = {}
         self.assertIsNone(self.driver.unmanage(_stub_volume()))
-        self.mock_api.assert_called_once_with(
-            URL_TEMPLATES['ai_inst']().format(
-                OS_PREFIX + _stub_volume()['id']),
-            method='put',
-            body={'name': UNMANAGE_PREFIX + _stub_volume()['id']})
+        if self._apiv == '2':
+            self.mock_api.assert_called_with(
+                URL_TEMPLATES['ai_inst']().format(
+                    OS_PREFIX + _stub_volume()['id']),
+                method='put',
+                body={'name': UNMANAGE_PREFIX + _stub_volume()['id']},
+                api_version=self._apiv)
+        else:
+            self.mock_api.assert_called_with(
+                URL_TEMPLATES['ai_inst']().format(
+                    OS_PREFIX + _stub_volume()['id']),
+                method='put',
+                body={'name': UNMANAGE_PREFIX + _stub_volume()['id']},
+                api_version=self._apiv,
+                tenant=self._tenant)

     def _generate_fake_api_request(self, targets_exist=True):
-        def _fake_api_request(resource_type, method='get', resource=None,
-                              body=None, action=None, sensitive=False):
+        def _fake_api_request(resource_type, *args, **kwargs):
+            if 'api_version' not in kwargs:
+                raise ValueError("Fix me dummy")
+            result = None
             if resource_type.split('/')[-1] == 'storage-1':
-                return stub_get_export
-            elif resource_type == 'app_instances':
-                return stub_single_ai
+                result = stub_get_export
             elif (resource_type.split('/')[-1] ==
                     'c20aba21-6ef6-446b-b374-45733b4883ba'):
-                return stub_app_instance[
+                result = stub_app_instance[
                     'c20aba21-6ef6-446b-b374-45733b4883ba']
             elif resource_type == 'acl_policy':
-                return stub_acl
+                result = stub_acl if self._apiv == '2' else stub_acl_21
             elif resource_type == 'ig_group':
-                return stub_ig
+                result = stub_ig if self._apiv == '2' else stub_ig_21
+            elif resource_type.split('/')[-1] == 'snapshots':
+                result = {'timestamp': 'test_ts'}
+            elif resource_type.split('/')[-1] == 'test_ts':
+                result = {'op_state': 'available'}
+            elif resource_type == 'tenant':
+                return {}
             else:
-                return list(stub_app_instance.values())[0]
+                if kwargs.get('api_version') == '2':
+                    result = list(stub_app_instance.values())[0]
+                else:
+                    result = stub_app_instance_21['data']
+
+            if kwargs.get('api_version') == '2':
+                return result
+            else:
+                return {'data': result}
         return _fake_api_request
class DateraVolumeTestCasev21(DateraVolumeTestCasev2):

    def setUp(self):
        super(DateraVolumeTestCasev21, self).setUp()

        m = mock.Mock()
        m.json.return_value = {'api_versions': ['v2.1']}
        self.driver._request.return_value = m
        self.driver._store_metadata = mock.Mock()
        self._apiv = '2.1'
        self._tenant = self.cfg.datera_tenant_id
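Review note: DateraVolumeTestCasev21 re-runs the entire v2 suite against API 2.1 by overriding only _apiv, _tenant, and the advertised api_versions; _generate_fake_api_request then selects the *_21 stubs and wraps them in the 2.1 {'data': ...} envelope. For example, inside a v2.1 test:

    fake = self._generate_fake_api_request()
    acl = fake('acl_policy', api_version='2.1', tenant=self._tenant)
    self.assertEqual({'data': stub_acl_21}, acl)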
stub_acl = {
    'initiator_groups': [
        '/initiator_groups/IG-8739f309-dae9-4534-aa02-5b8e9e96eefd'],

@@ -395,11 +554,23 @@ stub_acl = {
    'path': ('/app_instances/8739f309-dae9-4534-aa02-5b8e9e96eefd/'
             'storage_instances/storage-1/acl_policy')}
stub_acl_21 = {
'initiator_groups': [
{'path': '/initiator_groups/IG-8739f309-dae9-4534-aa02-5b8e9e96eefd'}],
'initiators': [],
'path': ('/app_instances/8739f309-dae9-4534-aa02-5b8e9e96eefd/'
'storage_instances/storage-1/acl_policy')}
stub_ig = {
    'members': ['/initiators/iqn.1993-08.org.debian:01:ed22de8d75c0'],
    'name': 'IG-21e08155-8b95-4108-b148-089f64623963',
    'path': '/initiator_groups/IG-21e08155-8b95-4108-b148-089f64623963'}
stub_ig_21 = {
'members': [
{'path': '/initiators/iqn.1993-08.org.debian:01:ed22de8d75c0'}],
'name': 'IG-21e08155-8b95-4108-b148-089f64623963',
'path': '/initiator_groups/IG-21e08155-8b95-4108-b148-089f64623963'}
stub_create_export = {
    "_ipColl": ["172.28.121.10", "172.28.120.10"],
@@ -458,11 +629,10 @@ stub_app_instance = {
         "storage-1": {
             "access": {
                 "ips": [
-                    "172.28.94.11"
+                    "172.28.41.63"
                 ],
-                "iqn": "iqn.2013-05.com.daterainc:c20aba21-6ef6-446b-"
-                       "b374-45733b4883ba--ST--storage-1:01:sn:"
-                       "34e5b20fbadd3abb",
+                "iqn": "iqn.2013-05.com.daterainc:tc:01:sn:"
+                       "3bbb080aab7d9abc",
                 "path": "/app_instances/c20aba21-6ef6-446b-b374"
                         "-45733b4883ba/storage_instances/storage-1/access"
             },
@@ -524,7 +694,104 @@ stub_app_instance = {
        "uuid": "c20aba21-6ef6-446b-b374-45733b4883ba"
    }
}
stub_app_instance_21 = {
"tenant": "/root",
"path": "/app_instances/1e52946a-5c77-45ed-8b4e-b46e7236a8eb",
"version": "v2.1",
"data": {
"tenant": "/root",
"path": "/app_instances/1e52946a-5c77-45ed-8b4e-b46e7236a8eb",
"name": "OS-9b0216bc-8aab-47f2-b746-843f497cb7a6",
"id": "1e52946a-5c77-45ed-8b4e-b46e7236a8eb",
"health": "ok",
"app_template": {
"path": "",
"resolved_path": "",
"resolved_tenant": ""
},
"descr": "",
"admin_state": "online",
"storage_instances": [
{
"health": "ok",
"path": "/app_instances/1e52946a-5c77-45ed-8b4e-b46e7236a8eb/"
"storage_instances/storage-1",
"name": "storage-1",
"admin_state": "online",
"op_state": "available",
"volumes": [
{
"path": "/app_instances/1e52946a-5c77-45ed-8b4e-"
"b46e7236a8eb/"
"storage_instances/storage-1/volumes/volume-1",
"name": "volume-1",
"replica_count": 1,
"uuid": "9b0216bc-8aab-47f2-b746-843f497cb7a6",
"size": 500,
"capacity_in_use": 0,
"snapshot_policies": [],
"snapshots": [],
"placement_mode": "hybrid",
"op_state": "available",
"active_storage_nodes": [
{
"path": "/storage_nodes/75f2cae4-68fb-4236-"
"a90c-b6c480b68816"
}
],
"health": "ok"
}
],
"access_control_mode": "deny_all",
"acl_policy": {
"path": "/app_instances/1e52946a-5c77-45ed-8b4e-"
"b46e7236a8eb/"
"storage_instances/storage-1/acl_policy",
"initiators": [],
"initiator_groups": []
},
"ip_pool": {
"path": "/access_network_ip_pools/default",
"resolved_path": "/access_network_ip_pools/default",
"resolved_tenant": "/root"
},
"access": {
"path": "/app_instances/1e52946a-5c77-45ed-8b4e-"
"b46e7236a8eb/"
"storage_instances/storage-1/access",
"ips": [
"172.28.41.63",
"172.29.41.29"
],
"iqn": "iqn.2013-05.com.daterainc:tc:01:sn:"
"3bbb080aab7d9abc"
},
"auth": {
"path": "/app_instances/1e52946a-5c77-45ed-8b4e-"
"b46e7236a8eb/"
"storage_instances/storage-1/auth",
"type": "none",
"initiator_user_name": "",
"initiator_pswd": "(hidden)",
"target_user_name": "",
"target_pswd": "(hidden)"
},
"active_initiators": [],
"active_storage_nodes": [
{
"path": "/storage_nodes/75f2cae4-68fb-4236-a90c-"
"b6c480b68816"
}
],
"uuid": "eb3d7b07-b520-4cc2-b365-90135b84c356"
}
],
"create_mode": "openstack",
"uuid": "9b0216bc-8aab-47f2-b746-843f497cb7a6",
"snapshots": [],
"snapshot_policies": []
}
}
stub_get_export = stub_app_instance[
    'c20aba21-6ef6-446b-b374-45733b4883ba']['storage_instances']['storage-1']
@@ -549,6 +816,25 @@ stub_return_snapshots = \
        }
    }
stub_return_snapshots_21 = {
'data': [
{
"op_state": "available",
"path": "/app_instances/c20aba21-6ef6-446b-b374-45733b4883ba"
"/storage_instances/storage-1/volumes/volume-1/snapshots/",
"timestamp": "1446076293.118600738",
"utc_ts": "1446076293.118600738",
"uuid": "0bb34f0c-fea4-48e0-bf96-591120ac7e3c"
},
{
"op_state": "available",
"path": "/app_instances/c20aba21-6ef6-446b-b374-45733b4883ba"
"/storage_instances/storage-1/volumes/volume-1/snapshots/",
"timestamp": "1446076384.00607846",
"utc_ts": "1446076384.00607846",
"uuid": "25b4b959-c30a-45f2-a90c-84a40f34f0a1"
}]
}
non_cinder_ais = {
    "75bc1c69-a399-4acb-aade-3514caf13c5e": {
@@ -691,6 +977,147 @@ non_cinder_ais = {
    }
}
non_cinder_ais_21 = {
'data': [{
"admin_state": "online",
"create_mode": "normal",
"descr": "",
"health": "ok",
"id": "75bc1c69-a399-4acb-aade-3514caf13c5e",
"name": "test-app-inst",
"path": "/app_instances/75bc1c69-a399-4acb-aade-3514caf13c5e",
"snapshot_policies": {},
"snapshots": {},
"storage_instances": {
"storage-1": {
"access": {
"ips": [
"172.28.41.93"
],
"iqn": "iqn.2013-05.com.daterainc:tc:01:sn:"
"29036682e2d37b98",
"path": "/app_instances/75bc1c69-a399-4acb-aade-"
"3514caf13c5e/storage_instances/storage-1/access"
},
"access_control_mode": "deny_all",
"acl_policy": {
"initiator_groups": [],
"initiators": [],
"path": "/app_instances/75bc1c69-a399-4acb-aade-"
"3514caf13c5e/storage_instances/storage-"
"1/acl_policy"
},
"active_initiators": [],
"active_storage_nodes": [
"/storage_nodes/78b350a8-43f2-453f-a257-8df76d7406b9"
],
"admin_state": "online",
"auth": {
"initiator_pswd": "(hidden)",
"initiator_user_name": "",
"path": "/app_instances/75bc1c69-a399-4acb-aade-"
"3514caf13c5e/storage_instances/storage-1/auth",
"target_pswd": "(hidden)",
"target_user_name": "",
"type": "none"
},
"creation_type": "user",
"ip_pool": "/access_network_ip_pools/default",
"name": "storage-1",
"op_state": "available",
"path": "/app_instances/75bc1c69-a399-4acb-aade-"
"3514caf13c5e/storage_instances/storage-1",
"uuid": "6421237d-e4fc-433a-b535-148d5b6d8586",
"volumes": {
"volume-1": {
"capacity_in_use": 0,
"name": "volume-1",
"op_state": "available",
"path": "/app_instances/75bc1c69-a399-4acb-aade-"
"3514caf13c5e/storage_instances/storage-"
"1/volumes/volume-1",
"replica_count": 1,
"size": 50,
"snapshot_policies": {},
"snapshots": {},
"uuid": "e674d29c-a672-40d1-9577-abe3a504ffe9"
}
}
}
},
"uuid": "00000000-0000-0000-0000-000000000000"
},
{
"admin_state": "offline",
"create_mode": "openstack",
"descr": "",
"health": "ok",
"id": "dfdaf8d1-8976-4c13-a829-3345e03cf810",
"name": "OS-c20aba21-6ef6-446b-b374-45733b4883ba",
"path": "/app_instances/dfdaf8d1-8976-4c13-a829-3345e03cf810",
"snapshot_policies": {},
"snapshots": {},
"storage_instances": {
"storage-1": {
"access": {
"ips": [
"172.28.41.57"
],
"iqn": "iqn.2013-05.com.daterainc:tc:01:sn:"
"56cd59e754ad02b6",
"path": "/app_instances/dfdaf8d1-8976-4c13-a829-"
"3345e03cf810/storage_instances/storage-1/access"
},
"access_control_mode": "deny_all",
"acl_policy": {
"initiator_groups": [],
"initiators": [],
"path": "/app_instances/dfdaf8d1-8976-4c13-a829-"
"3345e03cf810/storage_instances/storage-"
"1/acl_policy"
},
"active_initiators": [],
"active_storage_nodes": [
"/storage_nodes/78b350a8-43f2-453f-a257-8df76d7406b9"
],
"admin_state": "offline",
"auth": {
"initiator_pswd": "(hidden)",
"initiator_user_name": "",
"path": "/app_instances/dfdaf8d1-8976-4c13-a829-"
"3345e03cf810/storage_instances/storage-1/auth",
"target_pswd": "(hidden)",
"target_user_name": "",
"type": "none"
},
"creation_type": "user",
"ip_pool": "/access_network_ip_pools/default",
"name": "storage-1",
"op_state": "unavailable",
"path": "/app_instances/dfdaf8d1-8976-4c13-a829-3345e03cf810"
"/storage_instances/storage-1",
"uuid": "5620a673-9985-464e-9616-e325a50eac60",
"volumes": {
"volume-1": {
"capacity_in_use": 0,
"name": "volume-1",
"op_state": "available",
"path": "/app_instances/dfdaf8d1-8976-4c13-a829-"
"3345e03cf810/storage_instances/storage-"
"1/volumes/volume-1",
"replica_count": 1,
"size": 5,
"snapshot_policies": {},
"snapshots": {},
"uuid": "c20aba21-6ef6-446b-b374-45733b4883ba"
}
}
}
},
"uuid": "c20aba21-6ef6-446b-b374-45733b4883ba"
}]
}
def _stub_datera_volume(*args, **kwargs):
    return {

File diff suppressed because it is too large

View File

View File

@@ -0,0 +1,763 @@
# Copyright 2016 Datera
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import uuid
import eventlet
import ipaddress
import six
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from cinder.i18n import _, _LI, _LW, _LE
from cinder import exception
from cinder.volume import utils as volutils
import cinder.volume.drivers.datera.datera_common as datc
LOG = logging.getLogger(__name__)
class DateraApi(object):
# =================
# = Create Volume =
# =================
def _create_volume_2(self, volume):
# Generate App Instance, Storage Instance and Volume
# Volume ID will be used as the App Instance Name
# Storage Instance and Volumes will have standard names
policies = self._get_policies_for_resource(volume)
num_replicas = int(policies['replica_count'])
storage_name = policies['default_storage_name']
volume_name = policies['default_volume_name']
template = policies['template']
if template:
app_params = (
{
'create_mode': "openstack",
# 'uuid': str(volume['id']),
'name': datc._get_name(volume['id']),
'app_template': '/app_templates/{}'.format(template)
})
else:
app_params = (
{
'create_mode': "openstack",
'uuid': str(volume['id']),
'name': datc._get_name(volume['id']),
'access_control_mode': 'deny_all',
'storage_instances': {
storage_name: {
'name': storage_name,
'volumes': {
volume_name: {
'name': volume_name,
'size': volume['size'],
'replica_count': num_replicas,
'snapshot_policies': {
}
}
}
}
}
})
self._issue_api_request(
datc.URL_TEMPLATES['ai'](),
'post',
body=app_params,
api_version='2')
self._update_qos(volume, policies)
# =================
# = Extend Volume =
# =================
def _extend_volume_2(self, volume, new_size):
# Current product limitation:
# If app_instance is bound to template resizing is not possible
# Once policies are implemented in the product this can go away
policies = self._get_policies_for_resource(volume)
template = policies['template']
if template:
LOG.warning(_LW("Volume size not extended due to template binding:"
" volume: %(volume)s, template: %(template)s"),
volume=volume, template=template)
return
# Offline App Instance, if necessary
reonline = False
app_inst = self._issue_api_request(
datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id'])),
api_version='2')
if app_inst['admin_state'] == 'online':
reonline = True
self._detach_volume_2(None, volume)
# Change Volume Size
app_inst = datc._get_name(volume['id'])
data = {
'size': new_size
}
store_name, vol_name = self._scrape_template(policies)
self._issue_api_request(
datc.URL_TEMPLATES['vol_inst'](
store_name, vol_name).format(app_inst),
method='put',
body=data,
api_version='2')
# Online Volume, if it was online before
if reonline:
self._create_export_2(None, volume, None)
# =================
# = Cloned Volume =
# =================
def _create_cloned_volume_2(self, volume, src_vref):
policies = self._get_policies_for_resource(volume)
store_name, vol_name = self._scrape_template(policies)
src = "/" + datc.URL_TEMPLATES['vol_inst'](
store_name, vol_name).format(datc._get_name(src_vref['id']))
data = {
'create_mode': 'openstack',
'name': datc._get_name(volume['id']),
'uuid': str(volume['id']),
'clone_src': src,
}
self._issue_api_request(
datc.URL_TEMPLATES['ai'](), 'post', body=data, api_version='2')
if volume['size'] > src_vref['size']:
self._extend_volume_2(volume, volume['size'])
# =================
# = Delete Volume =
# =================
def _delete_volume_2(self, volume):
self.detach_volume(None, volume)
app_inst = datc._get_name(volume['id'])
try:
self._issue_api_request(datc.URL_TEMPLATES['ai_inst']().format(
app_inst),
method='delete',
api_version='2')
except exception.NotFound:
msg = _LI("Tried to delete volume %s, but it was not found in the "
"Datera cluster. Continuing with delete.")
LOG.info(msg, datc._get_name(volume['id']))
# =================
# = Ensure Export =
# =================
def _ensure_export_2(self, context, volume, connector):
return self._create_export_2(context, volume, connector)
# =========================
# = Initialize Connection =
# =========================
def _initialize_connection_2(self, volume, connector):
# Now online the app_instance (which will online all storage_instances)
multipath = connector.get('multipath', False)
url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
data = {
'admin_state': 'online'
}
app_inst = self._issue_api_request(
url, method='put', body=data, api_version='2')
storage_instances = app_inst["storage_instances"]
si_names = list(storage_instances.keys())
portal = storage_instances[si_names[0]]['access']['ips'][0] + ':3260'
iqn = storage_instances[si_names[0]]['access']['iqn']
if multipath:
portals = [p + ':3260' for p in
storage_instances[si_names[0]]['access']['ips']]
iqns = [iqn for _ in
storage_instances[si_names[0]]['access']['ips']]
lunids = [self._get_lunid() for _ in
storage_instances[si_names[0]]['access']['ips']]
return {
'driver_volume_type': 'iscsi',
'data': {
'target_discovered': False,
'target_iqn': iqn,
'target_iqns': iqns,
'target_portal': portal,
'target_portals': portals,
'target_lun': self._get_lunid(),
'target_luns': lunids,
'volume_id': volume['id'],
'discard': False}}
else:
return {
'driver_volume_type': 'iscsi',
'data': {
'target_discovered': False,
'target_iqn': iqn,
'target_portal': portal,
'target_lun': self._get_lunid(),
'volume_id': volume['id'],
'discard': False}}
# =================
# = Create Export =
# =================
def _create_export_2(self, context, volume, connector):
# Online volume in case it hasn't been already
url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
data = {
'admin_state': 'online'
}
self._issue_api_request(url, method='put', body=data, api_version='2')
# Check if we've already setup everything for this volume
url = (datc.URL_TEMPLATES['si']().format(datc._get_name(volume['id'])))
storage_instances = self._issue_api_request(url, api_version='2')
# Handle adding initiator to product if necessary
# Then add initiator to ACL
policies = self._get_policies_for_resource(volume)
store_name, _ = self._scrape_template(policies)
if (connector and
connector.get('initiator') and
not policies['acl_allow_all']):
initiator_name = "OpenStack_{}_{}".format(
self.driver_prefix, str(uuid.uuid4())[:4])
initiator_group = datc.INITIATOR_GROUP_PREFIX + volume['id']
found = False
initiator = connector['initiator']
current_initiators = self._issue_api_request(
'initiators', api_version='2')
for iqn, values in current_initiators.items():
if initiator == iqn:
found = True
break
# If we didn't find a matching initiator, create one
if not found:
data = {'id': initiator, 'name': initiator_name}
# Try and create the initiator
# If we get a conflict, ignore it because race conditions
self._issue_api_request("initiators",
method="post",
body=data,
conflict_ok=True,
api_version='2')
# Create initiator group with initiator in it
initiator_path = "/initiators/{}".format(initiator)
initiator_group_path = "/initiator_groups/{}".format(
initiator_group)
ig_data = {'name': initiator_group, 'members': [initiator_path]}
self._issue_api_request("initiator_groups",
method="post",
body=ig_data,
conflict_ok=True,
api_version='2')
# Create ACL with initiator group as reference for each
# storage_instance in app_instance
# TODO(_alastor_): We need to avoid changing the ACLs if the
# template already specifies an ACL policy.
for si_name in storage_instances.keys():
acl_url = (datc.URL_TEMPLATES['si']() +
"/{}/acl_policy").format(
datc._get_name(volume['id']), si_name)
existing_acl = self._issue_api_request(acl_url,
method="get",
api_version='2')
data = {}
data['initiators'] = existing_acl['initiators']
data['initiator_groups'] = existing_acl['initiator_groups']
data['initiator_groups'].append(initiator_group_path)
self._issue_api_request(acl_url,
method="put",
body=data,
api_version='2')
if connector and connector.get('ip'):
try:
# Case where volume_type has non default IP Pool info
if policies['ip_pool'] != 'default':
initiator_ip_pool_path = self._issue_api_request(
"access_network_ip_pools/{}".format(
policies['ip_pool']), api_version='2')['path']
# Fallback to trying reasonable IP based guess
else:
initiator_ip_pool_path = self._get_ip_pool_for_string_ip(
connector['ip'])
ip_pool_url = datc.URL_TEMPLATES['si_inst'](
store_name).format(datc._get_name(volume['id']))
ip_pool_data = {'ip_pool': initiator_ip_pool_path}
self._issue_api_request(ip_pool_url,
method="put",
body=ip_pool_data,
api_version='2')
except exception.DateraAPIException:
# Datera product 1.0 support
pass
# Check to ensure we're ready for go-time
self._si_poll(volume, policies)
# =================
# = Detach Volume =
# =================
def _detach_volume_2(self, context, volume, attachment=None):
url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
data = {
'admin_state': 'offline',
'force': True
}
try:
self._issue_api_request(url, method='put', body=data,
api_version='2')
except exception.NotFound:
msg = _LI("Tried to detach volume %s, but it was not found in the "
"Datera cluster. Continuing with detach.")
LOG.info(msg, volume['id'])
# TODO(_alastor_): Make acl cleaning multi-attach aware
self._clean_acl_2(volume)
def _check_for_acl_2(self, initiator_path):
"""Returns True if an acl is found for initiator_path """
# TODO(_alastor_) when we get a /initiators/:initiator/acl_policies
# endpoint use that instead of this monstrosity
initiator_groups = self._issue_api_request("initiator_groups",
api_version='2')
for ig, igdata in initiator_groups.items():
if initiator_path in igdata['members']:
LOG.debug("Found initiator_group: %s for initiator: %s",
ig, initiator_path)
return True
LOG.debug("No initiator_group found for initiator: %s", initiator_path)
return False
def _clean_acl_2(self, volume):
policies = self._get_policies_for_resource(volume)
store_name, _ = self._scrape_template(policies)
acl_url = (datc.URL_TEMPLATES["si_inst"](
store_name) + "/acl_policy").format(datc._get_name(volume['id']))
try:
initiator_group = self._issue_api_request(
acl_url, api_version='2')['initiator_groups'][0]
initiator_iqn_path = self._issue_api_request(
initiator_group.lstrip("/"))["members"][0]
# Clear out ACL and delete initiator group
self._issue_api_request(acl_url,
method="put",
body={'initiator_groups': []},
api_version='2')
self._issue_api_request(initiator_group.lstrip("/"),
method="delete",
api_version='2')
if not self._check_for_acl_2(initiator_iqn_path):
self._issue_api_request(initiator_iqn_path.lstrip("/"),
method="delete",
api_version='2')
except (IndexError, exception.NotFound):
LOG.debug("Did not find any initiator groups for volume: %s",
volume)
# ===================
# = Create Snapshot =
# ===================
def _create_snapshot_2(self, snapshot):
policies = self._get_policies_for_resource(snapshot)
store_name, vol_name = self._scrape_template(policies)
url_template = datc.URL_TEMPLATES['vol_inst'](
store_name, vol_name) + '/snapshots'
url = url_template.format(datc._get_name(snapshot['volume_id']))
snap_params = {
'uuid': snapshot['id'],
}
snap = self._issue_api_request(url, method='post', body=snap_params,
api_version='2')
snapu = "/".join((url, snap['timestamp']))
self._snap_poll(snapu)
# ===================
# = Delete Snapshot =
# ===================
def _delete_snapshot_2(self, snapshot):
policies = self._get_policies_for_resource(snapshot)
store_name, vol_name = self._scrape_template(policies)
snap_temp = datc.URL_TEMPLATES['vol_inst'](
store_name, vol_name) + '/snapshots'
snapu = snap_temp.format(datc._get_name(snapshot['volume_id']))
snapshots = self._issue_api_request(snapu, method='get',
api_version='2')
try:
for ts, snap in snapshots.items():
if snap['uuid'] == snapshot['id']:
url_template = snapu + '/{}'
url = url_template.format(ts)
self._issue_api_request(url, method='delete',
api_version='2')
break
else:
raise exception.NotFound
except exception.NotFound:
msg = _LI("Tried to delete snapshot %s, but was not found in "
"Datera cluster. Continuing with delete.")
LOG.info(msg, datc._get_name(snapshot['id']))
# ========================
# = Volume From Snapshot =
# ========================
def _create_volume_from_snapshot_2(self, volume, snapshot):
policies = self._get_policies_for_resource(snapshot)
store_name, vol_name = self._scrape_template(policies)
snap_temp = datc.URL_TEMPLATES['vol_inst'](
store_name, vol_name) + '/snapshots'
snapu = snap_temp.format(datc._get_name(snapshot['volume_id']))
snapshots = self._issue_api_request(snapu, method='get',
api_version='2')
for ts, snap in snapshots.items():
if snap['uuid'] == snapshot['id']:
found_ts = ts
break
else:
raise exception.NotFound
snap_url = (snap_temp + '/{}').format(
datc._get_name(snapshot['volume_id']), found_ts)
self._snap_poll(snap_url)
src = "/" + snap_url
app_params = (
{
'create_mode': 'openstack',
'uuid': str(volume['id']),
'name': datc._get_name(volume['id']),
'clone_src': src,
})
self._issue_api_request(
datc.URL_TEMPLATES['ai'](),
method='post',
body=app_params,
api_version='2')
# ==========
# = Manage =
# ==========
def _manage_existing_2(self, volume, existing_ref):
existing_ref = existing_ref['source-name']
if existing_ref.count(":") != 2:
raise exception.ManageExistingInvalidReference(
_("existing_ref argument must be of this format:"
"app_inst_name:storage_inst_name:vol_name"))
app_inst_name = existing_ref.split(":")[0]
LOG.debug("Managing existing Datera volume %s. "
"Changing name to %s",
datc._get_name(volume['id']),
existing_ref)
data = {'name': datc._get_name(volume['id'])}
self._issue_api_request(datc.URL_TEMPLATES['ai_inst']().format(
app_inst_name), method='put', body=data, api_version='2')
# ===================
# = Manage Get Size =
# ===================
def _manage_existing_get_size_2(self, volume, existing_ref):
existing_ref = existing_ref['source-name']
if existing_ref.count(":") != 2:
raise exception.ManageExistingInvalidReference(
_("existing_ref argument must be of this format:"
"app_inst_name:storage_inst_name:vol_name"))
app_inst_name, si_name, vol_name = existing_ref.split(":")
app_inst = self._issue_api_request(
datc.URL_TEMPLATES['ai_inst']().format(app_inst_name),
api_version='2')
return self._get_size_2(volume, app_inst, si_name, vol_name)
def _get_size_2(self, volume, app_inst=None, si_name=None, vol_name=None):
"""Helper method for getting the size of a backend object
If app_inst is provided, we'll just parse the dict to get
the size instead of making a separate http request
"""
policies = self._get_policies_for_resource(volume)
si_name = si_name if si_name else policies['default_storage_name']
vol_name = vol_name if vol_name else policies['default_volume_name']
if not app_inst:
vol_url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
app_inst = self._issue_api_request(vol_url)
size = app_inst[
'storage_instances'][si_name]['volumes'][vol_name]['size']
return size
# =========================
# = Get Manageable Volume =
# =========================
def _get_manageable_volumes_2(self, cinder_volumes, marker, limit, offset,
sort_keys, sort_dirs):
LOG.debug("Listing manageable Datera volumes")
app_instances = self._issue_api_request(
datc.URL_TEMPLATES['ai'](), api_version='2').values()
results = []
cinder_volume_ids = [vol['id'] for vol in cinder_volumes]
for ai in app_instances:
ai_name = ai['name']
reference = None
size = None
safe_to_manage = False
reason_not_safe = None
cinder_id = None
extra_info = None
if re.match(datc.UUID4_RE, ai_name):
cinder_id = ai_name.lstrip(datc.OS_PREFIX)
if (not cinder_id and
ai_name.lstrip(datc.OS_PREFIX) not in cinder_volume_ids):
safe_to_manage = self._is_manageable(ai)
if safe_to_manage:
si = list(ai['storage_instances'].values())[0]
si_name = si['name']
vol = list(si['volumes'].values())[0]
vol_name = vol['name']
size = vol['size']
reference = {"source-name": "{}:{}:{}".format(
ai_name, si_name, vol_name)}
results.append({
'reference': reference,
'size': size,
'safe_to_manage': safe_to_manage,
'reason_not_safe': reason_not_safe,
'cinder_id': cinder_id,
'extra_info': extra_info})
page_results = volutils.paginate_entries_list(
results, marker, limit, offset, sort_keys, sort_dirs)
return page_results
# ============
# = Unmanage =
# ============
def _unmanage_2(self, volume):
LOG.debug("Unmanaging Cinder volume %s. Changing name to %s",
volume['id'], datc._get_unmanaged(volume['id']))
data = {'name': datc._get_unmanaged(volume['id'])}
self._issue_api_request(datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id'])),
method='put',
body=data,
api_version='2')
# ================
# = Volume Stats =
# ================
def _get_volume_stats_2(self, refresh=False):
if refresh or not self.cluster_stats:
try:
LOG.debug("Updating cluster stats info.")
results = self._issue_api_request('system', api_version='2')
if 'uuid' not in results:
LOG.error(_LE(
'Failed to get updated stats from Datera Cluster.'))
backend_name = self.configuration.safe_get(
'volume_backend_name')
stats = {
'volume_backend_name': backend_name or 'Datera',
'vendor_name': 'Datera',
'driver_version': self.VERSION,
'storage_protocol': 'iSCSI',
'total_capacity_gb': (
int(results['total_capacity']) / units.Gi),
'free_capacity_gb': (
int(results['available_capacity']) / units.Gi),
'reserved_percentage': 0,
}
self.cluster_stats = stats
except exception.DateraAPIException:
LOG.error(_LE('Failed to get updated stats from Datera '
'cluster.'))
return self.cluster_stats
def _is_manageable(self, app_inst):
if len(app_inst['storage_instances']) == 1:
si = list(app_inst['storage_instances'].values())[0]
if len(si['volumes']) == 1:
return True
return False
# =========
# = Login =
# =========
def _login_2(self):
"""Use the san_login and san_password to set token."""
body = {
'name': self.username,
'password': self.password
}
# Unset token now, otherwise potential expired token will be sent
# along to be used for authorization when trying to login.
self.datera_api_token = None
try:
LOG.debug('Getting Datera auth token.')
results = self._issue_api_request('login', 'put', body=body,
sensitive=True, api_version='2')
self.datera_api_token = results['key']
except exception.NotAuthorized:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Logging into the Datera cluster failed. Please '
'check your username and password set in the '
'cinder.conf and start the cinder-volume '
'service again.'))
# ===========
# = Polling =
# ===========
def _snap_poll(self, url):
eventlet.sleep(datc.DEFAULT_SNAP_SLEEP)
TIMEOUT = 10
retry = 0
poll = True
while poll and not retry >= TIMEOUT:
retry += 1
snap = self._issue_api_request(url, api_version='2')
if snap['op_state'] == 'available':
poll = False
else:
eventlet.sleep(1)
if retry >= TIMEOUT:
raise exception.VolumeDriverException(
message=_('Snapshot not ready.'))
def _si_poll(self, volume, policies):
# Initial 4 second sleep required for some Datera versions
eventlet.sleep(datc.DEFAULT_SI_SLEEP)
TIMEOUT = 10
retry = 0
check_url = datc.URL_TEMPLATES['si_inst'](
policies['default_storage_name']).format(
datc._get_name(volume['id']))
poll = True
while poll and not retry >= TIMEOUT:
retry += 1
si = self._issue_api_request(check_url, api_version='2')
if si['op_state'] == 'available':
poll = False
else:
eventlet.sleep(1)
if retry >= TIMEOUT:
raise exception.VolumeDriverException(
message=_('Resource not ready.'))
# ============
# = IP Pools =
# ============
def _get_ip_pool_for_string_ip(self, ip):
"""Takes a string ipaddress and return the ip_pool API object dict """
pool = 'default'
ip_obj = ipaddress.ip_address(six.text_type(ip))
ip_pools = self._issue_api_request('access_network_ip_pools',
api_version='2')
for ip_pool, ipdata in ip_pools.items():
for access, adata in ipdata['network_paths'].items():
if not adata.get('start_ip'):
continue
pool_if = ipaddress.ip_interface(
"/".join((adata['start_ip'], str(adata['netmask']))))
if ip_obj in pool_if.network:
pool = ip_pool
return self._issue_api_request(
"access_network_ip_pools/{}".format(pool), api_version='2')['path']
# =============
# = Templates =
# =============
def _scrape_template(self, policies):
sname = policies['default_storage_name']
vname = policies['default_volume_name']
template = policies['template']
if template:
result = self._issue_api_request(
datc.URL_TEMPLATES['at']().format(template), api_version='2')
sname, st = list(result['storage_templates'].items())[0]
vname = list(st['volume_templates'].keys())[0]
return sname, vname
# =======
# = QoS =
# =======
def _update_qos(self, resource, policies):
url = datc.URL_TEMPLATES['vol_inst'](
policies['default_storage_name'],
policies['default_volume_name']) + '/performance_policy'
url = url.format(datc._get_name(resource['id']))
type_id = resource.get('volume_type_id', None)
if type_id is not None:
# Filter for just QOS policies in result. All of their keys
# should end with "max"
fpolicies = {k: int(v) for k, v in
policies.items() if k.endswith("max")}
# Filter all 0 values from being passed
fpolicies = dict(filter(lambda _v: _v[1] > 0, fpolicies.items()))
if fpolicies:
self._issue_api_request(url, 'post', body=fpolicies,
api_version='2')

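Review note: _snap_poll and _si_poll above share the same bounded-retry shape (an initial settle sleep, then up to TIMEOUT one-second probes of op_state), and the 2.1 module below repeats it. Distilled, the pattern is (an illustrative consolidation, not code in this commit):

    def _poll_available(fetch, timeout=10, interval=1):
        # fetch() returns a dict carrying an 'op_state' field
        for _ in range(timeout):
            if fetch()['op_state'] == 'available':
                return
            eventlet.sleep(interval)
        raise exception.VolumeDriverException(
            message=_('Resource not ready.'))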
View File

@@ -0,0 +1,877 @@
# Copyright 2016 Datera
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import re
import uuid
import eventlet
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from cinder.i18n import _, _LI, _LW, _LE
from cinder import exception
from cinder.volume import utils as volutils
import cinder.volume.drivers.datera.datera_common as datc
LOG = logging.getLogger(__name__)
class DateraApi(object):
# =================
# = Create Volume =
# =================
def _create_volume_2_1(self, volume):
tenant = self._create_tenant(volume)
policies = self._get_policies_for_resource(volume)
num_replicas = int(policies['replica_count'])
storage_name = policies['default_storage_name']
volume_name = policies['default_volume_name']
template = policies['template']
if template:
app_params = (
{
'create_mode': "openstack",
# 'uuid': str(volume['id']),
'name': datc._get_name(volume['id']),
'app_template': '/app_templates/{}'.format(template)
})
else:
app_params = (
{
'create_mode': "openstack",
'uuid': str(volume['id']),
'name': datc._get_name(volume['id']),
'access_control_mode': 'deny_all',
'storage_instances': [
{
'name': storage_name,
'volumes': [
{
'name': volume_name,
'size': volume['size'],
'replica_count': num_replicas,
'snapshot_policies': [
]
}
]
}
]
})
self._issue_api_request(
datc.URL_TEMPLATES['ai'](),
'post',
body=app_params,
api_version='2.1',
tenant=tenant)
self._update_qos_2_1(volume, policies, tenant)
metadata = {}
volume_type = self._get_volume_type_obj(volume)
if volume_type:
metadata.update({datc.M_TYPE: volume_type['name']})
metadata.update(self.HEADER_DATA)
url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
self._store_metadata(url, metadata, "create_volume_2_1", tenant)
# =================
# = Extend Volume =
# =================
def _extend_volume_2_1(self, volume, new_size):
tenant = self._create_tenant(volume)
policies = self._get_policies_for_resource(volume)
template = policies['template']
if template:
LOG.warning(_LW("Volume size not extended due to template binding:"
" volume: %(volume)s, template: %(template)s"),
volume=volume, template=template)
return
# Offline App Instance, if necessary
reonline = False
app_inst = self._issue_api_request(
datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id'])),
api_version='2.1', tenant=tenant)
if app_inst['data']['admin_state'] == 'online':
reonline = True
self._detach_volume_2_1(None, volume)
# Change Volume Size
app_inst = datc._get_name(volume['id'])
data = {
'size': new_size
}
store_name, vol_name = self._scrape_template(policies)
self._issue_api_request(
datc.URL_TEMPLATES['vol_inst'](
store_name, vol_name).format(app_inst),
method='put',
body=data,
api_version='2.1',
tenant=tenant)
# Online Volume, if it was online before
if reonline:
self._create_export_2_1(None, volume, None)
# =================
# = Cloned Volume =
# =================
def _create_cloned_volume_2_1(self, volume, src_vref):
policies = self._get_policies_for_resource(volume)
tenant = self._create_tenant(volume)
store_name, vol_name = self._scrape_template(policies)
src = "/" + datc.URL_TEMPLATES['vol_inst'](
store_name, vol_name).format(datc._get_name(src_vref['id']))
data = {
'create_mode': 'openstack',
'name': datc._get_name(volume['id']),
'uuid': str(volume['id']),
'clone_volume_src': {'path': src},
}
self._issue_api_request(
datc.URL_TEMPLATES['ai'](), 'post', body=data, api_version='2.1',
tenant=tenant)
if volume['size'] > src_vref['size']:
self._extend_volume_2_1(volume, volume['size'])
url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
volume_type = self._get_volume_type_obj(volume)
if volume_type:
vtype = volume_type['name']
else:
vtype = None
metadata = {datc.M_TYPE: vtype,
datc.M_CLONE: datc._get_name(src_vref['id'])}
self._store_metadata(url, metadata, "create_cloned_volume_2_1", tenant)
# =================
# = Delete Volume =
# =================
def _delete_volume_2_1(self, volume):
self.detach_volume(None, volume)
tenant = self._create_tenant(volume)
app_inst = datc._get_name(volume['id'])
try:
self._issue_api_request(
datc.URL_TEMPLATES['ai_inst']().format(app_inst),
method='delete',
api_version='2.1',
tenant=tenant)
except exception.NotFound:
msg = _LI("Tried to delete volume %s, but it was not found in the "
"Datera cluster. Continuing with delete.")
LOG.info(msg, datc._get_name(volume['id']))
# =================
# = Ensure Export =
# =================
def _ensure_export_2_1(self, context, volume, connector):
self.create_export(context, volume, connector)
# =========================
# = Initialize Connection =
# =========================
def _initialize_connection_2_1(self, volume, connector):
# Now online the app_instance (which will online all storage_instances)
multipath = connector.get('multipath', False)
tenant = self._create_tenant(volume)
url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
data = {
'admin_state': 'online'
}
app_inst = self._issue_api_request(
url, method='put', body=data, api_version='2.1', tenant=tenant)[
'data']
storage_instances = app_inst["storage_instances"]
si = storage_instances[0]
portal = si['access']['ips'][0] + ':3260'
iqn = si['access']['iqn']
if multipath:
portals = [p + ':3260' for p in si['access']['ips']]
iqns = [iqn for _ in si['access']['ips']]
lunids = [self._get_lunid() for _ in si['access']['ips']]
result = {
'driver_volume_type': 'iscsi',
'data': {
'target_discovered': False,
'target_iqn': iqn,
'target_iqns': iqns,
'target_portal': portal,
'target_portals': portals,
'target_lun': self._get_lunid(),
'target_luns': lunids,
'volume_id': volume['id'],
'discard': False}}
else:
result = {
'driver_volume_type': 'iscsi',
'data': {
'target_discovered': False,
'target_iqn': iqn,
'target_portal': portal,
'target_lun': self._get_lunid(),
'volume_id': volume['id'],
'discard': False}}
url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
self._store_metadata(url, {}, "initialize_connection_2_1", tenant)
return result
# =================
# = Create Export =
# =================
def _create_export_2_1(self, context, volume, connector):
tenant = self._create_tenant(volume)
url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
data = {
'admin_state': 'online'
}
self._issue_api_request(
url, method='put', body=data, api_version='2.1', tenant=tenant)
# Check if we've already setup everything for this volume
url = (datc.URL_TEMPLATES['si']().format(datc._get_name(volume['id'])))
storage_instances = self._issue_api_request(
url, api_version='2.1', tenant=tenant)
# Handle adding initiator to product if necessary
# Then add initiator to ACL
policies = self._get_policies_for_resource(volume)
store_name, _ = self._scrape_template(policies)
if (connector and
connector.get('initiator') and
not policies['acl_allow_all']):
initiator_name = "OpenStack_{}_{}".format(
self.driver_prefix, str(uuid.uuid4())[:4])
initiator_group = datc.INITIATOR_GROUP_PREFIX + volume['id']
found = False
initiator = connector['initiator']
current_initiators = self._issue_api_request(
'initiators', api_version='2.1', tenant=tenant)
            for initiator_obj in current_initiators.get('data', []):
                # v2.1 responses wrap results in a 'data' list; each
                # initiator's 'id' field is its IQN (assumption based on
                # the create body below)
                if initiator == initiator_obj.get('id'):
                    found = True
                    break
# If we didn't find a matching initiator, create one
if not found:
data = {'id': initiator, 'name': initiator_name}
# Try and create the initiator
# If we get a conflict, ignore it because race conditions
self._issue_api_request("initiators",
method="post",
body=data,
conflict_ok=True,
api_version='2.1',
tenant=tenant)
# Create initiator group with initiator in it
initiator_path = "/initiators/{}".format(initiator)
initiator_group_path = "/initiator_groups/{}".format(
initiator_group)
ig_data = {'name': initiator_group,
'members': [{'path': initiator_path}]}
self._issue_api_request("initiator_groups",
method="post",
body=ig_data,
conflict_ok=True,
api_version='2.1',
tenant=tenant)
# Create ACL with initiator group as reference for each
# storage_instance in app_instance
# TODO(_alastor_): We need to avoid changing the ACLs if the
# template already specifies an ACL policy.
for si in storage_instances['data']:
acl_url = (datc.URL_TEMPLATES['si']() +
"/{}/acl_policy").format(
datc._get_name(volume['id']), si['name'])
existing_acl = self._issue_api_request(acl_url,
method="get",
api_version='2.1',
tenant=tenant)['data']
data = {}
data['initiators'] = existing_acl['initiators']
data['initiator_groups'] = existing_acl['initiator_groups']
data['initiator_groups'].append({"path": initiator_group_path})
self._issue_api_request(acl_url,
method="put",
body=data,
api_version='2.1',
tenant=tenant)
if connector and connector.get('ip'):
# Case where volume_type has non default IP Pool info
if policies['ip_pool'] != 'default':
initiator_ip_pool_path = self._issue_api_request(
"access_network_ip_pools/{}".format(
policies['ip_pool']),
api_version='2.1',
tenant=tenant)['path']
# Fallback to trying reasonable IP based guess
else:
initiator_ip_pool_path = self._get_ip_pool_for_string_ip(
connector['ip'])
ip_pool_url = datc.URL_TEMPLATES['si_inst'](
store_name).format(datc._get_name(volume['id']))
ip_pool_data = {'ip_pool': {'path': initiator_ip_pool_path}}
self._issue_api_request(ip_pool_url,
method="put",
body=ip_pool_data,
api_version='2.1',
tenant=tenant)
# Check to ensure we're ready for go-time
self._si_poll_2_1(volume, policies, tenant)
url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
metadata = {}
# TODO(_alastor_): Figure out what we want to post with a create_export
# call
self._store_metadata(url, metadata, "create_export_2_1", tenant)
# =================
# = Detach Volume =
# =================
def _detach_volume_2_1(self, context, volume, attachment=None):
tenant = self._create_tenant(volume)
url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
data = {
'admin_state': 'offline',
'force': True
}
try:
self._issue_api_request(url, method='put', body=data,
api_version='2.1', tenant=tenant)
except exception.NotFound:
msg = _LI("Tried to detach volume %s, but it was not found in the "
"Datera cluster. Continuing with detach.")
LOG.info(msg, volume['id'])
# TODO(_alastor_): Make acl cleaning multi-attach aware
self._clean_acl_2_1(volume, tenant)
url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
metadata = {}
try:
self._store_metadata(url, metadata, "detach_volume_2_1", tenant)
except exception.NotFound:
# If the object isn't found, we probably are deleting/detaching
# an already deleted object
pass
def _clean_acl_2_1(self, volume, tenant):
policies = self._get_policies_for_resource(volume)
store_name, _ = self._scrape_template(policies)
acl_url = (datc.URL_TEMPLATES["si_inst"](
store_name) + "/acl_policy").format(datc._get_name(volume['id']))
try:
initiator_group = self._issue_api_request(
acl_url, api_version='2.1', tenant=tenant)['data'][
'initiator_groups'][0]['path']
initiator_iqn_path = self._issue_api_request(
initiator_group.lstrip("/"), api_version='2.1', tenant=tenant)[
"data"]["members"][0]["path"]
# Clear out ACL and delete initiator group
self._issue_api_request(acl_url,
method="put",
body={'initiator_groups': []},
api_version='2.1',
tenant=tenant)
self._issue_api_request(initiator_group.lstrip("/"),
method="delete",
api_version='2.1',
tenant=tenant)
if not self._check_for_acl_2(initiator_iqn_path):
self._issue_api_request(initiator_iqn_path.lstrip("/"),
method="delete",
api_version='2.1',
tenant=tenant)
except (IndexError, exception.NotFound):
LOG.debug("Did not find any initiator groups for volume: %s",
volume)
# ===================
# = Create Snapshot =
# ===================
def _create_snapshot_2_1(self, snapshot):
tenant = self._create_tenant(snapshot)
policies = self._get_policies_for_resource(snapshot)
store_name, vol_name = self._scrape_template(policies)
url_template = datc.URL_TEMPLATES['vol_inst'](
store_name, vol_name) + '/snapshots'
url = url_template.format(datc._get_name(snapshot['volume_id']))
snap_params = {
'uuid': snapshot['id'],
}
snap = self._issue_api_request(url, method='post', body=snap_params,
api_version='2.1', tenant=tenant)
snapu = "/".join((url, snap['data']['timestamp']))
self._snap_poll_2_1(snapu, tenant)
# ===================
# = Delete Snapshot =
# ===================
def _delete_snapshot_2_1(self, snapshot):
tenant = self._create_tenant(snapshot)
policies = self._get_policies_for_resource(snapshot)
store_name, vol_name = self._scrape_template(policies)
snap_temp = datc.URL_TEMPLATES['vol_inst'](
store_name, vol_name) + '/snapshots'
snapu = snap_temp.format(datc._get_name(snapshot['volume_id']))
snapshots = self._issue_api_request(snapu,
method='get',
api_version='2.1',
tenant=tenant)
try:
for snap in snapshots['data']:
if snap['uuid'] == snapshot['id']:
url_template = snapu + '/{}'
url = url_template.format(snap['timestamp'])
self._issue_api_request(
url,
method='delete',
api_version='2.1',
tenant=tenant)
break
else:
raise exception.NotFound
except exception.NotFound:
msg = _LI("Tried to delete snapshot %s, but was not found in "
"Datera cluster. Continuing with delete.")
LOG.info(msg, datc._get_name(snapshot['id']))
# ========================
# = Volume From Snapshot =
# ========================
def _create_volume_from_snapshot_2_1(self, volume, snapshot):
tenant = self._create_tenant(volume)
policies = self._get_policies_for_resource(snapshot)
store_name, vol_name = self._scrape_template(policies)
snap_temp = datc.URL_TEMPLATES['vol_inst'](
store_name, vol_name) + '/snapshots'
snapu = snap_temp.format(datc._get_name(snapshot['volume_id']))
snapshots = self._issue_api_request(
snapu, method='get', api_version='2.1', tenant=tenant)
for snap in snapshots['data']:
if snap['uuid'] == snapshot['id']:
found_ts = snap['utc_ts']
break
else:
raise exception.NotFound
snap_url = (snap_temp + '/{}').format(
datc._get_name(snapshot['volume_id']), found_ts)
self._snap_poll_2_1(snap_url, tenant)
src = "/" + snap_url
app_params = (
{
'create_mode': 'openstack',
'uuid': str(volume['id']),
'name': datc._get_name(volume['id']),
'clone_snapshot_src': {'path': src},
})
self._issue_api_request(
datc.URL_TEMPLATES['ai'](),
method='post',
body=app_params,
api_version='2.1',
tenant=tenant)
# ==========
# = Manage =
# ==========
def _manage_existing_2_1(self, volume, existing_ref):
# Only volumes created under the requesting tenant can be managed in
# the v2.1 API. Eg. If tenant A is the tenant for the volume to be
# managed, it must also be tenant A that makes this request.
# This will be fixed in a later API update
tenant = self._create_tenant(volume)
existing_ref = existing_ref['source-name']
if existing_ref.count(":") not in (2, 3):
raise exception.ManageExistingInvalidReference(
_("existing_ref argument must be of this format: "
"tenant:app_inst_name:storage_inst_name:vol_name or "
"app_inst_name:storage_inst_name:vol_name"))
        try:
            (tenant, app_inst_name, storage_inst_name,
             vol_name) = existing_ref.split(":")
        except ValueError:
            # A three-part (tenantless) reference fails the four-name
            # unpacking with ValueError, not TypeError
            app_inst_name, storage_inst_name, vol_name = existing_ref.split(
                ":")
            tenant = None
        LOG.debug("Managing existing Datera volume %s "
                  "Changing name to %s",
                  existing_ref, datc._get_name(volume['id']))
data = {'name': datc._get_name(volume['id'])}
self._issue_api_request(datc.URL_TEMPLATES['ai_inst']().format(
app_inst_name), method='put', body=data, api_version='2.1',
tenant=tenant)
# ===================
# = Manage Get Size =
# ===================
def _manage_existing_get_size_2_1(self, volume, existing_ref):
tenant = self._create_tenant(volume)
existing_ref = existing_ref['source-name']
if existing_ref.count(":") != 2:
raise exception.ManageExistingInvalidReference(
_("existing_ref argument must be of this format:"
"app_inst_name:storage_inst_name:vol_name"))
app_inst_name, si_name, vol_name = existing_ref.split(":")
app_inst = self._issue_api_request(
datc.URL_TEMPLATES['ai_inst']().format(app_inst_name),
api_version='2.1', tenant=tenant)
return self._get_size_2_1(
volume, tenant, app_inst, si_name, vol_name)
def _get_size_2_1(self, volume, tenant=None, app_inst=None, si_name=None,
vol_name=None):
"""Helper method for getting the size of a backend object
If app_inst is provided, we'll just parse the dict to get
the size instead of making a separate http request
"""
policies = self._get_policies_for_resource(volume)
si_name = si_name if si_name else policies['default_storage_name']
vol_name = vol_name if vol_name else policies['default_volume_name']
if not app_inst:
vol_url = datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id']))
app_inst = self._issue_api_request(
vol_url, api_version='2.1', tenant=tenant)['data']
if 'data' in app_inst:
app_inst = app_inst['data']
sis = app_inst['storage_instances']
        found_si = None
        for si in sis:
            if si['name'] == si_name:
                found_si = si
                break
        if found_si is None:
            raise exception.NotFound(
                _("Storage instance %s not found") % si_name)
        found_vol = None
        for vol in found_si['volumes']:
            if vol['name'] == vol_name:
                found_vol = vol
        if found_vol is None:
            raise exception.NotFound(
                _("Volume %s not found") % vol_name)
        size = found_vol['size']
        return size
# =========================
# = Get Manageable Volume =
# =========================
def _get_manageable_volumes_2_1(self, cinder_volumes, marker, limit,
offset, sort_keys, sort_dirs):
# Use the first volume to determine the tenant we're working under
if cinder_volumes:
tenant = self._create_tenant(cinder_volumes[0])
else:
tenant = None
LOG.debug("Listing manageable Datera volumes")
app_instances = self._issue_api_request(
datc.URL_TEMPLATES['ai'](), api_version='2.1',
tenant=tenant)['data']
results = []
cinder_volume_ids = [vol['id'] for vol in cinder_volumes]
for ai in app_instances:
ai_name = ai['name']
reference = None
size = None
safe_to_manage = False
reason_not_safe = ""
cinder_id = None
extra_info = None
            if re.match(datc.UUID4_RE, ai_name):
                # Slice off the literal prefix; lstrip() strips a character
                # set, not a prefix string
                cinder_id = ai_name[len(datc.OS_PREFIX):]
            unprefixed = (ai_name[len(datc.OS_PREFIX):]
                          if ai_name.startswith(datc.OS_PREFIX) else ai_name)
            if not cinder_id and unprefixed not in cinder_volume_ids:
safe_to_manage, reason_not_safe = self._is_manageable_2_1(ai)
if safe_to_manage:
si = list(ai['storage_instances'].values())[0]
si_name = si['name']
vol = list(si['volumes'].values())[0]
vol_name = vol['name']
size = vol['size']
reference = {"source-name": "{}:{}:{}".format(
ai_name, si_name, vol_name)}
results.append({
'reference': reference,
'size': size,
'safe_to_manage': safe_to_manage,
'reason_not_safe': reason_not_safe,
'cinder_id': cinder_id,
'extra_info': extra_info})
page_results = volutils.paginate_entries_list(
results, marker, limit, offset, sort_keys, sort_dirs)
return page_results
def _is_manageable_2_1(self, app_inst):
if len(app_inst['storage_instances']) == 1:
si = list(app_inst['storage_instances'].values())[0]
if len(si['volumes']) == 1:
return (True, "")
return (False,
"App Instance has more than one storage instance or volume")
# ============
# = Unmanage =
# ============
def _unmanage_2_1(self, volume):
tenant = self._create_tenant(volume)
LOG.debug("Unmanaging Cinder volume %s. Changing name to %s",
volume['id'], datc._get_unmanaged(volume['id']))
data = {'name': datc._get_unmanaged(volume['id'])}
self._issue_api_request(datc.URL_TEMPLATES['ai_inst']().format(
datc._get_name(volume['id'])),
method='put',
body=data,
api_version='2.1',
tenant=tenant)
# ================
# = Volume Stats =
# ================
# =========
# = Login =
# =========
# ===========
# = Tenancy =
# ===========
def _create_tenant(self, volume=None):
# Create the Datera tenant if specified in the config
# Otherwise use the tenant provided
if self.tenant_id is None:
tenant = None
elif self.tenant_id.lower() == "map" and volume:
# Convert dashless uuid to uuid with dashes
# Eg: 0e33e95a9b154d348c675a1d8ea5b651 -->
# 0e33e95a-9b15-4d34-8c67-5a1d8ea5b651
tenant = datc._get_name(str(uuid.UUID(volume["project_id"])))
elif self.tenant_id.lower() == "map" and not volume:
tenant = None
else:
tenant = self.tenant_id
if tenant:
params = {'name': tenant}
self._issue_api_request(
'tenants', method='post', body=params, conflict_ok=True,
api_version='2.1')
return tenant
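    # Mapping sketch (the project ID value is illustrative): with
    # datera_tenant_id = 'Map', a volume whose project_id is
    # '0e33e95a9b154d348c675a1d8ea5b651' yields the tenant
    # 'OS-0e33e95a-9b15-4d34-8c67-5a1d8ea5b651', which
    # _issue_api_request() later sends as the '/root/<tenant>' header.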
# ============
# = Metadata =
# ============
def _get_metadata(self, obj_url, tenant):
url = "/".join((obj_url.rstrip("/"), "metadata"))
        mdata = self._issue_api_request(
            url, api_version="2.1", tenant=tenant).get("data") or {}
# Make sure we only grab the relevant keys
filter_mdata = {k: json.loads(mdata[k])
for k in mdata if k in datc.M_KEYS}
return filter_mdata
def _store_metadata(self, obj_url, data, calling_func_name, tenant):
mdata = self._get_metadata(obj_url, tenant)
new_call_entry = (calling_func_name, self.HEADER_DATA['Datera-Driver'])
if mdata.get(datc.M_CALL):
mdata[datc.M_CALL].append(new_call_entry)
else:
mdata[datc.M_CALL] = [new_call_entry]
        mdata.update(data)
        mdata.update(self.HEADER_DATA)
        # Serialize the merged metadata (not just the new data) so the
        # accumulated call history is persisted
        data_s = {k: json.dumps(v) for k, v in mdata.items()}
url = "/".join((obj_url.rstrip("/"), "metadata"))
return self._issue_api_request(url, method="put", api_version="2.1",
body=data_s, tenant=tenant)
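    # Shape sketch of the stored metadata (values are hypothetical): a
    # flat dict of JSON-encoded strings keyed by datc.M_KEYS, e.g. after
    # one clone call:
    #   {'cinder_volume_type': '"mytype"',
    #    'cinder_clone_from': '"OS-<src-uuid>"',
    #    'cinder_calls': '[["create_cloned_volume_2_1",
    #                      "OpenStack-Cinder-2.3"]]'}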
# =========
# = Login =
# =========
def _login_2_1(self):
"""Use the san_login and san_password to set token."""
body = {
'name': self.username,
'password': self.password
}
# Unset token now, otherwise potential expired token will be sent
# along to be used for authorization when trying to login.
self.datera_api_token = None
try:
LOG.debug('Getting Datera auth token.')
results = self._issue_api_request(
'login', 'put', body=body, sensitive=True, api_version='2.1',
tenant=None)
self.datera_api_token = results['key']
except exception.NotAuthorized:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Logging into the Datera cluster failed. Please '
'check your username and password set in the '
'cinder.conf and start the cinder-volume '
'service again.'))
# ===========
# = Polling =
# ===========
def _snap_poll_2_1(self, url, tenant):
eventlet.sleep(datc.DEFAULT_SNAP_SLEEP)
TIMEOUT = 10
retry = 0
poll = True
        while poll and retry < TIMEOUT:
retry += 1
snap = self._issue_api_request(url,
api_version='2.1',
tenant=tenant)['data']
if snap['op_state'] == 'available':
poll = False
else:
eventlet.sleep(1)
if retry >= TIMEOUT:
raise exception.VolumeDriverException(
message=_('Snapshot not ready.'))
def _si_poll_2_1(self, volume, policies, tenant):
        # Initial sleep (DEFAULT_SI_SLEEP seconds) required for some
        # Datera versions
eventlet.sleep(datc.DEFAULT_SI_SLEEP)
TIMEOUT = 10
retry = 0
check_url = datc.URL_TEMPLATES['si_inst'](
policies['default_storage_name']).format(
datc._get_name(volume['id']))
poll = True
        while poll and retry < TIMEOUT:
retry += 1
si = self._issue_api_request(check_url,
api_version='2.1',
tenant=tenant)['data']
if si['op_state'] == 'available':
poll = False
else:
eventlet.sleep(1)
if retry >= TIMEOUT:
raise exception.VolumeDriverException(
message=_('Resource not ready.'))
# ================
# = Volume Stats =
# ================
def _get_volume_stats_2_1(self, refresh=False):
if refresh or not self.cluster_stats:
try:
LOG.debug("Updating cluster stats info.")
results = self._issue_api_request(
'system', api_version='2.1')['data']
if 'uuid' not in results:
LOG.error(_LE(
'Failed to get updated stats from Datera Cluster.'))
backend_name = self.configuration.safe_get(
'volume_backend_name')
stats = {
'volume_backend_name': backend_name or 'Datera',
'vendor_name': 'Datera',
'driver_version': self.VERSION,
'storage_protocol': 'iSCSI',
'total_capacity_gb': (
int(results['total_capacity']) / units.Gi),
'free_capacity_gb': (
int(results['available_capacity']) / units.Gi),
'reserved_percentage': 0,
'QoS_support': True,
}
self.cluster_stats = stats
except exception.DateraAPIException:
LOG.error(_LE('Failed to get updated stats from Datera '
'cluster.'))
return self.cluster_stats
# =======
# = QoS =
# =======
def _update_qos_2_1(self, resource, policies, tenant):
url = datc.URL_TEMPLATES['vol_inst'](
policies['default_storage_name'],
policies['default_volume_name']) + '/performance_policy'
url = url.format(datc._get_name(resource['id']))
type_id = resource.get('volume_type_id', None)
if type_id is not None:
# Filter for just QOS policies in result. All of their keys
# should end with "max"
fpolicies = {k: int(v) for k, v in
policies.items() if k.endswith("max")}
# Filter all 0 values from being passed
fpolicies = dict(filter(lambda _v: _v[1] > 0, fpolicies.items()))
if fpolicies:
self._issue_api_request(url, 'post', body=fpolicies,
api_version='2.1', tenant=tenant)
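    # Worked example of the filtering above (values illustrative): given
    # policies {'read_iops_max': 500, 'total_iops_max': 0,
    # 'replica_count': 3}, only '*max' keys survive the first filter and
    # zero values are dropped by the second, so the body posted to
    # .../performance_policy is {'read_iops_max': 500}.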

View File

@ -0,0 +1,211 @@
# Copyright 2016 Datera
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import re
import six
import time
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _, _LI, _LE
LOG = logging.getLogger(__name__)
OS_PREFIX = "OS-"
UNMANAGE_PREFIX = "UNMANAGED-"
# Taken from this SO post :
# http://stackoverflow.com/a/18516125
# Using old-style string formatting because of the nature of the regex
# conflicting with new-style curly braces
UUID4_STR_RE = ("%s[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab]"
"[a-f0-9]{3}-?[a-f0-9]{12}")
UUID4_RE = re.compile(UUID4_STR_RE % OS_PREFIX)
# Recursive dict to assemble basic url structure for the most common
# API URL endpoints. Most others are constructed from these
URL_TEMPLATES = {
'ai': lambda: 'app_instances',
'ai_inst': lambda: (URL_TEMPLATES['ai']() + '/{}'),
'si': lambda: (URL_TEMPLATES['ai_inst']() + '/storage_instances'),
'si_inst': lambda storage_name: (
(URL_TEMPLATES['si']() + '/{}').format(
'{}', storage_name)),
'vol': lambda storage_name: (
(URL_TEMPLATES['si_inst'](storage_name) + '/volumes')),
'vol_inst': lambda storage_name, volume_name: (
(URL_TEMPLATES['vol'](storage_name) + '/{}').format(
'{}', volume_name)),
'at': lambda: 'app_templates/{}'}
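# Expansion sketch (the 'OS-abc123' value is illustrative): each template
# leaves a '{}' placeholder for the app_instance name, e.g.
#   URL_TEMPLATES['ai_inst']() -> 'app_instances/{}'
#   URL_TEMPLATES['si_inst']('storage-1')
#       -> 'app_instances/{}/storage_instances/storage-1'
#   URL_TEMPLATES['vol_inst']('storage-1', 'volume-1').format('OS-abc123')
#       -> 'app_instances/OS-abc123/storage_instances/storage-1'
#          '/volumes/volume-1'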
DEFAULT_SI_SLEEP = 10
DEFAULT_SNAP_SLEEP = 5
INITIATOR_GROUP_PREFIX = "IG-"
API_VERSIONS = ["2", "2.1"]
API_TIMEOUT = 20
###############
# METADATA KEYS
###############
M_TYPE = 'cinder_volume_type'
M_CALL = 'cinder_calls'
M_CLONE = 'cinder_clone_from'
M_MANAGED = 'cinder_managed'
M_KEYS = [M_TYPE, M_CALL, M_CLONE, M_MANAGED]
def _get_name(name):
return "".join((OS_PREFIX, name))
def _get_unmanaged(name):
return "".join((UNMANAGE_PREFIX, name))
def _authenticated(func):
"""Ensure the driver is authenticated to make a request.
    In do_setup() we fetch an auth token and store it. If the token has
    expired by the time we make an API request, we fetch a new one and
    retry the request.
"""
@functools.wraps(func)
def func_wrapper(driver, *args, **kwargs):
try:
return func(driver, *args, **kwargs)
except exception.NotAuthorized:
# Prevent recursion loop. After the driver arg is the
# resource_type arg from _issue_api_request(). If attempt to
# login failed, we should just give up.
if args[0] == 'login':
raise
# Token might've expired, get a new one, try again.
driver.login()
return func(driver, *args, **kwargs)
return func_wrapper
def _api_lookup(func):
"""Perform a dynamic API implementation lookup for a call
Naming convention follows this pattern:
# original_func(args) --> _original_func_X_?Y?(args)
# where X and Y are the major and minor versions of the latest
# supported API version
# From the Datera box we've determined that it supports API
# versions ['2', '2.1']
# This is the original function call
@_api_lookup
def original_func(arg1, arg2):
print("I'm a shim, this won't get executed!")
pass
# This is the function that is actually called after determining
# the correct API version to use
def _original_func_2_1(arg1, arg2):
some_version_2_1_implementation_here()
# This is the function that would be called if the previous function
# did not exist:
def _original_func_2(arg1, arg2):
some_version_2_implementation_here()
# This function would NOT be called, because the connected Datera box
# does not support the 1.5 version of the API
def _original_func_1_5(arg1, arg2):
some_version_1_5_implementation_here()
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
obj = args[0]
api_versions = _get_supported_api_versions(obj)
api_version = None
index = -1
while True:
try:
api_version = api_versions[index]
except (IndexError, KeyError):
msg = _("No compatible API version found for this product: "
"api_versions -> %(api_version)s, %(func)s")
LOG.error(msg, api_version=api_version, func=func)
raise exception.DateraAPIException(msg % (api_version, func))
# Py27
try:
name = "_" + "_".join(
(func.func_name, api_version.replace(".", "_")))
# Py3+
except AttributeError:
name = "_" + "_".join(
(func.__name__, api_version.replace(".", "_")))
try:
LOG.info(_LI("Trying method: %s"), name)
return getattr(obj, name)(*args[1:], **kwargs)
except AttributeError as e:
# If we find the attribute name in the error message
# then we continue otherwise, raise to prevent masking
# errors
if name not in six.text_type(e):
raise
else:
LOG.info(e)
index -= 1
except exception.DateraAPIException as e:
if "UnsupportedVersionError" in six.text_type(e):
index -= 1
else:
raise
return wrapper
def _get_supported_api_versions(driver):
t = time.time()
    # The cache is fresh if it was refreshed within the last API_TIMEOUT
    # seconds; api_check records when it was last populated
    if driver.api_cache and t - driver.api_check < API_TIMEOUT:
return driver.api_cache
results = []
host = driver.configuration.san_ip
port = driver.configuration.datera_api_port
client_cert = driver.configuration.driver_client_cert
client_cert_key = driver.configuration.driver_client_cert_key
cert_data = None
header = {'Content-Type': 'application/json; charset=utf-8',
'Datera-Driver': 'OpenStack-Cinder-{}'.format(driver.VERSION)}
protocol = 'http'
if client_cert:
protocol = 'https'
cert_data = (client_cert, client_cert_key)
try:
url = '%s://%s:%s/api_versions' % (protocol, host, port)
resp = driver._request(url, "get", None, header, cert_data)
data = resp.json()
results = [elem.strip("v") for elem in data['api_versions']]
except (exception.DateraAPIException, KeyError):
# Fallback to pre-endpoint logic
for version in API_VERSIONS[0:-1]:
url = '%s://%s:%s/v%s' % (protocol, host, port, version)
resp = driver._request(url, "get", None, header, cert_data)
if ("api_req" in resp.json() or
str(resp.json().get("code")) == "99"):
results.append(version)
else:
LOG.error(_LE("No supported API versions available, "
"Please upgrade your Datera EDF software"))
    # Cache the result so repeated lookups within API_TIMEOUT seconds can
    # skip the network round trip
    driver.api_cache = results
    driver.api_check = time.time()
    return results

View File

@ -0,0 +1,739 @@
# Copyright 2016 Datera
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import time
import uuid
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
import requests
import six
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder import utils
from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume import volume_types
import cinder.volume.drivers.datera.datera_api2 as api2
import cinder.volume.drivers.datera.datera_api21 as api21
import cinder.volume.drivers.datera.datera_common as datc
LOG = logging.getLogger(__name__)
d_opts = [
cfg.StrOpt('datera_api_port',
default='7717',
help='Datera API port.'),
cfg.StrOpt('datera_api_version',
default='2',
deprecated_for_removal=True,
help='Datera API version.'),
    cfg.IntOpt('datera_503_timeout',
               default=120,
               help='Timeout for HTTP 503 retry messages'),
    cfg.IntOpt('datera_503_interval',
               default=5,
               help='Interval between 503 retries'),
cfg.BoolOpt('datera_debug',
default=False,
help="True to set function arg and return logging"),
cfg.BoolOpt('datera_debug_replica_count_override',
default=False,
help="ONLY FOR DEBUG/TESTING PURPOSES\n"
"True to set replica_count to 1"),
cfg.StrOpt('datera_tenant_id',
default=None,
help="If set to 'Map' --> OpenStack project ID will be mapped "
"implicitly to Datera tenant ID\n"
"If set to 'None' --> Datera tenant ID will not be used "
"during volume provisioning\n"
"If set to anything else --> Datera tenant ID will be the "
"provided value")
]
CONF = cfg.CONF
CONF.import_opt('driver_use_ssl', 'cinder.volume.driver')
CONF.register_opts(d_opts)
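# A hedged cinder.conf sketch combining these options with the SAN options
# consumed by DateraDriver.__init__ below (all values are placeholders,
# not recommendations):
#
#   [datera]
#   volume_driver = cinder.volume.drivers.datera.datera_iscsi.DateraDriver
#   san_ip = 10.0.0.5
#   san_login = admin
#   san_password = secret
#   datera_tenant_id = Map
#   datera_503_timeout = 120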
@interface.volumedriver
@six.add_metaclass(utils.TraceWrapperWithABCMetaclass)
class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi):
"""The OpenStack Datera Driver
Version history:
1.0 - Initial driver
1.1 - Look for lun-0 instead of lun-1.
2.0 - Update For Datera API v2
2.1 - Multipath, ACL and reorg
    2.2 - Capabilities List, Extended Volume-Type Support
Naming convention change,
Volume Manage/Unmanage support
2.3 - Templates, Tenants, Snapshot Polling,
2.1 Api Version Support, Restructure
"""
VERSION = '2.3'
CI_WIKI_NAME = "datera-ci"
HEADER_DATA = {'Datera-Driver': 'OpenStack-Cinder-{}'.format(VERSION)}
def __init__(self, *args, **kwargs):
super(DateraDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(d_opts)
self.username = self.configuration.san_login
self.password = self.configuration.san_password
self.cluster_stats = {}
self.datera_api_token = None
self.interval = self.configuration.datera_503_interval
self.retry_attempts = (self.configuration.datera_503_timeout /
self.interval)
self.driver_prefix = str(uuid.uuid4())[:4]
self.datera_debug = self.configuration.datera_debug
self.datera_api_versions = []
if self.datera_debug:
utils.setup_tracing(['method'])
self.tenant_id = self.configuration.datera_tenant_id
if self.tenant_id and self.tenant_id.lower() == 'none':
self.tenant_id = None
self.api_check = time.time()
self.api_cache = []
def do_setup(self, context):
# If we can't authenticate through the old and new method, just fail
# now.
if not all([self.username, self.password]):
msg = _("san_login and/or san_password is not set for Datera "
"driver in the cinder.conf. Set this information and "
"start the cinder-volume service again.")
LOG.error(msg)
raise exception.InvalidInput(msg)
self.login()
self._create_tenant()
# =================
# = Create Volume =
# =================
@datc._api_lookup
def create_volume(self, volume):
"""Create a logical volume."""
pass
# =================
# = Extend Volume =
# =================
@datc._api_lookup
def extend_volume(self, volume, new_size):
pass
# =================
# = Cloned Volume =
# =================
@datc._api_lookup
def create_cloned_volume(self, volume, src_vref):
pass
# =================
# = Delete Volume =
# =================
@datc._api_lookup
def delete_volume(self, volume):
pass
# =================
# = Ensure Export =
# =================
@datc._api_lookup
def ensure_export(self, context, volume, connector):
"""Gets the associated account, retrieves CHAP info and updates."""
# =========================
# = Initialize Connection =
# =========================
@datc._api_lookup
def initialize_connection(self, volume, connector):
pass
# =================
# = Create Export =
# =================
@datc._api_lookup
def create_export(self, context, volume, connector):
pass
# =================
# = Detach Volume =
# =================
@datc._api_lookup
def detach_volume(self, context, volume, attachment=None):
pass
# ===================
# = Create Snapshot =
# ===================
@datc._api_lookup
def create_snapshot(self, snapshot):
pass
# ===================
# = Delete Snapshot =
# ===================
@datc._api_lookup
def delete_snapshot(self, snapshot):
pass
# ========================
# = Volume From Snapshot =
# ========================
@datc._api_lookup
def create_volume_from_snapshot(self, volume, snapshot):
pass
# ==========
# = Manage =
# ==========
@datc._api_lookup
def manage_existing(self, volume, existing_ref):
"""Manage an existing volume on the Datera backend
        The existing_ref must be either the current name or Datera UUID of
        an app_instance on the Datera backend in a colon-separated list with
        the storage instance name and volume name. This means only
        single storage instances and single volumes are supported for
        management by Cinder.
Eg.
(existing_ref['source-name'] ==
tenant:app_inst_name:storage_inst_name:vol_name)
if using Datera 2.1 API
or
(existing_ref['source-name'] ==
app_inst_name:storage_inst_name:vol_name)
if using 2.0 API
:param volume: Cinder volume to manage
:param existing_ref: Driver-specific information used to identify a
volume
"""
pass
# ===================
# = Manage Get Size =
# ===================
@datc._api_lookup
def manage_existing_get_size(self, volume, existing_ref):
"""Get the size of an unmanaged volume on the Datera backend
        The existing_ref must be either the current name or Datera UUID of
        an app_instance on the Datera backend in a colon-separated list with
        the storage instance name and volume name. This means only
        single storage instances and single volumes are supported for
        management by Cinder.
Eg.
existing_ref == app_inst_name:storage_inst_name:vol_name
:param volume: Cinder volume to manage
:param existing_ref: Driver-specific information used to identify a
volume on the Datera backend
"""
pass
# =========================
# = Get Manageable Volume =
# =========================
@datc._api_lookup
def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
sort_keys, sort_dirs):
"""List volumes on the backend available for management by Cinder.
Returns a list of dictionaries, each specifying a volume in the host,
with the following keys:
- reference (dictionary): The reference for a volume, which can be
passed to "manage_existing".
- size (int): The size of the volume according to the storage
backend, rounded up to the nearest GB.
- safe_to_manage (boolean): Whether or not this volume is safe to
manage according to the storage backend. For example, is the volume
in use or invalid for any reason.
- reason_not_safe (string): If safe_to_manage is False, the reason why.
- cinder_id (string): If already managed, provide the Cinder ID.
- extra_info (string): Any extra information to return to the user
:param cinder_volumes: A list of volumes in this host that Cinder
currently manages, used to determine if
a volume is manageable or not.
:param marker: The last item of the previous page; we return the
next results after this value (after sorting)
:param limit: Maximum number of items to return
:param offset: Number of items to skip after marker
:param sort_keys: List of keys to sort results by (valid keys are
'identifier' and 'size')
:param sort_dirs: List of directions to sort by, corresponding to
sort_keys (valid directions are 'asc' and 'desc')
"""
pass
# ============
# = Unmanage =
# ============
@datc._api_lookup
def unmanage(self, volume):
"""Unmanage a currently managed volume in Cinder
:param volume: Cinder volume to unmanage
"""
pass
# ================
# = Volume Stats =
# ================
@datc._api_lookup
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, run update first.
The name is a bit misleading as
the majority of the data here is cluster
data.
"""
pass
# =========
# = Login =
# =========
@datc._api_lookup
def login(self):
pass
# =======
# = QoS =
# =======
def _update_qos(self, resource, policies):
url = datc.URL_TEMPLATES['vol_inst'](
policies['default_storage_name'],
policies['default_volume_name']) + '/performance_policy'
url = url.format(datc._get_name(resource['id']))
type_id = resource.get('volume_type_id', None)
if type_id is not None:
# Filter for just QOS policies in result. All of their keys
# should end with "max"
fpolicies = {k: int(v) for k, v in
policies.items() if k.endswith("max")}
# Filter all 0 values from being passed
fpolicies = dict(filter(lambda _v: _v[1] > 0, fpolicies.items()))
if fpolicies:
self._issue_api_request(url, 'post', body=fpolicies,
api_version='2')
def _get_lunid(self):
return 0
# ============================
# = Volume-Types/Extra-Specs =
# ============================
def _init_vendor_properties(self):
"""Create a dictionary of vendor unique properties.
This method creates a dictionary of vendor unique properties
and returns both created dictionary and vendor name.
Returned vendor name is used to check for name of vendor
unique properties.
- Vendor name shouldn't include colon(:) because of the separator
and it is automatically replaced by underscore(_).
ex. abc:d -> abc_d
- Vendor prefix is equal to vendor name.
ex. abcd
- Vendor unique properties must start with vendor prefix + ':'.
ex. abcd:maxIOPS
Each backend driver needs to override this method to expose
its own properties using _set_property() like this:
self._set_property(
properties,
"vendorPrefix:specific_property",
"Title of property",
_("Description of property"),
"type")
: return dictionary of vendor unique properties
: return vendor name
prefix: DF --> Datera Fabric
"""
properties = {}
if self.configuration.get('datera_debug_replica_count_override'):
replica_count = 1
else:
replica_count = 3
self._set_property(
properties,
"DF:replica_count",
"Datera Volume Replica Count",
_("Specifies number of replicas for each volume. Can only be "
"increased once volume is created"),
"integer",
minimum=1,
default=replica_count)
self._set_property(
properties,
"DF:acl_allow_all",
"Datera ACL Allow All",
_("True to set acl 'allow_all' on volumes created. Cannot be "
"changed on volume once set"),
"boolean",
default=False)
self._set_property(
properties,
"DF:ip_pool",
"Datera IP Pool",
_("Specifies IP pool to use for volume"),
"string",
default="default")
self._set_property(
properties,
"DF:template",
"Datera Template",
_("Specifies Template to use for volume provisioning"),
"string",
default="")
# ###### QoS Settings ###### #
self._set_property(
properties,
"DF:read_bandwidth_max",
"Datera QoS Max Bandwidth Read",
_("Max read bandwidth setting for volume qos, "
"use 0 for unlimited"),
"integer",
minimum=0,
default=0)
self._set_property(
properties,
"DF:default_storage_name",
"Datera Default Storage Instance Name",
_("The name to use for storage instances created"),
"string",
default="storage-1")
self._set_property(
properties,
"DF:default_volume_name",
"Datera Default Volume Name",
_("The name to use for volumes created"),
"string",
default="volume-1")
self._set_property(
properties,
"DF:write_bandwidth_max",
"Datera QoS Max Bandwidth Write",
_("Max write bandwidth setting for volume qos, "
"use 0 for unlimited"),
"integer",
minimum=0,
default=0)
self._set_property(
properties,
"DF:total_bandwidth_max",
"Datera QoS Max Bandwidth Total",
_("Max total bandwidth setting for volume qos, "
"use 0 for unlimited"),
"integer",
minimum=0,
default=0)
self._set_property(
properties,
"DF:read_iops_max",
"Datera QoS Max iops Read",
_("Max read iops setting for volume qos, "
"use 0 for unlimited"),
"integer",
minimum=0,
default=0)
self._set_property(
properties,
"DF:write_iops_max",
"Datera QoS Max IOPS Write",
_("Max write iops setting for volume qos, "
"use 0 for unlimited"),
"integer",
minimum=0,
default=0)
self._set_property(
properties,
"DF:total_iops_max",
"Datera QoS Max IOPS Total",
_("Max total iops setting for volume qos, "
"use 0 for unlimited"),
"integer",
minimum=0,
default=0)
# ###### End QoS Settings ###### #
return properties, 'DF'
def _get_volume_type_obj(self, resource):
type_id = resource.get('volume_type_id', None)
# Handle case of volume with no type. We still want the
# specified defaults from above
if type_id:
ctxt = context.get_admin_context()
volume_type = volume_types.get_volume_type(ctxt, type_id)
else:
volume_type = None
return volume_type
def _get_policies_for_resource(self, resource):
"""Get extra_specs and qos_specs of a volume_type.
This fetches the scoped keys from the volume type. Anything set from
qos_specs will override key/values set from extra_specs.
"""
volume_type = self._get_volume_type_obj(resource)
# Handle case of volume with no type. We still want the
# specified defaults from above
if volume_type:
specs = volume_type.get('extra_specs')
else:
specs = {}
# Set defaults:
        # Strip the scoped 'DF:' prefix; split on the first colon rather
        # than lstrip(), which strips a character set, not a prefix
        policies = {k.split(':', 1)[-1]: str(v['default']) for (k, v)
                    in self._init_vendor_properties()[0].items()}
if volume_type:
# Populate updated value
for key, value in specs.items():
if ':' in key:
fields = key.split(':')
key = fields[1]
policies[key] = value
qos_specs_id = volume_type.get('qos_specs_id')
if qos_specs_id is not None:
ctxt = context.get_admin_context()
qos_kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
if qos_kvs:
policies.update(qos_kvs)
        # Cast everything except booleans to ints where possible
for k, v in policies.items():
# Handle String Boolean case
if v == 'True' or v == 'False':
policies[k] = policies[k] == 'True'
continue
# Int cast
try:
policies[k] = int(v)
except ValueError:
pass
return policies
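    # Worked example (extra_specs values illustrative): a volume type with
    # extra_specs {'DF:replica_count': '5', 'DF:acl_allow_all': 'True'}
    # starts from the vendor defaults above and, after scoped-key
    # stripping and casting, yields
    # {'replica_count': 5, 'acl_allow_all': True, 'ip_pool': 'default',
    #  'template': '', ...}.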
# ================
# = API Requests =
# ================
def _request(self, connection_string, method, payload, header, cert_data):
LOG.debug("Endpoint for Datera API call: %s", connection_string)
try:
response = getattr(requests, method)(connection_string,
data=payload, headers=header,
verify=False, cert=cert_data)
return response
except requests.exceptions.RequestException as ex:
            msg = _(
                'Failed to make a request to Datera cluster endpoint due '
                'to the following reason: %s') % six.text_type(ex)
LOG.error(msg)
raise exception.DateraAPIException(msg)
def _raise_response(self, response):
msg = _('Request to Datera cluster returned bad status:'
' %(status)s | %(reason)s') % {
'status': response.status_code,
'reason': response.reason}
LOG.error(msg)
raise exception.DateraAPIException(msg)
def _handle_bad_status(self,
response,
connection_string,
method,
payload,
header,
cert_data,
sensitive=False,
conflict_ok=False):
if (response.status_code == 400 and
connection_string.endswith("api_versions")):
# Raise the exception, but don't log any error. We'll just fall
# back to the old style of determining API version. We make this
# request a lot, so logging it is just noise
raise exception.DateraAPIException
if not sensitive:
LOG.debug(("Datera Response URL: %s\n"
"Datera Response Payload: %s\n"
"Response Object: %s\n"),
response.url,
payload,
vars(response))
if response.status_code == 404:
raise exception.NotFound(response.json()['message'])
elif response.status_code in [403, 401]:
raise exception.NotAuthorized()
elif response.status_code == 409 and conflict_ok:
# Don't raise, because we're expecting a conflict
pass
elif response.status_code == 503:
            current_retry = 0
            while current_retry <= self.retry_attempts:
                LOG.debug("Datera 503 response, trying request again")
                eventlet.sleep(self.interval)
                resp = self._request(connection_string,
                                     method,
                                     payload,
                                     header,
                                     cert_data)
                if resp.ok:
                    # Return the retried response's body, not the body of
                    # the original 503 response
                    return resp.json()
                elif resp.status_code != 503:
                    self._raise_response(resp)
                # Count the attempt so the loop actually terminates
                current_retry += 1
else:
self._raise_response(response)
@datc._authenticated
def _issue_api_request(self, resource_url, method='get', body=None,
sensitive=False, conflict_ok=False,
api_version='2', tenant=None):
"""All API requests to Datera cluster go through this method.
:param resource_url: the url of the resource
:param method: the request verb
:param body: a dict with options for the action_type
:param sensitive: Bool, whether request should be obscured from logs
:param conflict_ok: Bool, True to suppress ConflictError exceptions
during this request
:param api_version: The Datera api version for the request
:param tenant: The tenant header value for the request (only applicable
to 2.1 product versions and later)
:returns: a dict of the response from the Datera cluster
"""
host = self.configuration.san_ip
port = self.configuration.datera_api_port
api_token = self.datera_api_token
payload = json.dumps(body, ensure_ascii=False)
        payload = payload.encode('utf-8')
header = {'Content-Type': 'application/json; charset=utf-8'}
header.update(self.HEADER_DATA)
protocol = 'http'
if self.configuration.driver_use_ssl:
protocol = 'https'
if api_token:
header['Auth-Token'] = api_token
if tenant == "all":
header['tenant'] = tenant
elif tenant and '/root' not in tenant:
header['tenant'] = "".join(("/root/", tenant))
elif tenant and '/root' in tenant:
header['tenant'] = tenant
elif self.tenant_id and self.tenant_id.lower() != "map":
header['tenant'] = self.tenant_id
client_cert = self.configuration.driver_client_cert
client_cert_key = self.configuration.driver_client_cert_key
cert_data = None
if client_cert:
protocol = 'https'
cert_data = (client_cert, client_cert_key)
connection_string = '%s://%s:%s/v%s/%s' % (protocol, host, port,
api_version, resource_url)
response = self._request(connection_string,
method,
payload,
header,
cert_data)
        data = response.json()
        if not response.ok:
            retried = self._handle_bad_status(response,
                                              connection_string,
                                              method,
                                              payload,
                                              header,
                                              cert_data,
                                              conflict_ok=conflict_ok)
            # A successful 503 retry returns the fresh response body;
            # propagate it instead of the original error body
            if retried is not None:
                data = retried
        return data
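    # Composition sketch (token and tenant values illustrative): with
    # driver_use_ssl unset, _issue_api_request('app_instances',
    # api_version='2.1', tenant='foo') issues
    #     GET http://<san_ip>:7717/v2.1/app_instances
    # with headers including 'tenant: /root/foo' and, once login() has
    # run, 'Auth-Token: <token>'.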

View File

@ -163,6 +163,8 @@ MAPPING = {
    'cinder.volume.drivers.dell_emc.xtremio.XtremIOISCSIDriver',
    'cinder.volume.drivers.emc.xtremio.XtremIOFibreChannelDriver':
    'cinder.volume.drivers.dell_emc.xtremio.XtremIOFCDriver',
    'cinder.volume.drivers.datera.DateraDriver':
    'cinder.volume.drivers.datera.datera_iscsi.DateraDriver',
}

View File

@ -0,0 +1,15 @@
---
features:
- Added Datera EDF API 2.1 support.
- Added Datera Multi-Tenancy Support.
- Added Datera Template Support.
- Broke Datera driver up into modules.
upgrade:
- Datera driver location has changed from cinder.volume.drivers
.datera.DateraDriver to cinder.volume.drivers.datera.datera_iscsi
.DateraDriver.
deprecations:
- Deprecated datera_api_version option.
- Removed datera_acl_allow_all option.
- Removed datera_num_replicas option.