Fibre Channel and iSCSI drivers for Violin Memory 6000 Series Arrays
This patch adds both Fibrechannel and iSCSI driver support for Violin Memory 6000 Series All-Flash Arrays. Certification results posted at: https://bugs.launchpad.net/cinder/+bug/1399911 Change-Id: I9f62fe79e892cff01abdb02dbd95be8e432f8ab7 Implements: blueprint violinmemory-v6000-storage-drivers
This commit is contained in:
parent
d1b89778f9
commit
4a679cc7f1
@ -912,3 +912,24 @@ class ISCSITargetAttachFailed(CinderException):
|
||||
# X-IO driver exception.
|
||||
class XIODriverException(VolumeDriverException):
    """Generic failure reported by the X-IO volume driver."""
    message = _("X-IO Volume Driver exception!")
|
||||
|
||||
|
||||
# Violin Memory drivers
|
||||
class ViolinInvalidBackendConfig(CinderException):
    """Raised when the Violin array's backend configuration is unusable."""
    message = _("Volume backend config is invalid: %(reason)s")
|
||||
|
||||
|
||||
class ViolinRequestRetryTimeout(CinderException):
    """Raised when retrying a backend request exceeds the timeout."""
    message = _("Backend service retry timeout hit: %(timeout)s sec")
|
||||
|
||||
|
||||
class ViolinBackendErr(CinderException):
    """Raised for a fatal error reported by the Violin backend."""
    message = _("Backend reports: %(message)s")
|
||||
|
||||
|
||||
class ViolinBackendErrExists(CinderException):
    """Raised when the backend reports the item already exists."""
    message = _("Backend reports: item already exists")
|
||||
|
||||
|
||||
class ViolinBackendErrNotFound(CinderException):
    """Raised when the backend reports the item was not found."""
    message = _("Backend reports: item not found")
|
||||
|
46
cinder/tests/fake_vmem_xgtools_client.py
Normal file
46
cinder/tests/fake_vmem_xgtools_client.py
Normal file
@ -0,0 +1,46 @@
|
||||
# Copyright 2014 Violin Memory, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Fake VMEM XG-Tools client for testing drivers. Inspired by
|
||||
cinder/tests/fake_hp_3par_client.py.
|
||||
"""
|
||||
|
||||
import sys
|
||||
|
||||
import mock
|
||||
|
||||
vmemclient = mock.Mock()
|
||||
vmemclient.__version__ = "unknown"
|
||||
|
||||
sys.modules['vxg'] = vmemclient
|
||||
|
||||
mock_client_conf = [
|
||||
'basic',
|
||||
'basic.login',
|
||||
'basic.get_node_values',
|
||||
'basic.save_config',
|
||||
'lun',
|
||||
'lun.export_lun',
|
||||
'lun.unexport_lun',
|
||||
'snapshot',
|
||||
'snapshot.export_lun_snapshot',
|
||||
'snapshot.unexport_lun_snapshot',
|
||||
'iscsi',
|
||||
'iscsi.bind_ip_to_target',
|
||||
'iscsi.create_iscsi_target',
|
||||
'iscsi.delete_iscsi_target',
|
||||
'igroup',
|
||||
]
|
562
cinder/tests/test_v6000_common.py
Normal file
562
cinder/tests/test_v6000_common.py
Normal file
@ -0,0 +1,562 @@
|
||||
# Copyright 2014 Violin Memory, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Tests for Violin Memory 6000 Series All-Flash Array Common Driver
|
||||
"""
|
||||
|
||||
import mock
|
||||
|
||||
from cinder import exception
|
||||
from cinder import test
|
||||
from cinder.tests import fake_vmem_xgtools_client as vxg
|
||||
from cinder.volume import configuration as conf
|
||||
from cinder.volume.drivers.violin import v6000_common
|
||||
|
||||
# Shared fixtures: fake volume, snapshot, source volume, and connector
# records used throughout the V6000 common-driver tests.
VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba"
VOLUME = {
    "name": "volume-" + VOLUME_ID,
    "id": VOLUME_ID,
    "display_name": "fake_volume",
    "size": 2,
    "host": "irrelevant",
    "volume_type": None,
    "volume_type_id": None,
}

SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb"
SNAPSHOT = {
    "name": "snapshot-" + SNAPSHOT_ID,
    "id": SNAPSHOT_ID,
    "volume_id": VOLUME_ID,
    "volume_name": "volume-" + VOLUME_ID,
    "volume_size": 2,
    "display_name": "fake_snapshot",
    "volume": VOLUME,
}

SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc"
SRC_VOL = {
    "name": "volume-" + SRC_VOL_ID,
    "id": SRC_VOL_ID,
    "display_name": "fake_src_vol",
    "size": 2,
    "host": "irrelevant",
    "volume_type": None,
    "volume_type_id": None,
}

INITIATOR_IQN = "iqn.1111-22.org.debian:11:222"
CONNECTOR = {
    "initiator": INITIATOR_IQN,
    "host": "irrelevant"
}
|
||||
|
||||
|
||||
class V6000CommonTestCase(test.TestCase):
    """Test cases for VMEM V6000 driver common class.

    Fixes over the original: idiomatic unittest assertions
    (``assertIsNone``/``assertTrue`` instead of ``assertEqual(None, ...)``
    and ``assertTrue(x is None)``), the redundant ``tearDown`` override
    removed, and ``test_create_lun_lun_already_exists`` now uses
    ``setup_mock_vshare`` like every other test (``setup_mock_client``
    returns the patcher mock, not a VShare mock).
    """

    def setUp(self):
        super(V6000CommonTestCase, self).setUp()
        self.conf = self.setup_configuration()
        self.driver = v6000_common.V6000Common(self.conf)
        self.driver.container = 'myContainer'
        self.driver.device_id = 'ata-VIOLIN_MEMORY_ARRAY_23109R00000022'
        self.stats = {}

    def setup_configuration(self):
        """Return a mock Configuration populated with the driver options."""
        config = mock.Mock(spec=conf.Configuration)
        config.volume_backend_name = 'v6000_common'
        config.san_ip = '1.1.1.1'
        config.san_login = 'admin'
        config.san_password = ''
        config.san_thin_provision = False
        config.san_is_local = False
        config.gateway_mga = '2.2.2.2'
        config.gateway_mgb = '3.3.3.3'
        config.use_igroups = False
        config.request_timeout = 300
        config.container = 'myContainer'
        return config

    @mock.patch('vxg.open')
    def setup_mock_client(self, _m_client, m_conf=None):
        """Create a fake backend communication factory.

        The xg-tools creates a VShare connection object (for V6000
        devices) and returns it for use on a call to vxg.open().
        """
        # configure the vshare object mock with defaults
        _m_vshare = mock.Mock(name='VShare',
                              version='1.1.1',
                              spec=vxg.mock_client_conf)

        # if m_conf, clobber the defaults with it
        if m_conf:
            _m_vshare.configure_mock(**m_conf)

        # set calls to vxg.open() to return this mocked vshare object
        _m_client.return_value = _m_vshare

        return _m_client

    def setup_mock_vshare(self, m_conf=None):
        """Create a fake VShare communication object."""
        _m_vshare = mock.Mock(name='VShare',
                              version='1.1.1',
                              spec=vxg.mock_client_conf)

        if m_conf:
            _m_vshare.configure_mock(**m_conf)

        return _m_vshare

    def test_check_for_setup_error(self):
        """No setup errors are found."""
        bn1 = ("/vshare/state/local/container/%s/threshold/usedspace"
               "/threshold_hard_val" % self.driver.container)
        bn2 = ("/vshare/state/local/container/%s/threshold/provision"
               "/threshold_hard_val" % self.driver.container)
        bn_thresholds = {bn1: 0, bn2: 100}

        conf = {
            'basic.get_node_values.return_value': bn_thresholds,
        }
        self.driver.vip = self.setup_mock_vshare(m_conf=conf)
        self.driver._is_supported_vmos_version = mock.Mock(return_value=True)

        result = self.driver.check_for_setup_error()

        self.driver._is_supported_vmos_version.assert_called_with(
            self.driver.vip.version)
        self.driver.vip.basic.get_node_values.assert_called_with(
            [bn1, bn2])
        self.assertIsNone(result)

    def test_check_for_setup_error_no_container(self):
        """No container was configured."""
        self.driver.vip = self.setup_mock_vshare()
        self.driver.container = ''
        self.assertRaises(exception.ViolinInvalidBackendConfig,
                          self.driver.check_for_setup_error)

    def test_check_for_setup_error_invalid_usedspace_threshold(self):
        """The array's usedspace threshold was altered (not supported)."""
        bn1 = ("/vshare/state/local/container/%s/threshold/usedspace"
               "/threshold_hard_val" % self.driver.container)
        bn2 = ("/vshare/state/local/container/%s/threshold/provision"
               "/threshold_hard_val" % self.driver.container)
        bn_thresholds = {bn1: 99, bn2: 100}

        conf = {
            'basic.get_node_values.return_value': bn_thresholds,
        }
        self.driver.vip = self.setup_mock_vshare(m_conf=conf)
        self.driver._is_supported_vmos_version = mock.Mock(return_value=True)

        self.assertRaises(exception.ViolinInvalidBackendConfig,
                          self.driver.check_for_setup_error)

    def test_check_for_setup_error_invalid_provisionedspace_threshold(self):
        """The array's provisioned threshold was altered (not supported)."""
        bn1 = ("/vshare/state/local/container/%s/threshold/usedspace"
               "/threshold_hard_val" % self.driver.container)
        bn2 = ("/vshare/state/local/container/%s/threshold/provision"
               "/threshold_hard_val" % self.driver.container)
        bn_thresholds = {bn1: 0, bn2: 99}

        conf = {
            'basic.get_node_values.return_value': bn_thresholds,
        }
        self.driver.vip = self.setup_mock_vshare(m_conf=conf)
        self.driver._is_supported_vmos_version = mock.Mock(return_value=True)

        self.assertRaises(exception.ViolinInvalidBackendConfig,
                          self.driver.check_for_setup_error)

    def test_create_lun(self):
        """Lun is successfully created."""
        response = {'code': 0, 'message': 'LUN create: success!'}

        conf = {
            'lun.create_lun.return_value': response,
        }
        self.driver.vip = self.setup_mock_vshare(m_conf=conf)
        self.driver._send_cmd = mock.Mock(return_value=response)

        result = self.driver._create_lun(VOLUME)

        self.driver._send_cmd.assert_called_with(
            self.driver.vip.lun.create_lun, 'LUN create: success!',
            self.driver.container, VOLUME['id'], VOLUME['size'], 1, "0",
            "0", "w", 1, 512, False, False, None)
        self.assertIsNone(result)

    def test_create_lun_lun_already_exists(self):
        """Array returns error that the lun already exists."""
        response = {'code': 14005,
                    'message': 'LUN with name ... already exists'}

        conf = {
            'lun.create_lun.return_value': response,
        }
        # NOTE: use setup_mock_vshare (not setup_mock_client) so vip is a
        # VShare mock, consistent with the rest of the test class.
        self.driver.vip = self.setup_mock_vshare(m_conf=conf)
        self.driver._send_cmd = mock.Mock(
            side_effect=exception.ViolinBackendErrExists(
                response['message']))

        self.assertIsNone(self.driver._create_lun(VOLUME))

    def test_create_lun_create_fails_with_exception(self):
        """Array returns a out of space error."""
        response = {'code': 512, 'message': 'Not enough space available'}
        failure = exception.ViolinBackendErr

        conf = {
            'lun.create_lun.return_value': response,
        }
        self.driver.vip = self.setup_mock_vshare(m_conf=conf)
        self.driver._send_cmd = mock.Mock(
            side_effect=failure(response['message']))

        self.assertRaises(failure, self.driver._create_lun, VOLUME)

    def test_delete_lun(self):
        """Lun is deleted successfully."""
        response = {'code': 0, 'message': 'lun deletion started'}
        success_msgs = ['lun deletion started', '']

        conf = {
            'lun.delete_lun.return_value': response,
        }
        self.driver.vip = self.setup_mock_vshare(m_conf=conf)
        self.driver._send_cmd = mock.Mock(return_value=response)

        result = self.driver._delete_lun(VOLUME)

        self.driver._send_cmd.assert_called_with(
            self.driver.vip.lun.bulk_delete_luns,
            success_msgs, self.driver.container, VOLUME['id'])

        self.assertIsNone(result)

    def test_delete_lun_empty_response_message(self):
        """Array bug where delete action returns no message."""
        response = {'code': 0, 'message': ''}

        conf = {
            'lun.delete_lun.return_value': response,
        }
        self.driver.vip = self.setup_mock_vshare(m_conf=conf)
        self.driver._send_cmd = mock.Mock(return_value=response)

        self.assertIsNone(self.driver._delete_lun(VOLUME))

    def test_delete_lun_lun_already_deleted(self):
        """Array fails to delete a lun that doesn't exist."""
        response = {'code': 14005, 'message': 'LUN ... does not exist.'}

        conf = {
            'lun.delete_lun.return_value': response,
        }
        self.driver.vip = self.setup_mock_vshare(m_conf=conf)
        self.driver._send_cmd = mock.Mock(
            side_effect=exception.ViolinBackendErrNotFound(
                response['message']))

        self.assertIsNone(self.driver._delete_lun(VOLUME))

    def test_delete_lun_delete_fails_with_exception(self):
        """Array returns a generic error."""
        response = {'code': 14000, 'message': 'Generic error'}
        failure = exception.ViolinBackendErr
        conf = {
            'lun.delete_lun.return_value': response
        }
        self.driver.vip = self.setup_mock_vshare(m_conf=conf)
        self.driver._send_cmd = mock.Mock(
            side_effect=failure(response['message']))

        self.assertRaises(failure, self.driver._delete_lun, VOLUME)

    def test_extend_lun(self):
        """Volume extend completes successfully."""
        new_volume_size = 10
        response = {'code': 0, 'message': 'Success '}

        conf = {
            'lun.resize_lun.return_value': response,
        }
        self.driver.vip = self.setup_mock_vshare(m_conf=conf)
        self.driver._send_cmd = mock.Mock(return_value=response)

        result = self.driver._extend_lun(VOLUME, new_volume_size)

        self.driver._send_cmd.assert_called_with(
            self.driver.vip.lun.resize_lun,
            'Success', self.driver.container,
            VOLUME['id'], new_volume_size)
        self.assertIsNone(result)

    def test_extend_lun_new_size_is_too_small(self):
        """Volume extend fails when new size would shrink the volume."""
        new_volume_size = 0
        response = {'code': 14036, 'message': 'Failure'}

        conf = {
            'lun.resize_lun.return_value': response,
        }
        self.driver.vip = self.setup_mock_vshare(m_conf=conf)
        self.driver._send_cmd = mock.Mock(
            side_effect=exception.ViolinBackendErr(message='fail'))

        self.assertRaises(exception.ViolinBackendErr,
                          self.driver._extend_lun, VOLUME, new_volume_size)

    def test_create_lun_snapshot(self):
        """Snapshot creation completes successfully."""
        response = {'code': 0, 'message': 'success'}
        success_msg = 'Snapshot create: success!'

        conf = {
            'snapshot.create_lun_snapshot.return_value': response
        }
        self.driver.vip = self.setup_mock_vshare(m_conf=conf)
        self.driver._send_cmd = mock.Mock(return_value=response)

        result = self.driver._create_lun_snapshot(SNAPSHOT)

        self.driver._send_cmd.assert_called_with(
            self.driver.vip.snapshot.create_lun_snapshot, success_msg,
            self.driver.container, SNAPSHOT['volume_id'], SNAPSHOT['id'])
        self.assertIsNone(result)

    def test_delete_lun_snapshot(self):
        """Snapshot deletion completes successfully."""
        response = {'code': 0, 'message': 'success'}
        success_msg = 'Snapshot delete: success!'

        conf = {
            'snapshot.delete_lun_snapshot.return_value': response,
        }
        self.driver.vip = self.setup_mock_vshare(m_conf=conf)
        self.driver._send_cmd = mock.Mock(return_value=response)

        result = self.driver._delete_lun_snapshot(SNAPSHOT)

        self.driver._send_cmd.assert_called_with(
            self.driver.vip.snapshot.delete_lun_snapshot, success_msg,
            self.driver.container, SNAPSHOT['volume_id'], SNAPSHOT['id'])
        self.assertIsNone(result)

    def test_get_lun_id(self):
        """An exported lun's id is read from the config node tree."""
        bn = "/vshare/config/export/container/%s/lun/%s/target/**" \
            % (self.conf.container, VOLUME['id'])
        response = {("/vshare/config/export/container/%s/lun"
                     "/%s/target/hba-a1/initiator/openstack/lun_id"
                     % (self.conf.container, VOLUME['id'])): 1}

        conf = {
            'basic.get_node_values.return_value': response,
        }
        self.driver.vip = self.setup_mock_vshare(m_conf=conf)

        result = self.driver._get_lun_id(VOLUME['id'])

        self.driver.vip.basic.get_node_values.assert_called_with(bn)
        self.assertEqual(1, result)

    def test_get_lun_id_with_no_lun_config(self):
        """Lun id lookup fails when the lun has no export config."""
        response = {}

        conf = {
            'basic.get_node_values.return_value': response,
        }
        self.driver.vip = self.setup_mock_vshare(m_conf=conf)

        self.assertRaises(exception.ViolinBackendErrNotFound,
                          self.driver._get_lun_id, VOLUME['id'])

    def test_get_snapshot_id(self):
        """An exported snapshot's lun id is read from the config tree."""
        bn = ("/vshare/config/export/snapshot/container/%s/lun/%s/snap/%s"
              "/target/**") % (self.conf.container, VOLUME['id'],
                               SNAPSHOT['id'])
        response = {("/vshare/config/export/snapshot/container/%s/lun"
                     "/%s/snap/%s/target/hba-a1/initiator/openstack/lun_id"
                     % (self.conf.container, VOLUME['id'],
                        SNAPSHOT['id'])): 1}

        conf = {
            'basic.get_node_values.return_value': response,
        }
        self.driver.vip = self.setup_mock_vshare(m_conf=conf)

        result = self.driver._get_snapshot_id(VOLUME['id'], SNAPSHOT['id'])

        self.driver.vip.basic.get_node_values.assert_called_with(bn)
        self.assertEqual(1, result)

    def test_get_snapshot_id_with_no_lun_config(self):
        """Snapshot id lookup fails when there is no export config."""
        response = {}

        conf = {
            'basic.get_node_values.return_value': response,
        }
        self.driver.vip = self.setup_mock_vshare(m_conf=conf)

        self.assertRaises(exception.ViolinBackendErrNotFound,
                          self.driver._get_snapshot_id,
                          SNAPSHOT['volume_id'], SNAPSHOT['id'])

    def test_send_cmd(self):
        """Command callback completes successfully."""
        success_msg = 'success'
        request_args = ['arg1', 'arg2', 'arg3']
        response = {'code': 0, 'message': 'success'}

        request_func = mock.Mock(return_value=response)
        self.driver._fatal_error_code = mock.Mock(return_value=None)

        result = self.driver._send_cmd(request_func, success_msg, request_args)

        self.driver._fatal_error_code.assert_called_with(response)
        self.assertEqual(response, result)

    def test_send_cmd_request_timed_out(self):
        """The callback retry timeout hits immediately."""
        success_msg = 'success'
        request_args = ['arg1', 'arg2', 'arg3']
        self.conf.request_timeout = 0

        request_func = mock.Mock()

        self.assertRaises(exception.ViolinRequestRetryTimeout,
                          self.driver._send_cmd,
                          request_func, success_msg, request_args)

    def test_send_cmd_response_has_no_message(self):
        """The callback returns no message on the first call."""
        success_msg = 'success'
        request_args = ['arg1', 'arg2', 'arg3']
        response1 = {'code': 0, 'message': None}
        response2 = {'code': 0, 'message': 'success'}

        request_func = mock.Mock(side_effect=[response1, response2])
        self.driver._fatal_error_code = mock.Mock(return_value=None)

        self.assertEqual(response2,
                         self.driver._send_cmd(request_func, success_msg,
                                               request_args))

    def test_send_cmd_response_has_fatal_error(self):
        """The callback response contains a fatal error code."""
        success_msg = 'success'
        request_args = ['arg1', 'arg2', 'arg3']
        response = {'code': 14000, 'message': 'try again later.'}
        failure = exception.ViolinBackendErr

        request_func = mock.Mock(return_value=response)
        self.driver._fatal_error_code = mock.Mock(
            side_effect=failure(message='fail'))
        self.assertRaises(failure, self.driver._send_cmd,
                          request_func, success_msg, request_args)

    def test_get_igroup(self):
        """The igroup is verified and already exists."""
        bn = '/vshare/config/igroup/%s' % CONNECTOR['host']
        response = {bn: CONNECTOR['host']}

        conf = {
            'basic.get_node_values.return_value': response,
        }
        self.driver.vip = self.setup_mock_vshare(m_conf=conf)

        result = self.driver._get_igroup(VOLUME, CONNECTOR)

        self.driver.vip.basic.get_node_values.assert_called_with(bn)
        self.assertEqual(CONNECTOR['host'], result)

    def test_get_igroup_with_new_name(self):
        """The igroup is verified but must be created on the backend."""
        response = {}

        conf = {
            'basic.get_node_values.return_value': response,
        }
        self.driver.vip = self.setup_mock_vshare(m_conf=conf)

        self.assertEqual(CONNECTOR['host'],
                         self.driver._get_igroup(VOLUME, CONNECTOR))

    def test_wait_for_export_config(self):
        """Queries to cluster nodes verify export config."""
        bn = "/vshare/config/export/container/myContainer/lun/%s" \
            % VOLUME['id']
        response = {'/vshare/config/export/container/myContainer/lun/vol-01':
                    VOLUME['id']}

        conf = {
            'basic.get_node_values.return_value': response,
        }
        self.driver.mga = self.setup_mock_vshare(m_conf=conf)
        self.driver.mgb = self.setup_mock_vshare(m_conf=conf)

        result = self.driver._wait_for_export_config(VOLUME['id'], state=True)

        self.driver.mga.basic.get_node_values.assert_called_with(bn)
        self.driver.mgb.basic.get_node_values.assert_called_with(bn)
        self.assertTrue(result)

    def test_wait_for_export_config_with_no_config(self):
        """Queries to cluster nodes verify *no* export config."""
        response = {}

        conf = {
            'basic.get_node_values.return_value': response,
        }
        self.driver.mga = self.setup_mock_vshare(m_conf=conf)
        self.driver.mgb = self.setup_mock_vshare(m_conf=conf)

        self.assertTrue(self.driver._wait_for_export_config(
            VOLUME['id'], state=False))

    def test_is_supported_vmos_version(self):
        """Currently supported VMOS version."""
        version = 'V6.3.1'
        self.assertTrue(self.driver._is_supported_vmos_version(version))

    def test_is_supported_vmos_version_supported_future_version(self):
        """Potential future supported VMOS version."""
        version = 'V6.3.7'
        self.assertTrue(self.driver._is_supported_vmos_version(version))

    def test_is_supported_vmos_version_unsupported_past_version(self):
        """Currently unsupported VMOS version."""
        version = 'G5.5.2'
        self.assertFalse(self.driver._is_supported_vmos_version(version))

    def test_is_supported_vmos_version_unsupported_future_version(self):
        """Future incompatible VMOS version."""
        version = 'V7.0.0'
        self.assertFalse(self.driver._is_supported_vmos_version(version))

    def test_fatal_error_code(self):
        """Return an exception for a valid fatal error code."""
        response = {'code': 14000, 'message': 'fail city'}
        self.assertRaises(exception.ViolinBackendErr,
                          self.driver._fatal_error_code,
                          response)

    def test_fatal_error_code_non_fatal_error(self):
        """Returns no exception for a non-fatal error code."""
        response = {'code': 1024, 'message': 'try again!'}
        self.assertIsNone(self.driver._fatal_error_code(response))
|
585
cinder/tests/test_v6000_fcp.py
Normal file
585
cinder/tests/test_v6000_fcp.py
Normal file
@ -0,0 +1,585 @@
|
||||
# Copyright 2014 Violin Memory, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Tests for Violin Memory 6000 Series All-Flash Array Fibrechannel Driver
|
||||
"""
|
||||
|
||||
import mock
|
||||
from oslo.utils import units
|
||||
|
||||
from cinder import context
|
||||
from cinder.db.sqlalchemy import models
|
||||
from cinder import exception
|
||||
from cinder import test
|
||||
from cinder.tests import fake_vmem_xgtools_client as vxg
|
||||
from cinder.volume import configuration as conf
|
||||
from cinder.volume.drivers.violin import v6000_common
|
||||
from cinder.volume.drivers.violin import v6000_fcp
|
||||
|
||||
# Shared fixtures for the FCP driver tests: fake volume/snapshot records,
# an FC-capable connector, and the fabric wwn maps used for zoning checks.
VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba"
VOLUME = {
    "name": "volume-" + VOLUME_ID,
    "id": VOLUME_ID,
    "display_name": "fake_volume",
    "size": 2,
    "host": "irrelevant",
    "volume_type": None,
    "volume_type_id": None,
}

SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb"
SNAPSHOT = {
    "name": "snapshot-" + SNAPSHOT_ID,
    "id": SNAPSHOT_ID,
    "volume_id": VOLUME_ID,
    "volume_name": "volume-" + VOLUME_ID,
    "volume_size": 2,
    "display_name": "fake_snapshot",
    "volume": VOLUME,
}

SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc"
SRC_VOL = {
    "name": "volume-" + SRC_VOL_ID,
    "id": SRC_VOL_ID,
    "display_name": "fake_src_vol",
    "size": 2,
    "host": "irrelevant",
    "volume_type": None,
    "volume_type_id": None,
}

INITIATOR_IQN = "iqn.1111-22.org.debian:11:222"
CONNECTOR = {
    "initiator": INITIATOR_IQN,
    "host": "irrelevant",
    'wwpns': [u'50014380186b3f65', u'50014380186b3f67'],
}

FC_TARGET_WWPNS = [
    '31000024ff45fb22', '21000024ff45fb23',
    '51000024ff45f1be', '41000024ff45f1bf'
]
FC_INITIATOR_WWPNS = [
    '50014380186b3f65', '50014380186b3f67'
]
FC_FABRIC_MAP = {
    'fabricA':
    {'target_port_wwn_list': [FC_TARGET_WWPNS[0], FC_TARGET_WWPNS[1]],
     'initiator_port_wwn_list': [FC_INITIATOR_WWPNS[0]]},
    'fabricB':
    {'target_port_wwn_list': [FC_TARGET_WWPNS[2], FC_TARGET_WWPNS[3]],
     'initiator_port_wwn_list': [FC_INITIATOR_WWPNS[1]]}
}
FC_INITIATOR_TARGET_MAP = {
    FC_INITIATOR_WWPNS[0]: [FC_TARGET_WWPNS[0], FC_TARGET_WWPNS[1]],
    FC_INITIATOR_WWPNS[1]: [FC_TARGET_WWPNS[2], FC_TARGET_WWPNS[3]]
}
|
||||
|
||||
|
||||
class V6000FCPDriverTestCase(test.TestCase):
|
||||
"""Test cases for VMEM FCP driver."""
|
||||
def setUp(self):
|
||||
super(V6000FCPDriverTestCase, self).setUp()
|
||||
self.conf = self.setup_configuration()
|
||||
self.driver = v6000_fcp.V6000FCDriver(configuration=self.conf)
|
||||
self.driver.common.container = 'myContainer'
|
||||
self.driver.device_id = 'ata-VIOLIN_MEMORY_ARRAY_23109R00000022'
|
||||
self.driver.gateway_fc_wwns = FC_TARGET_WWPNS
|
||||
self.stats = {}
|
||||
self.driver.set_initialized()
|
||||
|
||||
def tearDown(self):
|
||||
super(V6000FCPDriverTestCase, self).tearDown()
|
||||
|
||||
def setup_configuration(self):
|
||||
config = mock.Mock(spec=conf.Configuration)
|
||||
config.volume_backend_name = 'v6000_fcp'
|
||||
config.san_ip = '1.1.1.1'
|
||||
config.san_login = 'admin'
|
||||
config.san_password = ''
|
||||
config.san_thin_provision = False
|
||||
config.san_is_local = False
|
||||
config.gateway_mga = '2.2.2.2'
|
||||
config.gateway_mgb = '3.3.3.3'
|
||||
config.use_igroups = False
|
||||
config.request_timeout = 300
|
||||
config.container = 'myContainer'
|
||||
return config
|
||||
|
||||
def setup_mock_vshare(self, m_conf=None):
|
||||
"""Create a fake VShare communication object."""
|
||||
_m_vshare = mock.Mock(name='VShare',
|
||||
version='1.1.1',
|
||||
spec=vxg.mock_client_conf)
|
||||
|
||||
if m_conf:
|
||||
_m_vshare.configure_mock(**m_conf)
|
||||
|
||||
return _m_vshare
|
||||
|
||||
@mock.patch.object(v6000_common.V6000Common, 'check_for_setup_error')
|
||||
def test_check_for_setup_error(self, m_setup_func):
|
||||
"""No setup errors are found."""
|
||||
result = self.driver.check_for_setup_error()
|
||||
m_setup_func.assert_called_with()
|
||||
self.assertTrue(result is None)
|
||||
|
||||
@mock.patch.object(v6000_common.V6000Common, 'check_for_setup_error')
|
||||
def test_check_for_setup_error_no_wwn_config(self, m_setup_func):
|
||||
"""No wwns were found during setup."""
|
||||
self.driver.gateway_fc_wwns = []
|
||||
self.assertRaises(exception.ViolinInvalidBackendConfig,
|
||||
self.driver.check_for_setup_error)
|
||||
|
||||
def test_create_volume(self):
|
||||
"""Volume created successfully."""
|
||||
self.driver.common._create_lun = mock.Mock()
|
||||
|
||||
result = self.driver.create_volume(VOLUME)
|
||||
|
||||
self.driver.common._create_lun.assert_called_with(VOLUME)
|
||||
self.assertTrue(result is None)
|
||||
|
||||
def test_delete_volume(self):
|
||||
"""Volume deleted successfully."""
|
||||
self.driver.common._delete_lun = mock.Mock()
|
||||
|
||||
result = self.driver.delete_volume(VOLUME)
|
||||
|
||||
self.driver.common._delete_lun.assert_called_with(VOLUME)
|
||||
self.assertTrue(result is None)
|
||||
|
||||
def test_create_snapshot(self):
|
||||
"""Snapshot created successfully."""
|
||||
self.driver.common._create_lun_snapshot = mock.Mock()
|
||||
|
||||
result = self.driver.create_snapshot(SNAPSHOT)
|
||||
|
||||
self.driver.common._create_lun_snapshot.assert_called_with(SNAPSHOT)
|
||||
self.assertTrue(result is None)
|
||||
|
||||
def test_delete_snapshot(self):
|
||||
"""Snapshot deleted successfully."""
|
||||
self.driver.common._delete_lun_snapshot = mock.Mock()
|
||||
|
||||
result = self.driver.delete_snapshot(SNAPSHOT)
|
||||
|
||||
self.driver.common._delete_lun_snapshot.assert_called_with(SNAPSHOT)
|
||||
self.assertTrue(result is None)
|
||||
|
||||
@mock.patch.object(context, 'get_admin_context')
def test_create_volume_from_snapshot(self, m_context_func):
    """Volume created from a snapshot successfully."""
    m_context_func.return_value = None
    self.driver.common._create_lun = mock.Mock()
    self.driver.copy_volume_data = mock.Mock()

    result = self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT)

    m_context_func.assert_called_with()
    self.driver.common._create_lun.assert_called_with(VOLUME)
    # The driver clones by creating a new lun and copying data into it.
    self.driver.copy_volume_data.assert_called_with(None, SNAPSHOT, VOLUME)
    self.assertIsNone(result)

@mock.patch.object(context, 'get_admin_context')
def test_create_cloned_volume(self, m_context_func):
    """Volume clone created successfully."""
    m_context_func.return_value = None
    self.driver.common._create_lun = mock.Mock()
    self.driver.copy_volume_data = mock.Mock()

    result = self.driver.create_cloned_volume(VOLUME, SRC_VOL)

    m_context_func.assert_called_with()
    self.driver.common._create_lun.assert_called_with(VOLUME)
    self.driver.copy_volume_data.assert_called_with(None, SRC_VOL, VOLUME)
    self.assertIsNone(result)
|
||||
|
||||
def test_initialize_connection(self):
    """FC connection info is built from the exported lun."""
    lun_id = 1
    igroup = None
    target_wwns = self.driver.gateway_fc_wwns
    init_targ_map = {}
    volume = mock.Mock(spec=models.Volume)

    self.driver.common.vip = self.setup_mock_vshare()
    self.driver._export_lun = mock.Mock(return_value=lun_id)
    self.driver._build_initiator_target_map = mock.Mock(
        return_value=(target_wwns, init_targ_map))

    props = self.driver.initialize_connection(volume, CONNECTOR)

    self.driver._export_lun.assert_called_with(volume, CONNECTOR, igroup)
    self.driver.common.vip.basic.save_config.assert_called_with()
    self.driver._build_initiator_target_map.assert_called_with(
        CONNECTOR)
    self.assertEqual("fibre_channel", props['driver_volume_type'])
    # assertTrue is the idiomatic check for a boolean flag
    # (was assertEqual(True, ...)).
    self.assertTrue(props['data']['target_discovered'])
    self.assertEqual(target_wwns, props['data']['target_wwn'])
    self.assertEqual(lun_id, props['data']['target_lun'])
    self.assertEqual(init_targ_map, props['data']['initiator_target_map'])

def test_initialize_connection_with_snapshot_object(self):
    """FC connection info is built from the exported snapshot."""
    lun_id = 1
    igroup = None
    target_wwns = self.driver.gateway_fc_wwns
    init_targ_map = {}
    snapshot = mock.Mock(spec=models.Snapshot)

    self.driver.common.vip = self.setup_mock_vshare()
    self.driver._export_snapshot = mock.Mock(return_value=lun_id)
    self.driver._build_initiator_target_map = mock.Mock(
        return_value=(target_wwns, init_targ_map))

    props = self.driver.initialize_connection(snapshot, CONNECTOR)

    self.driver._export_snapshot.assert_called_with(
        snapshot, CONNECTOR, igroup)
    self.driver.common.vip.basic.save_config.assert_called_with()
    self.driver._build_initiator_target_map.assert_called_with(
        CONNECTOR)
    self.assertEqual("fibre_channel", props['driver_volume_type'])
    self.assertTrue(props['data']['target_discovered'])
    self.assertEqual(target_wwns, props['data']['target_wwn'])
    self.assertEqual(lun_id, props['data']['target_lun'])
    self.assertEqual(init_targ_map, props['data']['initiator_target_map'])
|
||||
|
||||
def test_terminate_connection(self):
    """Volume unexport tears down the FC connection properties."""
    target_wwns = self.driver.gateway_fc_wwns
    init_targ_map = {}
    volume = mock.Mock(spec=models.Volume)

    self.driver.common.vip = self.setup_mock_vshare()
    self.driver._unexport_lun = mock.Mock()
    # No remaining sessions -> the driver should return zoning info
    # so the fabric mapping can be removed.
    self.driver._is_initiator_connected_to_array = mock.Mock(
        return_value=False)
    self.driver._build_initiator_target_map = mock.Mock(
        return_value=(target_wwns, init_targ_map))

    props = self.driver.terminate_connection(volume, CONNECTOR)

    self.driver._unexport_lun.assert_called_with(volume)
    self.driver.common.vip.basic.save_config.assert_called_with()
    self.driver._is_initiator_connected_to_array.assert_called_with(
        CONNECTOR)
    self.driver._build_initiator_target_map.assert_called_with(
        CONNECTOR)
    self.assertEqual("fibre_channel", props['driver_volume_type'])
    self.assertEqual(target_wwns, props['data']['target_wwn'])
    self.assertEqual(init_targ_map, props['data']['initiator_target_map'])

def test_terminate_connection_snapshot_object(self):
    """Snapshot unexport tears down the FC connection properties."""
    target_wwns = self.driver.gateway_fc_wwns
    init_targ_map = {}
    snapshot = mock.Mock(spec=models.Snapshot)

    self.driver.common.vip = self.setup_mock_vshare()
    self.driver._unexport_snapshot = mock.Mock()
    self.driver._is_initiator_connected_to_array = mock.Mock(
        return_value=False)
    self.driver._build_initiator_target_map = mock.Mock(
        return_value=(target_wwns, init_targ_map))

    props = self.driver.terminate_connection(snapshot, CONNECTOR)

    self.assertEqual("fibre_channel", props['driver_volume_type'])
    self.assertEqual(target_wwns, props['data']['target_wwn'])
    self.assertEqual(init_targ_map, props['data']['initiator_target_map'])
|
||||
|
||||
def test_get_volume_stats(self):
    """get_volume_stats(refresh=True) refreshes and returns stats."""
    self.driver._update_stats = mock.Mock()
    self.driver._update_stats()

    result = self.driver.get_volume_stats(True)

    self.driver._update_stats.assert_called_with()
    self.assertEqual(self.driver.stats, result)
|
||||
|
||||
def test_export_lun(self):
    """Lun is exported and the assigned backend lun id is returned."""
    lun_id = '1'
    igroup = 'test-igroup-1'
    response = {'code': 0, 'message': ''}

    self.driver.common.vip = self.setup_mock_vshare()
    self.driver.common._send_cmd_and_verify = mock.Mock(
        return_value=response)
    self.driver.common._get_lun_id = mock.Mock(return_value=lun_id)

    result = self.driver._export_lun(VOLUME, CONNECTOR, igroup)

    # The export is issued then verified via _wait_for_export_config.
    self.driver.common._send_cmd_and_verify.assert_called_with(
        self.driver.common.vip.lun.export_lun,
        self.driver.common._wait_for_export_config, '',
        [self.driver.common.container, VOLUME['id'], 'all',
         igroup, 'auto'], [VOLUME['id'], 'state=True'])
    self.driver.common._get_lun_id.assert_called_with(VOLUME['id'])
    self.assertEqual(lun_id, result)

def test_export_lun_fails_with_exception(self):
    """A backend error during export is propagated to the caller."""
    lun_id = '1'
    igroup = 'test-igroup-1'
    response = {'code': 14000, 'message': 'Generic error'}
    failure = exception.ViolinBackendErr

    self.driver.common.vip = self.setup_mock_vshare()
    self.driver.common._send_cmd_and_verify = mock.Mock(
        side_effect=failure(response['message']))
    self.driver.common._get_lun_id = mock.Mock(return_value=lun_id)

    self.assertRaises(failure, self.driver._export_lun,
                      VOLUME, CONNECTOR, igroup)
|
||||
|
||||
def test_unexport_lun(self):
    """Lun export is removed successfully."""
    response = {'code': 0, 'message': ''}

    self.driver.common.vip = self.setup_mock_vshare()
    self.driver.common._send_cmd_and_verify = mock.Mock(
        return_value=response)

    result = self.driver._unexport_lun(VOLUME)

    self.driver.common._send_cmd_and_verify.assert_called_with(
        self.driver.common.vip.lun.unexport_lun,
        self.driver.common._wait_for_export_config, '',
        [self.driver.common.container, VOLUME['id'], 'all', 'all', 'auto'],
        [VOLUME['id'], 'state=False'])
    # assertIsNone gives a clearer failure message than
    # assertTrue(result is None).
    self.assertIsNone(result)

def test_unexport_lun_fails_with_exception(self):
    """A backend error during unexport is propagated to the caller."""
    response = {'code': 14000, 'message': 'Generic error'}
    failure = exception.ViolinBackendErr

    self.driver.common.vip = self.setup_mock_vshare()
    self.driver.common._send_cmd_and_verify = mock.Mock(
        side_effect=failure(response['message']))

    self.assertRaises(failure, self.driver._unexport_lun, VOLUME)
|
||||
|
||||
def test_export_snapshot(self):
    """Snapshot is exported and the assigned lun id is returned."""
    lun_id = '1'
    igroup = 'test-igroup-1'
    response = {'code': 0, 'message': ''}

    self.driver.common.vip = self.setup_mock_vshare()
    self.driver.common._send_cmd = mock.Mock(return_value=response)
    self.driver.common._wait_for_export_config = mock.Mock()
    self.driver.common._get_snapshot_id = mock.Mock(return_value=lun_id)

    result = self.driver._export_snapshot(SNAPSHOT, CONNECTOR, igroup)

    self.driver.common._send_cmd.assert_called_with(
        self.driver.common.vip.snapshot.export_lun_snapshot, '',
        self.driver.common.container, SNAPSHOT['volume_id'],
        SNAPSHOT['id'], igroup, 'all', 'auto')
    self.driver.common._wait_for_export_config.assert_called_with(
        SNAPSHOT['volume_id'], SNAPSHOT['id'], state=True)
    self.driver.common._get_snapshot_id.assert_called_once_with(
        SNAPSHOT['volume_id'], SNAPSHOT['id'])
    self.assertEqual(lun_id, result)

def test_unexport_snapshot(self):
    """Snapshot export is removed successfully."""
    response = {'code': 0, 'message': ''}

    self.driver.common.vip = self.setup_mock_vshare()
    self.driver.common._send_cmd = mock.Mock(return_value=response)
    self.driver.common._wait_for_export_config = mock.Mock()

    result = self.driver._unexport_snapshot(SNAPSHOT)

    self.driver.common._send_cmd.assert_called_with(
        self.driver.common.vip.snapshot.unexport_lun_snapshot, '',
        self.driver.common.container, SNAPSHOT['volume_id'],
        SNAPSHOT['id'], 'all', 'all', 'auto', False)
    self.driver.common._wait_for_export_config.assert_called_with(
        SNAPSHOT['volume_id'], SNAPSHOT['id'], state=False)
    # assertIsNone replaces assertTrue(result is None).
    self.assertIsNone(result)
|
||||
|
||||
def test_add_igroup_member(self):
    """Initiator WWPNs are converted to vmem format and added."""
    igroup = 'test-group-1'
    response = {'code': 0, 'message': 'success'}
    wwpns = ['wwn.50:01:43:80:18:6b:3f:65', 'wwn.50:01:43:80:18:6b:3f:67']

    conf = {
        'igroup.add_initiators.return_value': response,
    }
    self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)

    self.driver._convert_wwns_openstack_to_vmem = mock.Mock(
        return_value=wwpns)

    result = self.driver._add_igroup_member(CONNECTOR, igroup)

    self.driver._convert_wwns_openstack_to_vmem.assert_called_with(
        CONNECTOR['wwpns'])
    self.driver.common.vip.igroup.add_initiators.assert_called_with(
        igroup, wwpns)
    # assertIsNone replaces assertTrue(result is None).
    self.assertIsNone(result)
|
||||
|
||||
def test_build_initiator_target_map(self):
    """Successfully build a map when zoning is enabled."""
    expected_targ_wwns = FC_TARGET_WWPNS
    expected_init_targ_map = FC_INITIATOR_TARGET_MAP

    # A configured lookup service means fabric zoning is in use.
    self.driver.lookup_service = mock.Mock()
    self.driver.lookup_service.get_device_mapping_from_network.\
        return_value = FC_FABRIC_MAP

    (targ_wwns, init_targ_map) = \
        self.driver._build_initiator_target_map(CONNECTOR)

    self.driver.lookup_service.get_device_mapping_from_network.\
        assert_called_with(CONNECTOR['wwpns'], self.driver.gateway_fc_wwns)
    # Compare as sets: target ordering from the fabric map is unspecified.
    self.assertEqual(set(expected_targ_wwns), set(targ_wwns))
    self.assertEqual(expected_init_targ_map, init_targ_map)

def test_build_initiator_target_map_no_lookup_service(self):
    """Successfully build a map when zoning is disabled."""
    expected_targ_wwns = FC_TARGET_WWPNS
    # Without zoning every initiator maps to every target.
    expected_init_targ_map = {
        CONNECTOR['wwpns'][0]: FC_TARGET_WWPNS,
        CONNECTOR['wwpns'][1]: FC_TARGET_WWPNS
    }
    self.driver.lookup_service = None

    targ_wwns, init_targ_map = self.driver._build_initiator_target_map(
        CONNECTOR)

    self.assertEqual(expected_targ_wwns, targ_wwns)
    self.assertEqual(expected_init_targ_map, init_targ_map)
|
||||
|
||||
def test_is_initiator_connected_to_array(self):
    """Successfully finds an initiator with remaining active session."""
    converted_wwpns = ['50:01:43:80:18:6b:3f:65',
                       '50:01:43:80:18:6b:3f:67']
    prefix = "/vshare/config/export/container"
    bn = "%s/%s/lun/**" % (prefix, self.driver.common.container)
    # Config-tree bindings that the backend would report for an
    # initiator that still has an exported lun.
    resp_binding0 = "%s/%s/lun/%s/target/hba-a1/initiator/%s" \
        % (prefix, self.driver.common.container, VOLUME['id'],
           converted_wwpns[0])
    resp_binding1 = "%s/%s/lun/%s/target/hba-a1/initiator/%s" \
        % (prefix, self.driver.common.container, VOLUME['id'],
           converted_wwpns[1])
    response = {
        resp_binding0: converted_wwpns[0],
        resp_binding1: converted_wwpns[1]
    }

    conf = {
        'basic.get_node_values.return_value': response,
    }
    self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)
    self.driver._convert_wwns_openstack_to_vmem = mock.Mock(
        return_value=converted_wwpns)

    self.assertTrue(self.driver._is_initiator_connected_to_array(
        CONNECTOR))
    self.driver.common.vip.basic.get_node_values.assert_called_with(bn)

def test_is_initiator_connected_to_array_empty_response(self):
    """Successfully finds no initiators with remaining active sessions."""
    converted_wwpns = ['50:01:43:80:18:6b:3f:65',
                       '50:01:43:80:18:6b:3f:67']
    response = {}

    conf = {
        'basic.get_node_values.return_value': response,
    }
    self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)
    self.driver._convert_wwns_openstack_to_vmem = mock.Mock(
        return_value=converted_wwpns)

    self.assertFalse(self.driver._is_initiator_connected_to_array(
        CONNECTOR))
|
||||
|
||||
def test_update_stats(self):
    """Stats are derived from the master node's container counters."""
    backend_name = self.conf.volume_backend_name
    vendor_name = "Violin Memory, Inc."
    tot_bytes = 100 * units.Gi
    free_bytes = 50 * units.Gi
    bn0 = '/cluster/state/master_id'
    bn1 = "/vshare/state/global/1/container/myContainer/total_bytes"
    bn2 = "/vshare/state/global/1/container/myContainer/free_bytes"
    response1 = {bn0: '1'}
    response2 = {bn1: tot_bytes, bn2: free_bytes}

    conf = {
        'basic.get_node_values.side_effect': [response1, response2],
    }
    self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)

    result = self.driver._update_stats()

    # First call resolves the master node, second reads its capacities.
    calls = [mock.call(bn0), mock.call([bn1, bn2])]
    self.driver.common.vip.basic.get_node_values.assert_has_calls(calls)
    self.assertEqual(100, self.driver.stats['total_capacity_gb'])
    self.assertEqual(50, self.driver.stats['free_capacity_gb'])
    self.assertEqual(backend_name,
                     self.driver.stats['volume_backend_name'])
    self.assertEqual(vendor_name, self.driver.stats['vendor_name'])
    # assertIsNone replaces assertTrue(result is None).
    self.assertIsNone(result)

def test_update_stats_fails_data_query(self):
    """Capacities fall back to 0 when the backend returns no data."""
    backend_name = self.conf.volume_backend_name
    vendor_name = "Violin Memory, Inc."
    bn0 = '/cluster/state/master_id'
    response1 = {bn0: '1'}
    response2 = {}

    conf = {
        'basic.get_node_values.side_effect': [response1, response2],
    }
    self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)

    self.assertIsNone(self.driver._update_stats())
    self.assertEqual(0, self.driver.stats['total_capacity_gb'])
    self.assertEqual(0, self.driver.stats['free_capacity_gb'])
    self.assertEqual(backend_name,
                     self.driver.stats['volume_backend_name'])
    self.assertEqual(vendor_name, self.driver.stats['vendor_name'])
|
||||
|
||||
def test_get_active_fc_targets(self):
    """Active FC target WWPNs are collected from every gateway node."""
    bn0 = '/vshare/state/global/*'
    # Two gateway nodes are reported by the backend.
    response0 = {'/vshare/state/global/1': 1,
                 '/vshare/state/global/2': 2}
    bn1 = '/vshare/state/global/1/target/fc/**'
    response1 = {'/vshare/state/global/1/target/fc/hba-a1/wwn':
                 'wwn.21:00:00:24:ff:45:fb:22'}
    bn2 = '/vshare/state/global/2/target/fc/**'
    response2 = {'/vshare/state/global/2/target/fc/hba-a1/wwn':
                 'wwn.21:00:00:24:ff:45:e2:30'}
    # Expected result: vmem-style WWNs flattened to openstack format.
    wwpns = ['21000024ff45fb22', '21000024ff45e230']

    conf = {
        'basic.get_node_values.side_effect':
            [response0, response1, response2],
    }
    self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)

    result = self.driver._get_active_fc_targets()

    calls = [mock.call(bn0), mock.call(bn1), mock.call(bn2)]
    self.driver.common.vip.basic.get_node_values.assert_has_calls(
        calls, any_order=True)
    self.assertEqual(wwpns, result)
|
||||
|
||||
def test_convert_wwns_openstack_to_vmem(self):
    """Openstack-format WWNs are converted to vmem format."""
    raw_wwns = ['50014380186b3f65']
    expected = ['wwn.50:01:43:80:18:6b:3f:65']
    converted = self.driver._convert_wwns_openstack_to_vmem(raw_wwns)
    self.assertEqual(expected, converted)

def test_convert_wwns_vmem_to_openstack(self):
    """Vmem-format WWNs are converted to openstack format."""
    raw_wwns = ['wwn.50:01:43:80:18:6b:3f:65']
    expected = ['50014380186b3f65']
    converted = self.driver._convert_wwns_vmem_to_openstack(raw_wwns)
    self.assertEqual(expected, converted)
|
718
cinder/tests/test_v6000_iscsi.py
Normal file
718
cinder/tests/test_v6000_iscsi.py
Normal file
@ -0,0 +1,718 @@
|
||||
# Copyright 2014 Violin Memory, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Tests for Violin Memory 6000 Series All-Flash Array iSCSI driver
|
||||
"""
|
||||
|
||||
import mock
|
||||
from oslo.utils import units
|
||||
|
||||
from cinder import context
|
||||
from cinder.db.sqlalchemy import models
|
||||
from cinder import exception
|
||||
from cinder import test
|
||||
from cinder.tests import fake_vmem_xgtools_client as vxg
|
||||
from cinder.volume import configuration as conf
|
||||
from cinder.volume.drivers.violin import v6000_common
|
||||
from cinder.volume.drivers.violin import v6000_iscsi
|
||||
|
||||
# Static fixtures shared by all iSCSI driver test cases below.
VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba"
VOLUME = {
    "name": "volume-" + VOLUME_ID,
    "id": VOLUME_ID,
    "display_name": "fake_volume",
    "size": 2,
    "host": "irrelevant",
    "volume_type": None,
    "volume_type_id": None,
}
SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb"
SNAPSHOT = {
    "name": "snapshot-" + SNAPSHOT_ID,
    "id": SNAPSHOT_ID,
    "volume_id": VOLUME_ID,
    "volume_name": "volume-" + VOLUME_ID,
    "volume_size": 2,
    "display_name": "fake_snapshot",
    "volume": VOLUME,
}
# Source volume used by the clone tests.
SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc"
SRC_VOL = {
    "name": "volume-" + SRC_VOL_ID,
    "id": SRC_VOL_ID,
    "display_name": "fake_src_vol",
    "size": 2,
    "host": "irrelevant",
    "volume_type": None,
    "volume_type_id": None,
}
# Fake iSCSI initiator/connector handed to the driver.
INITIATOR_IQN = "iqn.1111-22.org.debian:11:222"
CONNECTOR = {
    "initiator": INITIATOR_IQN,
    "host": "irrelevant"
}
|
||||
|
||||
|
||||
class V6000ISCSIDriverTestCase(test.TestCase):
|
||||
"""Test cases for VMEM iSCSI driver."""
|
||||
def setUp(self):
    """Create the iSCSI driver with a mocked config and gateway data."""
    super(V6000ISCSIDriverTestCase, self).setUp()
    self.conf = self.setup_configuration()
    self.driver = v6000_iscsi.V6000ISCSIDriver(configuration=self.conf)
    self.driver.common.container = 'myContainer'
    self.driver.device_id = 'ata-VIOLIN_MEMORY_ARRAY_23109R00000022'
    self.driver.gateway_iscsi_ip_addresses_mga = '1.2.3.4'
    self.driver.gateway_iscsi_ip_addresses_mgb = '1.2.3.4'
    # One entry per gateway node (mga/mgb) of the HA pair.
    self.driver.array_info = [{"node": 'hostname_mga',
                               "addr": '1.2.3.4',
                               "conn": self.driver.common.mga},
                              {"node": 'hostname_mgb',
                               "addr": '1.2.3.4',
                               "conn": self.driver.common.mgb}]
    self.stats = {}
    self.driver.set_initialized()

def tearDown(self):
    super(V6000ISCSIDriverTestCase, self).tearDown()
|
||||
|
||||
def setup_configuration(self):
    """Build a mock cinder Configuration with the driver's defaults."""
    config = mock.Mock(spec=conf.Configuration)
    config.volume_backend_name = 'v6000_iscsi'
    config.san_ip = '1.1.1.1'
    config.san_login = 'admin'
    config.san_password = ''
    config.san_thin_provision = False
    config.san_is_local = False
    config.gateway_mga = '2.2.2.2'
    config.gateway_mgb = '3.3.3.3'
    config.use_igroups = False
    config.request_timeout = 300
    config.container = 'myContainer'
    config.iscsi_port = 3260
    config.iscsi_target_prefix = 'iqn.2004-02.com.vmem:'
    return config

def setup_mock_vshare(self, m_conf=None):
    """Create a fake VShare communication object."""
    _m_vshare = mock.Mock(name='VShare',
                          version='1.1.1',
                          spec=vxg.mock_client_conf)

    # Optional per-test overrides for mocked return values.
    if m_conf:
        _m_vshare.configure_mock(**m_conf)

    return _m_vshare
|
||||
|
||||
@mock.patch.object(v6000_common.V6000Common, 'check_for_setup_error')
def test_check_for_setup_error(self, m_setup_func):
    """Setup check passes when iSCSI is enabled on the backend."""
    bn = "/vshare/config/iscsi/enable"
    response = {bn: True}

    conf = {
        'basic.get_node_values.return_value': response,
    }
    self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)

    result = self.driver.check_for_setup_error()

    m_setup_func.assert_called_with()
    self.driver.common.vip.basic.get_node_values.assert_called_with(bn)
    # assertIsNone replaces assertTrue(result is None).
    self.assertIsNone(result)

@mock.patch.object(v6000_common.V6000Common, 'check_for_setup_error')
def test_check_for_setup_error_iscsi_is_disabled(self, m_setup_func):
    """Setup check fails when iSCSI is disabled on the backend."""
    bn = "/vshare/config/iscsi/enable"
    response = {bn: False}

    conf = {
        'basic.get_node_values.return_value': response,
    }
    self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)

    self.assertRaises(exception.ViolinInvalidBackendConfig,
                      self.driver.check_for_setup_error)

@mock.patch.object(v6000_common.V6000Common, 'check_for_setup_error')
def test_check_for_setup_error_no_iscsi_ips_for_mga(self, m_setup_func):
    """Setup check fails when node mga has no iSCSI target IPs."""
    bn = "/vshare/config/iscsi/enable"
    response = {bn: True}
    self.driver.gateway_iscsi_ip_addresses_mga = ''

    conf = {
        'basic.get_node_values.return_value': response,
    }
    self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)

    self.assertRaises(exception.ViolinInvalidBackendConfig,
                      self.driver.check_for_setup_error)

@mock.patch.object(v6000_common.V6000Common, 'check_for_setup_error')
def test_check_for_setup_error_no_iscsi_ips_for_mgb(self, m_setup_func):
    """Setup check fails when node mgb has no iSCSI target IPs."""
    bn = "/vshare/config/iscsi/enable"
    response = {bn: True}
    self.driver.gateway_iscsi_ip_addresses_mgb = ''

    conf = {
        'basic.get_node_values.return_value': response,
    }
    self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)

    self.assertRaises(exception.ViolinInvalidBackendConfig,
                      self.driver.check_for_setup_error)
|
||||
|
||||
def test_create_volume(self):
    """Volume created successfully."""
    self.driver.common._create_lun = mock.Mock()

    result = self.driver.create_volume(VOLUME)

    self.driver.common._create_lun.assert_called_with(VOLUME)
    # assertIsNone reports the offending value on failure, unlike
    # assertTrue(result is None).
    self.assertIsNone(result)

def test_delete_volume(self):
    """Volume deleted successfully."""
    self.driver.common._delete_lun = mock.Mock()

    result = self.driver.delete_volume(VOLUME)

    self.driver.common._delete_lun.assert_called_with(VOLUME)
    self.assertIsNone(result)

def test_create_snapshot(self):
    """Snapshot created successfully."""
    self.driver.common._create_lun_snapshot = mock.Mock()

    result = self.driver.create_snapshot(SNAPSHOT)

    self.driver.common._create_lun_snapshot.assert_called_with(SNAPSHOT)
    self.assertIsNone(result)

def test_delete_snapshot(self):
    """Snapshot deleted successfully."""
    self.driver.common._delete_lun_snapshot = mock.Mock()

    result = self.driver.delete_snapshot(SNAPSHOT)

    self.driver.common._delete_lun_snapshot.assert_called_with(SNAPSHOT)
    self.assertIsNone(result)
|
||||
|
||||
@mock.patch.object(context, 'get_admin_context')
def test_create_volume_from_snapshot(self, m_context_func):
    """Volume created from a snapshot successfully."""
    m_context_func.return_value = None
    self.driver.common._create_lun = mock.Mock()
    self.driver.copy_volume_data = mock.Mock()

    result = self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT)

    m_context_func.assert_called_with()
    self.driver.common._create_lun.assert_called_with(VOLUME)
    # The driver clones by creating a new lun and copying data into it.
    self.driver.copy_volume_data.assert_called_with(None, SNAPSHOT, VOLUME)
    self.assertIsNone(result)

@mock.patch.object(context, 'get_admin_context')
def test_create_cloned_volume(self, m_context_func):
    """Volume clone created successfully."""
    m_context_func.return_value = None
    self.driver.common._create_lun = mock.Mock()
    self.driver.copy_volume_data = mock.Mock()

    result = self.driver.create_cloned_volume(VOLUME, SRC_VOL)

    m_context_func.assert_called_with()
    self.driver.common._create_lun.assert_called_with(VOLUME)
    self.driver.copy_volume_data.assert_called_with(None, SRC_VOL, VOLUME)
    self.assertIsNone(result)
|
||||
|
||||
def test_initialize_connection(self):
    """iSCSI connection properties are built for a volume export."""
    lun_id = 1
    igroup = None
    tgt = self.driver.array_info[0]
    iqn = "%s%s:%s" % (self.conf.iscsi_target_prefix,
                       tgt['node'], VOLUME['id'])
    volume = mock.MagicMock(spec=models.Volume)

    # Make the Volume mock subscriptable like a dict, backed by the
    # VOLUME fixture.
    def getitem(name):
        return VOLUME[name]

    volume.__getitem__.side_effect = getitem

    self.driver.common.vip = self.setup_mock_vshare()
    self.driver._get_short_name = mock.Mock(return_value=VOLUME['id'])
    self.driver._create_iscsi_target = mock.Mock(return_value=tgt)
    self.driver._export_lun = mock.Mock(return_value=lun_id)

    props = self.driver.initialize_connection(volume, CONNECTOR)

    self.driver._get_short_name.assert_called_with(volume['id'])
    self.driver._create_iscsi_target.assert_called_with(volume)
    self.driver._export_lun.assert_called_with(volume, CONNECTOR, igroup)
    self.driver.common.vip.basic.save_config.assert_called_with()
    self.assertEqual("1.2.3.4:3260", props['data']['target_portal'])
    self.assertEqual(iqn, props['data']['target_iqn'])
    self.assertEqual(lun_id, props['data']['target_lun'])
    self.assertEqual(volume['id'], props['data']['volume_id'])

def test_initialize_connection_with_snapshot_object(self):
    """iSCSI connection properties are built for a snapshot export."""
    lun_id = 1
    igroup = None
    tgt = self.driver.array_info[0]
    iqn = "%s%s:%s" % (self.conf.iscsi_target_prefix,
                       tgt['node'], SNAPSHOT['id'])
    snapshot = mock.MagicMock(spec=models.Snapshot)

    def getitem(name):
        return SNAPSHOT[name]

    snapshot.__getitem__.side_effect = getitem

    self.driver.common.vip = self.setup_mock_vshare()
    self.driver._get_short_name = mock.Mock(return_value=SNAPSHOT['id'])
    self.driver._create_iscsi_target = mock.Mock(return_value=tgt)
    self.driver._export_snapshot = mock.Mock(return_value=lun_id)

    props = self.driver.initialize_connection(snapshot, CONNECTOR)

    self.driver._get_short_name.assert_called_with(SNAPSHOT['id'])
    self.driver._create_iscsi_target.assert_called_with(snapshot)
    self.driver._export_snapshot.assert_called_with(snapshot, CONNECTOR,
                                                    igroup)
    self.driver.common.vip.basic.save_config.assert_called_with()
    self.assertEqual("1.2.3.4:3260", props['data']['target_portal'])
    self.assertEqual(iqn, props['data']['target_iqn'])
    self.assertEqual(lun_id, props['data']['target_lun'])
    self.assertEqual(SNAPSHOT['id'], props['data']['volume_id'])

def test_initialize_connection_with_igroups_enabled(self):
    """With igroups enabled the connector is added to an igroup."""
    self.conf.use_igroups = True
    lun_id = 1
    igroup = 'test-igroup-1'
    tgt = self.driver.array_info[0]
    iqn = "%s%s:%s" % (self.conf.iscsi_target_prefix,
                       tgt['node'], VOLUME['id'])
    volume = mock.MagicMock(spec=models.Volume)

    def getitem(name):
        return VOLUME[name]

    volume.__getitem__.side_effect = getitem

    self.driver.common.vip = self.setup_mock_vshare()
    self.driver.common._get_igroup = mock.Mock(return_value=igroup)
    self.driver._add_igroup_member = mock.Mock()
    self.driver._get_short_name = mock.Mock(return_value=VOLUME['id'])
    self.driver._create_iscsi_target = mock.Mock(return_value=tgt)
    self.driver._export_lun = mock.Mock(return_value=lun_id)

    props = self.driver.initialize_connection(volume, CONNECTOR)

    self.driver.common._get_igroup.assert_called_with(volume, CONNECTOR)
    self.driver._add_igroup_member.assert_called_with(CONNECTOR, igroup)
    self.driver._get_short_name.assert_called_with(volume['id'])
    self.driver._create_iscsi_target.assert_called_with(volume)
    self.driver._export_lun.assert_called_with(volume, CONNECTOR, igroup)
    self.driver.common.vip.basic.save_config.assert_called_with()
    self.assertEqual("1.2.3.4:3260", props['data']['target_portal'])
    self.assertEqual(iqn, props['data']['target_iqn'])
    self.assertEqual(lun_id, props['data']['target_lun'])
    self.assertEqual(volume['id'], props['data']['volume_id'])
|
||||
|
||||
def test_terminate_connection(self):
    """Connection terminated; lun unexported and target deleted."""
    volume = mock.MagicMock(spec=models.Volume)

    self.driver.common.vip = self.setup_mock_vshare()
    self.driver._unexport_lun = mock.Mock()
    self.driver._delete_iscsi_target = mock.Mock()

    result = self.driver.terminate_connection(volume, CONNECTOR)

    self.driver._unexport_lun.assert_called_with(volume)
    self.driver._delete_iscsi_target.assert_called_with(volume)
    self.driver.common.vip.basic.save_config.assert_called_with()
    # assertIsNone replaces assertTrue(result is None).
    self.assertIsNone(result)

def test_terminate_connection_with_snapshot_object(self):
    """Connection terminated; snapshot unexported and target deleted."""
    snapshot = mock.MagicMock(spec=models.Snapshot)

    self.driver.common.vip = self.setup_mock_vshare()
    self.driver._unexport_snapshot = mock.Mock()
    self.driver._delete_iscsi_target = mock.Mock()

    result = self.driver.terminate_connection(snapshot, CONNECTOR)

    self.driver._unexport_snapshot.assert_called_with(snapshot)
    self.driver._delete_iscsi_target.assert_called_with(snapshot)
    self.driver.common.vip.basic.save_config.assert_called_with()
    self.assertIsNone(result)
|
||||
|
||||
def test_get_volume_stats(self):
    """get_volume_stats(refresh=True) refreshes and returns stats."""
    self.driver._update_stats = mock.Mock()
    self.driver._update_stats()

    result = self.driver.get_volume_stats(True)

    self.driver._update_stats.assert_called_with()
    self.assertEqual(self.driver.stats, result)
|
||||
|
||||
def test_create_iscsi_target(self):
    """Target creation binds IPs on both gateway nodes and returns
    one of the configured array addresses.
    """
    target_name = VOLUME['id']
    response = {'code': 0, 'message': 'success'}

    m_vshare = self.setup_mock_vshare()

    self.driver.common.vip = m_vshare
    self.driver.common.mga = m_vshare
    self.driver.common.mgb = m_vshare
    self.driver._get_short_name = mock.Mock(return_value=VOLUME['id'])
    self.driver.common._send_cmd_and_verify = mock.Mock(
        return_value=response)
    self.driver.common._send_cmd = mock.Mock(return_value=response)

    # One bind_ip_to_target call per gateway node is expected.
    calls = [mock.call(self.driver.common.mga.iscsi.bind_ip_to_target, '',
                       VOLUME['id'],
                       self.driver.gateway_iscsi_ip_addresses_mga),
             mock.call(self.driver.common.mgb.iscsi.bind_ip_to_target, '',
                       VOLUME['id'],
                       self.driver.gateway_iscsi_ip_addresses_mgb)]

    result = self.driver._create_iscsi_target(VOLUME)

    self.driver._get_short_name.assert_called_with(VOLUME['id'])
    self.driver.common._send_cmd_and_verify.assert_called_with(
        self.driver.common.vip.iscsi.create_iscsi_target,
        self.driver._wait_for_targetstate, '',
        [target_name], [target_name])
    self.driver.common._send_cmd.assert_has_calls(calls)
    # assertIn shows both operands on failure, unlike
    # assertTrue(result in ...).
    self.assertIn(result, self.driver.array_info)
|
||||
def test_delete_iscsi_target(self):
    """Target deletion issues the backend command and returns None."""
    response = {'code': 0, 'message': 'success'}

    self.driver.common.vip = self.setup_mock_vshare()
    self.driver._get_short_name = mock.Mock(return_value=VOLUME['id'])
    self.driver.common._send_cmd = mock.Mock(return_value=response)

    result = self.driver._delete_iscsi_target(VOLUME)

    self.driver._get_short_name.assert_called_with(VOLUME['id'])
    # BUG FIX: the original *invoked* the _send_cmd mock here instead
    # of asserting on it, so this check never verified anything.
    self.driver.common._send_cmd.assert_called_with(
        self.driver.common.vip.iscsi.delete_iscsi_target,
        '', VOLUME['id'])
    self.assertIsNone(result)

def test_delete_iscsi_target_fails_with_exception(self):
    """Backend errors during target deletion propagate to the caller."""
    response = {'code': 14000, 'message': 'Generic error'}
    failure = exception.ViolinBackendErr

    self.driver.common.vip = self.setup_mock_vshare()
    self.driver._get_short_name = mock.Mock(return_value=VOLUME['id'])
    self.driver.common._send_cmd = mock.Mock(
        side_effect=failure(response['message']))

    self.assertRaises(failure, self.driver._delete_iscsi_target, VOLUME)
|
||||
def test_export_lun(self):
    """Exporting a LUN uses the request/verify wrapper and returns
    the lun id reported by the backend.
    """
    igroup = 'test-igroup-1'
    lun_id = '1'
    response = {'code': 0, 'message': ''}

    self.driver.common.vip = self.setup_mock_vshare()
    self.driver._get_short_name = mock.Mock(return_value=VOLUME['id'])
    self.driver.common._send_cmd_and_verify = mock.Mock(
        return_value=response)
    self.driver.common._get_lun_id = mock.Mock(return_value=lun_id)

    result = self.driver._export_lun(VOLUME, CONNECTOR, igroup)

    self.driver._get_short_name.assert_called_with(VOLUME['id'])
    self.driver.common._send_cmd_and_verify.assert_called_with(
        self.driver.common.vip.lun.export_lun,
        self.driver.common._wait_for_export_config, '',
        [self.driver.common.container, VOLUME['id'], VOLUME['id'],
         igroup, 'auto'], [VOLUME['id'], 'state=True'])
    self.driver.common._get_lun_id.assert_called_with(VOLUME['id'])
    self.assertEqual(lun_id, result)

def test_export_lun_fails_with_exception(self):
    """Backend errors from export_lun propagate to the caller."""
    igroup = 'test-igroup-1'
    lun_id = '1'
    response = {'code': 14000, 'message': 'Generic error'}
    failure = exception.ViolinBackendErr

    self.driver.common.vip = self.setup_mock_vshare()
    self.driver._get_short_name = mock.Mock(return_value=VOLUME['id'])
    self.driver.common._send_cmd_and_verify = mock.Mock(
        side_effect=failure(response['message']))
    # CONSISTENCY FIX: mock the helper where _export_lun actually finds
    # it (driver.common), as the success-path test does; the original
    # patched self.driver._get_lun_id, which nothing looks up.
    self.driver.common._get_lun_id = mock.Mock(return_value=lun_id)

    self.assertRaises(failure, self.driver._export_lun,
                      VOLUME, CONNECTOR, igroup)
|
||||
def test_unexport_lun(self):
    """Unexporting a LUN removes all bindings and returns None."""
    response = {'code': 0, 'message': ''}

    self.driver.common.vip = self.setup_mock_vshare()
    self.driver.common._send_cmd_and_verify = mock.Mock(
        return_value=response)

    result = self.driver._unexport_lun(VOLUME)

    self.driver.common._send_cmd_and_verify.assert_called_with(
        self.driver.common.vip.lun.unexport_lun,
        self.driver.common._wait_for_export_config, '',
        [self.driver.common.container, VOLUME['id'], 'all', 'all', 'auto'],
        [VOLUME['id'], 'state=False'])
    # assertIsNone reports the actual value on failure, unlike
    # assertTrue(result is None).
    self.assertIsNone(result)

def test_unexport_lun_fails_with_exception(self):
    """Backend errors from unexport_lun propagate to the caller."""
    response = {'code': 14000, 'message': 'Generic error'}
    failure = exception.ViolinBackendErr

    self.driver.common.vip = self.setup_mock_vshare()
    self.driver.common._send_cmd_and_verify = mock.Mock(
        side_effect=failure(response['message']))

    self.assertRaises(failure, self.driver._unexport_lun, VOLUME)
|
||||
def test_export_snapshot(self):
    """Exporting a snapshot issues the command, waits for the export
    config, and returns the snapshot's lun id.
    """
    lun_id = '1'
    igroup = 'test-igroup-1'
    response = {'code': 0, 'message': ''}

    self.driver.common.vip = self.setup_mock_vshare()
    self.driver._get_short_name = mock.Mock(return_value=SNAPSHOT['id'])
    self.driver.common._send_cmd = mock.Mock(return_value=response)
    self.driver.common._wait_for_export_config = mock.Mock()
    self.driver.common._get_snapshot_id = mock.Mock(return_value=lun_id)

    result = self.driver._export_snapshot(SNAPSHOT, CONNECTOR, igroup)

    self.driver._get_short_name.assert_called_with(SNAPSHOT['id'])
    self.driver.common._send_cmd.assert_called_with(
        self.driver.common.vip.snapshot.export_lun_snapshot, '',
        self.driver.common.container, SNAPSHOT['volume_id'],
        SNAPSHOT['id'], igroup, SNAPSHOT['id'], 'auto')
    self.driver.common._wait_for_export_config.assert_called_with(
        SNAPSHOT['volume_id'], SNAPSHOT['id'], state=True)
    self.driver.common._get_snapshot_id.assert_called_once_with(
        SNAPSHOT['volume_id'], SNAPSHOT['id'])

    self.assertEqual(lun_id, result)

def test_unexport_snapshot(self):
    """Unexporting a snapshot removes all bindings and returns None."""
    response = {'code': 0, 'message': ''}

    self.driver.common.vip = self.setup_mock_vshare()
    self.driver.common._send_cmd = mock.Mock(return_value=response)
    self.driver.common._wait_for_export_config = mock.Mock()

    result = self.driver._unexport_snapshot(SNAPSHOT)

    self.driver.common._send_cmd.assert_called_with(
        self.driver.common.vip.snapshot.unexport_lun_snapshot, '',
        self.driver.common.container, SNAPSHOT['volume_id'],
        SNAPSHOT['id'], 'all', 'all', 'auto', False)
    self.driver.common._wait_for_export_config.assert_called_with(
        SNAPSHOT['volume_id'], SNAPSHOT['id'], state=False)
    # assertIsNone reports the actual value on failure, unlike
    # assertTrue(result is None).
    self.assertIsNone(result)
|
||||
def test_add_igroup_member(self):
    """The connector's initiator is added to the given igroup."""
    igroup = 'test-group-1'
    response = {'code': 0, 'message': 'success'}

    conf = {
        'igroup.add_initiators.return_value': response,
    }
    self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)

    result = self.driver._add_igroup_member(CONNECTOR, igroup)

    self.driver.common.vip.igroup.add_initiators.assert_called_with(
        igroup, CONNECTOR['initiator'])
    # assertIsNone reports the actual value on failure, unlike
    # assertTrue(result is None).
    self.assertIsNone(result)
|
||||
def test_update_stats(self):
    """Capacity stats are read from the master node's container."""
    backend_name = self.conf.volume_backend_name
    vendor_name = "Violin Memory, Inc."
    tot_bytes = 100 * units.Gi
    free_bytes = 50 * units.Gi
    bn0 = '/cluster/state/master_id'
    bn1 = "/vshare/state/global/1/container/myContainer/total_bytes"
    bn2 = "/vshare/state/global/1/container/myContainer/free_bytes"
    response1 = {bn0: '1'}
    response2 = {bn1: tot_bytes, bn2: free_bytes}

    conf = {
        'basic.get_node_values.side_effect': [response1, response2],
    }
    self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)

    result = self.driver._update_stats()

    calls = [mock.call(bn0), mock.call([bn1, bn2])]
    self.driver.common.vip.basic.get_node_values.assert_has_calls(calls)
    self.assertEqual(100, self.driver.stats['total_capacity_gb'])
    self.assertEqual(50, self.driver.stats['free_capacity_gb'])
    self.assertEqual(backend_name,
                     self.driver.stats['volume_backend_name'])
    self.assertEqual(vendor_name, self.driver.stats['vendor_name'])
    # assertIsNone reports the actual value on failure, unlike
    # assertTrue(result is None).
    self.assertIsNone(result)

def test_update_stats_fails_data_query(self):
    """When the capacity query returns nothing, stats fall back to 0."""
    backend_name = self.conf.volume_backend_name
    vendor_name = "Violin Memory, Inc."
    bn0 = '/cluster/state/master_id'
    response1 = {bn0: '1'}
    response2 = {}

    conf = {
        'basic.get_node_values.side_effect': [response1, response2],
    }
    self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)

    self.assertIsNone(self.driver._update_stats())
    self.assertEqual(0, self.driver.stats['total_capacity_gb'])
    self.assertEqual(0, self.driver.stats['free_capacity_gb'])
    self.assertEqual(backend_name,
                     self.driver.stats['volume_backend_name'])
    self.assertEqual(vendor_name, self.driver.stats['vendor_name'])
|
||||
def testGetShortName_LongName(self):
    """Names longer than 32 characters are truncated to 32."""
    name_in = "abcdefghijklmnopqrstuvwxyz1234567890"
    expected = "abcdefghijklmnopqrstuvwxyz123456"
    self.assertEqual(expected, self.driver._get_short_name(name_in))

def testGetShortName_ShortName(self):
    """Names already within the limit pass through unchanged."""
    name_in = "abcdef"
    expected = "abcdef"
    self.assertEqual(expected, self.driver._get_short_name(name_in))

def testGetShortName_EmptyName(self):
    """An empty name stays empty."""
    name_in = ""
    expected = ""
    self.assertEqual(expected, self.driver._get_short_name(name_in))
|
||||
def test_get_active_iscsi_ips(self):
    """An up ethX interface with an IPv4 address is reported."""
    iface_bn = "/net/interface/config/*"
    state_bns = ["/net/interface/state/eth4/addr/ipv4/1/ip",
                 "/net/interface/state/eth4/flags/link_up"]
    iface_resp = {"/net/interface/config/eth4": "eth4"}
    state_resp = {"/net/interface/state/eth4/addr/ipv4/1/ip": "1.1.1.1",
                  "/net/interface/state/eth4/flags/link_up": True}

    node_conf = {
        'basic.get_node_values.side_effect': [iface_resp, state_resp],
    }
    self.driver.common.vip = self.setup_mock_vshare(m_conf=node_conf)

    ips = self.driver._get_active_iscsi_ips(self.driver.common.vip)

    expected_calls = [mock.call(iface_bn), mock.call(state_bns)]
    self.driver.common.vip.basic.get_node_values.assert_has_calls(
        expected_calls)
    self.assertEqual(1, len(ips))
    self.assertEqual("1.1.1.1", ips[0])

def test_get_active_iscsi_ips_with_invalid_interfaces(self):
    """Loopback, VLAN, and non-eth4 interfaces are all filtered out."""
    iface_resp = {"/net/interface/config/lo": "lo",
                  "/net/interface/config/vlan10": "vlan10",
                  "/net/interface/config/eth1": "eth1",
                  "/net/interface/config/eth2": "eth2",
                  "/net/interface/config/eth3": "eth3"}

    node_conf = {
        'basic.get_node_values.return_value': iface_resp,
    }
    self.driver.common.vip = self.setup_mock_vshare(m_conf=node_conf)

    ips = self.driver._get_active_iscsi_ips(self.driver.common.vip)

    self.assertEqual(0, len(ips))

def test_get_active_iscsi_ips_with_no_interfaces(self):
    """No configured interfaces yields an empty result."""
    node_conf = {
        'basic.get_node_values.return_value': {},
    }
    self.driver.common.vip = self.setup_mock_vshare(m_conf=node_conf)

    ips = self.driver._get_active_iscsi_ips(self.driver.common.vip)

    self.assertEqual(0, len(ips))
|
||||
def test_get_hostname(self):
    """Hostname is read from the VIP when no node is specified."""
    bn = '/system/hostname'
    response = {bn: 'MYHOST'}

    conf = {
        'basic.get_node_values.return_value': response,
    }
    self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)

    result = self.driver._get_hostname()

    self.driver.common.vip.basic.get_node_values.assert_called_with(bn)
    self.assertEqual("MYHOST", result)

def test_get_hostname_mga(self):
    """Hostname is read from node A when 'mga' is requested."""
    bn = '/system/hostname'
    response = {bn: 'MYHOST'}

    conf = {
        'basic.get_node_values.return_value': response,
    }
    self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)
    self.driver.common.mga = self.setup_mock_vshare(m_conf=conf)
    self.assertEqual("MYHOST", self.driver._get_hostname('mga'))

def test_get_hostname_mgb(self):
    """Hostname is read from node B when 'mgb' is requested."""
    # DEAD CODE FIX: the original assigned 'response' twice; the first
    # assignment was immediately overwritten and has been removed.
    bn = '/system/hostname'
    response = {bn: 'MYHOST'}

    conf = {
        'basic.get_node_values.return_value': response,
    }
    self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)
    self.driver.common.mgb = self.setup_mock_vshare(m_conf=conf)
    self.assertEqual("MYHOST", self.driver._get_hostname('mgb'))

def test_get_hostname_query_fails(self):
    """A failed hostname query falls back to the configured san_ip."""
    response = {}

    conf = {
        'basic.get_node_values.return_value': response,
    }
    self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)

    self.assertEqual(self.conf.san_ip, self.driver._get_hostname())
|
||||
def test_wait_for_targetstate(self):
    """Target presence is polled on both gateway nodes."""
    target = 'mytarget'
    bn = "/vshare/config/iscsi/target/%s" % target

    node_conf = {
        'basic.get_node_values.return_value': {bn: target},
    }
    self.driver.common.mga = self.setup_mock_vshare(m_conf=node_conf)
    self.driver.common.mgb = self.setup_mock_vshare(m_conf=node_conf)

    found = self.driver._wait_for_targetstate(target)

    self.driver.common.mga.basic.get_node_values.assert_called_with(bn)
    self.driver.common.mgb.basic.get_node_values.assert_called_with(bn)
    self.assertTrue(found)
|
0
cinder/volume/drivers/violin/__init__.py
Normal file
0
cinder/volume/drivers/violin/__init__.py
Normal file
616
cinder/volume/drivers/violin/v6000_common.py
Normal file
616
cinder/volume/drivers/violin/v6000_common.py
Normal file
@ -0,0 +1,616 @@
|
||||
# Copyright 2014 Violin Memory, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Violin Memory 6000 Series All-Flash Array Common Driver for Openstack Cinder
|
||||
|
||||
Provides common (ie., non-protocol specific) management functions for
|
||||
V6000 series flash arrays.
|
||||
|
||||
Backend array communication is handled via VMEM's python library
|
||||
called 'xg-tools'.
|
||||
|
||||
NOTE: this driver file requires the use of synchronization points for
|
||||
certain types of backend operations, and as a result may not work
|
||||
properly in an active-active HA configuration. See OpenStack Cinder
|
||||
driver documentation for more information.
|
||||
"""
|
||||
|
||||
import re
|
||||
import time
|
||||
|
||||
from oslo.config import cfg
|
||||
from oslo.utils import importutils
|
||||
|
||||
from cinder import exception
|
||||
from cinder.i18n import _, _LE, _LW, _LI
|
||||
from cinder.openstack.common import log as logging
|
||||
from cinder.openstack.common import loopingcall
|
||||
from cinder import utils
|
||||
|
||||
LOG = logging.getLogger(__name__)

# 'vxg' (xg-tools) is VMEM's client library for the array; it may not
# be installed, so import lazily and let the driver fail at setup time
# if it is missing.
vxg = importutils.try_import("vxg")
if vxg:
    LOG.info(_LI("Running with xg-tools version: %s."), vxg.__version__)

# Regex patterns matching supported VMOS firmware: V6.3.0.4 or newer.
VMOS_SUPPORTED_VERSION_PATTERNS = ['V6.3.0.[4-9]', 'V6.3.[1-9].?[0-9]?']

# Driver-specific configuration options, registered globally below so
# both the FC and iSCSI protocol drivers can use them.
violin_opts = [
    cfg.StrOpt('gateway_mga',
               default=None,
               help='IP address or hostname of mg-a'),
    cfg.StrOpt('gateway_mgb',
               default=None,
               help='IP address or hostname of mg-b'),
    cfg.BoolOpt('use_igroups',
                default=False,
                help='Use igroups to manage targets and initiators'),
    cfg.IntOpt('request_timeout',
               default=300,
               help='Global backend request timeout, in seconds'),
]

CONF = cfg.CONF
CONF.register_opts(violin_opts)
||||
|
||||
|
||||
class V6000Common(object):
|
||||
"""Contains common code for the Violin V6000 drivers.
|
||||
|
||||
Version history:
|
||||
1.0 - Initial driver
|
||||
"""
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
def __init__(self, config):
|
||||
self.vip = None
|
||||
self.mga = None
|
||||
self.mgb = None
|
||||
self.container = ""
|
||||
self.config = config
|
||||
|
||||
def do_setup(self, context):
|
||||
"""Any initialization the driver does while starting."""
|
||||
if not self.config.san_ip:
|
||||
raise exception.InvalidInput(
|
||||
reason=_('Gateway VIP option \'san_ip\' is not set'))
|
||||
if not self.config.gateway_mga:
|
||||
raise exception.InvalidInput(
|
||||
reason=_('Gateway MG-A IP option \'gateway_mga\' is not set'))
|
||||
if not self.config.gateway_mgb:
|
||||
raise exception.InvalidInput(
|
||||
reason=_('Gateway MG-B IP option \'gateway_mgb\' is not set'))
|
||||
if self.config.request_timeout <= 0:
|
||||
raise exception.InvalidInput(
|
||||
reason=_('Global timeout option \'request_timeout\' must be '
|
||||
'greater than 0'))
|
||||
|
||||
self.vip = vxg.open(self.config.san_ip, self.config.san_login,
|
||||
self.config.san_password, keepalive=True)
|
||||
self.mga = vxg.open(self.config.gateway_mga, self.config.san_login,
|
||||
self.config.san_password, keepalive=True)
|
||||
self.mgb = vxg.open(self.config.gateway_mgb, self.config.san_login,
|
||||
self.config.san_password, keepalive=True)
|
||||
|
||||
ret_dict = self.vip.basic.get_node_values(
|
||||
"/vshare/state/local/container/*")
|
||||
if ret_dict:
|
||||
self.container = ret_dict.items()[0][1]
|
||||
|
||||
def check_for_setup_error(self):
|
||||
"""Returns an error if prerequisites aren't met."""
|
||||
|
||||
if len(self.container) == 0:
|
||||
msg = _('container is missing')
|
||||
raise exception.ViolinInvalidBackendConfig(reason=msg)
|
||||
|
||||
if not self._is_supported_vmos_version(self.vip.version):
|
||||
msg = _('VMOS version is not supported')
|
||||
raise exception.ViolinInvalidBackendConfig(reason=msg)
|
||||
|
||||
bn1 = ("/vshare/state/local/container/%s/threshold/usedspace"
|
||||
"/threshold_hard_val" % self.container)
|
||||
bn2 = ("/vshare/state/local/container/%s/threshold/provision"
|
||||
"/threshold_hard_val" % self.container)
|
||||
ret_dict = self.vip.basic.get_node_values([bn1, bn2])
|
||||
|
||||
for node in ret_dict:
|
||||
# The infrastructure does not support space reclamation so
|
||||
# ensure it is disabled. When used space exceeds the hard
|
||||
# limit, snapshot space reclamation begins. Default is 0
|
||||
# => no space reclamation.
|
||||
#
|
||||
if node.endswith('/usedspace/threshold_hard_val'):
|
||||
if ret_dict[node] != 0:
|
||||
msg = _('space reclamation threshold is enabled but not '
|
||||
'supported by Cinder infrastructure.')
|
||||
raise exception.ViolinInvalidBackendConfig(reason=msg)
|
||||
|
||||
# The infrastructure does not support overprovisioning so
|
||||
# ensure it is disabled. When provisioned space exceeds
|
||||
# the hard limit, further provisioning is stopped.
|
||||
# Default is 100 => provisioned space equals usable space.
|
||||
#
|
||||
elif node.endswith('/provision/threshold_hard_val'):
|
||||
if ret_dict[node] != 100:
|
||||
msg = _('provisioned space threshold is not equal to '
|
||||
'usable space.')
|
||||
raise exception.ViolinInvalidBackendConfig(reason=msg)
|
||||
|
||||
@utils.synchronized('vmem-lun')
|
||||
def _create_lun(self, volume):
|
||||
"""Creates a new lun.
|
||||
|
||||
The equivalent CLI command is "lun create container
|
||||
<container_name> name <lun_name> size <gb>"
|
||||
|
||||
Arguments:
|
||||
volume -- volume object provided by the Manager
|
||||
"""
|
||||
lun_type = '0'
|
||||
|
||||
LOG.debug("Creating LUN %(name)s, %(size)s GB." %
|
||||
{'name': volume['name'], 'size': volume['size']})
|
||||
|
||||
if self.config.san_thin_provision:
|
||||
lun_type = '1'
|
||||
|
||||
# using the defaults for fields: quantity, nozero,
|
||||
# readonly, startnum, blksize, naca, alua, preferredport
|
||||
#
|
||||
try:
|
||||
self._send_cmd(self.vip.lun.create_lun,
|
||||
'LUN create: success!',
|
||||
self.container, volume['id'],
|
||||
volume['size'], 1, '0', lun_type, 'w',
|
||||
1, 512, False, False, None)
|
||||
|
||||
except exception.ViolinBackendErrExists:
|
||||
LOG.debug("Lun %s already exists, continuing.", volume['id'])
|
||||
|
||||
except Exception:
|
||||
LOG.warn(_LW("Lun create for %s failed!"), volume['id'])
|
||||
raise
|
||||
|
||||
@utils.synchronized('vmem-lun')
|
||||
def _delete_lun(self, volume):
|
||||
"""Deletes a lun.
|
||||
|
||||
The equivalent CLI command is "no lun create container
|
||||
<container_name> name <lun_name>"
|
||||
|
||||
Arguments:
|
||||
volume -- volume object provided by the Manager
|
||||
"""
|
||||
success_msgs = ['lun deletion started', '']
|
||||
|
||||
LOG.debug("Deleting lun %s.", volume['id'])
|
||||
|
||||
try:
|
||||
self._send_cmd(self.vip.lun.bulk_delete_luns,
|
||||
success_msgs, self.container, volume['id'])
|
||||
|
||||
except exception.ViolinBackendErrNotFound:
|
||||
LOG.debug("Lun %s already deleted, continuing.", volume['id'])
|
||||
|
||||
except exception.ViolinBackendErrExists:
|
||||
LOG.warn(_LW("Lun %s has dependent snapshots, skipping."),
|
||||
volume['id'])
|
||||
raise exception.VolumeIsBusy(volume_name=volume['id'])
|
||||
|
||||
except Exception:
|
||||
LOG.exception(_LE("Lun delete for %s failed!"), volume['id'])
|
||||
raise
|
||||
|
||||
@utils.synchronized('vmem-lun')
|
||||
def _extend_lun(self, volume, new_size):
|
||||
"""Extend an existing volume's size.
|
||||
|
||||
The equivalent CLI command is "lun resize container
|
||||
<container_name> name <lun_name> size <gb>"
|
||||
|
||||
Arguments:
|
||||
volume -- volume object provided by the Manager
|
||||
new_size -- new (increased) size in GB to be applied
|
||||
"""
|
||||
LOG.debug("Extending lun %(id)s, from %(size)s to %(new_size)s GB." %
|
||||
{'id': volume['id'], 'size': volume['size'],
|
||||
'new_size': new_size})
|
||||
|
||||
try:
|
||||
self._send_cmd(self.vip.lun.resize_lun, 'Success',
|
||||
self.container, volume['id'], new_size)
|
||||
|
||||
except Exception:
|
||||
LOG.exception(_LE("LUN extend for %s failed!"), volume['id'])
|
||||
raise
|
||||
|
||||
@utils.synchronized('vmem-snap')
|
||||
def _create_lun_snapshot(self, snapshot):
|
||||
"""Creates a new snapshot for a lun.
|
||||
|
||||
The equivalent CLI command is "snapshot create container
|
||||
<container> lun <volume_name> name <snapshot_name>"
|
||||
|
||||
Arguments:
|
||||
snapshot -- snapshot object provided by the Manager
|
||||
"""
|
||||
LOG.debug("Creating snapshot %s.", snapshot['id'])
|
||||
|
||||
try:
|
||||
self._send_cmd(self.vip.snapshot.create_lun_snapshot,
|
||||
'Snapshot create: success!',
|
||||
self.container, snapshot['volume_id'],
|
||||
snapshot['id'])
|
||||
|
||||
except exception.ViolinBackendErrExists:
|
||||
LOG.debug("Snapshot %s already exists, continuing.",
|
||||
snapshot['id'])
|
||||
|
||||
except Exception:
|
||||
LOG.exception(_LE("LUN snapshot create for %s failed!"),
|
||||
snapshot['id'])
|
||||
raise
|
||||
|
||||
@utils.synchronized('vmem-snap')
|
||||
def _delete_lun_snapshot(self, snapshot):
|
||||
"""Deletes an existing snapshot for a lun.
|
||||
|
||||
The equivalent CLI command is "no snapshot create container
|
||||
<container> lun <volume_name> name <snapshot_name>"
|
||||
|
||||
Arguments:
|
||||
snapshot -- snapshot object provided by the Manager
|
||||
"""
|
||||
LOG.debug("Deleting snapshot %s.", snapshot['id'])
|
||||
|
||||
try:
|
||||
self._send_cmd(self.vip.snapshot.delete_lun_snapshot,
|
||||
'Snapshot delete: success!',
|
||||
self.container, snapshot['volume_id'],
|
||||
snapshot['id'])
|
||||
|
||||
except exception.ViolinBackendErrNotFound:
|
||||
LOG.debug("Snapshot %s already deleted, continuing.",
|
||||
snapshot['id'])
|
||||
|
||||
except Exception:
|
||||
LOG.exception(_LE("LUN snapshot delete for %s failed!"),
|
||||
snapshot['id'])
|
||||
raise
|
||||
|
||||
def _get_lun_id(self, volume_name):
|
||||
"""Queries the gateway to find the lun id for the exported volume.
|
||||
|
||||
Arguments:
|
||||
volume_name -- LUN to query
|
||||
|
||||
Returns:
|
||||
LUN ID for the exported lun.
|
||||
"""
|
||||
lun_id = -1
|
||||
|
||||
prefix = "/vshare/config/export/container"
|
||||
bn = "%s/%s/lun/%s/target/**" % (prefix, self.container, volume_name)
|
||||
resp = self.vip.basic.get_node_values(bn)
|
||||
|
||||
for node in resp:
|
||||
if node.endswith('/lun_id'):
|
||||
lun_id = resp[node]
|
||||
break
|
||||
|
||||
if lun_id == -1:
|
||||
raise exception.ViolinBackendErrNotFound()
|
||||
return lun_id
|
||||
|
||||
def _get_snapshot_id(self, volume_name, snapshot_name):
|
||||
"""Queries the gateway to find the lun id for the exported snapshot.
|
||||
|
||||
Arguments:
|
||||
volume_name -- LUN to query
|
||||
snapshot_name -- Exported snapshot associated with LUN
|
||||
|
||||
Returns:
|
||||
LUN ID for the exported lun
|
||||
"""
|
||||
lun_id = -1
|
||||
|
||||
prefix = "/vshare/config/export/snapshot/container"
|
||||
bn = "%s/%s/lun/%s/snap/%s/target/**" \
|
||||
% (prefix, self.container, volume_name, snapshot_name)
|
||||
resp = self.vip.basic.get_node_values(bn)
|
||||
|
||||
for node in resp:
|
||||
if node.endswith('/lun_id'):
|
||||
lun_id = resp[node]
|
||||
break
|
||||
|
||||
if lun_id == -1:
|
||||
raise exception.ViolinBackendErrNotFound()
|
||||
return lun_id
|
||||
|
||||
def _send_cmd(self, request_func, success_msgs, *args):
|
||||
"""Run an XG request function, and retry as needed.
|
||||
|
||||
The request will be retried until it returns a success
|
||||
message, a failure message, or the global request timeout is
|
||||
hit.
|
||||
|
||||
This wrapper is meant to deal with backend requests that can
|
||||
fail for any variety of reasons, for instance, when the system
|
||||
is already busy handling other LUN requests. It is also smart
|
||||
enough to give up if clustering is down (eg no HA available),
|
||||
there is no space left, or other "fatal" errors are returned
|
||||
(see _fatal_error_code() for a list of all known error
|
||||
conditions).
|
||||
|
||||
Arguments:
|
||||
request_func -- XG api method to call
|
||||
success_msgs -- Success messages expected from the backend
|
||||
*args -- argument array to be passed to the request_func
|
||||
|
||||
Returns:
|
||||
The response dict from the last XG call.
|
||||
"""
|
||||
resp = {}
|
||||
start = time.time()
|
||||
done = False
|
||||
|
||||
if isinstance(success_msgs, basestring):
|
||||
success_msgs = [success_msgs]
|
||||
|
||||
while not done:
|
||||
if time.time() - start >= self.config.request_timeout:
|
||||
raise exception.ViolinRequestRetryTimeout(
|
||||
timeout=self.config.request_timeout)
|
||||
|
||||
resp = request_func(*args)
|
||||
|
||||
if not resp['message']:
|
||||
# XG requests will return None for a message if no message
|
||||
# string is passed in the raw response
|
||||
resp['message'] = ''
|
||||
|
||||
for msg in success_msgs:
|
||||
if not resp['code'] and msg in resp['message']:
|
||||
done = True
|
||||
break
|
||||
|
||||
self._fatal_error_code(resp)
|
||||
|
||||
return resp
|
||||
|
||||
def _send_cmd_and_verify(self, request_func, verify_func,
|
||||
request_success_msgs, rargs=None, vargs=None):
|
||||
"""Run an XG request function, retry if needed, and verify success.
|
||||
|
||||
If the verification fails, then retry the request/verify
|
||||
cycle until both functions are successful, the request
|
||||
function returns a failure message, or the global request
|
||||
timeout is hit.
|
||||
|
||||
This wrapper is meant to deal with backend requests that can
|
||||
fail for any variety of reasons, for instance, when the system
|
||||
is already busy handling other LUN requests. It is also smart
|
||||
enough to give up if clustering is down (eg no HA available),
|
||||
there is no space left, or other "fatal" errors are returned
|
||||
(see _fatal_error_code() for a list of all known error
|
||||
conditions).
|
||||
|
||||
Arguments:
|
||||
request_func -- XG api method to call
|
||||
verify_func -- function to call to verify request was
|
||||
completed successfully (eg for export)
|
||||
request_success_msg -- Success message expected from the backend
|
||||
for the request_func
|
||||
rargs -- argument array to be passed to the
|
||||
request_func
|
||||
vargs -- argument array to be passed to the
|
||||
verify_func
|
||||
|
||||
Returns:
|
||||
The response dict from the last XG call.
|
||||
"""
|
||||
resp = {}
|
||||
start = time.time()
|
||||
request_needed = True
|
||||
verify_needed = True
|
||||
|
||||
if isinstance(request_success_msgs, basestring):
|
||||
request_success_msgs = [request_success_msgs]
|
||||
|
||||
rargs = rargs if rargs else []
|
||||
vargs = vargs if vargs else []
|
||||
|
||||
while request_needed or verify_needed:
|
||||
if time.time() - start >= self.config.request_timeout:
|
||||
raise exception.ViolinRequestRetryTimeout(
|
||||
timeout=self.config.request_timeout)
|
||||
|
||||
if request_needed:
|
||||
resp = request_func(*rargs)
|
||||
if not resp['message']:
|
||||
# XG requests will return None for a message if no message
|
||||
# string is passed int the raw response
|
||||
resp['message'] = ''
|
||||
for msg in request_success_msgs:
|
||||
if not resp['code'] and msg in resp['message']:
|
||||
# XG request func was completed
|
||||
request_needed = False
|
||||
break
|
||||
self._fatal_error_code(resp)
|
||||
|
||||
elif verify_needed:
|
||||
success = verify_func(*vargs)
|
||||
if success:
|
||||
# XG verify func was completed
|
||||
verify_needed = False
|
||||
else:
|
||||
# try sending the request again
|
||||
request_needed = True
|
||||
|
||||
return resp
|
||||
|
||||
def _get_igroup(self, volume, connector):
|
||||
"""Gets the igroup that should be used when configuring a volume.
|
||||
|
||||
Arguments:
|
||||
volume -- volume object used to determine the igroup name
|
||||
|
||||
Returns:
|
||||
igroup_name -- name of igroup (for configuring targets &
|
||||
initiators)
|
||||
"""
|
||||
# Use the connector's primary hostname and use that as the
|
||||
# name of the igroup. The name must follow syntax rules
|
||||
# required by the array: "must contain only alphanumeric
|
||||
# characters, dashes, and underscores. The first character
|
||||
# must be alphanumeric".
|
||||
#
|
||||
igroup_name = re.sub(r'[\W]', '_', connector['host'])
|
||||
|
||||
# verify that the igroup has been created on the backend, and
|
||||
# if it doesn't exist, create it!
|
||||
#
|
||||
bn = "/vshare/config/igroup/%s" % igroup_name
|
||||
resp = self.vip.basic.get_node_values(bn)
|
||||
|
||||
if not len(resp):
|
||||
self.vip.igroup.create_igroup(igroup_name)
|
||||
|
||||
return igroup_name
|
||||
|
||||
def _wait_for_export_config(self, volume_name, snapshot_name=None,
                            state=False):
    """Polls backend to verify volume's export configuration.

    XG sets/queries following a request to create or delete a lun
    export may fail on the backend if vshared is still processing
    the export action (or times out).  We can check whether it is
    done by polling the export binding for a lun to ensure it is
    created or deleted.

    This function will try to verify the creation or removal of
    export state on both gateway nodes of the array every 5
    seconds.

    Arguments:
        volume_name -- name of volume
        snapshot_name -- name of volume's snapshot
        state -- True to poll for existence, False for lack of

    Returns:
        True if the export state was correctly added or removed
        (depending on 'state' param)
    """
    # Poll a different config node depending on whether we are
    # watching a plain lun export or a snapshot export.
    if not snapshot_name:
        bn = "/vshare/config/export/container/%s/lun/%s" \
            % (self.container, volume_name)
    else:
        bn = "/vshare/config/export/snapshot/container/%s/lun/%s/snap/%s" \
            % (self.container, volume_name, snapshot_name)

    def _loop_func(state):
        # One flag per gateway node; both must reach the desired
        # state before the export change is considered complete.
        status = [False, False]
        mg_conns = [self.mga, self.mgb]

        LOG.debug("Entering _wait_for_export_config loop: state=%s.",
                  state)

        for node_id in xrange(2):
            resp = mg_conns[node_id].basic.get_node_values(bn)
            # A non-empty response means the export binding exists
            # on this node; an empty one means it is gone.
            if state and len(resp.keys()):
                status[node_id] = True
            elif (not state) and (not len(resp.keys())):
                status[node_id] = True

        if status[0] and status[1]:
            # Both nodes agree; stop polling.
            raise loopingcall.LoopingCallDone(retvalue=True)

    # NOTE(review): no overall timeout is applied here, so this can
    # poll indefinitely if the backend never converges -- confirm
    # that is intended.
    timer = loopingcall.FixedIntervalLoopingCall(_loop_func, state)
    success = timer.start(interval=5).wait()

    return success
|
||||
|
||||
def _is_supported_vmos_version(self, version_string):
    """Check whether the array software version is supported.

    Arguments:
        version_string -- VMOS version reported by the array

    Returns:
        True if the version matches a supported pattern.
    """
    supported = False
    for candidate in VMOS_SUPPORTED_VERSION_PATTERNS:
        if re.match(candidate, version_string):
            LOG.info(_LI("Verified VMOS version %s is supported."),
                     version_string)
            supported = True
            break
    return supported
|
||||
|
||||
def _fatal_error_code(self, response):
    """Raise an exception for certain errors in a XG response.

    Error codes are extracted from vdmd_mgmt.c.

    Known non-fatal response codes (deliberately NOT raised here):
        1024: 'lun deletion in progress, try again later'
        14032: 'lc_err_lock_busy'

    Arguments:
        response -- a response dict result from an XG request

    Raises:
        ViolinBackendErr -- for generic fatal backend errors,
            carrying the backend's own message text
        ViolinBackendErrNotFound -- lc_err_not_found
        ViolinBackendErrExists -- lc_err_exists
    """
    # All of these codes map to the same generic backend error with
    # the backend-supplied message attached.
    fatal_with_message = (
        512,    # not enough free space in container (vdmd bug)
        14000,  # lc_generic_error
        14002,  # lc_err_assertion_failed
        14008,  # lc_err_unexpected_arg
        14014,  # lc_err_io_error
        14016,  # lc_err_io_closed
        14017,  # lc_err_io_timeout
        14021,  # lc_err_unexpected_case
        14025,  # lc_err_no_fs_space
        14035,  # lc_err_range
        14036,  # lc_err_invalid_param
        14121,  # lc_err_cancelled_err
    )

    code = response['code']

    if code in fatal_with_message:
        raise exception.ViolinBackendErr(message=response['message'])
    elif code == 14004:
        # lc_err_not_found
        raise exception.ViolinBackendErrNotFound()
    elif code == 14005:
        # lc_err_exists
        raise exception.ViolinBackendErrExists()
    elif code == 1 and 'LUN ID conflict' in response['message']:
        # lun id conflict while attempting to export
        raise exception.ViolinBackendErr(message=response['message'])
|
522
cinder/volume/drivers/violin/v6000_fcp.py
Normal file
522
cinder/volume/drivers/violin/v6000_fcp.py
Normal file
@ -0,0 +1,522 @@
|
||||
# Copyright 2014 Violin Memory, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Violin Memory Fibre Channel Driver for Openstack Cinder
|
||||
|
||||
Provides fibre channel specific LUN services for V6000 series flash
|
||||
arrays.
|
||||
|
||||
This driver requires VMOS v6.3.0.4 or newer software on the array.
|
||||
|
||||
You will need to install the python xg-tools client:
|
||||
sudo pip install xg-tools
|
||||
|
||||
Set the following in the cinder.conf file to enable the VMEM V6000
|
||||
Fibre Channel Driver along with the required flags:
|
||||
|
||||
volume_driver=cinder.volume.drivers.violin.v6000_fcp.V6000FCDriver
|
||||
|
||||
NOTE: this driver file requires the use of synchronization points for
|
||||
certain types of backend operations, and as a result may not work
|
||||
properly in an active-active HA configuration. See OpenStack Cinder
|
||||
driver documentation for more information.
|
||||
"""
|
||||
|
||||
from oslo.utils import units
|
||||
|
||||
from cinder import context
|
||||
from cinder.db.sqlalchemy import models
|
||||
from cinder import exception
|
||||
from cinder.i18n import _, _LE, _LI, _LW
|
||||
from cinder.openstack.common import log as logging
|
||||
from cinder import utils
|
||||
from cinder.volume import driver
|
||||
from cinder.volume.drivers.san import san
|
||||
from cinder.volume.drivers.violin import v6000_common
|
||||
from cinder.zonemanager import utils as fczm_utils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class V6000FCDriver(driver.FibreChannelDriver):
    """Executes commands relating to fibre channel based Violin Memory
    Arrays.

    Version history:
        1.0 - Initial driver
    """

    VERSION = '1.0'

    def __init__(self, *args, **kwargs):
        """Set up driver state and load Violin/SAN config options."""
        super(V6000FCDriver, self).__init__(*args, **kwargs)
        # WWNs of the gateway FC target ports, discovered in do_setup().
        self.gateway_fc_wwns = []
        # Cached stats dict served by get_volume_stats().
        self.stats = {}
        self.configuration.append_config_values(v6000_common.violin_opts)
        self.configuration.append_config_values(san.san_opts)
        self.common = v6000_common.V6000Common(self.configuration)
        # FC SAN lookup service (may be None when no zone manager is
        # configured); used by _build_initiator_target_map().
        self.lookup_service = fczm_utils.create_lookup_service()

        LOG.info(_LI("Initialized driver %(name)s version: %(vers)s.") %
                 {'name': self.__class__.__name__, 'vers': self.VERSION})

    def do_setup(self, context):
        """Any initialization the driver does while starting."""
        super(V6000FCDriver, self).do_setup(context)
        self.common.do_setup(context)
        self.gateway_fc_wwns = self._get_active_fc_targets()

    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met."""
        self.common.check_for_setup_error()

        if len(self.gateway_fc_wwns) == 0:
            raise exception.ViolinInvalidBackendConfig(
                reason=_('No FCP targets found'))

    def create_volume(self, volume):
        """Creates a volume."""
        self.common._create_lun(volume)

    def delete_volume(self, volume):
        """Deletes a volume."""
        self.common._delete_lun(volume)

    def extend_volume(self, volume, new_size):
        """Extends a volume to new_size GB."""
        self.common._extend_lun(volume, new_size)

    def create_snapshot(self, snapshot):
        """Creates a snapshot from an existing volume."""
        self.common._create_lun_snapshot(snapshot)

    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        self.common._delete_lun_snapshot(snapshot)

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
        ctxt = context.get_admin_context()
        # Patch a 'size' onto the snapshot dict so it can act as the
        # copy source -- presumably copy_volume_data requires it;
        # TODO(review): confirm.
        snapshot['size'] = snapshot['volume']['size']
        self.common._create_lun(volume)
        self.copy_volume_data(ctxt, snapshot, volume)

    def create_cloned_volume(self, volume, src_vref):
        """Creates a full clone of the specified volume."""
        ctxt = context.get_admin_context()
        self.common._create_lun(volume)
        self.copy_volume_data(ctxt, src_vref, volume)

    def ensure_export(self, context, volume):
        """Synchronously checks and re-exports volumes at cinder start time."""
        # Exports are handled at attach time in initialize_connection().
        pass

    def create_export(self, context, volume):
        """Exports the volume."""
        # Exports are handled at attach time in initialize_connection().
        pass

    def remove_export(self, context, volume):
        """Removes an export for a logical volume."""
        # Exports are removed at detach time in terminate_connection().
        pass

    @fczm_utils.AddFCZone
    def initialize_connection(self, volume, connector):
        """Initializes the connection (target<-->initiator)."""
        igroup = None

        if self.configuration.use_igroups:
            #
            # Most drivers don't use igroups, because there are a
            # number of issues with multipathing and iscsi/fcp where
            # lun devices either aren't cleaned up properly or are
            # stale (from previous scans).
            #
            # If the customer really wants igroups for whatever
            # reason, we create a new igroup for each host/hypervisor.
            # Every lun that is exported to the particular
            # hypervisor/host will be contained in this igroup.  This
            # should prevent other hosts from seeing luns they aren't
            # using when they perform scans.
            #
            igroup = self.common._get_igroup(volume, connector)
            self._add_igroup_member(connector, igroup)

        # Snapshots are exported via a different backend call than
        # regular volumes, so dispatch on the object type.
        if isinstance(volume, models.Volume):
            lun_id = self._export_lun(volume, connector, igroup)
        else:
            lun_id = self._export_snapshot(volume, connector, igroup)
        # Persist the new export configuration on the array.
        self.common.vip.basic.save_config()

        target_wwns, init_targ_map = self._build_initiator_target_map(
            connector)

        properties = {}
        properties['target_discovered'] = True
        properties['target_wwn'] = target_wwns
        properties['target_lun'] = lun_id
        properties['initiator_target_map'] = init_targ_map

        LOG.debug("Return FC data for zone addition: %(properties)s."
                  % {'properties': properties})

        return {'driver_volume_type': 'fibre_channel', 'data': properties}

    @fczm_utils.RemoveFCZone
    def terminate_connection(self, volume, connector, force=False, **kwargs):
        """Terminates the connection (target<-->initiator)."""

        # Snapshots are unexported via a different backend call than
        # regular volumes, so dispatch on the object type.
        if isinstance(volume, models.Volume):
            self._unexport_lun(volume)
        else:
            self._unexport_snapshot(volume)

        self.common.vip.basic.save_config()

        properties = {}

        # Only return zoning removal data when the initiator has no
        # remaining sessions; otherwise the zone must stay in place.
        if not self._is_initiator_connected_to_array(connector):
            target_wwns, init_targ_map = self._build_initiator_target_map(
                connector)
            properties['target_wwn'] = target_wwns
            properties['initiator_target_map'] = init_targ_map

        LOG.debug("Return FC data for zone deletion: %(properties)s."
                  % {'properties': properties})

        return {'driver_volume_type': 'fibre_channel', 'data': properties}

    def get_volume_stats(self, refresh=False):
        """Get volume stats.

        Refreshes the cached stats when asked to, or when no stats
        have been gathered yet.
        """
        if refresh or not self.stats:
            self._update_stats()
        return self.stats

    @utils.synchronized('vmem-export')
    def _export_lun(self, volume, connector=None, igroup=None):
        """Generates the export configuration for the given volume.

        The equivalent CLI command is "lun export container
        <container_name> name <lun_name>"

        Arguments:
            volume -- volume object provided by the Manager
            connector -- connector object provided by the Manager
            igroup -- name of igroup to use for exporting

        Returns:
            lun_id -- the LUN ID assigned by the backend
        """
        lun_id = -1
        export_to = ''
        v = self.common.vip

        # Export either to the named igroup or directly to the
        # initiator's WWPNs.
        if igroup:
            export_to = igroup
        elif connector:
            export_to = self._convert_wwns_openstack_to_vmem(
                connector['wwpns'])
        else:
            # NOTE(review): cinder.exception may not define 'Error';
            # confirm this attribute exists or use a driver exception.
            raise exception.Error(_("No initiators found, cannot proceed"))

        LOG.debug("Exporting lun %s." % volume['id'])

        try:
            # Issue the export and poll until the array shows the
            # export binding on both gateway nodes.
            self.common._send_cmd_and_verify(
                v.lun.export_lun, self.common._wait_for_export_config, '',
                [self.common.container, volume['id'], 'all', export_to,
                 'auto'], [volume['id'], 'state=True'])

        except Exception:
            LOG.exception(_LE("LUN export for %s failed!"), volume['id'])
            raise

        lun_id = self.common._get_lun_id(volume['id'])

        return lun_id

    @utils.synchronized('vmem-export')
    def _unexport_lun(self, volume):
        """Removes the export configuration for the given volume.

        The equivalent CLI command is "no lun export container
        <container_name> name <lun_name>"

        Arguments:
            volume -- volume object provided by the Manager
        """
        v = self.common.vip

        LOG.debug("Unexporting lun %s.", volume['id'])

        try:
            self.common._send_cmd_and_verify(
                v.lun.unexport_lun, self.common._wait_for_export_config, '',
                [self.common.container, volume['id'], 'all', 'all', 'auto'],
                [volume['id'], 'state=False'])

        except exception.ViolinBackendErrNotFound:
            # Already gone -- treat as success for idempotent detach.
            LOG.debug("Lun %s already unexported, continuing.", volume['id'])

        except Exception:
            LOG.exception(_LE("LUN unexport for %s failed!"), volume['id'])
            raise

    @utils.synchronized('vmem-export')
    def _export_snapshot(self, snapshot, connector=None, igroup=None):
        """Generates the export configuration for the given snapshot.

        The equivalent CLI command is "snapshot export container
        PROD08 lun <snapshot_name> name <volume_name>"

        Arguments:
            snapshot -- snapshot object provided by the Manager
            connector -- connector object provided by the Manager
            igroup -- name of igroup to use for exporting

        Returns:
            lun_id -- the LUN ID assigned by the backend
        """
        lun_id = -1
        export_to = ''
        v = self.common.vip

        if igroup:
            export_to = igroup
        elif connector:
            export_to = self._convert_wwns_openstack_to_vmem(
                connector['wwpns'])
        else:
            # NOTE(review): cinder.exception may not define 'Error';
            # confirm this attribute exists or use a driver exception.
            raise exception.Error(_("No initiators found, cannot proceed"))

        LOG.debug("Exporting snapshot %s.", snapshot['id'])

        try:
            self.common._send_cmd(v.snapshot.export_lun_snapshot, '',
                                  self.common.container, snapshot['volume_id'],
                                  snapshot['id'], export_to, 'all', 'auto')

        except Exception:
            LOG.exception(_LE("Snapshot export for %s failed!"),
                          snapshot['id'])
            raise

        else:
            # Poll until the export binding appears, then look up the
            # assigned LUN ID.
            self.common._wait_for_export_config(snapshot['volume_id'],
                                                snapshot['id'], state=True)
            lun_id = self.common._get_snapshot_id(snapshot['volume_id'],
                                                  snapshot['id'])

        return lun_id

    @utils.synchronized('vmem-export')
    def _unexport_snapshot(self, snapshot):
        """Removes the export configuration for the given snapshot.

        The equivalent CLI command is "no snapshot export container
        PROD08 lun <snapshot_name> name <volume_name>"

        Arguments:
            snapshot -- snapshot object provided by the Manager
        """
        v = self.common.vip

        LOG.debug("Unexporting snapshot %s.", snapshot['id'])

        try:
            self.common._send_cmd(v.snapshot.unexport_lun_snapshot, '',
                                  self.common.container, snapshot['volume_id'],
                                  snapshot['id'], 'all', 'all', 'auto', False)

        except Exception:
            LOG.exception(_LE("Snapshot unexport for %s failed!"),
                          snapshot['id'])
            raise

        else:
            # Poll until the export binding disappears on both nodes.
            self.common._wait_for_export_config(snapshot['volume_id'],
                                                snapshot['id'], state=False)

    def _add_igroup_member(self, connector, igroup):
        """Add an initiator to the openstack igroup so it can see exports.

        The equivalent CLI command is "igroup addto name <igroup_name>
        initiators <initiator_name>"

        Arguments:
            connector -- connector object provided by the Manager
            igroup -- name of igroup to add the initiator WWPNs to
        """
        v = self.common.vip
        wwpns = self._convert_wwns_openstack_to_vmem(connector['wwpns'])

        LOG.debug("Adding initiators %(wwpns)s to igroup %(igroup)s." %
                  {'wwpns': wwpns, 'igroup': igroup})

        resp = v.igroup.add_initiators(igroup, wwpns)

        if resp['code'] != 0:
            # NOTE(review): cinder.exception may not define 'Error';
            # confirm this attribute exists or use a driver exception.
            raise exception.Error(
                _('Failed to add igroup member: %(code)d, %(message)s') % resp)

    def _build_initiator_target_map(self, connector):
        """Build the target_wwns and the initiator target map.

        Uses the FC SAN lookup service when one is configured to map
        only initiators/targets on a common fabric; otherwise maps
        every initiator to every gateway target.
        """
        target_wwns = []
        init_targ_map = {}

        if self.lookup_service:
            dev_map = self.lookup_service.get_device_mapping_from_network(
                connector['wwpns'], self.gateway_fc_wwns)

            for fabric_name in dev_map:
                fabric = dev_map[fabric_name]
                target_wwns += fabric['target_port_wwn_list']
                for initiator in fabric['initiator_port_wwn_list']:
                    if initiator not in init_targ_map:
                        init_targ_map[initiator] = []
                    init_targ_map[initiator] += fabric['target_port_wwn_list']
                    # De-duplicate targets accumulated across fabrics.
                    init_targ_map[initiator] = list(
                        set(init_targ_map[initiator]))

            target_wwns = list(set(target_wwns))

        else:
            initiator_wwns = connector['wwpns']
            target_wwns = self.gateway_fc_wwns
            for initiator in initiator_wwns:
                init_targ_map[initiator] = target_wwns

        return target_wwns, init_targ_map

    def _is_initiator_connected_to_array(self, connector):
        """Check array to see if any initiator wwns still have active sessions.

        We only need to check to see if any one initiator wwn is
        connected, since all initiators are connected to all targets
        on a lun export for fibrechannel.
        """
        v = self.common.vip
        initiator_wwns = self._convert_wwns_openstack_to_vmem(
            connector['wwpns'])

        bn = "/vshare/config/export/container/%s/lun/**" \
            % self.common.container
        global_export_config = v.basic.get_node_values(bn)

        # Any export config node ending in this initiator's WWN means
        # the initiator still has an active export.
        for node in global_export_config:
            if node.endswith(initiator_wwns[0]):
                return True
        return False

    def _update_stats(self):
        """Gathers array stats from the backend and converts them to GB values.
        """
        data = {}
        total_gb = 0
        free_gb = 0
        v = self.common.vip

        master_cluster_id = v.basic.get_node_values(
            '/cluster/state/master_id').values()[0]

        bn1 = "/vshare/state/global/%s/container/%s/total_bytes" \
            % (master_cluster_id, self.common.container)
        bn2 = "/vshare/state/global/%s/container/%s/free_bytes" \
            % (master_cluster_id, self.common.container)
        resp = v.basic.get_node_values([bn1, bn2])

        # Fall back to 0 (and warn) when the backend omits a stat.
        if bn1 in resp:
            total_gb = resp[bn1] / units.Gi
        else:
            LOG.warn(_LW("Failed to receive update for total_gb stat!"))

        if bn2 in resp:
            free_gb = resp[bn2] / units.Gi
        else:
            LOG.warn(_LW("Failed to receive update for free_gb stat!"))

        backend_name = self.configuration.volume_backend_name
        data['volume_backend_name'] = backend_name or self.__class__.__name__
        data['vendor_name'] = 'Violin Memory, Inc.'
        data['driver_version'] = self.VERSION
        data['storage_protocol'] = 'fibre_channel'
        data['reserved_percentage'] = 0
        data['QoS_support'] = False
        data['total_capacity_gb'] = total_gb
        data['free_capacity_gb'] = free_gb

        for i in data:
            LOG.debug("stat update: %(name)s=%(data)s." %
                      {'name': i, 'data': data[i]})
        self.stats = data

    def _get_active_fc_targets(self):
        """Get a list of gateway WWNs that can be used as FCP targets.

        Arguments:
            mg_conn -- active XG connection to one of the gateways

        Returns:
            active_gw_fcp_wwns -- list of WWNs
        """
        v = self.common.vip
        active_gw_fcp_wwns = []

        gateway_ids = v.basic.get_node_values(
            '/vshare/state/global/*').values()

        for i in gateway_ids:
            bn = "/vshare/state/global/%d/target/fc/**" % i
            resp = v.basic.get_node_values(bn)

            # Collect the WWN leaf values out of the FC target subtree.
            for node in resp:
                if node.endswith('/wwn'):
                    active_gw_fcp_wwns.append(resp[node])

        return self._convert_wwns_vmem_to_openstack(active_gw_fcp_wwns)

    def _convert_wwns_openstack_to_vmem(self, wwns):
        """Convert a list of Openstack WWNs to VMEM compatible WWN strings.

        Input format is '50014380186b3f65', output format is
        'wwn.50:01:43:80:18:6b:3f:65'.

        Arguments:
            wwns -- list of Openstack-based WWN strings.

        Returns:
            output -- list of VMEM-based WWN strings.
        """
        output = []
        for w in wwns:
            output.append('wwn.{0}'.format(
                ':'.join(w[x:x + 2] for x in xrange(0, len(w), 2))))
        return output

    def _convert_wwns_vmem_to_openstack(self, wwns):
        """Convert a list of VMEM WWNs to Openstack compatible WWN strings.

        Input format is 'wwn.50:01:43:80:18:6b:3f:65', output format
        is '50014380186b3f65'.

        Arguments:
            wwns -- list of VMEM-based WWN strings.

        Returns:
            output -- list of Openstack-based WWN strings.
        """
        output = []
        for w in wwns:
            # Strip the 'wwn.' prefix and drop the colon separators.
            output.append(''.join(w[4:].split(':')))
        return output
|
598
cinder/volume/drivers/violin/v6000_iscsi.py
Normal file
598
cinder/volume/drivers/violin/v6000_iscsi.py
Normal file
@ -0,0 +1,598 @@
|
||||
# Copyright 2013 Violin Memory, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Violin Memory iSCSI Driver for Openstack Cinder
|
||||
|
||||
Provides iSCSI specific LUN services for V6000 series flash arrays.
|
||||
|
||||
This driver requires VMOS v6.3.0.4 or newer software on the array.
|
||||
|
||||
You will need to install the python xg-tools client:
|
||||
sudo pip install xg-tools
|
||||
|
||||
Set the following in the cinder.conf file to enable the VMEM V6000
|
||||
ISCSI Driver along with the required flags:
|
||||
|
||||
volume_driver=cinder.volume.drivers.violin.v6000_iscsi.V6000ISCSIDriver
|
||||
|
||||
NOTE: this driver file requires the use of synchronization points for
|
||||
certain types of backend operations, and as a result may not work
|
||||
properly in an active-active HA configuration. See OpenStack Cinder
|
||||
driver documentation for more information.
|
||||
"""
|
||||
|
||||
import random
|
||||
|
||||
from oslo.utils import units
|
||||
|
||||
from cinder import context
|
||||
from cinder.db.sqlalchemy import models
|
||||
from cinder import exception
|
||||
from cinder.i18n import _, _LE, _LI, _LW
|
||||
from cinder.openstack.common import log as logging
|
||||
from cinder.openstack.common import loopingcall
|
||||
from cinder import utils
|
||||
from cinder.volume import driver
|
||||
from cinder.volume.drivers.san import san
|
||||
from cinder.volume.drivers.violin import v6000_common
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class V6000ISCSIDriver(driver.ISCSIDriver):
|
||||
"""Executes commands relating to iSCSI-based Violin Memory Arrays.
|
||||
|
||||
Version history:
|
||||
1.0 - Initial driver
|
||||
"""
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
def __init__(self, *args, **kwargs):
    """Set up driver state and load Violin/SAN config options."""
    super(V6000ISCSIDriver, self).__init__(*args, **kwargs)
    # Per-target connection info ({node, addr, conn}) for every
    # active iSCSI IP on both gateways; filled in do_setup().
    self.array_info = []
    self.gateway_iscsi_ip_addresses_mga = []
    self.gateway_iscsi_ip_addresses_mgb = []
    # Cached stats dict served by get_volume_stats().
    self.stats = {}
    self.configuration.append_config_values(v6000_common.violin_opts)
    self.configuration.append_config_values(san.san_opts)
    self.common = v6000_common.V6000Common(self.configuration)

    LOG.info(_LI("Initialized driver %(name)s version: %(vers)s.") %
             {'name': self.__class__.__name__, 'vers': self.VERSION})
|
||||
|
||||
def do_setup(self, context):
    """Any initialization the driver does while starting.

    Discovers the active iSCSI IPs on both management gateways and
    records one {node, addr, conn} entry per IP in array_info.
    """
    super(V6000ISCSIDriver, self).do_setup(context)
    self.common.do_setup(context)

    # Same discovery sequence for each gateway, mga first then mgb.
    for label in ('mga', 'mgb'):
        conn = getattr(self.common, label)
        ips = self._get_active_iscsi_ips(conn)
        setattr(self, 'gateway_iscsi_ip_addresses_' + label, ips)
        for ip in ips:
            self.array_info.append({"node": self._get_hostname(label),
                                    "addr": ip,
                                    "conn": conn})
|
||||
|
||||
def check_for_setup_error(self):
    """Returns an error if prerequisites aren't met."""
    self.common.check_for_setup_error()

    # iSCSI must be globally enabled on the array.
    bn = "/vshare/config/iscsi/enable"
    resp = self.common.vip.basic.get_node_values(bn)
    if resp[bn] is not True:
        raise exception.ViolinInvalidBackendConfig(
            reason=_('iSCSI is not enabled'))
    # Both gateway nodes need at least one usable iSCSI IP.
    if len(self.gateway_iscsi_ip_addresses_mga) == 0:
        raise exception.ViolinInvalidBackendConfig(
            reason=_('no available iSCSI IPs on mga'))
    if len(self.gateway_iscsi_ip_addresses_mgb) == 0:
        raise exception.ViolinInvalidBackendConfig(
            reason=_('no available iSCSI IPs on mgb'))
|
||||
|
||||
def create_volume(self, volume):
    """Creates a volume (a LUN on the backend array)."""
    self.common._create_lun(volume)
|
||||
|
||||
def delete_volume(self, volume):
    """Deletes a volume (the backing LUN on the array)."""
    self.common._delete_lun(volume)
|
||||
|
||||
def extend_volume(self, volume, new_size):
    """Extends a volume to new_size GB."""
    self.common._extend_lun(volume, new_size)
|
||||
|
||||
def create_snapshot(self, snapshot):
    """Creates a snapshot from an existing volume."""
    self.common._create_lun_snapshot(snapshot)
|
||||
|
||||
def delete_snapshot(self, snapshot):
    """Deletes a snapshot."""
    self.common._delete_lun_snapshot(snapshot)
|
||||
|
||||
def create_volume_from_snapshot(self, volume, snapshot):
    """Creates a volume from a snapshot."""
    ctxt = context.get_admin_context()
    # Patch a 'size' onto the snapshot dict so it can act as the
    # copy source -- presumably copy_volume_data requires it;
    # TODO(review): confirm.
    snapshot['size'] = snapshot['volume']['size']
    self.common._create_lun(volume)
    self.copy_volume_data(ctxt, snapshot, volume)
|
||||
|
||||
def create_cloned_volume(self, volume, src_vref):
    """Creates a full clone of the specified volume."""
    ctxt = context.get_admin_context()
    self.common._create_lun(volume)
    self.copy_volume_data(ctxt, src_vref, volume)
|
||||
|
||||
def ensure_export(self, context, volume):
    """Synchronously checks and re-exports volumes at cinder start time."""
    # Exports are handled at attach time in initialize_connection().
    pass
|
||||
|
||||
def create_export(self, context, volume):
    """Exports the volume."""
    # Exports are handled at attach time in initialize_connection().
    pass
|
||||
|
||||
def remove_export(self, context, volume):
    """Removes an export for a logical volume."""
    # Exports are removed at detach time in terminate_connection().
    pass
|
||||
|
||||
def initialize_connection(self, volume, connector):
    """Initializes the connection (target<-->initiator).

    Creates (or reuses) an iSCSI target for the lun, exports the
    lun (or snapshot) through it, and returns the iSCSI connection
    properties for the initiator.
    """
    igroup = None

    if self.configuration.use_igroups:
        #
        # Most drivers don't use igroups, because there are a
        # number of issues with multipathing and iscsi/fcp where
        # lun devices either aren't cleaned up properly or are
        # stale (from previous scans).
        #
        # If the customer really wants igroups for whatever
        # reason, we create a new igroup for each host/hypervisor.
        # Every lun that is exported to the particular
        # hypervisor/host will be contained in this igroup.  This
        # should prevent other hosts from seeing luns they aren't
        # using when they perform scans.
        #
        igroup = self.common._get_igroup(volume, connector)
        self._add_igroup_member(connector, igroup)

    vol = self._get_short_name(volume['id'])
    tgt = self._create_iscsi_target(volume)
    # Snapshots are exported via a different backend call than
    # regular volumes, so dispatch on the object type.
    if isinstance(volume, models.Volume):
        lun = self._export_lun(volume, connector, igroup)
    else:
        lun = self._export_snapshot(volume, connector, igroup)

    iqn = "%s%s:%s" % (self.configuration.iscsi_target_prefix,
                       tgt['node'], vol)
    # Persist the new export/target configuration on the array.
    self.common.vip.basic.save_config()

    properties = {}
    properties['target_discovered'] = False
    properties['target_portal'] = '%s:%d' \
        % (tgt['addr'], self.configuration.iscsi_port)
    properties['target_iqn'] = iqn
    properties['target_lun'] = lun
    properties['volume_id'] = volume['id']
    # NOTE(review): CHAP is advertised with empty credentials --
    # confirm this is intentional.
    properties['auth_method'] = 'CHAP'
    properties['auth_username'] = ''
    properties['auth_password'] = ''

    return {'driver_volume_type': 'iscsi', 'data': properties}
|
||||
|
||||
def terminate_connection(self, volume, connector, force=False, **kwargs):
    """Terminates the connection (target<-->initiator)."""
    # Snapshots are unexported via a different backend call than
    # regular volumes, so dispatch on the object type.
    if isinstance(volume, models.Volume):
        self._unexport_lun(volume)
    else:
        self._unexport_snapshot(volume)
    # The per-lun iSCSI target is removed along with the export,
    # then the configuration change is persisted on the array.
    self._delete_iscsi_target(volume)
    self.common.vip.basic.save_config()
|
||||
|
||||
def get_volume_stats(self, refresh=False):
    """Return volume stats, refreshing the cache first if needed."""
    if not self.stats or refresh:
        self._update_stats()
    return self.stats
|
||||
|
||||
@utils.synchronized('vmem-export')
def _create_iscsi_target(self, volume):
    """Creates a new target for use in exporting a lun.

    Openstack does not yet support multipathing.  We still create
    HA targets but we pick a single random target for the
    Openstack infrastructure to use.  This at least allows us to
    evenly distribute LUN connections across the storage cluster.

    The equivalent CLI commands are "iscsi target create
    <target_name>" and "iscsi target bind <target_name> to
    <ip_of_mg_eth_intf>".

    Arguments:
        volume -- volume object provided by the Manager

    Returns:
        reference to randomly selected target object
    """
    v = self.common.vip
    target_name = self._get_short_name(volume['id'])

    LOG.debug("Creating iscsi target %s.", target_name)

    # Create the target, then poll until the array reports it.
    try:
        self.common._send_cmd_and_verify(v.iscsi.create_iscsi_target,
                                         self._wait_for_targetstate,
                                         '', [target_name], [target_name])

    except Exception:
        LOG.exception(_LE("Failed to create iscsi target!"))
        raise

    # Bind the target to every active iSCSI IP on both gateways so
    # either node can serve it.
    try:
        self.common._send_cmd(self.common.mga.iscsi.bind_ip_to_target,
                              '', target_name,
                              self.gateway_iscsi_ip_addresses_mga)
        self.common._send_cmd(self.common.mgb.iscsi.bind_ip_to_target,
                              '', target_name,
                              self.gateway_iscsi_ip_addresses_mgb)
    except Exception:
        LOG.exception(_LE("Failed to bind iSCSI targets!"))
        raise

    # Hand one target back at random to spread lun connections
    # across the cluster.
    return self.array_info[random.randint(0, len(self.array_info) - 1)]
|
||||
|
||||
@utils.synchronized('vmem-export')
def _delete_iscsi_target(self, volume):
    """Deletes the iscsi target for a lun.

    The CLI equivalent is "no iscsi target create <target_name>".

    Arguments:
        volume -- volume object provided by the Manager
    """
    target_name = self._get_short_name(volume['id'])
    # 'Invalid target' means the target is already gone, which is an
    # acceptable outcome for a delete.
    acceptable_responses = ['', 'Invalid target']

    LOG.debug("Deleting iscsi target for %s.", target_name)

    try:
        self.common._send_cmd(self.common.vip.iscsi.delete_iscsi_target,
                              acceptable_responses, target_name)
    except Exception:
        LOG.exception(_LE("Failed to delete iSCSI target!"))
        raise
|
||||
|
||||
@utils.synchronized('vmem-export')
def _export_lun(self, volume, connector=None, igroup=None):
    """Generates the export configuration for the given volume.

    The equivalent CLI command is "lun export container
    <container_name> name <lun_name>"

    Arguments:
        volume -- volume object provided by the Manager
        connector -- connector object provided by the Manager
        igroup -- name of igroup to use for exporting

    Returns:
        lun_id -- the LUN ID assigned by the backend

    Raises:
        ViolinInvalidBackendConfig -- when neither an igroup nor an
            initiator is available to export to
    """
    v = self.common.vip

    # Prefer the igroup when configured, otherwise export directly to
    # the connecting initiator.
    if igroup:
        export_to = igroup
    elif connector:
        export_to = connector['initiator']
    else:
        # BUG FIX: cinder.exception defines no 'Error' class, so the
        # original raise would have failed with AttributeError.  Use
        # the driver-specific config exception instead.
        raise exception.ViolinInvalidBackendConfig(
            reason=_("No initiators found, cannot proceed"))

    target_name = self._get_short_name(volume['id'])

    # Pass log args lazily instead of eager %-formatting.
    LOG.debug("Exporting lun %s.", volume['id'])

    try:
        self.common._send_cmd_and_verify(
            v.lun.export_lun, self.common._wait_for_export_config, '',
            [self.common.container, volume['id'], target_name,
             export_to, 'auto'], [volume['id'], 'state=True'])
    except Exception:
        LOG.exception(_LE("LUN export for %s failed!"), volume['id'])
        raise

    return self.common._get_lun_id(volume['id'])
|
||||
|
||||
@utils.synchronized('vmem-export')
def _unexport_lun(self, volume):
    """Removes the export configuration for the given volume.

    The equivalent CLI command is "no lun export container
    <container_name> name <lun_name>"

    Arguments:
        volume -- volume object provided by the Manager
    """
    vip = self.common.vip

    LOG.debug("Unexporting lun %s.", volume['id'])

    try:
        self.common._send_cmd_and_verify(
            vip.lun.unexport_lun, self.common._wait_for_export_config, '',
            [self.common.container, volume['id'], 'all', 'all', 'auto'],
            [volume['id'], 'state=False'])
    except exception.ViolinBackendErrNotFound:
        # Unexport is idempotent: a missing export is not an error.
        LOG.debug("Lun %s already unexported, continuing.", volume['id'])
    except Exception:
        LOG.exception(_LE("LUN unexport for %s failed!"), volume['id'])
        raise
|
||||
|
||||
@utils.synchronized('vmem-export')
def _export_snapshot(self, snapshot, connector=None, igroup=None):
    """Generates the export configuration for the given snapshot.

    The equivalent CLI command is "snapshot export container
    PROD08 lun <snapshot_name> name <volume_name>"

    Arguments:
        snapshot -- snapshot object provided by the Manager
        connector -- connector object provided by the Manager
        igroup -- name of igroup to use for exporting

    Returns:
        lun_id -- the LUN ID assigned by the backend

    Raises:
        ViolinInvalidBackendConfig -- when neither an igroup nor an
            initiator is available to export to
    """
    v = self.common.vip
    target_name = self._get_short_name(snapshot['id'])

    LOG.debug("Exporting snapshot %s.", snapshot['id'])

    # Prefer the igroup when configured, otherwise export directly to
    # the connecting initiator.
    if igroup:
        export_to = igroup
    elif connector:
        export_to = connector['initiator']
    else:
        # BUG FIX: cinder.exception defines no 'Error' class, so the
        # original raise would have failed with AttributeError.  Use
        # the driver-specific config exception instead.
        raise exception.ViolinInvalidBackendConfig(
            reason=_("No initiators found, cannot proceed"))

    try:
        self.common._send_cmd(v.snapshot.export_lun_snapshot, '',
                              self.common.container, snapshot['volume_id'],
                              snapshot['id'], export_to, target_name,
                              'auto')
    except Exception:
        LOG.exception(_LE("Snapshot export for %s failed!"),
                      snapshot['id'])
        raise
    else:
        # Wait until the backend reflects the new export, then fetch
        # the LUN ID it assigned.
        self.common._wait_for_export_config(snapshot['volume_id'],
                                            snapshot['id'], state=True)
        lun_id = self.common._get_snapshot_id(snapshot['volume_id'],
                                              snapshot['id'])

    return lun_id
|
||||
|
||||
@utils.synchronized('vmem-export')
def _unexport_snapshot(self, snapshot):
    """Removes the export configuration for the given snapshot.

    The CLI equivalent is "no snapshot export container
    PROD08 lun <snapshot_name> name <volume_name>"

    Arguments:
        snapshot -- snapshot object provided by the Manager
    """
    vip = self.common.vip

    LOG.debug("Unexporting snapshot %s.", snapshot['id'])

    try:
        self.common._send_cmd(vip.snapshot.unexport_lun_snapshot, '',
                              self.common.container,
                              snapshot['volume_id'], snapshot['id'],
                              'all', 'all', 'auto', False)
    except Exception:
        LOG.exception(_LE("Snapshot unexport for %s failed!"),
                      snapshot['id'])
        raise

    # Only reached on success (the except clause always re-raises):
    # block until the backend reflects the removed export.
    self.common._wait_for_export_config(snapshot['volume_id'],
                                        snapshot['id'], state=False)
|
||||
|
||||
def _add_igroup_member(self, connector, igroup):
    """Add an initiator to an igroup so it can see exports.

    The equivalent CLI command is "igroup addto name <igroup_name>
    initiators <initiator_name>"

    Arguments:
        connector -- connector object provided by the Manager
        igroup -- name of the igroup to add the initiator to

    Raises:
        ViolinBackendErr -- if the backend rejects the request
    """
    v = self.common.vip

    LOG.debug("Adding initiator %s to igroup.", connector['initiator'])

    resp = v.igroup.add_initiators(igroup, connector['initiator'])

    # A non-zero code indicates the backend refused the change.
    if resp['code'] != 0:
        # BUG FIX: cinder.exception defines no 'Error' class, so the
        # original raise would have failed with AttributeError.  Use
        # the driver-specific backend error instead.
        raise exception.ViolinBackendErr(
            message=_('Failed to add igroup member: %(code)d, %(message)s')
            % resp)
|
||||
|
||||
def _update_stats(self):
    """Gathers array stats from the backend and converts them to GB values.

    Queries the cluster master for the container's total/free byte
    counters, converts them to GiB, and stores the resulting stats
    dict (including driver identity fields) in self.stats.
    """
    data = {}
    total_gb = 0
    free_gb = 0
    v = self.common.vip

    # The cluster master owns the authoritative capacity counters.
    # BUG FIX: dict.values()[0] is Python-2-only; wrap in list() so
    # the view-object returned on Python 3 also works.
    master_cluster_id = list(v.basic.get_node_values(
        '/cluster/state/master_id').values())[0]

    bn1 = ("/vshare/state/global/%s/container/%s/total_bytes"
           % (master_cluster_id, self.common.container))
    bn2 = ("/vshare/state/global/%s/container/%s/free_bytes"
           % (master_cluster_id, self.common.container))
    resp = v.basic.get_node_values([bn1, bn2])

    if bn1 in resp:
        total_gb = resp[bn1] / units.Gi
    else:
        # BUG FIX: LOG.warn is deprecated in favor of LOG.warning.
        LOG.warning(_LW("Failed to receive update for total_gb stat!"))

    if bn2 in resp:
        free_gb = resp[bn2] / units.Gi
    else:
        LOG.warning(_LW("Failed to receive update for free_gb stat!"))

    backend_name = self.configuration.volume_backend_name
    data['volume_backend_name'] = backend_name or self.__class__.__name__
    data['vendor_name'] = 'Violin Memory, Inc.'
    data['driver_version'] = self.VERSION
    data['storage_protocol'] = 'iSCSI'
    data['reserved_percentage'] = 0
    data['QoS_support'] = False
    data['total_capacity_gb'] = total_gb
    data['free_capacity_gb'] = free_gb

    # Pass log args lazily instead of eager %-formatting.
    for key in data:
        LOG.debug("stat update: %(name)s=%(data)s.",
                  {'name': key, 'data': data[key]})

    self.stats = data
|
||||
|
||||
def _get_short_name(self, volume_name):
|
||||
"""Creates a vSHARE-compatible iSCSI target name.
|
||||
|
||||
The Folsom-style volume names are prefix(7) + uuid(36), which
|
||||
is too long for vSHARE for target names. To keep things
|
||||
simple we can just truncate the name to 32 chars.
|
||||
|
||||
Arguments:
|
||||
volume_name -- name of volume/lun
|
||||
|
||||
Returns:
|
||||
Shortened volume name as a string.
|
||||
"""
|
||||
return volume_name[:32]
|
||||
|
||||
def _get_active_iscsi_ips(self, mg_conn):
|
||||
"""Get a list of gateway IP addresses that can be used for iSCSI.
|
||||
|
||||
Arguments:
|
||||
mg_conn -- active XG connection to one of the gateways
|
||||
|
||||
Returns:
|
||||
active_gw_iscsi_ips -- list of IP addresses
|
||||
"""
|
||||
active_gw_iscsi_ips = []
|
||||
interfaces_to_skip = ['lo', 'vlan10', 'eth1', 'eth2', 'eth3']
|
||||
|
||||
bn = "/net/interface/config/*"
|
||||
intf_list = mg_conn.basic.get_node_values(bn)
|
||||
|
||||
for i in intf_list:
|
||||
if intf_list[i] in interfaces_to_skip:
|
||||
continue
|
||||
|
||||
bn1 = "/net/interface/state/%s/addr/ipv4/1/ip" % intf_list[i]
|
||||
bn2 = "/net/interface/state/%s/flags/link_up" % intf_list[i]
|
||||
resp = mg_conn.basic.get_node_values([bn1, bn2])
|
||||
|
||||
if len(resp.keys()) == 2 and resp[bn2] is True:
|
||||
active_gw_iscsi_ips.append(resp[bn1])
|
||||
|
||||
return active_gw_iscsi_ips
|
||||
|
||||
def _get_hostname(self, mg_to_query=None):
|
||||
"""Get the hostname of one of the mgs (hostname is used in IQN).
|
||||
|
||||
If the remote query fails then fall back to using the hostname
|
||||
provided in the cinder configuration file.
|
||||
|
||||
Arguments:
|
||||
mg_to_query -- name of gateway to query 'mga' or 'mgb'
|
||||
|
||||
Returns: hostname -- hostname as a string
|
||||
"""
|
||||
hostname = self.configuration.san_ip
|
||||
conn = self.common.vip
|
||||
|
||||
if mg_to_query == "mga":
|
||||
hostname = self.configuration.gateway_mga
|
||||
conn = self.common.mga
|
||||
elif mg_to_query == "mgb":
|
||||
hostname = self.configuration.gateway_mgb
|
||||
conn = self.common.mgb
|
||||
|
||||
ret_dict = conn.basic.get_node_values("/system/hostname")
|
||||
if ret_dict:
|
||||
hostname = ret_dict.items()[0][1]
|
||||
else:
|
||||
LOG.debug("Unable to fetch gateway hostname for %s." % mg_to_query)
|
||||
|
||||
return hostname
|
||||
|
||||
def _wait_for_targetstate(self, target_name):
    """Polls backend to verify an iscsi target configuration.

    This function will try to verify the creation of an iscsi
    target on both gateway nodes of the array every 5 seconds.

    Arguments:
        target_name -- name of iscsi target to be polled

    Returns:
        True if the export state was correctly added
    """
    bn = "/vshare/config/iscsi/target/%s" % target_name

    def _loop_func():
        status = [False, False]
        mg_conns = [self.common.mga, self.common.mgb]

        LOG.debug("Entering _wait_for_targetstate loop: target=%s.",
                  target_name)

        # BUG FIX: xrange is Python-2-only; range is equivalent here.
        for node_id in range(2):
            resp = mg_conns[node_id].basic.get_node_values(bn)
            # A non-empty response means this node sees the target.
            if resp:
                status[node_id] = True

        # Stop polling once both gateway nodes report the target.
        if status[0] and status[1]:
            raise loopingcall.LoopingCallDone(retvalue=True)

    timer = loopingcall.FixedIntervalLoopingCall(_loop_func)
    success = timer.start(interval=5).wait()

    return success
|
Loading…
x
Reference in New Issue
Block a user