Merge "Re-add Nexenta drivers"

Jenkins 2016-01-12 18:53:44 +00:00 committed by Gerrit Code Review
commit d957f90b15
11 changed files with 2546 additions and 49 deletions


@@ -48,7 +48,6 @@ CONF.register_opts(exc_log_opts)
class ConvertedException(webob.exc.WSGIHTTPException):
def __init__(self, code=500, title="", explanation=""):
self.code = code
# There is a strict rule about constructing status line for HTTP:
@@ -1040,3 +1039,8 @@ class CohoException(VolumeDriverException):
# Tegile Storage drivers
class TegileAPIException(VolumeBackendAPIException):
message = _("Unexpected response from Tegile IntelliFlash API")
# NexentaStor driver exception
class NexentaException(VolumeDriverException):
message = _("%(message)s")
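A minimal usage sketch (not part of the commit) of how the drivers below consume this exception class; the helper name is hypothetical, and the substring check on exc.args[0] mirrors the handlers added in iscsi.py further down.

from cinder import exception

def destroy_zvol_quietly(nms, zvol_name):
    # Ignore "does not exist" errors the way the Nexenta drivers do,
    # and re-raise anything else.
    try:
        nms.zvol.destroy(zvol_name, '')
    except exception.NexentaException as exc:
        if 'does not exist' not in exc.args[0]:
            raise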


@@ -128,8 +128,8 @@ from cinder.volume.drivers.lenovo import lenovo_common as \
from cinder.volume.drivers import lvm as cinder_volume_drivers_lvm
from cinder.volume.drivers.netapp import options as \
cinder_volume_drivers_netapp_options
from cinder.volume.drivers.nexenta.nexentaedge import iscsi as \
cinder_volume_drivers_nexenta_nexentaedge_iscsi
from cinder.volume.drivers.nexenta import options as \
cinder_volume_drivers_nexenta_options
from cinder.volume.drivers import nfs as cinder_volume_drivers_nfs
from cinder.volume.drivers import nimble as cinder_volume_drivers_nimble
from cinder.volume.drivers.prophetstor import options as \
@@ -218,6 +218,12 @@ def list_opts():
cinder_test.test_opts,
cinder_volume_drivers_ibm_gpfs.gpfs_opts,
cinder_volume_drivers_violin_v7000common.violin_opts,
cinder_volume_drivers_nexenta_options.NEXENTA_CONNECTION_OPTS,
cinder_volume_drivers_nexenta_options.NEXENTA_ISCSI_OPTS,
cinder_volume_drivers_nexenta_options.NEXENTA_DATASET_OPTS,
cinder_volume_drivers_nexenta_options.NEXENTA_NFS_OPTS,
cinder_volume_drivers_nexenta_options.NEXENTA_RRMGR_OPTS,
cinder_volume_drivers_nexenta_options.NEXENTA_EDGE_OPTS,
cinder_exception.exc_log_opts,
cinder_common_config.global_opts,
cinder_scheduler_weights_capacity.capacity_weight_opts,
@@ -310,8 +316,6 @@ def list_opts():
cinder_volume_drivers_hpe_hpexpopts.HORCM_VOLUME_OPTS,
cinder_volume_drivers_hitachi_hbsdiscsi.volume_opts,
cinder_volume_manager.volume_manager_opts,
cinder_volume_drivers_nexenta_nexentaedge_iscsi.
nexenta_edge_opts,
cinder_volume_drivers_ibm_flashsystemiscsi.
flashsystem_iscsi_opts,
cinder_volume_drivers_tegile.tegile_opts,
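The NEXENTA_*_OPTS names added above come from the new shared cinder/volume/drivers/nexenta/options module introduced by this change, whose file is not reproduced in this excerpt. A hypothetical sketch of the pattern it presumably follows, with option names and defaults borrowed from the driver code and tests elsewhere in this diff:

from oslo_config import cfg

# Illustrative excerpt only; the real options module defines several such
# lists (CONNECTION, ISCSI, DATASET, NFS, RRMGR, EDGE) that list_opts()
# aggregates for config generation.
NEXENTA_CONNECTION_OPTS = [
    cfg.StrOpt('nexenta_host',
               default='',
               help='IP address of NexentaStor management interface'),
    cfg.IntOpt('nexenta_rest_port',
               default=2000,
               help='HTTP(S) port for the NexentaStor REST API'),
    cfg.StrOpt('nexenta_rest_protocol',
               default='auto',
               help='Use http or https for REST connection (default auto)'),
]

CONF = cfg.CONF
CONF.register_opts(NEXENTA_CONNECTION_OPTS)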


@@ -0,0 +1,609 @@
#
# Copyright 2015 Nexenta Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for the OpenStack Cinder Nexenta volume drivers.
"""
import mock
from oslo_utils import units
from cinder import context
from cinder import db
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.nexenta import iscsi
from cinder.volume.drivers.nexenta import jsonrpc
from cinder.volume.drivers.nexenta import nfs
from cinder.volume.drivers.nexenta import utils
class TestNexentaISCSIDriver(test.TestCase):
TEST_VOLUME_NAME = 'volume1'
TEST_VOLUME_NAME2 = 'volume2'
TEST_SNAPSHOT_NAME = 'snapshot1'
TEST_VOLUME_REF = {
'name': TEST_VOLUME_NAME,
'size': 1,
'id': '1',
'status': 'available'
}
TEST_VOLUME_REF2 = {
'name': TEST_VOLUME_NAME2,
'size': 1,
'id': '2',
'status': 'in-use'
}
TEST_SNAPSHOT_REF = {
'name': TEST_SNAPSHOT_NAME,
'volume_name': TEST_VOLUME_NAME,
}
def __init__(self, method):
super(TestNexentaISCSIDriver, self).__init__(method)
def setUp(self):
super(TestNexentaISCSIDriver, self).setUp()
self.cfg = mock.Mock(spec=conf.Configuration)
self.ctxt = context.get_admin_context()
self.cfg.nexenta_dataset_description = ''
self.cfg.nexenta_host = '1.1.1.1'
self.cfg.nexenta_user = 'admin'
self.cfg.nexenta_password = 'nexenta'
self.cfg.nexenta_volume = 'cinder'
self.cfg.nexenta_rest_port = 2000
self.cfg.nexenta_rest_protocol = 'http'
self.cfg.nexenta_iscsi_target_portal_port = 3260
self.cfg.nexenta_target_prefix = 'iqn:'
self.cfg.nexenta_target_group_prefix = 'cinder/'
self.cfg.nexenta_blocksize = '8K'
self.cfg.nexenta_sparse = True
self.cfg.nexenta_dataset_compression = 'on'
self.cfg.nexenta_dataset_dedup = 'off'
self.cfg.nexenta_rrmgr_compression = 1
self.cfg.nexenta_rrmgr_tcp_buf_size = 1024
self.cfg.nexenta_rrmgr_connections = 2
self.cfg.reserved_percentage = 20
self.nms_mock = mock.Mock()
for mod in ['volume', 'zvol', 'iscsitarget', 'appliance',
'stmf', 'scsidisk', 'snapshot']:
setattr(self.nms_mock, mod, mock.Mock())
self.stubs.Set(jsonrpc, 'NexentaJSONProxy',
lambda *_, **__: self.nms_mock)
self.drv = iscsi.NexentaISCSIDriver(
configuration=self.cfg)
self.drv.db = db
self.drv.do_setup(self.ctxt)
def test_check_do_setup(self):
self.assertEqual('http', self.drv.nms_protocol)
def test_check_for_setup_error(self):
self.nms_mock.volume.object_exists.return_value = False
self.assertRaises(LookupError, self.drv.check_for_setup_error)
def test_local_path(self):
self.assertRaises(NotImplementedError, self.drv.local_path, '')
def test_create_volume(self):
self.drv.create_volume(self.TEST_VOLUME_REF)
self.nms_mock.zvol.create.assert_called_with(
'cinder/%s' % self.TEST_VOLUME_REF['name'], '1G',
self.cfg.nexenta_blocksize, self.cfg.nexenta_sparse)
def test_delete_volume(self):
self.nms_mock.zvol.get_child_props.return_value = (
{'origin': 'cinder/volume0@snapshot'})
self.drv.delete_volume(self.TEST_VOLUME_REF)
self.nms_mock.zvol.get_child_props.assert_called_with(
'cinder/volume1', 'origin')
self.nms_mock.zvol.destroy.assert_called_with(
'cinder/volume1', '')
self.nms_mock.zvol.get_child_props.assert_called_with(
'cinder/volume1', 'origin')
self.nms_mock.zvol.destroy.assert_called_with('cinder/volume1', '')
self.drv.delete_volume(self.TEST_VOLUME_REF)
self.nms_mock.zvol.get_child_props.assert_called_with(
'cinder/volume1', 'origin')
self.nms_mock.zvol.get_child_props.return_value = (
{'origin': 'cinder/volume0@cinder-clone-snapshot-1'})
self.nms_mock.zvol.destroy.assert_called_with('cinder/volume1', '')
self.drv.delete_volume(self.TEST_VOLUME_REF)
self.nms_mock.snapshot.destroy.assert_called_with(
'cinder/volume0@cinder-clone-snapshot-1', '')
self.nms_mock.volume.object_exists.assert_called_with('cinder/volume0')
def test_create_cloned_volume(self):
vol = self.TEST_VOLUME_REF2
src_vref = self.TEST_VOLUME_REF
snapshot = {
'volume_name': src_vref['name'],
'name': 'cinder-clone-snapshot-%s' % vol['id'],
}
self.drv.create_cloned_volume(vol, src_vref)
self.nms_mock.zvol.create_snapshot.assert_called_with(
'cinder/%s' % src_vref['name'], snapshot['name'], '')
self.nms_mock.zvol.clone.assert_called_with(
'cinder/%s@%s' % (src_vref['name'], snapshot['name']),
'cinder/%s' % vol['name'])
def test_migrate_volume(self):
volume = self.TEST_VOLUME_REF
host = {
'capabilities': {
'vendor_name': 'Nexenta',
'location_info': 'NexentaISCSIDriver:1.1.1.1:cinder',
'free_capacity_gb': 1,
'iscsi_target_portal_port': 3260,
'nms_url': 'http://admin:password@1.1.1.1:2000'
}
}
snapshot = {
'volume_name': volume['name'],
'name': 'cinder-migrate-snapshot-%s' % volume['id'],
}
volume_name = 'cinder/%s' % volume['name']
self.nms_mock.appliance.ssh_list_bindings.return_value = (
{'0': [True, True, True, '1.1.1.1']})
self.nms_mock.zvol.get_child_props.return_value = None
self.drv.migrate_volume(None, volume, host)
self.nms_mock.zvol.create_snapshot.assert_called_with(
'cinder/%s' % volume['name'], snapshot['name'], '')
src = '%(volume)s/%(zvol)s@%(snapshot)s' % {
'volume': 'cinder',
'zvol': volume['name'],
'snapshot': snapshot['name']
}
dst = '1.1.1.1:cinder'
cmd = ' '.join(['rrmgr -s zfs -c 1 -q -e -w 1024 -n 2', src, dst])
self.nms_mock.appliance.execute.assert_called_with(cmd)
snapshot_name = 'cinder/%(volume)s@%(snapshot)s' % {
'volume': volume['name'],
'snapshot': snapshot['name']
}
self.nms_mock.snapshot.destroy.assert_called_with(snapshot_name, '')
self.nms_mock.zvol.destroy.assert_called_with(volume_name, '')
self.nms_mock.snapshot.destroy.assert_called_with(
'cinder/%(volume)s@%(snapshot)s' % {
'volume': volume['name'],
'snapshot': snapshot['name']
}, '')
self.nms_mock.volume.object_exists.assert_called_with(volume_name)
self.mox.ReplayAll()
def test_create_snapshot(self):
self.drv.create_snapshot(self.TEST_SNAPSHOT_REF)
self.nms_mock.zvol.create_snapshot.assert_called_with(
'cinder/volume1', 'snapshot1', '')
def test_create_volume_from_snapshot(self):
self.drv.create_volume_from_snapshot(self.TEST_VOLUME_REF2,
self.TEST_SNAPSHOT_REF)
self.nms_mock.zvol.clone.assert_called_with(
'cinder/volume1@snapshot1', 'cinder/volume2')
def test_delete_snapshot(self):
self._create_volume_db_entry()
self.drv.delete_snapshot(self.TEST_SNAPSHOT_REF)
self.nms_mock.snapshot.destroy.assert_called_with(
'cinder/volume1@snapshot1', '')
self.nms_mock.volume.object_exists.assert_called_with(
'cinder/volume1')
# Check that no exception is raised if the snapshot does not exist
self.drv.delete_snapshot(self.TEST_SNAPSHOT_REF)
self.nms_mock.snapshot.destroy.side_effect = (
exception.NexentaException('does not exist'))
self.nms_mock.snapshot.destroy.assert_called_with(
'cinder/volume1@snapshot1', '')
self.nms_mock.volume.object_exists.assert_called_with(
'cinder/volume1')
def _mock_all_export_methods(self, fail=False):
self.assertTrue(self.nms_mock.stmf.list_targets.called)
self.nms_mock.iscsitarget.create_target.assert_called_with(
{'target_name': 'iqn:1.1.1.1-0'})
self.nms_mock.stmf.list_targetgroups()
zvol_name = 'cinder/volume1'
self.nms_mock.stmf.create_targetgroup.assert_called_with(
'cinder/1.1.1.1-0')
self.nms_mock.stmf.list_targetgroup_members.assert_called_with(
'cinder/1.1.1.1-0')
self.nms_mock.scsidisk.lu_exists.assert_called_with(zvol_name)
self.nms_mock.scsidisk.create_lu.assert_called_with(zvol_name, {})
def _stub_all_export_methods(self):
self.nms_mock.scsidisk.lu_exists.return_value = False
self.nms_mock.scsidisk.lu_shared.side_effect = (
exception.NexentaException(['does not exist for zvol']))
self.nms_mock.scsidisk.create_lu.return_value = {'lun': 0}
self.nms_mock.stmf.list_targets.return_value = []
self.nms_mock.stmf.list_targetgroups.return_value = []
self.nms_mock.stmf.list_targetgroup_members.return_value = []
self.nms_mock._get_target_name.return_value = ['iqn:1.1.1.1-0']
self.nms_mock.iscsitarget.create_targetgroup.return_value = ({
'target_name': 'cinder/1.1.1.1-0'})
self.nms_mock.scsidisk.add_lun_mapping_entry.return_value = {'lun': 0}
def test_create_export(self):
self._stub_all_export_methods()
retval = self.drv.create_export({}, self.TEST_VOLUME_REF, None)
self._mock_all_export_methods()
location = '%(host)s:%(port)s,1 %(name)s %(lun)s' % {
'host': self.cfg.nexenta_host,
'port': self.cfg.nexenta_iscsi_target_portal_port,
'name': 'iqn:1.1.1.1-0',
'lun': '0'
}
self.assertEqual({'provider_location': location}, retval)
def test_ensure_export(self):
self._stub_all_export_methods()
self.drv.ensure_export({}, self.TEST_VOLUME_REF)
self._mock_all_export_methods()
def test_remove_export(self):
self.nms_mock.stmf.list_targets.return_value = ['iqn:1.1.1.1-0']
self.nms_mock.stmf.list_targetgroups.return_value = (
['cinder/1.1.1.1-0'])
self.nms_mock.stmf.list_targetgroup_members.return_value = (
['iqn:1.1.1.1-0'])
self.drv.remove_export({}, self.TEST_VOLUME_REF)
self.assertTrue(self.nms_mock.stmf.list_targets.called)
self.assertTrue(self.nms_mock.stmf.list_targetgroups.called)
self.nms_mock.scsidisk.delete_lu.assert_called_with('cinder/volume1')
def test_get_volume_stats(self):
stats = {'size': '5368709120G',
'used': '5368709120G',
'available': '5368709120G',
'health': 'ONLINE'}
self.nms_mock.volume.get_child_props.return_value = stats
stats = self.drv.get_volume_stats(True)
self.assertEqual('iSCSI', stats['storage_protocol'])
self.assertEqual(5368709120.0, stats['total_capacity_gb'])
self.assertEqual(5368709120.0, stats['free_capacity_gb'])
self.assertEqual(20, stats['reserved_percentage'])
self.assertFalse(stats['QoS_support'])
def _create_volume_db_entry(self):
vol = {
'id': '1',
'size': 1,
'status': 'available',
'provider_location': self.TEST_VOLUME_NAME
}
return db.volume_create(self.ctxt, vol)['id']
class TestNexentaNfsDriver(test.TestCase):
TEST_EXPORT1 = 'host1:/volumes/stack/share'
TEST_NMS1 = 'http://admin:nexenta@host1:2000'
TEST_EXPORT2 = 'host2:/volumes/stack/share'
TEST_NMS2 = 'http://admin:nexenta@host2:2000'
TEST_EXPORT2_OPTIONS = '-o intr'
TEST_FILE_NAME = 'test.txt'
TEST_SHARES_CONFIG_FILE = '/etc/cinder/nexenta-shares.conf'
TEST_SHARE_SVC = 'svc:/network/nfs/server:default'
TEST_SHARE_OPTS = {
'read_only': '',
'read_write': '*',
'recursive': 'true',
'anonymous_rw': 'true',
'extra_options': 'anon=0',
'root': 'nobody'
}
def _create_volume_db_entry(self):
vol = {
'id': '1',
'size': 1,
'status': 'available',
'provider_location': self.TEST_EXPORT1
}
return db.volume_create(self.ctxt, vol)['id']
def setUp(self):
super(TestNexentaNfsDriver, self).setUp()
self.ctxt = context.get_admin_context()
self.cfg = mock.Mock(spec=conf.Configuration)
self.cfg.nexenta_dataset_description = ''
self.cfg.nexenta_shares_config = None
self.cfg.nexenta_mount_point_base = '$state_path/mnt'
self.cfg.nexenta_sparsed_volumes = True
self.cfg.nexenta_dataset_compression = 'on'
self.cfg.nexenta_dataset_dedup = 'off'
self.cfg.nexenta_rrmgr_compression = 1
self.cfg.nexenta_rrmgr_tcp_buf_size = 1024
self.cfg.nexenta_rrmgr_connections = 2
self.cfg.nfs_mount_point_base = '/mnt/test'
self.cfg.nfs_mount_options = None
self.cfg.nas_mount_options = None
self.cfg.nexenta_nms_cache_volroot = False
self.cfg.nfs_mount_attempts = 3
self.cfg.reserved_percentage = 20
self.cfg.nfs_used_ratio = .95
self.cfg.nfs_oversub_ratio = 1.0
self.cfg.max_over_subscription_ratio = 20.0
self.nms_mock = mock.Mock()
for mod in ('appliance', 'folder', 'server', 'volume', 'netstorsvc',
'snapshot', 'netsvc'):
setattr(self.nms_mock, mod, mock.Mock())
self.nms_mock.__hash__ = lambda *_, **__: 1
self.stubs.Set(jsonrpc, 'NexentaJSONProxy',
lambda *_, **__: self.nms_mock)
self.drv = nfs.NexentaNfsDriver(configuration=self.cfg)
self.drv.shares = {}
self.drv.share2nms = {}
def test_check_for_setup_error(self):
self.drv.share2nms = {
'host1:/volumes/stack/share': self.nms_mock
}
self.nms_mock.server.get_prop.return_value = '/volumes'
self.nms_mock.volume.object_exists.return_value = True
self.nms_mock.folder.object_exists.return_value = True
share_opts = {
'read_write': '*',
'read_only': '',
'root': 'nobody',
'extra_options': 'anon=0',
'recursive': 'true',
'anonymous_rw': 'true',
}
self.drv.check_for_setup_error()
self.nms_mock.netstorsvc.share_folder.assert_called_with(
'svc:/network/nfs/server:default', 'stack/share', share_opts)
self.nms_mock.server.get_prop.return_value = '/volumes'
self.nms_mock.volume.object_exists.return_value = False
self.assertRaises(LookupError, self.drv.check_for_setup_error)
self.nms_mock.server.get_prop.return_value = '/volumes'
self.nms_mock.volume.object_exists.return_value = True
self.nms_mock.folder.object_exists.return_value = False
self.assertRaises(LookupError, self.drv.check_for_setup_error)
def test_initialize_connection(self):
self.drv.shares = {
self.TEST_EXPORT1: None
}
volume = {
'provider_location': self.TEST_EXPORT1,
'name': 'volume'
}
result = self.drv.initialize_connection(volume, None)
self.assertEqual('%s/volume' % self.TEST_EXPORT1,
result['data']['export'])
def test_do_create_volume(self):
volume = {
'provider_location': self.TEST_EXPORT1,
'size': 1,
'name': 'volume-1'
}
self.drv.shares = {self.TEST_EXPORT1: None}
self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock}
compression = self.cfg.nexenta_dataset_compression
self.nms_mock.server.get_prop.return_value = '/volumes'
self.nms_mock.netsvc.get_confopts('svc:/network/nfs/server:default',
'configure').AndReturn({
'nfs_server_versmax': {
'current': u'3'}})
self.nms_mock.netsvc.get_confopts.return_value = {
'nfs_server_versmax': {'current': 4}}
self.nms_mock._ensure_share_mounted.return_value = True
self.drv._do_create_volume(volume)
self.nms_mock.folder.create_with_props.assert_called_with(
'stack', 'share/volume-1', {'compression': compression})
self.nms_mock.netstorsvc.share_folder.assert_called_with(
self.TEST_SHARE_SVC, 'stack/share/volume-1', self.TEST_SHARE_OPTS)
mock_chmod = self.nms_mock.appliance.execute
mock_chmod.assert_called_with(
'chmod ugo+rw /volumes/stack/share/volume-1/volume')
mock_truncate = self.nms_mock.appliance.execute
mock_truncate.side_effect = exception.NexentaException()
self.nms_mock.server.get_prop.return_value = '/volumes'
self.assertRaises(exception.NexentaException,
self.drv._do_create_volume, volume)
def test_create_sparsed_file(self):
self.drv._create_sparsed_file(self.nms_mock, '/tmp/path', 1)
self.nms_mock.appliance.execute.assert_called_with(
'truncate --size 1G /tmp/path')
def test_create_regular_file(self):
self.drv._create_regular_file(self.nms_mock, '/tmp/path', 1)
self.nms_mock.appliance.execute.assert_called_with(
'dd if=/dev/zero of=/tmp/path bs=1M count=1024')
def test_set_rw_permissions_for_all(self):
path = '/tmp/path'
self.drv._set_rw_permissions_for_all(self.nms_mock, path)
self.nms_mock.appliance.execute.assert_called_with(
'chmod ugo+rw %s' % path)
def test_local_path(self):
volume = {'provider_location': self.TEST_EXPORT1, 'name': 'volume-1'}
path = self.drv.local_path(volume)
self.assertEqual(
'$state_path/mnt/b3f660847a52b29ac330d8555e4ad669/volume-1/volume',
path
)
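The opaque directory segment asserted above is presumably the RemoteFS-style hash of the NFS share string (the Nexenta NFS driver inherits its mount-point layout from Cinder's NFS/RemoteFS base drivers). A small, purely illustrative sketch of that derivation:

import hashlib

share = 'host1:/volumes/stack/share'
mount_point_base = '$state_path/mnt'
# Hashed per-share mount directory, as used by the RemoteFS base driver.
mount_point = '%s/%s' % (mount_point_base,
                         hashlib.md5(share.encode('utf-8')).hexdigest())
print('%s/volume-1/volume' % mount_point)
# Expected to match the path asserted in test_local_path above.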
def test_remote_path(self):
volume = {'provider_location': self.TEST_EXPORT1, 'name': 'volume-1'}
path = self.drv.remote_path(volume)
self.assertEqual('/volumes/stack/share/volume-1/volume', path)
def test_share_folder(self):
self.drv._share_folder(self.nms_mock, 'stack', 'share/folder')
path = 'stack/share/folder'
self.nms_mock.netstorsvc.share_folder.assert_called_with(
self.TEST_SHARE_SVC, path, self.TEST_SHARE_OPTS)
def test_load_shares_config(self):
self.drv.configuration.nfs_shares_config = (
self.TEST_SHARES_CONFIG_FILE)
config_data = [
'%s %s' % (self.TEST_EXPORT1, self.TEST_NMS1),
'# %s %s' % (self.TEST_EXPORT2, self.TEST_NMS2),
'',
'%s %s %s' % (self.TEST_EXPORT2, self.TEST_NMS2,
self.TEST_EXPORT2_OPTIONS)
]
with mock.patch.object(self.drv, '_read_config_file') as \
mock_read_config_file:
mock_read_config_file.return_value = config_data
self.drv._load_shares_config(
self.drv.configuration.nfs_shares_config)
self.assertIn(self.TEST_EXPORT1, self.drv.shares)
self.assertIn(self.TEST_EXPORT2, self.drv.shares)
self.assertEqual(2, len(self.drv.shares))
self.assertIn(self.TEST_EXPORT1, self.drv.share2nms)
self.assertIn(self.TEST_EXPORT2, self.drv.share2nms)
self.assertEqual(2, len(self.drv.share2nms.keys()))
self.assertEqual(self.TEST_EXPORT2_OPTIONS,
self.drv.shares[self.TEST_EXPORT2])
def test_get_capacity_info(self):
self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock}
self.nms_mock.server.get_prop.return_value = '/volumes'
self.nms_mock.folder.get_child_props.return_value = {
'available': '1G',
'used': '2G'
}
total, free, allocated = self.drv._get_capacity_info(self.TEST_EXPORT1)
self.assertEqual(3 * units.Gi, total)
self.assertEqual(units.Gi, free)
self.assertEqual(2 * units.Gi, allocated)
def test_get_share_datasets(self):
self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock}
self.nms_mock.server.get_prop.return_value = '/volumes'
volume_name, folder_name = (
self.drv._get_share_datasets(self.TEST_EXPORT1))
self.assertEqual('stack', volume_name)
self.assertEqual('share', folder_name)
def test_delete_snapshot(self):
self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock}
self._create_volume_db_entry()
self.nms_mock.server.get_prop.return_value = '/volumes'
self.drv.delete_snapshot({'volume_id': '1', 'name': 'snapshot1'})
self.nms_mock.snapshot.destroy.assert_called_with(
'stack/share/volume-1@snapshot1', '')
def test_delete_volume(self):
self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock}
self._create_volume_db_entry()
self.drv._ensure_share_mounted = lambda *_, **__: 0
self.drv._execute = lambda *_, **__: 0
self.nms_mock.server.get_prop.return_value = '/volumes'
self.nms_mock.folder.get_child_props.return_value = None
self.drv.delete_volume({
'id': '1',
'name': 'volume-1',
'provider_location': self.TEST_EXPORT1
})
self.nms_mock.folder.destroy.assert_called_with(
'stack/share/volume-1', '-r')
# Check that no exception is raised if the folder does not exist on
# the NexentaStor appliance.
mock = self.nms_mock.folder.destroy
mock.side_effect = exception.NexentaException('Folder does not exist')
self.drv.delete_volume({
'id': '1',
'name': 'volume-1',
'provider_location': self.TEST_EXPORT1
})
class TestNexentaUtils(test.TestCase):
def test_str2size(self):
values_to_test = (
# Test empty value
(None, 0),
('', 0),
('0', 0),
('12', 12),
# Test int values
(10, 10),
# Test bytes string
('1b', 1),
('1B', 1),
('1023b', 1023),
('0B', 0),
# Test other units
('1M', units.Mi),
('1.0M', units.Mi),
)
for value, result in values_to_test:
self.assertEqual(result, utils.str2size(value))
# Invalid format value
self.assertRaises(ValueError, utils.str2size, 'A')
def test_str2gib_size(self):
self.assertEqual(1, utils.str2gib_size('1024M'))
self.assertEqual(300 * units.Mi // units.Gi,
utils.str2gib_size('300M'))
self.assertEqual(1.2 * units.Ti // units.Gi,
utils.str2gib_size('1.2T'))
self.assertRaises(ValueError, utils.str2gib_size, 'A')
def test_parse_nms_url(self):
urls = (
('http://192.168.1.1/', (False, 'http', 'admin', 'nexenta',
'192.168.1.1', '2000', '/rest/nms/')),
('http://192.168.1.1:8080', (False, 'http', 'admin', 'nexenta',
'192.168.1.1', '8080', '/rest/nms/')),
('https://root:password@192.168.1.1:8080',
(False, 'https', 'root', 'password', '192.168.1.1', '8080',
'/rest/nms/')),
)
for url, result in urls:
self.assertEqual(result, utils.parse_nms_url(url))
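The tables in TestNexentaUtils pin down the expected behaviour of cinder.volume.drivers.nexenta.utils, whose source is not reproduced in this diff. A minimal, purely illustrative str2size consistent with the cases above (the real implementation may differ in details):

import re

from oslo_utils import units

_MULTIPLIERS = {'': 1, 'b': 1, 'k': units.Ki, 'm': units.Mi,
                'g': units.Gi, 't': units.Ti}

def str2size(value):
    # Empty values and plain numbers pass through, per the test table.
    if not value:
        return 0
    if isinstance(value, (int, float)):
        return int(value)
    match = re.match(r'^([0-9.]+)\s*([bkmgt]?)b?$', value.strip(), re.I)
    if match is None:
        raise ValueError('Invalid size value: %s' % value)
    number, unit = match.groups()
    return int(float(number) * _MULTIPLIERS[unit.lower()])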


@@ -0,0 +1,683 @@
# Copyright 2016 Nexenta Systems, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`nexenta.iscsi` -- Driver to store volumes on Nexenta Appliance
=====================================================================
.. automodule:: nexenta.iscsi
"""
from oslo_log import log as logging
from oslo_utils import excutils
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.volume import driver
from cinder.volume.drivers.nexenta import jsonrpc
from cinder.volume.drivers.nexenta import options
from cinder.volume.drivers.nexenta import utils
VERSION = '1.3.0.1'
LOG = logging.getLogger(__name__)
class NexentaISCSIDriver(driver.ISCSIDriver):
"""Executes volume driver commands on Nexenta Appliance.
Version history:
1.0.0 - Initial driver version.
1.0.1 - Fixed bug #1236626: catch "does not exist" exception of
lu_exists.
1.1.0 - Changed class name to NexentaISCSIDriver.
1.1.1 - Ignore "does not exist" exception of nms.snapshot.destroy.
1.1.2 - Optimized create_cloned_volume, replaced zfs send recv with zfs
clone.
1.1.3 - Extended volume stats provided by _update_volume_stats method.
1.2.0 - Added volume migration with storage assist method.
1.2.1 - Fixed bug #1263258: migrate_volume now updates provider_location
of the migrated volume and destroys the temporary snapshot on the
migration destination afterwards.
1.3.0 - Added retype method.
1.3.0.1 - Target creation refactor.
"""
VERSION = VERSION
def __init__(self, *args, **kwargs):
super(NexentaISCSIDriver, self).__init__(*args, **kwargs)
self.nms = None
self.targets = {}
if self.configuration:
self.configuration.append_config_values(
options.NEXENTA_CONNECTION_OPTS)
self.configuration.append_config_values(
options.NEXENTA_ISCSI_OPTS)
self.configuration.append_config_values(
options.NEXENTA_DATASET_OPTS)
self.configuration.append_config_values(
options.NEXENTA_RRMGR_OPTS)
self.nms_protocol = self.configuration.nexenta_rest_protocol
self.nms_host = self.configuration.nexenta_host
self.nms_port = self.configuration.nexenta_rest_port
self.nms_user = self.configuration.nexenta_user
self.nms_password = self.configuration.nexenta_password
self.volume = self.configuration.nexenta_volume
self.volume_compression = (
self.configuration.nexenta_dataset_compression)
self.volume_deduplication = self.configuration.nexenta_dataset_dedup
self.volume_description = (
self.configuration.nexenta_dataset_description)
self.rrmgr_compression = self.configuration.nexenta_rrmgr_compression
self.rrmgr_tcp_buf_size = self.configuration.nexenta_rrmgr_tcp_buf_size
self.rrmgr_connections = self.configuration.nexenta_rrmgr_connections
self.iscsi_target_portal_port = (
self.configuration.nexenta_iscsi_target_portal_port)
@property
def backend_name(self):
backend_name = None
if self.configuration:
backend_name = self.configuration.safe_get('volume_backend_name')
if not backend_name:
backend_name = self.__class__.__name__
return backend_name
def do_setup(self, context):
if self.nms_protocol == 'auto':
protocol, auto = 'http', True
else:
protocol, auto = self.nms_protocol, False
self.nms = jsonrpc.NexentaJSONProxy(
protocol, self.nms_host, self.nms_port, '/rest/nms', self.nms_user,
self.nms_password, auto=auto)
def check_for_setup_error(self):
"""Verify that the volume for our zvols exists.
:raise: :py:exc:`LookupError`
"""
if not self.nms.volume.object_exists(self.volume):
raise LookupError(_("Volume %s does not exist in Nexenta SA") %
self.volume)
def _get_zvol_name(self, volume_name):
"""Return zvol name that corresponds given volume name."""
return '%s/%s' % (self.volume, volume_name)
def _create_target(self, target_idx):
target_name = '%s%s-%i' % (
self.configuration.nexenta_target_prefix,
self.nms_host,
target_idx
)
target_group_name = self._get_target_group_name(target_name)
if not self._target_exists(target_name):
try:
self.nms.iscsitarget.create_target({
'target_name': target_name})
except exception.NexentaException as exc:
if 'already' in exc.args[0]:
LOG.info(_LI('Ignored target creation error "%s" while '
'ensuring export.'),
exc)
else:
raise
if not self._target_group_exists(target_group_name):
try:
self.nms.stmf.create_targetgroup(target_group_name)
except exception.NexentaException as exc:
if ('already' in exc.args[0]):
LOG.info(_LI('Ignored target group creation error "%s" '
'while ensuring export.'),
exc)
else:
raise
if not self._target_member_in_target_group(target_group_name,
target_name):
try:
self.nms.stmf.add_targetgroup_member(target_group_name,
target_name)
except exception.NexentaException as exc:
if ('already' in exc.args[0]):
LOG.info(_LI('Ignored target group member addition error '
'"%s" while ensuring export.'),
exc)
else:
raise
self.targets[target_name] = []
return target_name
def _get_target_name(self, volume):
"""Return iSCSI target name with least LUs."""
provider_location = volume.get('provider_location')
target_names = self.targets.keys()
if provider_location:
target_name = provider_location.split(',1 ')[1].split(' ')[0]
if not(self.targets.get(target_name)):
self.targets[target_name] = []
if not(volume['name'] in self.targets[target_name]):
self.targets[target_name].append(volume['name'])
elif not(target_names):
# create first target and target group
target_name = self._create_target(0)
self.targets[target_name].append(volume['name'])
else:
target_name = target_names[0]
for target in target_names:
if len(self.targets[target]) < len(self.targets[target_name]):
target_name = target
if len(self.targets[target_name]) >= 20:
# create new target and target group
target_name = self._create_target(len(target_names))
if not(volume['name'] in self.targets[target_name]):
self.targets[target_name].append(volume['name'])
return target_name
def _get_target_group_name(self, target_name):
"""Return Nexenta iSCSI target group name for volume."""
return target_name.replace(
self.configuration.nexenta_target_prefix,
self.configuration.nexenta_target_group_prefix
)
@staticmethod
def _get_clone_snapshot_name(volume):
"""Return name for snapshot that will be used to clone the volume."""
return 'cinder-clone-snapshot-%(id)s' % volume
@staticmethod
def _is_clone_snapshot_name(snapshot):
"""Check if snapshot is created for cloning."""
name = snapshot.split('@')[-1]
return name.startswith('cinder-clone-snapshot-')
def create_volume(self, volume):
"""Create a zvol on appliance.
:param volume: volume reference
:return: model update dict for volume reference
"""
self.nms.zvol.create(
self._get_zvol_name(volume['name']),
'%sG' % (volume['size'],),
self.configuration.nexenta_blocksize,
self.configuration.nexenta_sparse)
def extend_volume(self, volume, new_size):
"""Extend an existing volume.
:param volume: volume reference
:param new_size: volume new size in GB
"""
LOG.info(_LI('Extending volume: %(id)s New size: %(size)s GB'),
{'id': volume['id'], 'size': new_size})
self.nms.zvol.set_child_prop(self._get_zvol_name(volume['name']),
'volsize', '%sG' % new_size)
def delete_volume(self, volume):
"""Destroy a zvol on appliance.
:param volume: volume reference
"""
volume_name = self._get_zvol_name(volume['name'])
try:
props = self.nms.zvol.get_child_props(volume_name, 'origin') or {}
self.nms.zvol.destroy(volume_name, '')
except exception.NexentaException as exc:
if 'does not exist' in exc.args[0]:
LOG.info(_LI('Volume %s does not exist, it '
'seems it was already deleted.'), volume_name)
return
if 'zvol has children' in exc.args[0]:
LOG.info(_LI('Volume %s will be deleted later.'), volume_name)
return
raise
origin = props.get('origin')
if origin and self._is_clone_snapshot_name(origin):
volume, snapshot = origin.split('@')
volume = volume.lstrip('%s/' % self.configuration.nexenta_volume)
try:
self.delete_snapshot({'volume_name': volume, 'name': snapshot})
except exception.NexentaException as exc:
LOG.warning(_LW('Cannot delete snapshot %(origin)s: %(exc)s'),
{'origin': origin, 'exc': exc})
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume.
:param volume: new volume reference
:param src_vref: source volume reference
"""
snapshot = {'volume_name': src_vref['name'],
'name': self._get_clone_snapshot_name(volume)}
LOG.debug('Creating temp snapshot of the original volume: '
'%(volume_name)s@%(name)s', snapshot)
# We don't delete this snapshot, because it becomes the origin of the
# new volume. NMS promotes it automatically when the user deletes the
# origin volume. When the cloned volume is deleted, we check its origin
# property and delete the source snapshot if needed.
self.create_snapshot(snapshot)
try:
self.create_volume_from_snapshot(volume, snapshot)
except exception.NexentaException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE(
'Volume creation failed, deleting created snapshot '
'%(volume_name)s@%(name)s'), snapshot)
try:
self.delete_snapshot(snapshot)
except (exception.NexentaException, exception.SnapshotIsBusy):
LOG.warning(_LW('Failed to delete zfs snapshot '
'%(volume_name)s@%(name)s'), snapshot)
raise
def _get_zfs_send_recv_cmd(self, src, dst):
"""Returns rrmgr command for source and destination."""
return utils.get_rrmgr_cmd(src, dst,
compression=self.rrmgr_compression,
tcp_buf_size=self.rrmgr_tcp_buf_size,
connections=self.rrmgr_connections)
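utils.get_rrmgr_cmd itself is not shown in this diff, but the unit test above asserts the exact command it must build ('rrmgr -s zfs -c 1 -q -e -w 1024 -n 2 <src> <dst>'). A hypothetical sketch consistent with that expectation:

def get_rrmgr_cmd(src, dst, compression=None, tcp_buf_size=None,
                  connections=None):
    # Illustrative only; mirrors the command string asserted in
    # TestNexentaISCSIDriver.test_migrate_volume.
    cmd = ['rrmgr', '-s', 'zfs']
    if compression:
        cmd.extend(['-c', str(compression)])
    cmd.append('-q')
    cmd.append('-e')
    if tcp_buf_size:
        cmd.extend(['-w', str(tcp_buf_size)])
    if connections:
        cmd.extend(['-n', str(connections)])
    cmd.extend([src, dst])
    return ' '.join(cmd)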
@staticmethod
def get_nms_for_url(url):
"""Returns initialized nms object for url."""
auto, scheme, user, password, host, port, path = (
utils.parse_nms_url(url))
return jsonrpc.NexentaJSONProxy(scheme, host, port, path, user,
password, auto=auto)
def migrate_volume(self, ctxt, volume, host):
"""Migrate if volume and host are managed by Nexenta appliance.
:param ctxt: context
:param volume: a dictionary describing the volume to migrate
:param host: a dictionary describing the host to migrate to
"""
LOG.debug('Enter: migrate_volume: id=%(id)s, host=%(host)s',
{'id': volume['id'], 'host': host})
false_ret = (False, None)
if volume['status'] not in ('available', 'retyping'):
return false_ret
if 'capabilities' not in host:
return false_ret
capabilities = host['capabilities']
if ('location_info' not in capabilities or
'iscsi_target_portal_port' not in capabilities or
'nms_url' not in capabilities):
return false_ret
nms_url = capabilities['nms_url']
dst_parts = capabilities['location_info'].split(':')
if (capabilities.get('vendor_name') != 'Nexenta' or
dst_parts[0] != self.__class__.__name__ or
capabilities['free_capacity_gb'] < volume['size']):
return false_ret
dst_host, dst_volume = dst_parts[1:]
ssh_bound = False
ssh_bindings = self.nms.appliance.ssh_list_bindings()
for bind in ssh_bindings:
if dst_host.startswith(ssh_bindings[bind][3]):
ssh_bound = True
break
if not ssh_bound:
LOG.warning(_LW("Remote NexentaStor appliance at %s should be "
"SSH-bound."), dst_host)
# Create temporary snapshot of volume on NexentaStor Appliance.
snapshot = {
'volume_name': volume['name'],
'name': utils.get_migrate_snapshot_name(volume)
}
self.create_snapshot(snapshot)
src = '%(volume)s/%(zvol)s@%(snapshot)s' % {
'volume': self.volume,
'zvol': volume['name'],
'snapshot': snapshot['name']
}
dst = ':'.join([dst_host, dst_volume])
try:
self.nms.appliance.execute(self._get_zfs_send_recv_cmd(src, dst))
except exception.NexentaException as exc:
LOG.warning(_LW("Cannot send source snapshot %(src)s to "
"destination %(dst)s. Reason: %(exc)s"),
{'src': src, 'dst': dst, 'exc': exc})
return false_ret
finally:
try:
self.delete_snapshot(snapshot)
except exception.NexentaException as exc:
LOG.warning(_LW("Cannot delete temporary source snapshot "
"%(src)s on NexentaStor Appliance: %(exc)s"),
{'src': src, 'exc': exc})
try:
self.delete_volume(volume)
except exception.NexentaException as exc:
LOG.warning(_LW("Cannot delete source volume %(volume)s on "
"NexentaStor Appliance: %(exc)s"),
{'volume': volume['name'], 'exc': exc})
dst_nms = self.get_nms_for_url(nms_url)
dst_snapshot = '%s/%s@%s' % (dst_volume, volume['name'],
snapshot['name'])
try:
dst_nms.snapshot.destroy(dst_snapshot, '')
except exception.NexentaException as exc:
LOG.warning(_LW("Cannot delete temporary destination snapshot "
"%(dst)s on NexentaStor Appliance: %(exc)s"),
{'dst': dst_snapshot, 'exc': exc})
return True, None
def retype(self, context, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
LOG.debug('Retype volume request %(vol)s to be %(type)s '
'(host: %(host)s), diff %(diff)s.',
{'vol': volume['name'],
'type': new_type,
'host': host,
'diff': diff})
options = dict(
compression='compression',
dedup='dedup',
description='nms:description'
)
retyped = False
migrated = False
capabilities = host['capabilities']
src_backend = self.__class__.__name__
dst_backend = capabilities['location_info'].split(':')[0]
if src_backend != dst_backend:
LOG.warning(_LW('Cannot retype from %(src_backend)s to '
'%(dst_backend)s.'),
{
'src_backend': src_backend,
'dst_backend': dst_backend,
})
return False
hosts = (volume['host'], host['host'])
old, new = hosts
if old != new:
migrated, provider_location = self.migrate_volume(
context, volume, host)
if not migrated:
nms = self.nms
else:
nms_url = capabilities['nms_url']
nms = self.get_nms_for_url(nms_url)
zvol = '%s/%s' % (
capabilities['location_info'].split(':')[-1], volume['name'])
for opt in options:
old, new = diff.get('extra_specs').get(opt, (False, False))
if old != new:
LOG.debug('Changing %(opt)s from %(old)s to %(new)s.',
{'opt': opt, 'old': old, 'new': new})
try:
nms.zvol.set_child_prop(
zvol, options[opt], new)
retyped = True
except exception.NexentaException:
LOG.error(_LE('Error trying to change %(opt)s'
' from %(old)s to %(new)s'),
{'opt': opt, 'old': old, 'new': new})
return False, None
return retyped or migrated, None
def create_snapshot(self, snapshot):
"""Create snapshot of existing zvol on appliance.
:param snapshot: snapshot reference
"""
self.nms.zvol.create_snapshot(
self._get_zvol_name(snapshot['volume_name']),
snapshot['name'], '')
def create_volume_from_snapshot(self, volume, snapshot):
"""Create new volume from other's snapshot on appliance.
:param volume: reference of volume to be created
:param snapshot: reference of source snapshot
"""
self.nms.zvol.clone(
'%s@%s' % (self._get_zvol_name(snapshot['volume_name']),
snapshot['name']),
self._get_zvol_name(volume['name']))
def delete_snapshot(self, snapshot):
"""Delete volume's snapshot on appliance.
:param snapshot: snapshot reference
"""
volume_name = self._get_zvol_name(snapshot['volume_name'])
snapshot_name = '%s@%s' % (volume_name, snapshot['name'])
try:
self.nms.snapshot.destroy(snapshot_name, '')
except exception.NexentaException as exc:
if "does not exist" in exc.args[0]:
LOG.info(_LI('Snapshot %s does not exist, it seems it was '
'already deleted.'), snapshot_name)
elif "snapshot has dependent clones" in exc.args[0]:
LOG.info(_LI('Snapshot %s has dependent clones, will be '
'deleted later.'), snapshot_name)
else:
raise
ctxt = context.get_admin_context()
try:
self.db.volume_get(ctxt, snapshot['volume_name'])
except exception.VolumeNotFound:
LOG.info(_LI('Origin volume %s appears to be removed, try to '
'remove it from backend if it is there.'), volume_name)
if self.nms.volume.object_exists(volume_name):
self.nms.zvol.destroy(volume_name, '')
def local_path(self, volume):
"""Return local path to existing local volume.
We never have local volumes, so it raises NotImplementedError.
:raise: :py:exc:`NotImplementedError`
"""
raise NotImplementedError
def _target_exists(self, target):
"""Check if iSCSI target exist.
:param target: target name
:return: True if target exist, else False
"""
targets = self.nms.stmf.list_targets()
if not targets:
return False
return (target in self.nms.stmf.list_targets())
def _target_group_exists(self, target_group):
"""Check if target group exist.
:param target_group: target group
:return: True if target group exist, else False
"""
groups = self.nms.stmf.list_targetgroups()
if not groups:
return False
return target_group in groups
def _target_member_in_target_group(self, target_group, target_member):
"""Check if target member in target group.
:param target_group: target group
:param target_member: target member
:return: True if target member in target group, else False
:raises: NexentaException if target group doesn't exist
"""
members = self.nms.stmf.list_targetgroup_members(target_group)
if not members:
return False
return target_member in members
def _lu_exists(self, zvol_name):
"""Check if LU exists on appliance.
:param zvol_name: Zvol name
:raises: NexentaException if the zvol does not exist
:return: True if LU exists, else False
"""
try:
return bool(self.nms.scsidisk.lu_exists(zvol_name))
except exception.NexentaException as exc:
if 'does not exist' not in exc.args[0]:
raise
return False
def _is_lu_shared(self, zvol_name):
"""Check if LU exists on appliance and shared.
:param zvol_name: Zvol name
:raises: NexentaException if Zvol not exist
:return: True if LU exists and shared, else False
"""
try:
shared = self.nms.scsidisk.lu_shared(zvol_name) > 0
except exception.NexentaException as exc:
if 'does not exist for zvol' not in exc.args[0]:
raise  # Zvol does not exist
shared = False # LU does not exist
return shared
def create_export(self, _ctx, volume, connector):
"""Create new export for zvol.
:param volume: reference of volume to be exported
:return: iscsiadm-formatted provider location string
"""
model_update = self._do_export(_ctx, volume)
return model_update
def ensure_export(self, _ctx, volume):
self._do_export(_ctx, volume)
def _do_export(self, _ctx, volume):
"""Recreate parts of export if necessary.
:param volume: reference of volume to be exported
"""
zvol_name = self._get_zvol_name(volume['name'])
target_name = self._get_target_name(volume)
target_group_name = self._get_target_group_name(target_name)
entry = None
if not self._lu_exists(zvol_name):
try:
entry = self.nms.scsidisk.create_lu(zvol_name, {})
except exception.NexentaException as exc:
if 'in use' not in exc.args[0]:
raise
LOG.info(_LI('Ignored LU creation error "%s" while ensuring '
'export.'), exc)
if not self._is_lu_shared(zvol_name):
try:
entry = self.nms.scsidisk.add_lun_mapping_entry(zvol_name, {
'target_group': target_group_name})
except exception.NexentaException as exc:
if 'view entry exists' not in exc.args[0]:
raise
LOG.info(_LI('Ignored LUN mapping entry addition error "%s" '
'while ensuring export.'), exc)
model_update = {}
if entry:
provider_location = '%(host)s:%(port)s,1 %(name)s %(lun)s' % {
'host': self.nms_host,
'port': self.configuration.nexenta_iscsi_target_portal_port,
'name': target_name,
'lun': entry['lun'],
}
model_update = {'provider_location': provider_location}
return model_update
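The provider_location assembled here is the usual iscsiadm-style 'portal,tpgt target lun' string that _get_target_name above parses back out; a tiny round-trip illustration with values borrowed from the unit tests:

# Values are illustrative (they match the unit tests in this change).
provider_location = '1.1.1.1:3260,1 iqn:1.1.1.1-0 0'
target_name = provider_location.split(',1 ')[1].split(' ')[0]
assert target_name == 'iqn:1.1.1.1-0'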
def remove_export(self, _ctx, volume):
"""Destroy all resources created to export zvol.
:param volume: reference of volume to be unexported
"""
target_name = self._get_target_name(volume)
self.targets[target_name].remove(volume['name'])
zvol_name = self._get_zvol_name(volume['name'])
self.nms.scsidisk.delete_lu(zvol_name)
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Retrieve stats info for NexentaStor appliance."""
LOG.debug('Updating volume stats')
stats = self.nms.volume.get_child_props(
self.configuration.nexenta_volume, 'health|size|used|available')
total_amount = utils.str2gib_size(stats['size'])
free_amount = utils.str2gib_size(stats['available'])
location_info = '%(driver)s:%(host)s:%(volume)s' % {
'driver': self.__class__.__name__,
'host': self.nms_host,
'volume': self.volume
}
self._stats = {
'vendor_name': 'Nexenta',
'dedup': self.volume_deduplication,
'compression': self.volume_compression,
'description': self.volume_description,
'driver_version': self.VERSION,
'storage_protocol': 'iSCSI',
'total_capacity_gb': total_amount,
'free_capacity_gb': free_amount,
'reserved_percentage': self.configuration.reserved_percentage,
'QoS_support': False,
'volume_backend_name': self.backend_name,
'location_info': location_info,
'iscsi_target_portal_port': self.iscsi_target_portal_port,
'nms_url': self.nms.url
}


@@ -0,0 +1,93 @@
# Copyright 2016 Nexenta Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`nexenta.jsonrpc` -- Nexenta-specific JSON RPC client
=====================================================================
.. automodule:: nexenta.jsonrpc
"""
import socket
from oslo_log import log as logging
from oslo_serialization import jsonutils
import requests
from cinder import exception
from cinder.utils import retry
LOG = logging.getLogger(__name__)
socket.setdefaulttimeout(100)
class NexentaJSONProxy(object):
retry_exc_tuple = (requests.exceptions.ConnectionError,)
def __init__(self, scheme, host, port, path, user, password, auto=False,
obj=None, method=None):
self.scheme = scheme.lower()
self.host = host
self.port = port
self.path = path
self.user = user
self.password = password
self.auto = auto
self.obj = obj
self.method = method
def __getattr__(self, name):
if not self.obj:
obj, method = name, None
elif not self.method:
obj, method = self.obj, name
else:
obj, method = '%s.%s' % (self.obj, self.method), name
return NexentaJSONProxy(self.scheme, self.host, self.port, self.path,
self.user, self.password, self.auto, obj,
method)
@property
def url(self):
return '%s://%s:%s%s' % (self.scheme, self.host, self.port, self.path)
def __hash__(self):
return self.url.__hash__()
def __repr__(self):
return 'NMS proxy: %s' % self.url
@retry(retry_exc_tuple, retries=6)
def __call__(self, *args):
data = jsonutils.dumps({
'object': self.obj,
'method': self.method,
'params': args
})
auth = ('%s:%s' % (self.user, self.password)).encode('base64')[:-1]
headers = {
'Content-Type': 'application/json',
'Authorization': 'Basic %s' % auth
}
LOG.debug('Sending JSON data: %s', data)
req = requests.post(self.url, data=data, headers=headers)
response = req.json()
req.close()
LOG.debug('Got response: %s', response)
if response.get('error') is not None:
message = response['error'].get('message', '')
raise exception.NexentaException(message)
return response.get('result')
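A short usage sketch (endpoint values are hypothetical): attribute access on the proxy only accumulates the object and method names, and no HTTP request is made until the resulting proxy is called, at which point a payload like the one below is POSTed to the NMS URL.

from cinder.volume.drivers.nexenta import jsonrpc

nms = jsonrpc.NexentaJSONProxy('http', '1.1.1.1', 2000, '/rest/nms',
                               'admin', 'nexenta', auto=True)
call = nms.volume.object_exists              # child proxy, no I/O yet
print('%s.%s' % (call.obj, call.method))     # -> volume.object_exists
print(nms.url)                               # -> http://1.1.1.1:2000/rest/nms
# nms.volume.object_exists('cinder') would POST:
# {"object": "volume", "method": "object_exists", "params": ["cinder"]}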


@@ -15,7 +15,6 @@
import json
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
@@ -24,49 +23,9 @@ from cinder import exception
from cinder.i18n import _, _LE
from cinder.volume import driver
from cinder.volume.drivers.nexenta.nexentaedge import jsonrpc
from cinder.volume.drivers.nexenta import options
nexenta_edge_opts = [
cfg.StrOpt('nexenta_rest_address',
default='',
help='IP address of NexentaEdge management REST API endpoint'),
cfg.IntOpt('nexenta_rest_port',
default=8080,
help='HTTP port to connect to NexentaEdge REST API endpoint'),
cfg.StrOpt('nexenta_rest_protocol',
default='auto',
help='Use http or https for REST connection (default auto)'),
cfg.IntOpt('nexenta_iscsi_target_portal_port',
default=3260,
help='NexentaEdge target portal port'),
cfg.StrOpt('nexenta_rest_user',
default='admin',
help='User name to connect to NexentaEdge'),
cfg.StrOpt('nexenta_rest_password',
default='nexenta',
help='Password to connect to NexentaEdge',
secret=True),
cfg.StrOpt('nexenta_lun_container',
default='',
help='NexentaEdge logical path of bucket for LUNs'),
cfg.StrOpt('nexenta_iscsi_service',
default='',
help='NexentaEdge iSCSI service name'),
cfg.StrOpt('nexenta_client_address',
default='',
help='NexentaEdge iSCSI Gateway client '
'address for non-VIP service'),
cfg.IntOpt('nexenta_blocksize',
default=4096,
help='NexentaEdge iSCSI LUN block size'),
cfg.IntOpt('nexenta_chunksize',
default=16384,
help='NexentaEdge iSCSI LUN object chunk size')
]
CONF = cfg.CONF
CONF.register_opts(nexenta_edge_opts)
LOG = logging.getLogger(__name__)
@@ -75,14 +34,22 @@ class NexentaEdgeISCSIDriver(driver.ISCSIDriver):
Version history:
1.0.0 - Initial driver version.
1.0.1 - Moved opts to options.py.
"""
VERSION = '1.0.0'
VERSION = '1.0.1'
def __init__(self, *args, **kwargs):
super(NexentaEdgeISCSIDriver, self).__init__(*args, **kwargs)
if self.configuration:
self.configuration.append_config_values(nexenta_edge_opts)
self.configuration.append_config_values(
options.NEXENTA_CONNECTION_OPTS)
self.configuration.append_config_values(
options.NEXENTA_ISCSI_OPTS)
self.configuration.append_config_values(
options.NEXENTA_DATASET_OPTS)
self.configuration.append_config_values(
options.NEXENTA_EDGE_OPTS)
self.restapi_protocol = self.configuration.nexenta_rest_protocol
self.restapi_host = self.configuration.nexenta_rest_address
self.restapi_port = self.configuration.nexenta_rest_port


@@ -0,0 +1,817 @@
# Copyright 2016 Nexenta Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`nexenta.nfs` -- Driver to store volumes on NexentaStor Appliance.
=======================================================================
.. automodule:: nexenta.nfs
"""
import hashlib
import os
import re
import six
from eventlet import greenthread
from oslo_log import log as logging
from oslo_utils import units
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.volume.drivers.nexenta import jsonrpc
from cinder.volume.drivers.nexenta import options
from cinder.volume.drivers.nexenta import utils
from cinder.volume.drivers import nfs
VERSION = '1.3.0'
LOG = logging.getLogger(__name__)
class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921
"""Executes volume driver commands on Nexenta Appliance.
Version history:
1.0.0 - Initial driver version.
1.1.0 - Auto sharing for enclosing folder.
1.1.1 - Added caching for NexentaStor appliance 'volroot' value.
1.1.2 - Ignore "folder does not exist" error in delete_volume and
delete_snapshot method.
1.1.3 - Redefined volume_backend_name attribute inherited from
RemoteFsDriver.
1.2.0 - Added migrate and retype methods.
1.3.0 - Extend volume method.
"""
driver_prefix = 'nexenta'
volume_backend_name = 'NexentaNfsDriver'
VERSION = VERSION
VOLUME_FILE_NAME = 'volume'
def __init__(self, *args, **kwargs):
super(NexentaNfsDriver, self).__init__(*args, **kwargs)
if self.configuration:
self.configuration.append_config_values(
options.NEXENTA_CONNECTION_OPTS)
self.configuration.append_config_values(
options.NEXENTA_NFS_OPTS)
self.configuration.append_config_values(
options.NEXENTA_DATASET_OPTS)
self.configuration.append_config_values(
options.NEXENTA_RRMGR_OPTS)
self.nms_cache_volroot = self.configuration.nexenta_nms_cache_volroot
self.rrmgr_compression = self.configuration.nexenta_rrmgr_compression
self.rrmgr_tcp_buf_size = self.configuration.nexenta_rrmgr_tcp_buf_size
self.rrmgr_connections = self.configuration.nexenta_rrmgr_connections
self.nfs_mount_point_base = self.configuration.nexenta_mount_point_base
self.volume_compression = (
self.configuration.nexenta_dataset_compression)
self.volume_deduplication = self.configuration.nexenta_dataset_dedup
self.volume_description = (
self.configuration.nexenta_dataset_description)
self.sparsed_volumes = self.configuration.nexenta_sparsed_volumes
self._nms2volroot = {}
self.share2nms = {}
self.nfs_versions = {}
@property
def backend_name(self):
backend_name = None
if self.configuration:
backend_name = self.configuration.safe_get('volume_backend_name')
if not backend_name:
backend_name = self.__class__.__name__
return backend_name
def do_setup(self, context):
shares_config = getattr(self.configuration, self.driver_prefix +
'_shares_config')
if shares_config:
self.configuration.nfs_shares_config = shares_config
super(NexentaNfsDriver, self).do_setup(context)
self._load_shares_config(shares_config)
self._mount_subfolders()
def check_for_setup_error(self):
"""Verify that the volume for our folder exists.
:raise: :py:exc:`LookupError`
"""
if self.share2nms:
for nfs_share in self.share2nms:
nms = self.share2nms[nfs_share]
volume_name, dataset = self._get_share_datasets(nfs_share)
if not nms.volume.object_exists(volume_name):
raise LookupError(_("Volume %s does not exist in Nexenta "
"Store appliance"), volume_name)
folder = '%s/%s' % (volume_name, dataset)
if not nms.folder.object_exists(folder):
raise LookupError(_("Folder %s does not exist in Nexenta "
"Store appliance"), folder)
self._share_folder(nms, volume_name, dataset)
def migrate_volume(self, ctxt, volume, host):
"""Migrate if volume and host are managed by Nexenta appliance.
:param ctxt: context
:param volume: a dictionary describing the volume to migrate
:param host: a dictionary describing the host to migrate to
"""
LOG.debug('Enter: migrate_volume: id=%(id)s, host=%(host)s',
{'id': volume['id'], 'host': host})
false_ret = (False, None)
if volume['status'] not in ('available', 'retyping'):
LOG.warning(_LW("Volume status must be 'available' or 'retyping'."
" Current volume status: %s"), volume['status'])
return false_ret
if 'capabilities' not in host:
LOG.warning(_LW("Unsupported host. No capabilities found"))
return false_ret
capabilities = host['capabilities']
ns_shares = capabilities['ns_shares']
dst_parts = capabilities['location_info'].split(':')
dst_host, dst_volume = dst_parts[1:]
if (capabilities.get('vendor_name') != 'Nexenta' or
dst_parts[0] != self.__class__.__name__ or
capabilities['free_capacity_gb'] < volume['size']):
return false_ret
nms = self.share2nms[volume['provider_location']]
ssh_bindings = nms.appliance.ssh_list_bindings()
shares = []
for bind in ssh_bindings:
for share in ns_shares:
if (share.startswith(ssh_bindings[bind][3]) and
ns_shares[share] >= volume['size']):
shares.append(share)
if len(shares) == 0:
LOG.warning(_LW("Remote NexentaStor appliance at %s should be "
"SSH-bound."), share)
return false_ret
share = sorted(shares, key=ns_shares.get, reverse=True)[0]
snapshot = {
'volume_name': volume['name'],
'volume_id': volume['id'],
'name': utils.get_migrate_snapshot_name(volume)
}
self.create_snapshot(snapshot)
location = volume['provider_location']
src = '%(share)s/%(volume)s@%(snapshot)s' % {
'share': location.split(':')[1].split('volumes/')[1],
'volume': volume['name'],
'snapshot': snapshot['name']
}
dst = ':'.join([dst_host, dst_volume.split('/volumes/')[1]])
try:
nms.appliance.execute(self._get_zfs_send_recv_cmd(src, dst))
except exception.NexentaException as exc:
LOG.warning(_LW("Cannot send source snapshot %(src)s to "
"destination %(dst)s. Reason: %(exc)s"),
{'src': src, 'dst': dst, 'exc': exc})
return false_ret
finally:
try:
self.delete_snapshot(snapshot)
except exception.NexentaException as exc:
LOG.warning(_LW("Cannot delete temporary source snapshot "
"%(src)s on NexentaStor Appliance: %(exc)s"),
{'src': src, 'exc': exc})
try:
self.delete_volume(volume)
except exception.NexentaException as exc:
LOG.warning(_LW("Cannot delete source volume %(volume)s on "
"NexentaStor Appliance: %(exc)s"),
{'volume': volume['name'], 'exc': exc})
dst_nms = self._get_nms_for_url(capabilities['nms_url'])
dst_snapshot = '%s/%s@%s' % (dst_volume.split('volumes/')[1],
volume['name'], snapshot['name'])
try:
dst_nms.snapshot.destroy(dst_snapshot, '')
except exception.NexentaException as exc:
LOG.warning(_LW("Cannot delete temporary destination snapshot "
"%(dst)s on NexentaStor Appliance: %(exc)s"),
{'dst': dst_snapshot, 'exc': exc})
return True, {'provider_location': share}
def _get_zfs_send_recv_cmd(self, src, dst):
"""Returns rrmgr command for source and destination."""
return utils.get_rrmgr_cmd(src, dst,
compression=self.rrmgr_compression,
tcp_buf_size=self.rrmgr_tcp_buf_size,
connections=self.rrmgr_connections)
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info.
:param volume: volume reference
:param connector: connector reference
"""
export = '%s/%s' % (volume['provider_location'], volume['name'])
data = {'export': export, 'name': 'volume'}
if volume['provider_location'] in self.shares:
data['options'] = self.shares[volume['provider_location']]
return {
'driver_volume_type': self.driver_volume_type,
'data': data
}
def retype(self, context, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
LOG.debug('Retype volume request %(vol)s to be %(type)s '
'(host: %(host)s), diff %(diff)s.',
{'vol': volume['name'],
'type': new_type,
'host': host,
'diff': diff})
options = dict(
compression='compression',
dedup='dedup',
description='nms:description'
)
retyped = False
migrated = False
model_update = None
src_backend = self.__class__.__name__
dst_backend = host['capabilities']['location_info'].split(':')[0]
if src_backend != dst_backend:
LOG.warning(_LW('Cannot retype from %(src_backend)s to '
'%(dst_backend)s.'),
{
'src_backend': src_backend,
'dst_backend': dst_backend
})
return False
hosts = (volume['host'], host['host'])
old, new = hosts
if old != new:
migrated, provider_location = self.migrate_volume(
context, volume, host)
if not migrated:
provider_location = volume['provider_location']
nms = self.share2nms[provider_location]
else:
nms_url = host['capabilities']['nms_url']
nms = self._get_nms_for_url(nms_url)
model_update = provider_location
provider_location = provider_location['provider_location']
share = provider_location.split(':')[1].split('volumes/')[1]
folder = '%(share)s/%(volume)s' % {
'share': share,
'volume': volume['name']
}
for opt in options:
old, new = diff.get('extra_specs').get(opt, (False, False))
if old != new:
LOG.debug('Changing %(opt)s from %(old)s to %(new)s.',
{'opt': opt, 'old': old, 'new': new})
try:
nms.folder.set_child_prop(
folder, options[opt], new)
retyped = True
except exception.NexentaException:
LOG.error(_LE('Error trying to change %(opt)s'
' from %(old)s to %(new)s'),
{'opt': opt, 'old': old, 'new': new})
return False, None
return retyped or migrated, model_update
def _do_create_volume(self, volume):
nfs_share = volume['provider_location']
nms = self.share2nms[nfs_share]
vol, dataset = self._get_share_datasets(nfs_share)
folder = '%s/%s' % (dataset, volume['name'])
LOG.debug('Creating folder on Nexenta Store %s', folder)
nms.folder.create_with_props(
vol, folder,
{'compression': self.configuration.nexenta_dataset_compression}
)
volume_path = self.remote_path(volume)
volume_size = volume['size']
try:
self._share_folder(nms, vol, folder)
if getattr(self.configuration,
self.driver_prefix + '_sparsed_volumes'):
self._create_sparsed_file(nms, volume_path, volume_size)
else:
folder_path = '%s/%s' % (vol, folder)
compression = nms.folder.get_child_prop(
folder_path, 'compression')
if compression != 'off':
                    # Disable compression, otherwise the zero-filled file will
                    # compress away and not actually consume disk space.
nms.folder.set_child_prop(
folder_path, 'compression', 'off')
try:
self._create_regular_file(nms, volume_path, volume_size)
finally:
if compression != 'off':
                        # Restore the original compression value if it was
                        # changed.
nms.folder.set_child_prop(
folder_path, 'compression', compression)
self._set_rw_permissions_for_all(nms, volume_path)
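        # NFSv3 clients do not traverse nested exports, so for servers older
        # than NFSv4 the per-volume folder is mounted as its own sub-share.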
if self._get_nfs_server_version(nfs_share) < 4:
sub_share, mnt_path = self._get_subshare_mount_point(nfs_share,
volume)
self._ensure_share_mounted(sub_share, mnt_path)
except exception.NexentaException:
try:
nms.folder.destroy('%s/%s' % (vol, folder))
except exception.NexentaException:
LOG.warning(_LW("Cannot destroy created folder: "
"%(vol)s/%(folder)s"),
{'vol': vol, 'folder': folder})
raise
def create_volume_from_snapshot(self, volume, snapshot):
"""Create new volume from other's snapshot on appliance.
:param volume: reference of volume to be created
:param snapshot: reference of source snapshot
"""
self._ensure_shares_mounted()
snapshot_vol = self._get_snapshot_volume(snapshot)
nfs_share = snapshot_vol['provider_location']
volume['provider_location'] = nfs_share
nms = self.share2nms[nfs_share]
vol, dataset = self._get_share_datasets(nfs_share)
snapshot_name = '%s/%s/%s@%s' % (vol, dataset, snapshot['volume_name'],
snapshot['name'])
folder = '%s/%s' % (dataset, volume['name'])
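        # folder.clone creates a ZFS clone from the snapshot, so the new
        # volume initially shares its blocks with the source volume.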
nms.folder.clone(snapshot_name, '%s/%s' % (vol, folder))
try:
self._share_folder(nms, vol, folder)
except exception.NexentaException:
try:
nms.folder.destroy('%s/%s' % (vol, folder), '')
except exception.NexentaException:
LOG.warning(_LW("Cannot destroy cloned folder: "
"%(vol)s/%(folder)s"),
{'vol': vol, 'folder': folder})
raise
if self._get_nfs_server_version(nfs_share) < 4:
sub_share, mnt_path = self._get_subshare_mount_point(nfs_share,
volume)
self._ensure_share_mounted(sub_share, mnt_path)
return {'provider_location': volume['provider_location']}
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume.
:param volume: new volume reference
:param src_vref: source volume reference
"""
LOG.info(_LI('Creating clone of volume: %s'), src_vref['id'])
snapshot = {'volume_name': src_vref['name'],
'volume_id': src_vref['id'],
'name': self._get_clone_snapshot_name(volume)}
        # We do not delete this snapshot here because it becomes the origin
        # of the new volume. NMS will promote it automatically when the user
        # deletes the source volume.
self.create_snapshot(snapshot)
try:
return self.create_volume_from_snapshot(volume, snapshot)
except exception.NexentaException:
LOG.error(_LE('Volume creation failed, deleting created snapshot '
'%(volume_name)s@%(name)s'), snapshot)
try:
self.delete_snapshot(snapshot)
except (exception.NexentaException, exception.SnapshotIsBusy):
LOG.warning(_LW('Failed to delete zfs snapshot '
'%(volume_name)s@%(name)s'), snapshot)
raise
def delete_volume(self, volume):
"""Deletes a logical volume.
:param volume: volume reference
"""
nfs_share = volume.get('provider_location')
if nfs_share:
nms = self.share2nms[nfs_share]
vol, parent_folder = self._get_share_datasets(nfs_share)
folder = '%s/%s/%s' % (vol, parent_folder, volume['name'])
            mount_path = self._get_subshare_mount_point(nfs_share, volume)[1]
if mount_path in self._remotefsclient._read_mounts():
self._execute('umount', mount_path, run_as_root=True)
try:
props = nms.folder.get_child_props(folder, 'origin') or {}
nms.folder.destroy(folder, '-r')
except exception.NexentaException as exc:
if 'does not exist' in exc.args[0]:
LOG.info(_LI('Folder %s does not exist, it was '
'already deleted.'), folder)
return
raise
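            # If the volume originated from a clone snapshot, remove that
            # temporary snapshot as well now that no clone depends on it.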
origin = props.get('origin')
if origin and self._is_clone_snapshot_name(origin):
try:
nms.snapshot.destroy(origin, '')
except exception.NexentaException as exc:
if 'does not exist' in exc.args[0]:
LOG.info(_LI('Snapshot %s does not exist, it was '
'already deleted.'), origin)
return
raise
def extend_volume(self, volume, new_size):
"""Extend an existing volume.
:param volume: volume reference
:param new_size: volume new size in GB
"""
LOG.info(_LI('Extending volume: %(id)s New size: %(size)s GB'),
{'id': volume['id'], 'size': new_size})
nfs_share = volume['provider_location']
nms = self.share2nms[nfs_share]
volume_path = self.remote_path(volume)
if getattr(self.configuration,
self.driver_prefix + '_sparsed_volumes'):
self._create_sparsed_file(nms, volume_path, new_size)
else:
block_size_mb = 1
block_count = ((new_size - volume['size']) * units.Gi /
(block_size_mb * units.Mi))
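            # Append zeroes from the old end of the file up to the new size;
            # dd's seek and count are expressed in bs-sized (1 MiB) blocks.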
nms.appliance.execute(
'dd if=/dev/zero seek=%(seek)d of=%(path)s'
' bs=%(bs)dM count=%(count)d' % {
                    'seek': (volume['size'] * units.Gi /
                             (block_size_mb * units.Mi)),
'path': volume_path,
'bs': block_size_mb,
'count': block_count
}
)
def create_snapshot(self, snapshot):
"""Creates a snapshot.
:param snapshot: snapshot reference
"""
volume = self._get_snapshot_volume(snapshot)
nfs_share = volume['provider_location']
nms = self.share2nms[nfs_share]
vol, dataset = self._get_share_datasets(nfs_share)
folder = '%s/%s/%s' % (vol, dataset, volume['name'])
nms.folder.create_snapshot(folder, snapshot['name'], '-r')
def delete_snapshot(self, snapshot):
"""Deletes a snapshot.
:param snapshot: snapshot reference
"""
volume = self._get_snapshot_volume(snapshot)
nfs_share = volume['provider_location']
nms = self.share2nms[nfs_share]
vol, dataset = self._get_share_datasets(nfs_share)
folder = '%s/%s/%s' % (vol, dataset, volume['name'])
try:
nms.snapshot.destroy('%s@%s' % (folder, snapshot['name']), '')
except exception.NexentaException as exc:
if 'does not exist' in exc.args[0]:
LOG.info(_LI('Snapshot %(folder)s@%(snapshot)s does not '
'exist, it was already deleted.'),
{
'folder': folder,
'snapshot': snapshot,
})
return
elif 'has dependent clones' in exc.args[0]:
LOG.info(_LI('Snapshot %(folder)s@%(snapshot)s has dependent '
'clones, it will be deleted later.'),
{
'folder': folder,
'snapshot': snapshot,
})
return
def _create_sparsed_file(self, nms, path, size):
"""Creates file with 0 disk usage.
:param nms: nms object
:param path: path to new file
:param size: size of file
"""
nms.appliance.execute(
'truncate --size %(size)dG %(path)s' % {
'path': path,
'size': size
}
)
def _create_regular_file(self, nms, path, size):
"""Creates regular file of given size.
Takes a lot of time for large files.
:param nms: nms object
:param path: path to new file
:param size: size of file
"""
block_size_mb = 1
block_count = size * units.Gi / (block_size_mb * units.Mi)
        LOG.info(_LI('Creating regular file: %s. '
                     'This may take some time.'), path)
nms.appliance.execute(
'dd if=/dev/zero of=%(path)s bs=%(bs)dM count=%(count)d' % {
'path': path,
'bs': block_size_mb,
'count': block_count
}
)
LOG.info(_LI('Regular file: %s created.'), path)
def _set_rw_permissions_for_all(self, nms, path):
"""Sets 666 permissions for the path.
:param nms: nms object
:param path: path to file
"""
nms.appliance.execute('chmod ugo+rw %s' % path)
def local_path(self, volume):
"""Get volume path (mounted locally fs path) for given volume.
:param volume: volume reference
"""
nfs_share = volume['provider_location']
return os.path.join(self._get_mount_point_for_share(nfs_share),
volume['name'], 'volume')
def _get_mount_point_for_share(self, nfs_share):
"""Returns path to mount point NFS share.
:param nfs_share: example 172.18.194.100:/var/nfs
"""
nfs_share = nfs_share.encode('utf-8')
return os.path.join(self.configuration.nexenta_mount_point_base,
hashlib.md5(nfs_share).hexdigest())
def remote_path(self, volume):
"""Get volume path (mounted remotely fs path) for given volume.
:param volume: volume reference
"""
nfs_share = volume['provider_location']
share = nfs_share.split(':')[1].rstrip('/')
return '%s/%s/volume' % (share, volume['name'])
def _share_folder(self, nms, volume, folder):
"""Share NFS folder on NexentaStor Appliance.
:param nms: nms object
:param volume: volume name
:param folder: folder name
"""
path = '%s/%s' % (volume, folder.lstrip('/'))
share_opts = {
'read_write': '*',
'read_only': '',
'root': 'nobody',
'extra_options': 'anon=0',
'recursive': 'true',
'anonymous_rw': 'true',
}
LOG.debug('Sharing folder %s on Nexenta Store', folder)
nms.netstorsvc.share_folder('svc:/network/nfs/server:default', path,
share_opts)
def _load_shares_config(self, share_file):
self.shares = {}
self.share2nms = {}
for share in self._read_config_file(share_file):
# A configuration line may be either:
# host:/share_name http://user:pass@host:[port]/
# or
# host:/share_name http://user:pass@host:[port]/
# -o options=123,rw --other
if not share.strip():
continue
if share.startswith('#'):
continue
share_info = re.split(r'\s+', share, 2)
share_address = share_info[0].strip()
nms_url = share_info[1].strip()
share_opts = share_info[2].strip() if len(share_info) > 2 else None
if not re.match(r'.+:/.+', share_address):
LOG.warning(_LW("Share %s ignored due to invalid format. "
"Must be of form address:/export."),
share_address)
continue
self.shares[share_address] = share_opts
self.share2nms[share_address] = self._get_nms_for_url(nms_url)
LOG.debug('Shares loaded: %s', self.shares)
def _get_subshare_mount_point(self, nfs_share, volume):
mnt_path = '%s/%s' % (
self._get_mount_point_for_share(nfs_share), volume['name'])
sub_share = '%s/%s' % (nfs_share, volume['name'])
return sub_share, mnt_path
def _ensure_share_mounted(self, nfs_share, mount_path=None):
"""Ensure that NFS share is mounted on the host.
Unlike the parent method this one accepts mount_path as an optional
parameter and uses it as a mount point if provided.
:param nfs_share: NFS share name
:param mount_path: mount path on the host
"""
mnt_flags = []
if self.shares.get(nfs_share) is not None:
mnt_flags = self.shares[nfs_share].split()
num_attempts = max(1, self.configuration.nfs_mount_attempts)
for attempt in range(num_attempts):
try:
if mount_path is None:
self._remotefsclient.mount(nfs_share, mnt_flags)
else:
if mount_path in self._remotefsclient._read_mounts():
LOG.info(_LI('Already mounted: %s'), mount_path)
return
self._execute('mkdir', '-p', mount_path,
check_exit_code=False)
self._remotefsclient._mount_nfs(nfs_share, mount_path,
mnt_flags)
return
except Exception as e:
if attempt == (num_attempts - 1):
LOG.error(_LE('Mount failure for %(share)s after '
'%(count)d attempts.'), {
'share': nfs_share,
'count': num_attempts})
raise exception.NfsException(six.text_type(e))
LOG.warning(
_LW('Mount attempt %(attempt)d failed: %(error)s. '
'Retrying mount ...'), {
'attempt': attempt,
'error': e})
greenthread.sleep(1)
def _mount_subfolders(self):
ctxt = context.get_admin_context()
vol_entries = self.db.volume_get_all_by_host(ctxt, self.host)
for vol in vol_entries:
nfs_share = vol['provider_location']
if ((nfs_share in self.shares) and
(self._get_nfs_server_version(nfs_share) < 4)):
sub_share, mnt_path = self._get_subshare_mount_point(
nfs_share, vol)
self._ensure_share_mounted(sub_share, mnt_path)
def _get_nfs_server_version(self, share):
if not self.nfs_versions.get(share):
nms = self.share2nms[share]
nfs_opts = nms.netsvc.get_confopts(
'svc:/network/nfs/server:default', 'configure')
try:
self.nfs_versions[share] = int(
nfs_opts['nfs_server_versmax']['current'])
except KeyError:
self.nfs_versions[share] = int(
nfs_opts['server_versmax']['current'])
return self.nfs_versions[share]
def _get_capacity_info(self, nfs_share):
"""Calculate available space on the NFS share.
:param nfs_share: example 172.18.194.100:/var/nfs
"""
nms = self.share2nms[nfs_share]
ns_volume, ns_folder = self._get_share_datasets(nfs_share)
folder_props = nms.folder.get_child_props('%s/%s' % (ns_volume,
ns_folder),
'used|available')
free = utils.str2size(folder_props['available'])
allocated = utils.str2size(folder_props['used'])
return free + allocated, free, allocated
def _get_nms_for_url(self, url):
"""Returns initialized nms object for url."""
auto, scheme, user, password, host, port, path = (
utils.parse_nms_url(url))
return jsonrpc.NexentaJSONProxy(scheme, host, port, path, user,
password, auto=auto)
def _get_snapshot_volume(self, snapshot):
ctxt = context.get_admin_context()
return db.volume_get(ctxt, snapshot['volume_id'])
def _get_volroot(self, nms):
"""Returns volroot property value from NexentaStor appliance."""
if not self.nms_cache_volroot:
return nms.server.get_prop('volroot')
if nms not in self._nms2volroot:
self._nms2volroot[nms] = nms.server.get_prop('volroot')
return self._nms2volroot[nms]
def _get_share_datasets(self, nfs_share):
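        # For example (illustrative values), with volroot '/volumes' and share
        # '10.0.0.1:/volumes/cinder/nfs' this returns ('cinder', 'nfs').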
nms = self.share2nms[nfs_share]
volroot = self._get_volroot(nms)
path = nfs_share.split(':')[1][len(volroot):].strip('/')
volume_name = path.split('/')[0]
folder_name = '/'.join(path.split('/')[1:])
return volume_name, folder_name
def _get_clone_snapshot_name(self, volume):
"""Return name for snapshot that will be used to clone the volume."""
return 'cinder-clone-snapshot-%(id)s' % volume
def _is_clone_snapshot_name(self, snapshot):
"""Check if snapshot is created for cloning."""
name = snapshot.split('@')[-1]
return name.startswith('cinder-clone-snapshot-')
def _update_volume_stats(self):
"""Retrieve stats info for NexentaStor appliance."""
LOG.debug('Updating volume stats')
total_space = 0
free_space = 0
shares_with_capacities = {}
for mounted_share in self._mounted_shares:
total, free, allocated = self._get_capacity_info(mounted_share)
shares_with_capacities[mounted_share] = utils.str2gib_size(total)
if total_space < utils.str2gib_size(total):
total_space = utils.str2gib_size(total)
if free_space < utils.str2gib_size(free):
free_space = utils.str2gib_size(free)
share = mounted_share
location_info = '%(driver)s:%(share)s' % {
'driver': self.__class__.__name__,
'share': share
}
nms_url = self.share2nms[share].url
self._stats = {
'vendor_name': 'Nexenta',
'dedup': self.volume_deduplication,
'compression': self.volume_compression,
'description': self.volume_description,
'nms_url': nms_url,
'ns_shares': shares_with_capacities,
'driver_version': self.VERSION,
'storage_protocol': 'NFS',
'total_capacity_gb': total_space,
'free_capacity_gb': free_space,
'reserved_percentage': self.configuration.reserved_percentage,
'QoS_support': False,
'location_info': location_info,
'volume_backend_name': self.backend_name,
'nfs_mount_point_base': self.nfs_mount_point_base
}

View File

@ -0,0 +1,149 @@
# Copyright 2016 Nexenta Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`nexenta.options` -- Contains configuration options for Nexenta drivers.
=============================================================================
.. automodule:: nexenta.options
"""
from oslo_config import cfg
NEXENTA_EDGE_OPTS = [
cfg.StrOpt('nexenta_rest_address',
default='',
help='IP address of NexentaEdge management REST API endpoint'),
cfg.StrOpt('nexenta_rest_user',
default='admin',
help='User name to connect to NexentaEdge'),
cfg.StrOpt('nexenta_rest_password',
default='nexenta',
help='Password to connect to NexentaEdge',
secret=True),
cfg.StrOpt('nexenta_lun_container',
default='',
help='NexentaEdge logical path of bucket for LUNs'),
cfg.StrOpt('nexenta_iscsi_service',
default='',
help='NexentaEdge iSCSI service name'),
cfg.StrOpt('nexenta_client_address',
default='',
help='NexentaEdge iSCSI Gateway client '
'address for non-VIP service'),
    cfg.IntOpt('nexenta_chunksize',
default=16384,
help='NexentaEdge iSCSI LUN object chunk size')
]
NEXENTA_CONNECTION_OPTS = [
cfg.StrOpt('nexenta_host',
default='',
help='IP address of Nexenta SA'),
cfg.IntOpt('nexenta_rest_port',
default=8080,
help='HTTP port to connect to Nexenta REST API server'),
cfg.StrOpt('nexenta_rest_protocol',
default='auto',
choices=['http', 'https', 'auto'],
help='Use http or https for REST connection (default auto)'),
cfg.StrOpt('nexenta_user',
default='admin',
help='User name to connect to Nexenta SA'),
cfg.StrOpt('nexenta_password',
default='nexenta',
help='Password to connect to Nexenta SA',
secret=True),
]
NEXENTA_ISCSI_OPTS = [
cfg.IntOpt('nexenta_iscsi_target_portal_port',
default=3260,
help='Nexenta target portal port'),
cfg.StrOpt('nexenta_volume',
default='cinder',
help='SA Pool that holds all volumes'),
cfg.StrOpt('nexenta_target_prefix',
default='iqn.1986-03.com.sun:02:cinder-',
help='IQN prefix for iSCSI targets'),
cfg.StrOpt('nexenta_target_group_prefix',
default='cinder/',
help='Prefix for iSCSI target groups on SA'),
]
NEXENTA_NFS_OPTS = [
cfg.StrOpt('nexenta_shares_config',
default='/etc/cinder/nfs_shares',
help='File with the list of available nfs shares'),
cfg.StrOpt('nexenta_mount_point_base',
default='$state_path/mnt',
help='Base directory that contains NFS share mount points'),
cfg.BoolOpt('nexenta_sparsed_volumes',
default=True,
help='Enables or disables the creation of volumes as '
'sparsed files that take no space. If disabled '
                     '(False), the volume is created as a regular file, '
'which takes a long time.'),
cfg.BoolOpt('nexenta_nms_cache_volroot',
default=True,
                help=('If set to True, cache the NexentaStor appliance '
                      'volroot option value.'))
]
NEXENTA_DATASET_OPTS = [
cfg.StrOpt('nexenta_dataset_compression',
default='on',
choices=['on', 'off', 'gzip', 'gzip-1', 'gzip-2', 'gzip-3',
'gzip-4', 'gzip-5', 'gzip-6', 'gzip-7', 'gzip-8',
'gzip-9', 'lzjb', 'zle', 'lz4'],
help='Compression value for new ZFS folders.'),
cfg.StrOpt('nexenta_dataset_dedup',
default='off',
choices=['on', 'off', 'sha256', 'verify', 'sha256, verify'],
help='Deduplication value for new ZFS folders.'),
cfg.StrOpt('nexenta_dataset_description',
default='',
help='Human-readable description for the folder.'),
    cfg.IntOpt('nexenta_blocksize',
default=4096,
help='Block size for datasets'),
cfg.IntOpt('nexenta_ns5_blocksize',
default=32,
help='Block size for datasets'),
cfg.BoolOpt('nexenta_sparse',
default=False,
help='Enables or disables the creation of sparse datasets'),
]
NEXENTA_RRMGR_OPTS = [
cfg.IntOpt('nexenta_rrmgr_compression',
default=0,
help=('Enable stream compression, level 1..9. 1 - gives best '
'speed; 9 - gives best compression.')),
cfg.IntOpt('nexenta_rrmgr_tcp_buf_size',
default=4096,
               help='TCP buffer size in kilobytes.'),
cfg.IntOpt('nexenta_rrmgr_connections',
default=2,
help='Number of TCP connections.'),
]
CONF = cfg.CONF
CONF.register_opts(NEXENTA_CONNECTION_OPTS)
CONF.register_opts(NEXENTA_ISCSI_OPTS)
CONF.register_opts(NEXENTA_DATASET_OPTS)
CONF.register_opts(NEXENTA_NFS_OPTS)
CONF.register_opts(NEXENTA_RRMGR_OPTS)
CONF.register_opts(NEXENTA_EDGE_OPTS)
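# A minimal cinder.conf backend section using a few of these options; the
# section name, driver path and values below are illustrative assumptions:
#
#   [nexenta-nfs]
#   volume_driver = cinder.volume.drivers.nexenta.nfs.NexentaNfsDriver
#   nexenta_shares_config = /etc/cinder/nfs_shares
#   nexenta_mount_point_base = $state_path/mnt
#   nexenta_dataset_compression = on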

View File

@ -0,0 +1,165 @@
# Copyright 2013 Nexenta Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`nexenta.utils` -- Nexenta-specific utils functions.
=========================================================
.. automodule:: nexenta.utils
"""
import re
import six
from oslo_utils import units
import six.moves.urllib.parse as urlparse
from cinder.i18n import _
def str2size(s, scale=1024):
"""Convert size-string.
String format: <value>[:space:]<B | K | M | ...> to bytes.
:param s: size-string
:param scale: base size
"""
if not s:
return 0
if isinstance(s, int):
return s
match = re.match(r'^([\.\d]+)\s*([BbKkMmGgTtPpEeZzYy]?)', s)
if match is None:
raise ValueError(_('Invalid value: "%s"') % s)
groups = match.groups()
value = float(groups[0])
    suffix = groups[1].upper() if groups[1] else 'B'
types = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
for i, t in enumerate(types):
if suffix == t:
return int(value * pow(scale, i))
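# For example, str2size('2K') == 2048 and str2size('1.5M') == 1572864.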
def str2gib_size(s):
"""Covert size-string to size in gigabytes."""
size_in_bytes = str2size(s)
return size_in_bytes // units.Gi
def get_rrmgr_cmd(src, dst, compression=None, tcp_buf_size=None,
connections=None):
"""Returns rrmgr command for source and destination."""
cmd = ['rrmgr', '-s', 'zfs']
if compression:
cmd.extend(['-c', '%s' % compression])
cmd.append('-q')
cmd.append('-e')
if tcp_buf_size:
cmd.extend(['-w', six.text_type(tcp_buf_size)])
if connections:
cmd.extend(['-n', six.text_type(connections)])
cmd.extend([src, dst])
return ' '.join(cmd)
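# For example, get_rrmgr_cmd('vol/a@snap', 'host:vol/a', compression=1,
# tcp_buf_size=4096, connections=2) returns
# 'rrmgr -s zfs -c 1 -q -e -w 4096 -n 2 vol/a@snap host:vol/a'.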
def parse_nms_url(url):
"""Parse NMS url into normalized parts like scheme, user, host and others.
Example NMS URL:
auto://admin:nexenta@192.168.1.1:2000/
NMS URL parts:
auto True if url starts with auto://, protocol will be
automatically switched to https if http not
supported;
scheme (auto) connection protocol (http or https);
user (admin) NMS user;
password (nexenta) NMS password;
host (192.168.1.1) NMS host;
port (2000) NMS port.
:param url: url string
:return: tuple (auto, scheme, user, password, host, port, path)
"""
pr = urlparse.urlparse(url)
scheme = pr.scheme
auto = scheme == 'auto'
if auto:
scheme = 'http'
user = 'admin'
password = 'nexenta'
if '@' not in pr.netloc:
host_and_port = pr.netloc
else:
user_and_password, host_and_port = pr.netloc.split('@', 1)
if ':' in user_and_password:
user, password = user_and_password.split(':')
else:
user = user_and_password
if ':' in host_and_port:
host, port = host_and_port.split(':', 1)
else:
host, port = host_and_port, '2000'
return auto, scheme, user, password, host, port, '/rest/nms/'
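# For example, parse_nms_url('auto://admin:nexenta@192.168.1.1:2000/') returns
# (True, 'http', 'admin', 'nexenta', '192.168.1.1', '2000', '/rest/nms/').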
def parse_nef_url(url):
"""Parse NMS url into normalized parts like scheme, user, host and others.
Example NMS URL:
auto://admin:nexenta@192.168.1.1:8080/
    NEF URL parts:
auto True if url starts with auto://, protocol will be
automatically switched to https if http not
supported;
scheme (auto) connection protocol (http or https);
        user (admin) NEF user;
        password (nexenta) NEF password;
        host (192.168.1.1) NEF host;
        port (8080) NEF port.
:param url: url string
:return: tuple (auto, scheme, user, password, host, port)
"""
pr = urlparse.urlparse(url)
scheme = pr.scheme
auto = scheme == 'auto'
if auto:
scheme = 'http'
user = 'admin'
password = 'nexenta'
if '@' not in pr.netloc:
host_and_port = pr.netloc
else:
user_and_password, host_and_port = pr.netloc.split('@', 1)
if ':' in user_and_password:
user, password = user_and_password.split(':')
else:
user = user_and_password
if ':' in host_and_port:
host, port = host_and_port.split(':', 1)
else:
host, port = host_and_port, '8080'
return auto, scheme, user, password, host, port
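# For example, parse_nef_url('auto://admin:nexenta@192.168.1.1:8080/') returns
# (True, 'http', 'admin', 'nexenta', '192.168.1.1', '8080').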
def get_migrate_snapshot_name(volume):
"""Return name for snapshot that will be used to migrate the volume."""
return 'cinder-migrate-snapshot-%(id)s' % volume

View File

@ -0,0 +1,5 @@
features:
  - Added volume migration and extend support to the Nexenta NFS driver.
  - Added retype support to the Nexenta iSCSI and NFS drivers.
upgrades:
  - Refactored the Nexenta iSCSI driver to use a single target and target group with multiple zvols.

View File

@ -106,6 +106,7 @@ cinder.tests.unit.test_netapp
cinder.tests.unit.test_netapp_nfs
cinder.tests.unit.test_netapp_ssc
cinder.tests.unit.test_nexenta_edge
cinder.tests.unit.test_nexenta
cinder.tests.unit.test_nfs
cinder.tests.unit.test_nimble
cinder.tests.unit.test_prophetstor_dpl