Merge "Fix config registration in cinder volume drivers."
commit dac54e709a
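This change moves the Cinder volume drivers and their tests off the legacy cinder.flags.FLAGS global and onto oslo.config's cfg.CONF for registering and reading options (and onto the per-backend self.configuration object where one exists). A minimal sketch of the registration pattern the patch adopts, assuming the oslo.config package of this era; the option name is illustrative and not part of the patch:

    from oslo.config import cfg

    example_opts = [
        cfg.StrOpt('example_target_prefix',
                   default='iqn.2010-10.org.example:',
                   help='Illustrative option, not registered by Cinder.'),
    ]

    CONF = cfg.CONF
    CONF.register_opts(example_opts)   # replaces FLAGS.register_opts(...)

    print(CONF.example_target_prefix)  # options are read off CONF, not FLAGS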
@@ -120,6 +120,8 @@ class TestCoraidDriver(test.TestCase):
         configuration.coraid_user = fake_esm_username
         configuration.coraid_group = fake_esm_group
         configuration.coraid_password = fake_esm_password
+        configuration.volume_name_template = "volume-%s"
+        configuration.snapshot_name_template = "snapshot-%s"
 
         self.drv = CoraidDriver(configuration=configuration)
         self.drv.do_setup({})
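The hunk above feeds the driver a configuration object that now also carries the name templates the Coraid driver reads via self.configuration. A hedged sketch of such a stand-in, assuming only that the driver reads plain attributes off the object; the FakeConfiguration class is invented for illustration:

    class FakeConfiguration(object):
        # Invented helper: any attribute handed in becomes readable
        # by the driver under test.
        def __init__(self, **kwargs):
            for name, value in kwargs.items():
                setattr(self, name, value)

    configuration = FakeConfiguration(
        coraid_user='fake_esm_username',
        coraid_group='fake_esm_group',
        coraid_password='fake_esm_password',
        volume_name_template='volume-%s',
        snapshot_name_template='snapshot-%s')
    print(configuration.volume_name_template % 'abc')  # volume-abc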
@@ -87,7 +87,7 @@ class GlusterFsDriverTestCase(test.TestCase):
 
     def test_local_path(self):
         """local_path common use case."""
-        glusterfs.FLAGS.glusterfs_mount_point_base = self.TEST_MNT_POINT_BASE
+        glusterfs.CONF.glusterfs_mount_point_base = self.TEST_MNT_POINT_BASE
         drv = self._driver
 
         volume = DumbVolume()
@@ -188,7 +188,7 @@ class GlusterFsDriverTestCase(test.TestCase):
         """_get_mount_point_for_share should calculate correct value."""
         drv = self._driver
 
-        glusterfs.FLAGS.glusterfs_mount_point_base = self.TEST_MNT_POINT_BASE
+        glusterfs.CONF.glusterfs_mount_point_base = self.TEST_MNT_POINT_BASE
 
         self.assertEqual('/mnt/test/ab03ab34eaca46a5fb81878f7e9b91fc',
                          drv._get_mount_point_for_share(
@@ -206,7 +206,7 @@ class GlusterFsDriverTestCase(test.TestCase):
             (df_total_size, df_avail)
         df_output = df_head + df_data
 
-        setattr(glusterfs.FLAGS, 'glusterfs_disk_util', 'df')
+        setattr(glusterfs.CONF, 'glusterfs_disk_util', 'df')
 
         mox.StubOutWithMock(drv, '_get_mount_point_for_share')
         drv._get_mount_point_for_share(self.TEST_EXPORT1).\
@@ -225,7 +225,7 @@ class GlusterFsDriverTestCase(test.TestCase):
 
         mox.VerifyAll()
 
-        delattr(glusterfs.FLAGS, 'glusterfs_disk_util')
+        delattr(glusterfs.CONF, 'glusterfs_disk_util')
 
     def test_get_available_capacity_with_du(self):
         """_get_available_capacity should calculate correct value."""
@@ -368,7 +368,7 @@ class GlusterFsDriverTestCase(test.TestCase):
         """do_setup should throw error if shares config is not configured."""
         drv = self._driver
 
-        glusterfs.FLAGS.glusterfs_shares_config = self.TEST_SHARES_CONFIG_FILE
+        glusterfs.CONF.glusterfs_shares_config = self.TEST_SHARES_CONFIG_FILE
 
         self.assertRaises(exception.GlusterfsException,
                           drv.do_setup, IsA(context.RequestContext))
@@ -378,7 +378,7 @@ class GlusterFsDriverTestCase(test.TestCase):
         mox = self._mox
         drv = self._driver
 
-        glusterfs.FLAGS.glusterfs_shares_config = self.TEST_SHARES_CONFIG_FILE
+        glusterfs.CONF.glusterfs_shares_config = self.TEST_SHARES_CONFIG_FILE
 
         mox.StubOutWithMock(os.path, 'exists')
         os.path.exists(self.TEST_SHARES_CONFIG_FILE).AndReturn(True)
@@ -457,7 +457,7 @@ class GlusterFsDriverTestCase(test.TestCase):
         drv = self._driver
         volume = self._simple_volume()
 
-        setattr(glusterfs.FLAGS, 'glusterfs_sparsed_volumes', True)
+        setattr(glusterfs.CONF, 'glusterfs_sparsed_volumes', True)
 
         mox.StubOutWithMock(drv, '_create_sparsed_file')
         mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
@@ -471,7 +471,7 @@ class GlusterFsDriverTestCase(test.TestCase):
 
         mox.VerifyAll()
 
-        delattr(glusterfs.FLAGS, 'glusterfs_sparsed_volumes')
+        delattr(glusterfs.CONF, 'glusterfs_sparsed_volumes')
 
     def test_create_nonsparsed_volume(self):
         mox = self._mox
@@ -77,9 +77,9 @@ class ScalityDriverTestCase(test.TestCase):
             raise e
 
     def _configure_driver(self):
-        scality.FLAGS.scality_sofs_config = self.TEST_CONFIG
-        scality.FLAGS.scality_sofs_mount_point = self.TEST_MOUNT
-        scality.FLAGS.scality_sofs_volume_dir = self.TEST_VOLDIR
+        scality.CONF.scality_sofs_config = self.TEST_CONFIG
+        scality.CONF.scality_sofs_mount_point = self.TEST_MOUNT
+        scality.CONF.scality_sofs_volume_dir = self.TEST_VOLDIR
 
     def _execute_wrapper(self, cmd, *args, **kwargs):
         try:
@@ -116,13 +116,13 @@ class ScalityDriverTestCase(test.TestCase):
 
     def test_setup_no_config(self):
         """Missing SOFS configuration shall raise an error."""
-        scality.FLAGS.scality_sofs_config = None
+        scality.CONF.scality_sofs_config = None
         self.assertRaises(exception.VolumeBackendAPIException,
                           self._driver.do_setup, None)
 
     def test_setup_missing_config(self):
         """Non-existent SOFS configuration file shall raise an error."""
-        scality.FLAGS.scality_sofs_config = 'nonexistent.conf'
+        scality.CONF.scality_sofs_config = 'nonexistent.conf'
         self.assertRaises(exception.VolumeBackendAPIException,
                           self._driver.do_setup, None)
 
@@ -20,13 +20,14 @@ Unit tests for Windows Server 2012 OpenStack Cinder volume driver
 """
 import sys
 
 import cinder.flags
+from oslo.config import cfg
 
 from cinder.tests.windows import basetestcase
 from cinder.tests.windows import db_fakes
 from cinder.tests.windows import windowsutils
 from cinder.volume.drivers import windows
 
-FLAGS = cinder.flags.FLAGS
+CONF = cfg.CONF
 
 
 class TestWindowsDriver(basetestcase.BaseTestCase):
@@ -88,19 +89,19 @@ class TestWindowsDriver(basetestcase.BaseTestCase):
                 self._wutils.delete_snapshot(self._snapshot_data['name'])
             if (self._connector_data and
                     self._wutils.initiator_id_exists(
-                        "%s%s" % (FLAGS.iscsi_target_prefix,
+                        "%s%s" % (CONF.iscsi_target_prefix,
                                   self._volume_data['name']),
                         self._connector_data['initiator'])):
-                target_name = "%s%s" % (FLAGS.iscsi_target_prefix,
+                target_name = "%s%s" % (CONF.iscsi_target_prefix,
                                         self._volume_data['name'])
                 initiator_name = self._connector_data['initiator']
                 self._wutils.delete_initiator_id(target_name, initiator_name)
             if (self._volume_data and
                     self._wutils.export_exists("%s%s" %
-                                               (FLAGS.iscsi_target_prefix,
+                                               (CONF.iscsi_target_prefix,
                                                 self._volume_data['name']))):
                 self._wutils.delete_export(
-                    "%s%s" % (FLAGS.iscsi_target_prefix,
+                    "%s%s" % (CONF.iscsi_target_prefix,
                               self._volume_data['name']))
 
         finally:
@@ -182,7 +183,7 @@ class TestWindowsDriver(basetestcase.BaseTestCase):
         volume_name = self._volume_data['name']
         self.assertEquals(
             retval,
-            {'provider_location': "%s%s" % (FLAGS.iscsi_target_prefix,
+            {'provider_location': "%s%s" % (CONF.iscsi_target_prefix,
                                             volume_name)})
 
     def test_initialize_connection(self):
@@ -54,7 +54,7 @@ def get_configured_driver(server='ignore_server', path='ignore_path'):
 class DriverTestCase(test.TestCase):
 
     def assert_flag(self, flagname):
-        self.assertTrue(hasattr(driver.FLAGS, flagname))
+        self.assertTrue(hasattr(driver.CONF, flagname))
 
     def test_config_options(self):
         self.assert_flag('xenapi_connection_url')
@@ -210,10 +210,10 @@ class DriverTestCase(test.TestCase):
     drv.nfs_ops = ops
     drv.db = db
 
-    mock.StubOutWithMock(driver, 'FLAGS')
-    driver.FLAGS.xenapi_nfs_server = server
-    driver.FLAGS.xenapi_nfs_serverpath = serverpath
-    driver.FLAGS.xenapi_sr_base_path = sr_base_path
+    mock.StubOutWithMock(driver, 'CONF')
+    driver.CONF.xenapi_nfs_server = server
+    driver.CONF.xenapi_nfs_serverpath = serverpath
+    driver.CONF.xenapi_sr_base_path = sr_base_path
 
     return mock, drv
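The xenapi test hunk above stubs the module-level CONF with mox and then assigns the attributes the code under test will read. A hedged sketch of the same idea without mox, using an invented stand-in module and stub class:

    import types

    driver = types.ModuleType('driver')   # stand-in for the real module
    driver.CONF = object()                # whatever the module exposes

    class StubConf(object):
        xenapi_nfs_server = 'server'
        xenapi_nfs_serverpath = 'path'
        xenapi_sr_base_path = '/sr/base'

    original = driver.CONF                # keep it to restore in tearDown
    driver.CONF = StubConf()
    print(driver.CONF.xenapi_nfs_server)  # code under test now sees the stub
    driver.CONF = original                # restore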
@@ -22,15 +22,11 @@ Contrib : Larry Matter <support@coraid.com>
 """
 
 import cookielib
-import os
-import time
 import urllib2
 
+from oslo.config import cfg
+
 from cinder import context
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import jsonutils
 from cinder.openstack.common import log as logging
 from cinder.volume import driver
@@ -38,7 +34,6 @@ from cinder.volume import volume_types
 
 LOG = logging.getLogger(__name__)
 
-FLAGS = flags.FLAGS
 coraid_opts = [
     cfg.StrOpt('coraid_esm_address',
                default='',
@@ -57,7 +52,9 @@ coraid_opts = [
                default='coraid_repository',
                help='Volume Type key name to store ESM Repository Name'),
 ]
-FLAGS.register_opts(coraid_opts)
+
+CONF = cfg.CONF
+CONF.register_opts(coraid_opts)
 
 
 class CoraidException(Exception):
@@ -325,11 +322,11 @@ class CoraidDriver(driver.VolumeDriver):
 
     def create_snapshot(self, snapshot):
         """Create a Snapshot."""
+        volume_name = (self.configuration.volume_name_template
+                       % snapshot['volume_id'])
+        snapshot_name = (self.configuration.snapshot_name_template
+                         % snapshot['id'])
         try:
-            volume_name = (FLAGS.volume_name_template
-                           % snapshot['volume_id'])
-            snapshot_name = (FLAGS.snapshot_name_template
-                             % snapshot['id'])
             self.esm.create_snapshot(volume_name, snapshot_name)
         except Exception, e:
             msg = _('Failed to Create Snapshot %(snapname)s')
@@ -339,9 +336,9 @@ class CoraidDriver(driver.VolumeDriver):
 
     def delete_snapshot(self, snapshot):
         """Delete a Snapshot."""
+        snapshot_name = (self.configuration.snapshot_name_template
+                         % snapshot['id'])
         try:
-            snapshot_name = (FLAGS.snapshot_name_template
-                             % snapshot['id'])
             self.esm.delete_snapshot(snapshot_name)
         except Exception:
             msg = _('Failed to Delete Snapshot %(snapname)s')
@@ -351,10 +348,10 @@ class CoraidDriver(driver.VolumeDriver):
 
     def create_volume_from_snapshot(self, volume, snapshot):
         """Create a Volume from a Snapshot."""
+        snapshot_name = (self.configuration.snapshot_name_template
+                         % snapshot['id'])
+        repository = self._get_repository(volume['volume_type'])
         try:
-            snapshot_name = (FLAGS.snapshot_name_template
-                             % snapshot['id'])
-            repository = self._get_repository(volume['volume_type'])
             self.esm.create_volume_from_snapshot(snapshot_name,
                                                  volume['name'],
                                                  repository)
@@ -29,12 +29,11 @@ from oslo.config import cfg
 from xml.dom.minidom import parseString
 
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import log as logging
 
 LOG = logging.getLogger(__name__)
 
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
 
 try:
     import pywbem
@@ -62,7 +61,7 @@ class EMCSMISCommon():
                      default=CINDER_EMC_CONFIG_FILE,
                      help='use this file for cinder emc plugin '
                           'config data')
-        FLAGS.register_opt(opt)
+        CONF.register_opt(opt)
         self.protocol = prtcl
         self.configuration = configuration
         self.configuration.append_config_values([opt])
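The EMC hunk above shows the dual pattern used for per-backend drivers: the option is registered globally on cfg.CONF and also appended to the backend's own configuration object, which is what lets a deployment override it per backend. A sketch under those assumptions; the driver class and default value are illustrative, not taken from the patch:

    from oslo.config import cfg

    opt = cfg.StrOpt('cinder_emc_config_file',
                     default='/etc/cinder/cinder_emc_config.xml',  # illustrative
                     help='use this file for cinder emc plugin config data')

    CONF = cfg.CONF
    CONF.register_opt(opt)  # global default, as in the hunk above

    class IllustrativeDriver(object):
        def __init__(self, configuration):
            self.configuration = configuration
            # per-backend override support, as in the hunk above
            self.configuration.append_config_values([opt])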
@@ -20,20 +20,14 @@ ISCSI Drivers for EMC VNX and VMAX arrays based on SMI-S.
 
 """
 
-import os
-import time
-
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import log as logging
 from cinder import utils
 from cinder.volume import driver
 from cinder.volume.drivers.emc import emc_smis_common
 
 LOG = logging.getLogger(__name__)
 
-FLAGS = flags.FLAGS
-
 
 class EMCSMISISCSIDriver(driver.ISCSIDriver):
     """EMC ISCSI Drivers for VMAX and VNX using SMI-S."""
@@ -42,8 +36,8 @@ class EMCSMISISCSIDriver(driver.ISCSIDriver):
 
         super(EMCSMISISCSIDriver, self).__init__(*args, **kwargs)
         self.common = emc_smis_common.EMCSMISCommon(
-                        'iSCSI',
-                        configuration=self.configuration)
+            'iSCSI',
+            configuration=self.configuration)
 
     def check_for_setup_error(self):
         pass
@@ -21,7 +21,6 @@ import os
 from oslo.config import cfg
 
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import log as logging
 from cinder.volume.drivers import nfs
 
@@ -44,8 +43,8 @@ volume_opts = [
                        'In such case volume creation takes a lot of time.'))]
 VERSION = '1.0'
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(volume_opts)
+CONF = cfg.CONF
+CONF.register_opts(volume_opts)
 
 
 class GlusterfsDriver(nfs.RemoteFsDriver):
@@ -48,6 +48,10 @@ VOL_AND_SNAP_NAME_PREFIX = 'OpenStack_'
 READBUFFERSIZE = 8192
 
 
+CONF = cfg.CONF
+CONF.register_opts(huawei_opt)
+
+
 class SSHConn(utils.SSHPool):
     """Define a new class inherited to SSHPool.
 
@@ -28,7 +28,6 @@ from oslo.config import cfg
 
 from cinder.brick.iscsi import iscsi
 from cinder import exception
-from cinder import flags
 from cinder.image import image_utils
 from cinder.openstack.common import log as logging
 from cinder import utils
@@ -60,8 +59,8 @@ volume_opts = [
                     'this requires lvm_mirrors + 2 pvs with available space'),
 ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(volume_opts)
+CONF = cfg.CONF
+CONF.register_opts(volume_opts)
 
 
 class LVMVolumeDriver(driver.VolumeDriver):
@@ -266,7 +265,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
     def create_cloned_volume(self, volume, src_vref):
         """Creates a clone of the specified volume."""
        LOG.info(_('Creating clone of volume: %s') % src_vref['id'])
-        volume_name = FLAGS.volume_name_template % src_vref['id']
+        volume_name = self.configuration.volume_name_template % src_vref['id']
         temp_id = 'tmp-snap-%s' % src_vref['id']
         temp_snapshot = {'volume_name': volume_name,
                          'size': src_vref['size'],
@@ -346,8 +345,10 @@ class LVMISCSIDriver(LVMVolumeDriver, driver.ISCSIDriver):
                       "provision for volume: %s"), volume['id'])
             return
 
-        iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
-        volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
+        iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix,
+                               volume['name'])
+        volume_path = "/dev/%s/%s" % (self.configuration.volume_group,
+                                      volume['name'])
         iscsi_target = 1
 
         self.tgtadm.create_iscsi_target(iscsi_name, iscsi_target,
@@ -611,17 +612,22 @@ class ThinLVMVolumeDriver(LVMISCSIDriver):
         out, err = self._execute('lvs', '--option',
                                  'name', '--noheadings',
                                  run_as_root=True)
-        pool_name = "%s-pool" % FLAGS.volume_group
+        pool_name = "%s-pool" % self.configuration.volume_group
         if pool_name not in out:
-            if not FLAGS.pool_size:
-                out, err = self._execute('vgs', FLAGS.volume_group,
-                                         '--noheadings', '--options',
-                                         'name,size', run_as_root=True)
+            if not self.configuration.pool_size:
+                out, err = self._execute('vgs',
+                                         self.configuration.volume_group,
+                                         '--noheadings',
+                                         '--options',
+                                         'name,size',
+                                         run_as_root=True)
 
                 size = re.sub(r'[\.][\d][\d]', '', out.split()[1])
             else:
-                size = "%s" % FLAGS.pool_size
+                size = "%s" % self.configuration.pool_size
 
-            pool_path = '%s/%s' % (FLAGS.volume_group, pool_name)
+            pool_path = '%s/%s' % (self.configuration.volume_group,
+                                   pool_name)
             out, err = self._execute('lvcreate', '-T', '-L', size,
                                      pool_path, run_as_root=True)
 
@@ -638,7 +644,8 @@ class ThinLVMVolumeDriver(LVMISCSIDriver):
         """Creates a logical volume. Can optionally return a Dictionary of
         changes to the volume object to be persisted."""
         sizestr = self._sizestr(volume['size'])
-        vg_name = ("%s/%s-pool" % (FLAGS.volume_group, FLAGS.volume_group))
+        vg_name = ("%s/%s-pool" % (self.configuration.volume_group,
+                                   self.configuration.volume_group))
         self._try_execute('lvcreate', '-T', '-V', sizestr, '-n',
                           volume['name'], vg_name, run_as_root=True)
 
@@ -647,19 +654,21 @@ class ThinLVMVolumeDriver(LVMISCSIDriver):
         if self._volume_not_present(volume['name']):
             return True
         self._try_execute('lvremove', '-f', "%s/%s" %
-                          (FLAGS.volume_group,
+                          (self.configuration.volume_group,
                            self._escape_snapshot(volume['name'])),
                           run_as_root=True)
 
     def create_cloned_volume(self, volume, src_vref):
         """Creates a clone of the specified volume."""
         LOG.info(_('Creating clone of volume: %s') % src_vref['id'])
-        orig_lv_name = "%s/%s" % (FLAGS.volume_group, src_vref['name'])
+        orig_lv_name = "%s/%s" % (self.configuration.volume_group,
+                                  src_vref['name'])
         self._do_lvm_snapshot(orig_lv_name, volume, False)
 
     def create_snapshot(self, snapshot):
         """Creates a snapshot of a volume."""
-        orig_lv_name = "%s/%s" % (FLAGS.volume_group, snapshot['volume_name'])
+        orig_lv_name = "%s/%s" % (self.configuration.volume_group,
+                                  snapshot['volume_name'])
         self._do_lvm_snapshot(orig_lv_name, snapshot)
 
     def get_volume_stats(self, refresh=False):
@@ -85,6 +85,10 @@ netapp_opts = [
                      ' 7 mode'), ]
 
 
+CONF = cfg.CONF
+CONF.register_opts(netapp_opts)
+
+
 class DfmDataset(object):
     def __init__(self, id, name, project, type):
         self.id = id
@@ -42,6 +42,10 @@ netapp_nfs_opts = [
                 help='Does snapshot creation call returns immediately')]
 
 
+CONF = cfg.CONF
+CONF.register_opts(netapp_nfs_opts)
+
+
 class NetAppNFSDriver(nfs.NfsDriver):
     """Executes commands relating to Volumes."""
     def __init__(self, *args, **kwargs):
@@ -25,7 +25,6 @@
 from oslo.config import cfg
 
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import log as logging
 from cinder.volume import driver
 from cinder.volume.drivers import nexenta
@@ -33,7 +32,6 @@ from cinder.volume.drivers.nexenta import jsonrpc
 
 VERSION = '1.0'
 LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
 
 nexenta_opts = [
     cfg.StrOpt('nexenta_host',
@@ -71,7 +69,9 @@ nexenta_opts = [
                default=False,
                help='flag to create sparse volumes'),
 ]
-FLAGS.register_opts(nexenta_opts)
+
+CONF = cfg.CONF
+CONF.register_opts(nexenta_opts)
 
 
 class NexentaDriver(driver.ISCSIDriver):  # pylint: disable=R0921
@@ -81,38 +81,38 @@ class NexentaDriver(driver.ISCSIDriver):  # pylint: disable=R0921
         super(NexentaDriver, self).__init__(*args, **kwargs)
 
     def do_setup(self, context):
-        protocol = FLAGS.nexenta_rest_protocol
+        protocol = CONF.nexenta_rest_protocol
         auto = protocol == 'auto'
         if auto:
             protocol = 'http'
         self.nms = jsonrpc.NexentaJSONProxy(
-            '%s://%s:%s/rest/nms/' % (protocol, FLAGS.nexenta_host,
-                                      FLAGS.nexenta_rest_port),
-            FLAGS.nexenta_user, FLAGS.nexenta_password, auto=auto)
+            '%s://%s:%s/rest/nms/' % (protocol, CONF.nexenta_host,
+                                      CONF.nexenta_rest_port),
+            CONF.nexenta_user, CONF.nexenta_password, auto=auto)
 
     def check_for_setup_error(self):
         """Verify that the volume for our zvols exists.
 
         :raise: :py:exc:`LookupError`
         """
-        if not self.nms.volume.object_exists(FLAGS.nexenta_volume):
+        if not self.nms.volume.object_exists(CONF.nexenta_volume):
             raise LookupError(_("Volume %s does not exist in Nexenta SA"),
-                              FLAGS.nexenta_volume)
+                              CONF.nexenta_volume)
 
     @staticmethod
     def _get_zvol_name(volume_name):
         """Return zvol name that corresponds given volume name."""
-        return '%s/%s' % (FLAGS.nexenta_volume, volume_name)
+        return '%s/%s' % (CONF.nexenta_volume, volume_name)
 
     @staticmethod
     def _get_target_name(volume_name):
         """Return iSCSI target name to access volume."""
-        return '%s%s' % (FLAGS.nexenta_target_prefix, volume_name)
+        return '%s%s' % (CONF.nexenta_target_prefix, volume_name)
 
     @staticmethod
     def _get_target_group_name(volume_name):
         """Return Nexenta iSCSI target group name for volume."""
-        return '%s%s' % (FLAGS.nexenta_target_group_prefix, volume_name)
+        return '%s%s' % (CONF.nexenta_target_group_prefix, volume_name)
 
     def create_volume(self, volume):
         """Create a zvol on appliance.
@@ -122,7 +122,7 @@ class NexentaDriver(driver.ISCSIDriver):  # pylint: disable=R0921
         self.nms.zvol.create(
             self._get_zvol_name(volume['name']),
             '%sG' % (volume['size'],),
-            FLAGS.nexenta_blocksize, FLAGS.nexenta_sparse)
+            CONF.nexenta_blocksize, CONF.nexenta_sparse)
 
     def delete_volume(self, volume):
         """Destroy a zvol on appliance.
@@ -237,8 +237,8 @@ class NexentaDriver(driver.ISCSIDriver):  # pylint: disable=R0921
         else:
             LOG.info(_('Ignored LUN mapping entry addition error "%s"'
                        ' while ensuring export'), exc)
-        return '%s:%s,1 %s 0' % (FLAGS.nexenta_host,
-                                 FLAGS.nexenta_iscsi_target_portal_port,
+        return '%s:%s,1 %s 0' % (CONF.nexenta_host,
+                                 CONF.nexenta_iscsi_target_portal_port,
                                  target_name)
 
     def create_export(self, _ctx, volume):
@@ -324,7 +324,7 @@ class NexentaDriver(driver.ISCSIDriver):  # pylint: disable=R0921
         data["driver_version"] = VERSION
         data["storage_protocol"] = 'iSCSI'
 
-        stats = self.nms.volume.get_child_props(FLAGS.nexenta_volume,
+        stats = self.nms.volume.get_child_props(CONF.nexenta_volume,
                                                 'health|size|used|available')
         total_unit = stats['size'][-1]
         total_amount = float(stats['size'][:-1])
@@ -59,6 +59,9 @@ volume_opts = [
 
 VERSION = '1.1'
 
+CONF = cfg.CONF
+CONF.register_opts(volume_opts)
+
 
 class RemoteFsDriver(driver.VolumeDriver):
     """Common base for drivers that work like NFS."""
@@ -128,6 +128,9 @@ class RADOSClient(object):
     def __exit__(self, type_, value, traceback):
         self.driver._disconnect_from_rados(self.cluster, self.ioctx)
 
+CONF = cfg.CONF
+CONF.register_opts(rbd_opts)
+
 
 class RBDDriver(driver.VolumeDriver):
     """Implements RADOS block device (RBD) volume commands"""
@@ -94,6 +94,10 @@ hp3par_opts = [
 ]
 
 
+CONF = cfg.CONF
+CONF.register_opts(hp3par_opts)
+
+
 class HP3PARCommon(object):
 
     stats = {}
@@ -834,7 +838,7 @@ exit
             # use the wwn to see if we can find the hostname
             hostname = self._get_3par_hostname_from_wwn_iqn(wwn_iqn)
             # no 3par host, re-throw
-            if (hostname == None):
+            if (hostname is None):
                 raise
         else:
             # not a 'host does not exist' HTTPNotFound exception, re-throw
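Alongside the config changes, the hunk above swaps "hostname == None" for "hostname is None". That is more than style: a class can override == and make equality against None lie, while identity cannot. A small self-contained illustration, not taken from the patch:

    class Weird(object):
        def __eq__(self, other):
            return True  # claims equality with everything

    w = Weird()
    print(w == None)  # True, misleading
    print(w is None)  # False, correct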
@@ -27,7 +27,6 @@ from eventlet import greenthread
 from oslo.config import cfg
 
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import log as logging
 from cinder import utils
 from cinder.volume.driver import ISCSIDriver
@@ -72,8 +71,8 @@ san_opts = [
                help='Maximum ssh connections in the pool'),
 ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(san_opts)
+CONF = cfg.CONF
+CONF.register_opts(san_opts)
 
 
 class SanISCSIDriver(ISCSIDriver):
@@ -15,7 +15,6 @@
 from oslo.config import cfg
 
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import log as logging
 from cinder.volume.drivers.san.san import SanISCSIDriver
 
@@ -26,8 +25,8 @@ solaris_opts = [
            default='rpool/',
            help='The ZFS path under which to create zvols for volumes.'), ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(solaris_opts)
+CONF = cfg.CONF
+CONF.register_opts(solaris_opts)
 
 
 class SolarisISCSIDriver(SanISCSIDriver):
@@ -61,6 +60,7 @@ class SolarisISCSIDriver(SanISCSIDriver):
         super(SolarisISCSIDriver, self).__init__(*cmd,
                                                  execute=self._execute,
                                                  **kwargs)
+        self.configuration.append_config_values(solaris_opts)
 
     def _execute(self, *cmd, **kwargs):
         new_cmd = ['pfexec']
@@ -123,7 +123,8 @@ class SolarisISCSIDriver(SanISCSIDriver):
         return iscsi_target_name in self._get_iscsi_targets()
 
     def _build_zfs_poolname(self, volume):
-        zfs_poolname = '%s%s' % (FLAGS.san_zfs_volume_base, volume['name'])
+        zfs_poolname = '%s%s' % (self.configuration.san_zfs_volume_base,
+                                 volume['name'])
         return zfs_poolname
 
     def create_volume(self, volume):
@@ -137,7 +138,7 @@ class SolarisISCSIDriver(SanISCSIDriver):
 
         # Create a zfs volume
         cmd = ['/usr/sbin/zfs', 'create']
-        if FLAGS.san_thin_provision:
+        if self.configuration.san_thin_provision:
             cmd.append('-s')
         cmd.extend(['-V', sizestr])
         cmd.append(zfs_poolname)
@@ -186,7 +187,7 @@ class SolarisISCSIDriver(SanISCSIDriver):
 
     def local_path(self, volume):
         # TODO(justinsb): Is this needed here?
-        escaped_group = FLAGS.volume_group.replace('-', '--')
+        escaped_group = self.configuration.volume_group.replace('-', '--')
         escaped_name = volume['name'].replace('-', '--')
         return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
 
@@ -233,7 +234,8 @@ class SolarisISCSIDriver(SanISCSIDriver):
 
         #TODO(justinsb): Is this always 1? Does it matter?
         iscsi_portal_interface = '1'
-        iscsi_portal = FLAGS.san_ip + ":3260," + iscsi_portal_interface
+        iscsi_portal = \
+            self.configuration.san_ip + ":3260," + iscsi_portal_interface
 
         db_update = {}
         db_update['provider_location'] = ("%s %s" %
@@ -24,7 +24,6 @@ import urlparse
 from oslo.config import cfg
 
 from cinder import exception
-from cinder import flags
 from cinder.image import image_utils
 from cinder.openstack.common import log as logging
 from cinder.volume import driver
@@ -43,8 +42,8 @@ volume_opts = [
                help='Path from Scality SOFS root to volume dir'),
 ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(volume_opts)
+CONF = cfg.CONF
+CONF.register_opts(volume_opts)
 
 
 class ScalityDriver(driver.VolumeDriver):
@@ -58,7 +57,7 @@ class ScalityDriver(driver.VolumeDriver):
         """Sanity checks before attempting to mount SOFS."""
 
         # config is mandatory
-        config = FLAGS.scality_sofs_config
+        config = CONF.scality_sofs_config
         if not config:
             msg = _("Value required for 'scality_sofs_config'")
             LOG.warn(msg)
@@ -89,8 +88,8 @@ class ScalityDriver(driver.VolumeDriver):
             raise e
 
     def _mount_sofs(self):
-        config = FLAGS.scality_sofs_config
-        mount_path = FLAGS.scality_sofs_mount_point
+        config = CONF.scality_sofs_config
+        mount_path = CONF.scality_sofs_mount_point
         sysdir = os.path.join(mount_path, 'sys')
 
         self._makedirs(mount_path)
@@ -121,16 +120,16 @@ class ScalityDriver(driver.VolumeDriver):
         """Any initialization the volume driver does while starting."""
         self._check_prerequisites()
         self._mount_sofs()
-        voldir = os.path.join(FLAGS.scality_sofs_mount_point,
-                              FLAGS.scality_sofs_volume_dir)
+        voldir = os.path.join(CONF.scality_sofs_mount_point,
+                              CONF.scality_sofs_volume_dir)
         if not os.path.isdir(voldir):
             self._makedirs(voldir)
 
     def check_for_setup_error(self):
         """Returns an error if prerequisites aren't met."""
         self._check_prerequisites()
-        voldir = os.path.join(FLAGS.scality_sofs_mount_point,
-                              FLAGS.scality_sofs_volume_dir)
+        voldir = os.path.join(CONF.scality_sofs_mount_point,
+                              CONF.scality_sofs_volume_dir)
         if not os.path.isdir(voldir):
             msg = _("Cannot find volume dir for Scality SOFS at '%s'") % voldir
             LOG.warn(msg)
@@ -160,8 +159,8 @@ class ScalityDriver(driver.VolumeDriver):
 
     def create_snapshot(self, snapshot):
         """Creates a snapshot."""
-        volume_path = os.path.join(FLAGS.scality_sofs_mount_point,
-                                   FLAGS.scality_sofs_volume_dir,
+        volume_path = os.path.join(CONF.scality_sofs_mount_point,
+                                   CONF.scality_sofs_volume_dir,
                                    snapshot['volume_name'])
         snapshot_path = self.local_path(snapshot)
         self._create_file(snapshot_path,
@@ -173,11 +172,11 @@ class ScalityDriver(driver.VolumeDriver):
         os.remove(self.local_path(snapshot))
 
     def _sofs_path(self, volume):
-        return os.path.join(FLAGS.scality_sofs_volume_dir,
+        return os.path.join(CONF.scality_sofs_volume_dir,
                             volume['name'])
 
     def local_path(self, volume):
-        return os.path.join(FLAGS.scality_sofs_mount_point,
+        return os.path.join(CONF.scality_sofs_mount_point,
                             self._sofs_path(volume))
 
     def ensure_export(self, context, volume):
@@ -18,14 +18,14 @@ SheepDog Volume Driver.
 """
 import re
 
+from oslo.config import cfg
+
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import log as logging
 from cinder.volume import driver
 
 
 LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
 
 
 class SheepdogDriver(driver.VolumeDriver):
@@ -50,6 +50,10 @@ sf_opts = [
                help='Create SolidFire accounts with this prefix'), ]
 
 
+CONF = cfg.CONF
+CONF.register_opts(sf_opts)
+
+
 class SolidFire(SanISCSIDriver):
     """OpenStack driver to enable SolidFire cluster.
 
@@ -101,6 +101,10 @@ storwize_svc_opts = [
 ]
 
 
+CONF = cfg.CONF
+CONF.register_opts(storwize_svc_opts)
+
+
 class StorwizeSVCDriver(san.SanISCSIDriver):
     """IBM Storwize V7000 and SVC iSCSI/FC volume driver.
 
@@ -19,7 +19,6 @@
 from oslo.config import cfg
 
 from cinder import exception
-from cinder import flags
 from cinder.image import glance
 from cinder.image import image_utils
 from cinder.openstack.common import log as logging
@@ -53,9 +52,9 @@ xenapi_nfs_opts = [
                help='Path of exported NFS, used by XenAPINFSDriver'),
 ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(xenapi_opts)
-FLAGS.register_opts(xenapi_nfs_opts)
+CONF = cfg.CONF
+CONF.register_opts(xenapi_opts)
+CONF.register_opts(xenapi_nfs_opts)
 
 
 class XenAPINFSDriver(driver.VolumeDriver):
@@ -166,8 +165,8 @@ class XenAPINFSDriver(driver.VolumeDriver):
     def _use_image_utils_to_pipe_bytes_to_volume(self, context, volume,
                                                  image_service, image_id):
         sr_uuid, vdi_uuid = volume['provider_location'].split('/')
-        with self.nfs_ops.volume_attached_here(FLAGS.xenapi_nfs_server,
-                                               FLAGS.xenapi_nfs_serverpath,
+        with self.nfs_ops.volume_attached_here(CONF.xenapi_nfs_server,
+                                               CONF.xenapi_nfs_serverpath,
                                                sr_uuid, vdi_uuid,
                                                False) as device:
             image_utils.fetch_to_raw(context,
@@ -184,22 +183,22 @@ class XenAPINFSDriver(driver.VolumeDriver):
         auth_token = context.auth_token
 
         overwrite_result = self.nfs_ops.use_glance_plugin_to_overwrite_volume(
-            FLAGS.xenapi_nfs_server,
-            FLAGS.xenapi_nfs_serverpath,
+            CONF.xenapi_nfs_server,
+            CONF.xenapi_nfs_serverpath,
             sr_uuid,
             vdi_uuid,
             glance_server,
            image_id,
            auth_token,
-            FLAGS.xenapi_sr_base_path)
+            CONF.xenapi_sr_base_path)
 
         if overwrite_result is False:
             raise exception.ImageCopyFailure(reason='Overwriting volume '
                                                     'failed.')
 
         self.nfs_ops.resize_volume(
-            FLAGS.xenapi_nfs_server,
-            FLAGS.xenapi_nfs_serverpath,
+            CONF.xenapi_nfs_server,
+            CONF.xenapi_nfs_serverpath,
             sr_uuid,
             vdi_uuid,
             volume['size'])
@@ -215,8 +214,8 @@ class XenAPINFSDriver(driver.VolumeDriver):
     def _use_image_utils_to_upload_volume(self, context, volume, image_service,
                                           image_meta):
         sr_uuid, vdi_uuid = volume['provider_location'].split('/')
-        with self.nfs_ops.volume_attached_here(FLAGS.xenapi_nfs_server,
-                                               FLAGS.xenapi_nfs_serverpath,
+        with self.nfs_ops.volume_attached_here(CONF.xenapi_nfs_server,
+                                               CONF.xenapi_nfs_serverpath,
                                                sr_uuid, vdi_uuid,
                                                True) as device:
             image_utils.upload_volume(context,
@@ -235,14 +234,14 @@ class XenAPINFSDriver(driver.VolumeDriver):
         auth_token = context.auth_token
 
         self.nfs_ops.use_glance_plugin_to_upload_volume(
-            FLAGS.xenapi_nfs_server,
-            FLAGS.xenapi_nfs_serverpath,
+            CONF.xenapi_nfs_server,
+            CONF.xenapi_nfs_serverpath,
             sr_uuid,
             vdi_uuid,
             glance_server,
             image_id,
             auth_token,
-            FLAGS.xenapi_sr_base_path)
+            CONF.xenapi_sr_base_path)
 
     def get_volume_stats(self, refresh=False):
         if refresh or not self._stats:
@@ -27,7 +27,6 @@ Volume driver for IBM XIV storage systems.
 from oslo.config import cfg
 
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import importutils
 from cinder.openstack.common import log as logging
 from cinder.volume.drivers.san import san
@@ -38,8 +37,8 @@ ibm_xiv_opts = [
                help='Proxy driver'),
 ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(ibm_xiv_opts)
+CONF = cfg.CONF
+CONF.register_opts(ibm_xiv_opts)
 
 LOG = logging.getLogger('cinder.volume.xiv')
 
@@ -50,12 +49,12 @@ class XIVDriver(san.SanISCSIDriver):
     def __init__(self, *args, **kwargs):
         """Initialize the driver."""
 
-        proxy = importutils.import_class(FLAGS.xiv_proxy)
+        proxy = importutils.import_class(CONF.xiv_proxy)
 
-        self.xiv_proxy = proxy({"xiv_user": FLAGS.san_login,
-                                "xiv_pass": FLAGS.san_password,
-                                "xiv_address": FLAGS.san_ip,
-                                "xiv_vol_pool": FLAGS.san_clustername},
+        self.xiv_proxy = proxy({"xiv_user": CONF.san_login,
+                                "xiv_pass": CONF.san_password,
+                                "xiv_address": CONF.san_ip,
+                                "xiv_vol_pool": CONF.san_clustername},
                                LOG,
                                exception)
         san.SanISCSIDriver.__init__(self, *args, **kwargs)