Merge "Adds manage/unmanage methods for HNAS drivers."
commit ca238f71cb
@@ -228,6 +228,18 @@ Logical units :\n\
\n\
Access configuration :\n\
"
HNAS_RESULT24 = "Logical unit modified successfully."

HNAS_RESULT25 = "Current selected file system: HNAS-iSCSI-TEST, number(32)."

HNAS_RESULT26 = "Name : volume-test \n\
Comment: \n\
Path : /.cinder/volume-test.iscsi \n\
Size : 2 GB \n\
File System : fs1 \n\
File System Mounted : YES \n\
Logical Unit Mounted: No"


HNAS_CMDS = {
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'evsfs', 'list'):
@@ -296,7 +308,16 @@ HNAS_CMDS = {
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs',
     '1', 'iscsi-target', 'addlu', 'cinder-default',
     'volume-8ddd1a54-0000-0000-0000', '2'):
        ["%s" % HNAS_RESULT13, ""],
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs',
     '1', 'selectfs', 'fs01-husvm'):
        ["%s" % HNAS_RESULT25, ""],
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs',
     '1', 'iscsi-lu', 'list', 'test_lun'):
        ["%s" % HNAS_RESULT26, ""],
    ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs',
     '1', 'iscsi-lu', 'mod', '-n', 'vol_test', 'new_vol_test'):
        ["%s" % HNAS_RESULT24, ""]
}

DRV_CONF = {'ssh_enabled': 'True',
@@ -590,3 +611,42 @@ class HDSHNASBendTest(test.TestCase):
                                      "test_hdp")
        result, lunid, tgt = ret
        self.assertFalse(result)

    @mock.patch.object(hnas_backend.HnasBackend, 'get_evs', return_value=1)
    @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
                       return_value=(HNAS_RESULT26, ""))
    def test_get_existing_lu_info(self, m_run_cmd, m_get_evs):

        out = self.hnas_bend.get_existing_lu_info("ssh", "0.0.0.0",
                                                  "supervisor",
                                                  "supervisor", "fs01-husvm",
                                                  "test_lun")

        m_get_evs.assert_called_once_with('ssh', '0.0.0.0', 'supervisor',
                                          'supervisor', 'fs01-husvm')
        m_run_cmd.assert_called_once_with('ssh', '0.0.0.0', 'supervisor',
                                          'supervisor', 'console-context',
                                          '--evs', 1, 'iscsi-lu', 'list',
                                          'test_lun')

        self.assertEqual(HNAS_RESULT26, out)

    @mock.patch.object(hnas_backend.HnasBackend, 'get_evs', return_value=1)
    @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
                       return_value=(HNAS_RESULT24, ""))
    def test_rename_existing_lu(self, m_run_cmd, m_get_evs):

        out = self.hnas_bend.rename_existing_lu("ssh", "0.0.0.0",
                                                "supervisor",
                                                "supervisor", "fs01-husvm",
                                                "vol_test",
                                                "new_vol_test")

        m_get_evs.assert_called_once_with('ssh', '0.0.0.0', 'supervisor',
                                          'supervisor', 'fs01-husvm')
        m_run_cmd.assert_called_once_with('ssh', '0.0.0.0', 'supervisor',
                                          'supervisor', 'console-context',
                                          '--evs', 1, 'iscsi-lu', 'mod',
                                          '-n', 'vol_test', 'new_vol_test')

        self.assertEqual(HNAS_RESULT24, out)
@@ -103,6 +103,20 @@ class SimulatedHnasBackend(object):
        # iSCSI connections
        self.connections = []

    def rename_existing_lu(self, cmd, ip0, user, pw, fslabel,
                           vol_name, vol_ref_name):
        return 'Logical unit modified successfully.'

    def get_existing_lu_info(self, cmd, ip0, user, pw, fslabel, lun):
        out = "Name : volume-test \n\
Comment: \n\
Path : /.cinder/volume-test.iscsi \n\
Size : 20 GB \n\
File System : manage_iscsi_test \n\
File System Mounted : Yes \n\
Logical Unit Mounted: Yes"
        return out

    def deleteVolume(self, name):
        volume = self.getVolume(name)
        if volume:
@@ -462,3 +476,60 @@ class HNASiSCSIDriverTest(test.TestCase):
        self.backend.check_target.return_value = (True, fake_tgt)
        self.assertRaises(exception.NoMoreTargets,
                          self.driver._get_service_target, vol)

    @mock.patch.object(iscsi.HDSISCSIDriver, '_get_service')
    def test_unmanage(self, get_service):
        get_service.return_value = 'fs2'

        self.driver.unmanage(_VOLUME)
        get_service.assert_called_once_with(_VOLUME)

    def test_manage_existing_get_size(self):
        vol = _VOLUME.copy()
        existing_vol_ref = {'source-name': 'manage_iscsi_test/volume-test'}

        out = self.driver.manage_existing_get_size(vol, existing_vol_ref)
        self.assertEqual(20, out)

    def test_manage_existing_get_size_error(self):
        vol = _VOLUME.copy()
        existing_vol_ref = {'source-name': 'invalid_FS/vol-not-found'}

        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, vol,
                          existing_vol_ref)

    def test_manage_existing_get_size_without_source_name(self):
        vol = _VOLUME.copy()
        existing_vol_ref = {
            'source-id': 'bcc48c61-9691-4e5f-897c-793686093190'}

        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, vol,
                          existing_vol_ref)

    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
    def test_manage_existing(self, m_get_extra_specs):
        vol = _VOLUME.copy()
        existing_vol_ref = {'source-name': 'fs2/volume-test'}
        version = {'provider_location': '18-48-A5-A1-80-13.testvol'}

        m_get_extra_specs.return_value = {'key': 'type',
                                          'service_label': 'silver'}

        out = self.driver.manage_existing(vol, existing_vol_ref)

        m_get_extra_specs.assert_called_once_with('1')
        self.assertEqual(version, out)

    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
    def test_manage_existing_invalid_pool(self, m_get_extra_specs):
        vol = _VOLUME.copy()
        existing_vol_ref = {'source-name': 'fs2/volume-test'}

        m_get_extra_specs.return_value = {'key': 'type',
                                          'service_label': 'gold'}

        self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
                          self.driver.manage_existing, vol, existing_vol_ref)
        m_get_extra_specs.assert_called_once_with('1')
@@ -22,11 +22,13 @@ import six

from cinder import exception
from cinder import test
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.hitachi import hnas_nfs as nfs
from cinder.volume.drivers import nfs as drivernfs
from cinder.volume.drivers import remotefs
from cinder.volume import volume_types


SHARESCONF = """172.17.39.132:/cinder
172.17.39.133:/cinder"""

@@ -100,6 +102,17 @@ _SNAPVOLUME = {'name': 'snapshot-51dd4-8d8a-4aa9-9176-086c9d89e7fc',
               'volume_id': 'bcc48c61-9691-4e5f-897c-793686093191',
               'host': 'host1@hnas-iscsi-backend#silver'}

_VOLUME_NFS = {'name': 'volume-61da3-8d23-4bb9-3136-ca819d89e7fc',
               'id': '61da3-8d23-4bb9-3136-ca819d89e7fc',
               'size': 4,
               'metadata': [{'key': 'type',
                             'service_label': 'silver'}],
               'volume_type': 'silver',
               'volume_type_id': 'silver',
               'provider_location': '172.24.44.34:/silver/',
               'volume_size': 128,
               'host': 'host1@hnas-nfs#silver'}

GET_ID_VOL = {
    ("bcc48c61-9691-4e5f-897c-793686093190"): [_VOLUME],
    ("bcc48c61-9691-4e5f-897c-793686093191"): [_SNAPVOLUME]
@@ -297,3 +310,144 @@ class HDSNFSDriverTest(test.TestCase):
        vol = _VOLUME.copy()

        self.assertEqual('silver', self.driver.get_pool(vol))

    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
    @mock.patch.object(os.path, 'isfile', return_value=True)
    @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share',
                       return_value='/mnt/gold')
    @mock.patch.object(utils, 'resolve_hostname', return_value='172.24.44.34')
    @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted')
    def test_manage_existing(self, m_ensure_shares, m_resolve, m_mount_point,
                             m_isfile, m_get_extra_specs):
        vol = _VOLUME_NFS.copy()

        m_get_extra_specs.return_value = {'key': 'type',
                                          'service_label': 'silver'}
        self.driver._mounted_shares = ['172.17.39.133:/cinder']
        existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'}

        with mock.patch.object(self.driver, '_execute'):
            out = self.driver.manage_existing(vol, existing_vol_ref)

        loc = {'provider_location': '172.17.39.133:/cinder'}
        self.assertEqual(loc, out)

        m_get_extra_specs.assert_called_once_with('silver')
        m_isfile.assert_called_once_with('/mnt/gold/volume-test')
        m_mount_point.assert_called_once_with('172.17.39.133:/cinder')
        m_resolve.assert_called_with('172.17.39.133')
        m_ensure_shares.assert_called_once_with()

    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
    @mock.patch.object(os.path, 'isfile', return_value=True)
    @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share',
                       return_value='/mnt/gold')
    @mock.patch.object(utils, 'resolve_hostname', return_value='172.17.39.133')
    @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted')
    def test_manage_existing_move_fails(self, m_ensure_shares, m_resolve,
                                        m_mount_point, m_isfile,
                                        m_get_extra_specs):
        vol = _VOLUME_NFS.copy()

        m_get_extra_specs.return_value = {'key': 'type',
                                          'service_label': 'silver'}
        self.driver._mounted_shares = ['172.17.39.133:/cinder']
        existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'}
        self.driver._execute = mock.Mock(side_effect=OSError)

        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.manage_existing, vol, existing_vol_ref)
        m_get_extra_specs.assert_called_once_with('silver')
        m_isfile.assert_called_once_with('/mnt/gold/volume-test')
        m_mount_point.assert_called_once_with('172.17.39.133:/cinder')
        m_resolve.assert_called_with('172.17.39.133')
        m_ensure_shares.assert_called_once_with()

    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
    @mock.patch.object(os.path, 'isfile', return_value=True)
    @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share',
                       return_value='/mnt/gold')
    @mock.patch.object(utils, 'resolve_hostname', return_value='172.17.39.133')
    @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted')
    def test_manage_existing_invalid_pool(self, m_ensure_shares, m_resolve,
                                          m_mount_point, m_isfile,
                                          m_get_extra_specs):
        vol = _VOLUME_NFS.copy()
        m_get_extra_specs.return_value = {'key': 'type',
                                          'service_label': 'gold'}
        self.driver._mounted_shares = ['172.17.39.133:/cinder']
        existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'}
        self.driver._execute = mock.Mock(side_effect=OSError)

        self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
                          self.driver.manage_existing, vol, existing_vol_ref)
        m_get_extra_specs.assert_called_once_with('silver')
        m_isfile.assert_called_once_with('/mnt/gold/volume-test')
        m_mount_point.assert_called_once_with('172.17.39.133:/cinder')
        m_resolve.assert_called_with('172.17.39.133')
        m_ensure_shares.assert_called_once_with()

    @mock.patch.object(utils, 'get_file_size', return_value=4000000000)
    @mock.patch.object(os.path, 'isfile', return_value=True)
    @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share',
                       return_value='/mnt/gold')
    @mock.patch.object(utils, 'resolve_hostname', return_value='172.17.39.133')
    @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted')
    def test_manage_existing_get_size(self, m_ensure_shares, m_resolve,
                                      m_mount_point,
                                      m_isfile, m_file_size):

        vol = _VOLUME_NFS.copy()

        self.driver._mounted_shares = ['172.17.39.133:/cinder']
        existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'}

        out = self.driver.manage_existing_get_size(vol, existing_vol_ref)

        self.assertEqual(vol['size'], out)
        m_file_size.assert_called_once_with('/mnt/gold/volume-test')
        m_isfile.assert_called_once_with('/mnt/gold/volume-test')
        m_mount_point.assert_called_once_with('172.17.39.133:/cinder')
        m_resolve.assert_called_with('172.17.39.133')
        m_ensure_shares.assert_called_once_with()

    @mock.patch.object(utils, 'get_file_size', return_value='badfloat')
    @mock.patch.object(os.path, 'isfile', return_value=True)
    @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share',
                       return_value='/mnt/gold')
    @mock.patch.object(utils, 'resolve_hostname', return_value='172.17.39.133')
    @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted')
    def test_manage_existing_get_size_error(self, m_ensure_shares, m_resolve,
                                            m_mount_point,
                                            m_isfile, m_file_size):
        vol = _VOLUME_NFS.copy()

        self.driver._mounted_shares = ['172.17.39.133:/cinder']
        existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'}

        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.manage_existing_get_size, vol,
                          existing_vol_ref)
        m_file_size.assert_called_once_with('/mnt/gold/volume-test')
        m_isfile.assert_called_once_with('/mnt/gold/volume-test')
        m_mount_point.assert_called_once_with('172.17.39.133:/cinder')
        m_resolve.assert_called_with('172.17.39.133')
        m_ensure_shares.assert_called_once_with()

    def test_manage_existing_get_size_without_source_name(self):
        vol = _VOLUME.copy()
        existing_vol_ref = {
            'source-id': 'bcc48c61-9691-4e5f-897c-793686093190'}

        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size, vol,
                          existing_vol_ref)

    @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share',
                       return_value='/mnt/gold')
    def test_unmanage(self, m_mount_point):
        with mock.patch.object(self.driver, '_execute'):
            vol = _VOLUME_NFS.copy()
            self.driver.unmanage(vol)

        m_mount_point.assert_called_once_with('172.24.44.34:/silver/')
@@ -29,6 +29,7 @@ import os
import pyclbr
import re
import shutil
import socket
import stat
import sys
import tempfile
@@ -969,3 +970,20 @@ def setup_tracing(trace_flags):
            LOG.warning(_LW('Invalid trace flag: %s'), invalid_flag)
    TRACE_METHOD = 'method' in trace_flags
    TRACE_API = 'api' in trace_flags


def resolve_hostname(hostname):
    """Resolves host name to IP address.

    Resolves a host name (my.data.point.com) to an IP address (10.12.143.11).
    This routine also works if the data passed in hostname is already an IP.
    In this case, the same IP address will be returned.

    :param hostname: Host name to resolve.
    :return: IP Address for Host name.
    """
    result = socket.getaddrinfo(hostname, None)[0]
    (family, socktype, proto, canonname, sockaddr) = result
    LOG.debug('Asked to resolve hostname %(host)s and got IP %(ip)s.',
              {'host': hostname, 'ip': sockaddr[0]})
    return sockaddr[0]
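
# Usage sketch (values illustrative, taken from the docstring above):
# resolve_hostname() accepts either a DNS name or a literal IP string,
# and an IP simply resolves to itself.
#   resolve_hostname('my.data.point.com')  # -> e.g. '10.12.143.11'
#   resolve_hostname('10.12.143.11')       # -> '10.12.143.11'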
@@ -824,3 +824,45 @@ class HnasBackend(object):

        LOG.debug("LUN %(lun)s not attached.", {'lun': volume_name})
        return False, 0, None

    def get_existing_lu_info(self, cmd, ip0, user, pw, fslabel, lun):
        """Returns the information for the specified Logical Unit.

        Returns the information of an existing Logical Unit on HNAS, according
        to the name provided.

        :param cmd: the command that will be run on SMU
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :param fslabel: label of the file system
        :param lun: label of the logical unit
        """

        evs = self.get_evs(cmd, ip0, user, pw, fslabel)
        out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", "--evs",
                                evs, 'iscsi-lu', 'list', lun)

        return out

    def rename_existing_lu(self, cmd, ip0, user, pw, fslabel,
                           new_name, vol_name):
        """Renames the specified Logical Unit.

        Renames an existing Logical Unit on HNAS according to the new name
        provided.

        :param cmd: command that will be run on SMU
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :param fslabel: label of the file system
        :param new_name: new name for the existing volume
        :param vol_name: current name of the existing volume
        """
        evs = self.get_evs(cmd, ip0, user, pw, fslabel)
        out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", "--evs",
                                evs, "iscsi-lu", "mod", "-n", new_name,
                                vol_name)

        return out
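
    # Sketch of the SSC command issued above (placeholders illustrative):
    #   console-context --evs <evs> iscsi-lu mod -n <new_name> <vol_name>
    # run over SSH against the SMU by run_cmd().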

@@ -36,8 +36,7 @@ from cinder.volume.drivers.hitachi import hnas_backend
from cinder.volume import utils
from cinder.volume import volume_types


HDS_HNAS_ISCSI_VERSION = '4.0.0'

LOG = logging.getLogger(__name__)

@@ -164,6 +163,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
    Version 3.2.0: Added pool aware scheduling
                   Fixed concurrency errors
    Version 3.3.0: Fixed iSCSI target limitation error
    Version 4.0.0: Added manage/unmanage features
    """

    def __init__(self, *args, **kwargs):
@@ -832,3 +832,150 @@ class HDSISCSIDriver(driver.ISCSIDriver):
        else:
            pass
        return metadata['service_label']

    def _check_pool_and_fs(self, volume, fs_label):
        """Validation of the pool and filesystem.

        Checks if the file system for the volume-type chosen matches the
        one passed in the volume reference. Also, checks if the pool
        for the volume type matches the pool for the host passed.

        :param volume: Reference to the volume.
        :param fs_label: Label of the file system.
        """
        pool_from_vol_type = self.get_pool(volume)

        pool_from_host = utils.extract_host(volume['host'], level='pool')

        if self.config['services'][pool_from_vol_type]['hdp'] != fs_label:
            msg = (_("Failed to manage existing volume because the pool of "
                     "the volume type chosen (%(pool_fs)s) does not match "
                     "the file system passed in the volume reference "
                     "(%(fs_label)s).")
                   % {'pool_fs':
                      self.config['services'][pool_from_vol_type]['hdp'],
                      'fs_label': fs_label})
            raise exception.ManageExistingVolumeTypeMismatch(reason=msg)

        if pool_from_host != pool_from_vol_type:
            msg = (_("Failed to manage existing volume because the pool of "
                     "the volume type chosen (%(pool_vol)s) does not match "
                     "the pool of the host (%(pool_host)s).")
                   % {'pool_vol': pool_from_vol_type,
                      'pool_host': pool_from_host})
            raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
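
    # For reference (sketch): with a host string such as
    # 'host1@hnas-iscsi-backend#silver', extract_host(..., level='pool')
    # returns 'silver', which is compared against the volume type's pool.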

    def _get_info_from_vol_ref(self, vol_ref):
        """Gets information from the volume reference.

        Returns the information (File system and volume name) taken from
        the volume reference.

        :param vol_ref: existing volume to take under management
        """
        vol_info = vol_ref.split('/')

        fs_label = vol_info[0]
        vol_name = vol_info[1]

        return fs_label, vol_name
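
    # Example (sketch): a reference of 'openstack/vol_to_manage' yields
    # fs_label='openstack' and vol_name='vol_to_manage'.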

    def manage_existing_get_size(self, volume, existing_vol_ref):
        """Gets the size to manage_existing.

        Returns the size of volume to be managed by manage_existing.

        :param volume: cinder volume to manage
        :param existing_vol_ref: existing volume to take under management
        """
        # Check that the reference is valid.
        if 'source-name' not in existing_vol_ref:
            reason = _('Reference must contain source-name element.')
            raise exception.ManageExistingInvalidReference(
                existing_ref=existing_vol_ref, reason=reason)

        ref_name = existing_vol_ref['source-name']
        fs_label, vol_name = self._get_info_from_vol_ref(ref_name)

        LOG.debug("File System: %(fs_label)s "
                  "Volume name: %(vol_name)s.",
                  {'fs_label': fs_label, 'vol_name': vol_name})

        lu_info = self.bend.get_existing_lu_info(self.config['hnas_cmd'],
                                                 self.config['mgmt_ip0'],
                                                 self.config['username'],
                                                 self.config['password'],
                                                 fs_label, vol_name)

        if fs_label in lu_info:
            # The fourth line of the LU listing holds the size,
            # e.g. "Size : 2 GB".
            aux = lu_info.split('\n')[3]
            size = aux.split(':')[1]
            size_unit = size.split(' ')[2]

            if size_unit == 'TB':
                return int(size.split(' ')[1]) * units.k
            else:
                return int(size.split(' ')[1])
        else:
            raise exception.ManageExistingInvalidReference(
                existing_ref=existing_vol_ref,
                reason=_('Volume not found on configured storage backend.'))
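
    # Worked example (sketch) against the listing format used in the tests:
    # for "Name : volume-test ... Size : 2 GB ...", split('\n')[3] is
    # "Size : 2 GB ", split(':')[1] is " 2 GB ", and split(' ')[1:3] gives
    # ['2', 'GB'], so 2 is returned (scaled by units.k when the unit is TB).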

    def manage_existing(self, volume, existing_vol_ref):
        """Manages an existing volume.

        The specified Cinder volume is to be taken into Cinder management.
        The driver will verify its existence and then rename it to the
        new Cinder volume name. It is expected that the existing volume
        reference is a File System and some volume_name;
        e.g., openstack/vol_to_manage

        :param volume: cinder volume to manage
        :param existing_vol_ref: driver-specific information used to identify
                                 a volume
        """
        ref_name = existing_vol_ref['source-name']
        fs_label, vol_name = self._get_info_from_vol_ref(ref_name)

        LOG.debug("Asked to manage iSCSI volume %(vol)s, with vol "
                  "ref %(ref)s.", {'vol': volume['id'],
                                   'ref': existing_vol_ref['source-name']})

        self._check_pool_and_fs(volume, fs_label)

        self.bend.rename_existing_lu(self.config['hnas_cmd'],
                                     self.config['mgmt_ip0'],
                                     self.config['username'],
                                     self.config['password'], fs_label,
                                     volume['name'], vol_name)

        LOG.info(_LI("Set newly managed Cinder volume name to %(name)s."),
                 {'name': volume['name']})

        lun = self.arid + '.' + volume['name']

        return {'provider_location': lun}

    def unmanage(self, volume):
        """Unmanages a volume from Cinder.

        Removes the specified volume from Cinder management.
        Does not delete the underlying backend storage object. A log entry
        will be made to notify the Admin that the volume is no longer being
        managed.

        :param volume: cinder volume to unmanage
        """
        svc = self._get_service(volume)

        new_name = 'unmanage-' + volume['name']
        vol_path = svc + '/' + volume['name']

        self.bend.rename_existing_lu(self.config['hnas_cmd'],
                                     self.config['mgmt_ip0'],
                                     self.config['username'],
                                     self.config['password'], svc, new_name,
                                     volume['name'])

        LOG.info(_LI("Cinder iSCSI volume with current path %(path)s is "
                     "no longer being managed. The new name is %(unm)s."),
                 {'path': vol_path, 'unm': new_name})
@@ -17,7 +17,10 @@
Volume driver for HDS HNAS NFS storage.
"""

import math
import os
import six
import socket
import time
from xml.etree import ElementTree as ETree

@@ -30,13 +33,14 @@ from oslo_utils import units
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder.image import image_utils
from cinder import utils as cutils
from cinder.volume.drivers.hitachi import hnas_backend
from cinder.volume.drivers import nfs
from cinder.volume import utils
from cinder.volume import volume_types


HDS_HNAS_NFS_VERSION = '4.0.0'

LOG = logging.getLogger(__name__)

@@ -150,8 +154,9 @@ class HDSNFSDriver(nfs.NfsDriver):
    Executes commands relating to Volumes.

    Version 1.0.0: Initial driver version
    Version 2.2.0: Added support to SSH authentication
    Version 3.0.0: Added pool aware scheduling
    Version 4.0.0: Added manage/unmanage features
    """

    def __init__(self, *args, **kwargs):
@@ -576,3 +581,215 @@ class HDSNFSDriver(nfs.NfsDriver):
            self._do_create_volume(volume)

        return {'provider_location': volume['provider_location']}

    def _convert_vol_ref_share_name_to_share_ip(self, vol_ref):
        """Converts the share point name to an IP address.

        The volume reference may have a DNS name portion in the share name.
        Convert that to an IP address and then restore the entire path.

        :param vol_ref: driver-specific information used to identify a volume
        :return: a volume reference where share is in IP format
        """

        # First strip out share and convert to IP format.
        share_split = vol_ref.split(':')

        try:
            vol_ref_share_ip = cutils.resolve_hostname(share_split[0])
        except socket.gaierror as e:
            LOG.error(_LE('Invalid hostname %(host)s'),
                      {'host': share_split[0]})
            LOG.debug('error: %s', e.strerror)
            raise

        # Now place back into volume reference.
        vol_ref_share = vol_ref_share_ip + ':' + share_split[1]

        return vol_ref_share
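
    # Example (sketch): 'my.data.point.com:/cinder/volume-test' becomes
    # '10.12.143.11:/cinder/volume-test' once the DNS portion is resolved.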

    def _get_share_mount_and_vol_from_vol_ref(self, vol_ref):
        """Get the NFS share, the NFS mount, and the volume from reference.

        Determine the NFS share point, the NFS mount point, and the volume
        (with possible path) from the given volume reference. Raise exception
        if unsuccessful.

        :param vol_ref: driver-specific information used to identify a volume
        :return: NFS Share, NFS mount, volume path or raise error
        """
        # Check that the reference is valid.
        if 'source-name' not in vol_ref:
            reason = _('Reference must contain source-name element.')
            raise exception.ManageExistingInvalidReference(
                existing_ref=vol_ref, reason=reason)
        vol_ref_name = vol_ref['source-name']

        self._ensure_shares_mounted()

        # If a share was declared as '1.2.3.4:/a/b/c' in the nfs_shares_config
        # file, but the admin tries to manage the file located at
        # 'my.hostname.com:/a/b/c/d.vol', this might cause a lookup miss below
        # when searching self._mounted_shares to see if we have an existing
        # mount that would work to access the volume-to-be-managed (a string
        # comparison is done instead of IP comparison).
        vol_ref_share = self._convert_vol_ref_share_name_to_share_ip(
            vol_ref_name)
        for nfs_share in self._mounted_shares:
            cfg_share = self._convert_vol_ref_share_name_to_share_ip(nfs_share)
            (orig_share, work_share,
             file_path) = vol_ref_share.partition(cfg_share)
            if work_share == cfg_share:
                file_path = file_path[1:]  # strip off leading path divider
                LOG.debug("Found possible share %s; checking mount.",
                          work_share)
                nfs_mount = self._get_mount_point_for_share(nfs_share)
                vol_full_path = os.path.join(nfs_mount, file_path)
                if os.path.isfile(vol_full_path):
                    LOG.debug("Found share %(share)s and vol %(path)s on "
                              "mount %(mnt)s.",
                              {'share': nfs_share, 'path': file_path,
                               'mnt': nfs_mount})
                    return nfs_share, nfs_mount, file_path
            else:
                LOG.debug("vol_ref %(ref)s not on share %(share)s.",
                          {'ref': vol_ref_share, 'share': nfs_share})

        raise exception.ManageExistingInvalidReference(
            existing_ref=vol_ref,
            reason=_('Volume not found on configured storage backend.'))
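
    # Partition sketch: with vol_ref_share '1.2.3.4:/a/b/c/d.vol' and
    # cfg_share '1.2.3.4:/a/b/c', str.partition() returns
    # ('', '1.2.3.4:/a/b/c', '/d.vol'), so file_path becomes 'd.vol'.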

    def manage_existing(self, volume, existing_vol_ref):
        """Manages an existing volume.

        The specified Cinder volume is to be taken into Cinder management.
        The driver will verify its existence and then rename it to the
        new Cinder volume name. It is expected that the existing volume
        reference is an NFS share point and some [/path]/volume;
        e.g., 10.10.32.1:/openstack/vol_to_manage
        or 10.10.32.1:/openstack/some_directory/vol_to_manage

        :param volume: cinder volume to manage
        :param existing_vol_ref: driver-specific information used to identify
                                 a volume
        """

        # Attempt to find NFS share, NFS mount, and volume path from vol_ref.
        (nfs_share, nfs_mount, vol_path
         ) = self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref)

        LOG.debug("Asked to manage NFS volume %(vol)s, with vol ref %(ref)s.",
                  {'vol': volume['id'],
                   'ref': existing_vol_ref['source-name']})
        self._check_pool_and_share(volume, nfs_share)
        if vol_path == volume['name']:
            LOG.debug("New Cinder volume %s name matches reference name: "
                      "no need to rename.", volume['name'])
        else:
            src_vol = os.path.join(nfs_mount, vol_path)
            dst_vol = os.path.join(nfs_mount, volume['name'])
            try:
                self._execute("mv", src_vol, dst_vol, run_as_root=False,
                              check_exit_code=True)
                LOG.debug("Setting newly managed Cinder volume name to %s.",
                          volume['name'])
                self._set_rw_permissions_for_all(dst_vol)
            except (OSError, processutils.ProcessExecutionError) as err:
                exception_msg = (_("Failed to manage existing volume "
                                   "%(name)s, because rename operation "
                                   "failed: Error msg: %(msg)s.")
                                 % {'name': existing_vol_ref['source-name'],
                                    'msg': six.text_type(err)})
                raise exception.VolumeBackendAPIException(data=exception_msg)
        return {'provider_location': nfs_share}

    def _check_pool_and_share(self, volume, nfs_share):
        """Validates the pool and the NFS share.

        Checks if the NFS share for the volume-type chosen matches the
        one passed in the volume reference. Also, checks if the pool
        for the volume type matches the pool for the host passed.

        :param volume: cinder volume reference
        :param nfs_share: NFS share passed to manage
        """
        pool_from_vol_type = self.get_pool(volume)

        pool_from_host = utils.extract_host(volume['host'], level='pool')

        if self.config['services'][pool_from_vol_type]['hdp'] != nfs_share:
            msg = (_("Failed to manage existing volume because the pool of "
                     "the volume type chosen (%(share_type)s) does not match "
                     "the NFS share passed in the volume reference "
                     "(%(share)s).")
                   % {'share_type':
                      self.config['services'][pool_from_vol_type]['hdp'],
                      'share': nfs_share})
            raise exception.ManageExistingVolumeTypeMismatch(reason=msg)

        if pool_from_host != pool_from_vol_type:
            msg = (_("Failed to manage existing volume because the pool of "
                     "the volume type chosen (%(pool_vol)s) does not match "
                     "the pool of the host (%(pool_host)s).")
                   % {'pool_vol': pool_from_vol_type,
                      'pool_host': pool_from_host})
            raise exception.ManageExistingVolumeTypeMismatch(reason=msg)

    def manage_existing_get_size(self, volume, existing_vol_ref):
        """Returns the size of volume to be managed by manage_existing.

        When calculating the size, round up to the next GB.

        :param volume: cinder volume to manage
        :param existing_vol_ref: existing volume to take under management
        """

        # Attempt to find NFS share, NFS mount, and volume path from vol_ref.
        (nfs_share, nfs_mount, vol_path
         ) = self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref)

        try:
            LOG.debug("Asked to get size of NFS vol_ref %s.",
                      existing_vol_ref['source-name'])

            file_path = os.path.join(nfs_mount, vol_path)
            file_size = float(cutils.get_file_size(file_path)) / units.Gi
            vol_size = int(math.ceil(file_size))
        except (OSError, ValueError):
            exception_message = (_("Failed to manage existing volume "
                                   "%(name)s, because of error in getting "
                                   "volume size.")
                                 % {'name': existing_vol_ref['source-name']})
            raise exception.VolumeBackendAPIException(data=exception_message)

        LOG.debug("Reporting size of NFS volume ref %(ref)s as %(size)d GB.",
                  {'ref': existing_vol_ref['source-name'], 'size': vol_size})

        return vol_size
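
    # Worked example (sketch): a 4000000000-byte file divided by units.Gi
    # (2**30) gives ~3.73, and math.ceil() rounds this up to the 4 GB
    # reported back to Cinder (matching the mocked size in the tests above).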

    def unmanage(self, volume):
        """Removes the specified volume from Cinder management.

        It does not delete the underlying backend storage object. A log entry
        will be made to notify the Admin that the volume is no longer being
        managed.

        :param volume: cinder volume to unmanage
        """
        vol_str = CONF.volume_name_template % volume['id']
        path = self._get_mount_point_for_share(volume['provider_location'])

        new_str = "unmanage-" + vol_str

        vol_path = os.path.join(path, vol_str)
        new_path = os.path.join(path, new_str)

        try:
            self._execute("mv", vol_path, new_path,
                          run_as_root=False, check_exit_code=True)

            LOG.info(_LI("Cinder NFS volume with current path %(cr)s is "
                         "no longer being managed."), {'cr': new_path})

        except (OSError, ValueError):
            LOG.error(_LE("The NFS Volume %(cr)s does not exist."),
                      {'cr': new_path})