Merge "Add Oracle ZFS Storage Appliance ISCSI Driver"
This commit is contained in:
commit
9bddf33f9f
305
cinder/tests/test_zfssa.py
Normal file
305
cinder/tests/test_zfssa.py
Normal file
@ -0,0 +1,305 @@
|
||||
# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
Unit tests for Oracle's ZFSSA Cinder volume driver
|
||||
"""
|
||||
|
||||
import mock
|
||||
|
||||
from cinder.openstack.common import log as logging
|
||||
from cinder.openstack.common import units
|
||||
from cinder import test
|
||||
from cinder.volume import configuration as conf
|
||||
from cinder.volume.drivers.zfssa import zfssaiscsi as iscsi
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class FakeZFSSA(object):
    """Fake ZFS Storage Appliance REST client.

    Stand-in for the real ZFSSA API object in the driver unit tests.  It
    records the login user and target host and answers API calls with
    canned REST-style dictionaries.  Calls return an empty dict until both
    ``set_host()`` and ``login()`` have been made, mimicking an appliance
    that rejects requests from an unauthenticated client.
    """

    def __init__(self):
        # Set by login()/set_host(); both must be non-empty before the
        # other methods return data.
        self.user = None
        self.host = None

    def login(self, user):
        self.user = user

    def set_host(self, host):
        self.host = host

    def _ready(self):
        """Return True only once both host and user have been provided."""
        return bool(self.host and self.user)

    def create_project(self, pool, project, compression, logbias):
        if not self._ready():
            return {}
        return {"status": "online",
                "name": "pool",
                "usage": {"available": 10,
                          "total": 10,
                          "dedupratio": 100,
                          "used": 1},
                "peer": "00000000-0000-0000-0000-000000000000",
                "owner": "host",
                "asn": "11111111-2222-3333-4444-555555555555"}

    def create_initiator(self, init, initgrp, chapuser, chapsecret):
        if not self._ready():
            return {}
        return {"href": "fake_href",
                "alias": "fake_alias",
                "initiator": "fake_iqn.1993-08.org.fake:01:000000000000",
                "chapuser": "",
                "chapsecret": ""}

    def add_to_initiatorgroup(self, init, initgrp):
        if not self._ready():
            return {}
        return {"href": "fake_href",
                "name": "fake_initgrp",
                "initiators": ["fake_iqn.1993-08.org.fake:01:000000000000"]}

    def create_target(self, tgtalias, inter, tchapuser, tchapsecret):
        if not self._ready():
            return {}
        return {"href": "fake_href",
                "alias": "fake_tgtgrp",
                "iqn": "iqn.1986-03.com.sun:02:00000-aaaa-bbbb-cccc-ddddd",
                "auth": "none",
                "targetchapuser": "",
                "targetchapsecret": "",
                "interfaces": ["eth0"]}

    def add_to_targetgroup(self, iqn, tgtgrp):
        if not self._ready():
            return {}
        return {"href": "fake_href",
                "name": "fake_tgtgrp",
                "targets":
                ["iqn.1986-03.com.sun:02:00000-aaaa-bbbb-cccc-ddddd"]}

    def get_lun(self, pool, project, lun):
        # Unconditionally returns data (no login guard), matching the
        # original fake's behavior.
        return {
            'guid': '600144F0F8FBD5BD000053CE53AB0001',
            'number': 0,
            'initiatorgroup': 'fake_initgrp',
            'size': 1 * units.Gi
        }

    def get_target(self, target):
        return 'iqn.1986-03.com.sun:02:00000-aaaa-bbbb-cccc-ddddd'

    def create_lun(self, pool, project, lun, volsize, targetgroup,
                   volblocksize, sparse, compression, logbias):
        # Bug fix: this and the methods below previously guarded with
        # 'and' instead of 'or', so a half-logged-in client (host set but
        # no user, or vice versa) still received data.
        if not self._ready():
            return {}
        return {"logbias": logbias,
                "compression": compression,
                "status": "online",
                "lunguid": "600144F0F8FBD5BD000053CE53AB0001",
                "initiatorgroup": ["fake_initgrp"],
                "volsize": volsize,
                "pool": pool,
                "volblocksize": volblocksize,
                "name": lun,
                "project": project,
                "sparse": sparse,
                "targetgroup": targetgroup}

    def delete_lun(self, pool, project, lun):
        if not self._ready():
            return {}
        return {"pool": pool,
                "project": project,
                "name": lun}

    def create_snapshot(self, pool, project, vol, snap):
        if not self._ready():
            return {}
        return {"name": snap,
                "numclones": 0,
                "share": vol,
                "project": project,
                "pool": pool}

    def delete_snapshot(self, pool, project, vol, snap):
        if not self._ready():
            return {}
        return {"name": snap,
                "share": vol,
                "project": project,
                "pool": pool}

    def clone_snapshot(self, pool, project, pvol, snap, vol):
        if not self._ready():
            return {}
        return {"origin": {"project": project,
                           "snapshot": snap,
                           "share": pvol,
                           "pool": pool},
                "logbias": "latency",
                "assignednumber": 1,
                "status": "online",
                "lunguid": "600144F0F8FBD5BD000053CE67A50002",
                "volsize": 1,
                "pool": pool,
                "name": vol,
                "project": project}

    def set_lun_initiatorgroup(self, pool, project, vol, initgrp):
        if not self._ready():
            return {}
        return {"lunguid": "600144F0F8FBD5BD000053CE67A50002",
                "pool": pool,
                "name": vol,
                "project": project,
                "initiatorgroup": ["fake_initgrp"]}

    def has_clones(self, pool, project, vol, snapshot):
        return False

    def set_lun_props(self, pool, project, vol, **kargs):
        # Raises KeyError when 'volsize' is not supplied, as the original
        # fake did; callers in these tests always pass it.
        if not self._ready():
            return {}
        return {"pool": pool,
                "name": vol,
                "project": project,
                "volsize": kargs['volsize']}
||||
|
||||
class TestZFSSAISCSIDriver(test.TestCase):
    """Unit tests for ZFSSAISCSIDriver, backed by FakeZFSSA."""

    # Minimal volume/snapshot fixtures shared by all test methods.
    test_vol = {
        'name': 'cindervol',
        'size': 1
    }

    test_snap = {
        'name': 'cindersnap',
        'volume_name': test_vol['name']
    }

    test_vol_snap = {
        'name': 'cindersnapvol',
        'size': test_vol['size']
    }

    def __init__(self, method):
        super(TestZFSSAISCSIDriver, self).__init__(method)

    @mock.patch.object(iscsi, 'factory_zfssa')
    def setUp(self, _factory_zfssa):
        """Build a driver wired to FakeZFSSA instead of a real appliance.

        factory_zfssa is patched so ZFSSAISCSIDriver.do_setup() receives
        the fake client.
        """
        super(TestZFSSAISCSIDriver, self).setUp()
        self._create_fake_config()
        _factory_zfssa.return_value = FakeZFSSA()
        self.drv = iscsi.ZFSSAISCSIDriver(configuration=self.configuration)
        self.drv.do_setup({})

    def _create_fake_config(self):
        """Populate a mock Configuration with the values do_setup reads."""
        self.configuration = mock.Mock(spec=conf.Configuration)
        self.configuration.san_ip = '1.1.1.1'
        self.configuration.san_login = 'user'
        self.configuration.san_password = 'passwd'
        self.configuration.zfssa_pool = 'pool'
        self.configuration.zfssa_project = 'project'
        self.configuration.zfssa_lun_volblocksize = '8k'
        self.configuration.zfssa_lun_sparse = 'false'
        self.configuration.zfssa_lun_logbias = 'latency'
        self.configuration.zfssa_lun_compression = 'off'
        self.configuration.zfssa_initiator_group = 'test-init-grp1'
        self.configuration.zfssa_initiator = \
            'iqn.1993-08.org.debian:01:daa02db2a827'
        self.configuration.zfssa_initiator_user = ''
        self.configuration.zfssa_initiator_password = ''
        self.configuration.zfssa_target_group = 'test-target-grp1'
        self.configuration.zfssa_target_user = ''
        self.configuration.zfssa_target_password = ''
        self.configuration.zfssa_target_portal = '1.1.1.1:3260'
        self.configuration.zfssa_target_interfaces = 'e1000g0'

    def test_create_delete_volume(self):
        """Volume create/delete round-trip completes without error."""
        self.drv.create_volume(self.test_vol)
        self.drv.delete_volume(self.test_vol)

    def test_create_delete_snapshot(self):
        """Snapshot create/delete round-trip completes without error."""
        self.drv.create_volume(self.test_vol)
        self.drv.create_snapshot(self.test_snap)
        self.drv.delete_snapshot(self.test_snap)
        self.drv.delete_volume(self.test_vol)

    def test_create_volume_from_snapshot(self):
        """Cloning a volume from a snapshot completes without error."""
        self.drv.create_volume(self.test_vol)
        self.drv.create_snapshot(self.test_snap)
        self.drv.create_volume_from_snapshot(self.test_vol_snap,
                                             self.test_snap)
        self.drv.delete_volume(self.test_vol)

    def test_create_export(self):
        """create_export runs against an existing volume."""
        self.drv.create_volume(self.test_vol)
        self.drv.create_export({}, self.test_vol)
        self.drv.delete_volume(self.test_vol)

    def test_remove_export(self):
        """remove_export runs against an existing volume."""
        self.drv.create_volume(self.test_vol)
        self.drv.remove_export({}, self.test_vol)
        self.drv.delete_volume(self.test_vol)

    def test_get_volume_stats(self):
        """get_volume_stats(refresh=False) returns without contacting
        the backend.
        """
        self.drv.get_volume_stats(refresh=False)

    def test_extend_volume(self):
        """extend_volume resizes an existing volume without error."""
        self.drv.create_volume(self.test_vol)
        self.drv.extend_volume(self.test_vol, 3)
        self.drv.delete_volume(self.test_vol)

    def tearDown(self):
        super(TestZFSSAISCSIDriver, self).tearDown()
0
cinder/volume/drivers/zfssa/__init__.py
Normal file
0
cinder/volume/drivers/zfssa/__init__.py
Normal file
355
cinder/volume/drivers/zfssa/restclient.py
Normal file
355
cinder/volume/drivers/zfssa/restclient.py
Normal file
@ -0,0 +1,355 @@
|
||||
# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
ZFS Storage Appliance REST API Client Programmatic Interface
|
||||
"""
|
||||
|
||||
import httplib
|
||||
import json
|
||||
import StringIO
|
||||
import time
|
||||
import urllib2
|
||||
|
||||
from cinder.i18n import _
|
||||
from cinder.openstack.common import log
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class Status(object):
    """Result HTTP Status.

    Namespace of the HTTP status codes the REST client deals with; the
    class is never meaningfully instantiated.
    """

    def __init__(self):
        # No instance state; the class is used purely as a constant holder.
        pass

    #: Request return OK
    OK = httplib.OK

    #: New resource created successfully
    CREATED = httplib.CREATED

    #: Command accepted
    ACCEPTED = httplib.ACCEPTED

    #: Command returned OK but no data will be returned
    NO_CONTENT = httplib.NO_CONTENT

    #: Bad Request
    BAD_REQUEST = httplib.BAD_REQUEST

    #: User is not authorized
    UNAUTHORIZED = httplib.UNAUTHORIZED

    #: The request is not allowed
    FORBIDDEN = httplib.FORBIDDEN

    #: The requested resource was not found
    NOT_FOUND = httplib.NOT_FOUND

    #: The request is not allowed
    NOT_ALLOWED = httplib.METHOD_NOT_ALLOWED

    #: Request timed out
    TIMEOUT = httplib.REQUEST_TIMEOUT

    #: Invalid request
    CONFLICT = httplib.CONFLICT

    #: Service Unavailable
    BUSY = httplib.SERVICE_UNAVAILABLE
|
||||
|
||||
class RestResult(object):
    """Result from a REST API operation."""

    def __init__(self, response=None, err=None):
        """Initialize a RestResult containing the results from a REST call.

        :param response: HTTP response object, or None on failure
        :param err: HTTPError describing a failed request, or None
        """
        self.response = response
        self.error = err
        self.data = ""
        self.status = 0
        if self.response:
            self.status = self.response.getcode()
            # Drain the body; read() may deliver it in several chunks.
            result = self.response.read()
            while result:
                self.data += result
                result = self.response.read()

        if self.error:
            self.status = self.error.code
            self.data = httplib.responses[self.status]

        # Lazy %-args (instead of eager '%' formatting) so the strings are
        # only built when debug logging is enabled.
        LOG.debug('Response code: %s', self.status)
        LOG.debug('Response data: %s', self.data)

    def get_header(self, name):
        """Get an HTTP header with the given name from the results.

        :param name: HTTP header name
        :return: The header value or None if no value is found
        """
        if self.response is None:
            return None
        info = self.response.info()
        return info.getheader(name)
|
||||
|
||||
class RestClientError(Exception):
    """Exception raised for ZFS REST API client errors."""

    def __init__(self, status, name="ERR_INTERNAL", message=None):
        """Create a REST Response exception.

        :param status: HTTP response status
        :param name: The name of the REST API error type
        :param message: Descriptive error message returned from REST call
        """
        super(RestClientError, self).__init__(message)
        self.code = status
        self.name = name
        # Known HTTP statuses replace the supplied message with the
        # canonical reason phrase, exactly as before.
        self.msg = httplib.responses.get(status, message)

    def __str__(self):
        return "%d %s %s" % (self.code, self.name, self.msg)
|
||||
|
||||
class RestClientURL(object):
    """ZFSSA urllib2-based REST client."""

    def __init__(self, url, **kwargs):
        """Initialize a REST client.

        :param url: The ZFSSA REST API URL
        :key session: HTTP Cookie value of x-auth-session obtained from a
                      normal BUI login.
        :key timeout: Time in seconds to wait for command to complete.
                      (Default is 60 seconds)
        """
        self.url = url
        self.local = kwargs.get("local", False)
        self.base_path = kwargs.get("base_path", "/api")
        self.timeout = kwargs.get("timeout", 60)
        # Bug fix: headers was previously initialized to None, assigned
        # into (TypeError when a session was passed), and then rebuilt,
        # discarding the session cookie.  Build the dict first, then add
        # the session to it.
        self.headers = {"content-type": "application/json"}
        if kwargs.get('session'):
            self.headers['x-auth-session'] = kwargs.get('session')

        self.do_logout = False
        self.auth_str = None

    def _path(self, path, base_path=None):
        """Build the full REST URL for *path*.

        Absolute http(s) URLs pass through unchanged; otherwise base_path
        is prepended unless already present.  In local mode any leading
        '/api' is stripped before the URL is assembled.
        """
        if path.startswith("http://") or path.startswith("https://"):
            return path
        if base_path is None:
            base_path = self.base_path
        if not path.startswith(base_path) and not (
                self.local and ("/api" + path).startswith(base_path)):
            path = "%s%s" % (base_path, path)
        if self.local and path.startswith("/api"):
            path = path[4:]
        return self.url + path

    def _authorize(self):
        """Perform authorization, setting the x-auth-session header."""
        self.headers['authorization'] = 'Basic %s' % self.auth_str
        if 'x-auth-session' in self.headers:
            del self.headers['x-auth-session']

        try:
            result = self.post("/access/v1")
            del self.headers['authorization']
            if result.status == httplib.CREATED:
                self.headers['x-auth-session'] = \
                    result.get_header('x-auth-session')
                self.do_logout = True
                LOG.info(_('ZFSSA version: %s'),
                         result.get_header('x-zfssa-version'))

            elif result.status == httplib.NOT_FOUND:
                # Message previously contained an accidental run of
                # whitespace from a backslash line continuation.
                raise RestClientError(result.status, name="ERR_RESTError",
                                      message="REST Not Available: "
                                              "Please Upgrade")

        except RestClientError as err:
            # Bug fix: 'authorization' may already have been removed on
            # the success path above; pop() avoids a masking KeyError.
            self.headers.pop('authorization', None)
            raise err

    def login(self, auth_str):
        """Login to an appliance using a user name and password.

        Start a session like what is done logging into the BUI. This is not a
        requirement to run REST commands, since the protocol is stateless.
        What is does is set up a cookie session so that some server side
        caching can be done. If login is used remember to call logout when
        finished.

        :param auth_str: Authorization string (base64)
        """
        self.auth_str = auth_str
        self._authorize()

    def logout(self):
        """Logout of an appliance."""
        result = None
        try:
            result = self.delete("/access/v1", base_path="/api")
        except RestClientError:
            # Best-effort; session state is cleared regardless.
            pass

        self.headers.clear()
        self.do_logout = False
        return result

    def islogin(self):
        """Return whether the client holds a login session."""
        return self.do_logout

    @staticmethod
    def mkpath(*args, **kwargs):
        """Make a path?query string for making a REST request.

        :cmd_params args: The path part
        :cmd_params kwargs: The query part
        """
        buf = StringIO.StringIO()
        query = "?"
        for arg in args:
            buf.write("/")
            buf.write(arg)
        for k in kwargs:
            buf.write(query)
            if query == "?":
                query = "&"
            buf.write(k)
            buf.write("=")
            buf.write(kwargs[k])
        return buf.getvalue()

    def request(self, path, request, body=None, **kwargs):
        """Make an HTTP request and return the results.

        :param path: Path used with the initialized URL to make a request
        :param request: HTTP request type (GET, POST, PUT, DELETE)
        :param body: HTTP body of request
        :key accept: Set HTTP 'Accept' header with this value
        :key base_path: Override the base_path for this request
        :key content: Set HTTP 'Content-Type' header with this value
        """
        out_hdrs = dict.copy(self.headers)
        if kwargs.get("accept"):
            out_hdrs['accept'] = kwargs.get("accept")

        if body:
            if isinstance(body, dict):
                body = str(json.dumps(body))

        if body and len(body):
            out_hdrs['content-length'] = len(body)

        zfssaurl = self._path(path, kwargs.get("base_path"))
        req = urllib2.Request(zfssaurl, body, out_hdrs)
        # Force the HTTP verb; urllib2 only knows GET/POST natively.
        req.get_method = lambda: request
        maxreqretries = kwargs.get("maxreqretries", 10)
        retry = 0
        response = None

        LOG.debug('Request: %s %s', request, zfssaurl)
        LOG.debug('Out headers: %s', out_hdrs)
        if body and body != '':
            LOG.debug('Body: %s', body)

        while retry < maxreqretries:
            try:
                response = urllib2.urlopen(req, timeout=self.timeout)
            except urllib2.HTTPError as err:
                LOG.error(_('REST Not Available: %s'), err.code)
                if err.code == httplib.SERVICE_UNAVAILABLE and \
                   retry < maxreqretries:
                    # Appliance busy: wait and retry the same request.
                    retry += 1
                    time.sleep(1)
                    LOG.error(_('Server Busy retry request: %s'), retry)
                    continue
                if (err.code == httplib.UNAUTHORIZED or
                    err.code == httplib.INTERNAL_SERVER_ERROR) and \
                   '/access/v1' not in zfssaurl:
                    # Session may have expired: re-authorize and retry,
                    # except for the auth endpoint itself.
                    try:
                        LOG.error(_('Authorizing request: %(zfssaurl)s '
                                    'retry: %(retry)d .'),
                                  {'zfssaurl': zfssaurl,
                                   'retry': retry})
                        self._authorize()
                        req.add_header('x-auth-session',
                                       self.headers['x-auth-session'])
                    except RestClientError:
                        pass
                    retry += 1
                    time.sleep(1)
                    continue

                return RestResult(err=err)

            except urllib2.URLError as err:
                LOG.error(_('URLError: %s'), err.reason)
                raise RestClientError(-1, name="ERR_URLError",
                                      message=err.reason)

            break

        if response and response.getcode() == httplib.SERVICE_UNAVAILABLE and \
           retry >= maxreqretries:
            raise RestClientError(response.getcode(), name="ERR_HTTPError",
                                  message="REST Not Available: Disabled")

        return RestResult(response=response)

    def get(self, path, **kwargs):
        """Make an HTTP GET request.

        :param path: Path to resource.
        """
        return self.request(path, "GET", **kwargs)

    def post(self, path, body="", **kwargs):
        """Make an HTTP POST request.

        :param path: Path to resource.
        :param body: Post data content
        """
        return self.request(path, "POST", body, **kwargs)

    def put(self, path, body="", **kwargs):
        """Make an HTTP PUT request.

        :param path: Path to resource.
        :param body: Put data content
        """
        return self.request(path, "PUT", body, **kwargs)

    def delete(self, path, **kwargs):
        """Make an HTTP DELETE request.

        :param path: Path to resource that will be deleted.
        """
        return self.request(path, "DELETE", **kwargs)

    def head(self, path, **kwargs):
        """Make an HTTP HEAD request.

        :param path: Path to resource.
        """
        return self.request(path, "HEAD", **kwargs)
385
cinder/volume/drivers/zfssa/zfssaiscsi.py
Normal file
385
cinder/volume/drivers/zfssa/zfssaiscsi.py
Normal file
@ -0,0 +1,385 @@
|
||||
# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
ZFS Storage Appliance Cinder Volume Driver
|
||||
"""
|
||||
import base64
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
from cinder import exception
|
||||
from cinder.i18n import _
|
||||
from cinder.openstack.common import log
|
||||
from cinder.openstack.common import units
|
||||
from cinder.volume import driver
|
||||
from cinder.volume.drivers.san import san
|
||||
from cinder.volume.drivers.zfssa import zfssarest
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
# Configuration options specific to the ZFSSA iSCSI driver.  Registered
# globally below and appended to the driver's own configuration in
# ZFSSAISCSIDriver.__init__.
ZFSSA_OPTS = [
    cfg.StrOpt('zfssa_pool',
               help='Storage pool name.'),
    cfg.StrOpt('zfssa_project',
               help='Project name.'),
    cfg.StrOpt('zfssa_lun_volblocksize', default='8k',
               help='Block size: 512, 1k, 2k, 4k, 8k, 16k, 32k, 64k, 128k.'),
    cfg.BoolOpt('zfssa_lun_sparse', default=False,
                help='Flag to enable sparse (thin-provisioned): True, False.'),
    cfg.StrOpt('zfssa_lun_compression', default='',
               help='Data compression-off, lzjb, gzip-2, gzip, gzip-9.'),
    cfg.StrOpt('zfssa_lun_logbias', default='',
               help='Synchronous write bias-latency, throughput.'),
    cfg.StrOpt('zfssa_initiator_group', default='',
               help='iSCSI initiator group.'),
    cfg.StrOpt('zfssa_initiator', default='',
               help='iSCSI initiator IQNs. (comma separated)'),
    cfg.StrOpt('zfssa_initiator_user', default='',
               help='iSCSI initiator CHAP user.'),
    cfg.StrOpt('zfssa_initiator_password', default='',
               help='iSCSI initiator CHAP password.'),
    cfg.StrOpt('zfssa_target_group', default='tgt-grp',
               help='iSCSI target group name.'),
    cfg.StrOpt('zfssa_target_user', default='',
               help='iSCSI target CHAP user.'),
    cfg.StrOpt('zfssa_target_password', default='',
               help='iSCSI target CHAP password.'),
    cfg.StrOpt('zfssa_target_portal',
               help='iSCSI target portal (Data-IP:Port, w.x.y.z:3260).'),
    cfg.StrOpt('zfssa_target_interfaces',
               help='Network interfaces of iSCSI targets. (comma separated)')
]

CONF.register_opts(ZFSSA_OPTS)
||||
|
||||
|
||||
def factory_zfssa():
    """Return a new ZFSSA REST API client.

    Kept as a module-level factory so tests can substitute a fake
    appliance (see the mock.patch.object on it in the unit tests).
    """
    return zfssarest.ZFSSAApi()
|
||||
|
||||
class ZFSSAISCSIDriver(driver.ISCSIDriver):
|
||||
"""ZFSSA Cinder volume driver"""
|
||||
|
||||
VERSION = '1.0.0'
|
||||
protocol = 'iSCSI'
|
||||
|
||||
    def __init__(self, *args, **kwargs):
        """Initialize the driver and register ZFSSA and SAN config options."""
        super(ZFSSAISCSIDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(ZFSSA_OPTS)
        self.configuration.append_config_values(san.san_opts)
        # REST client is created later, in do_setup().
        self.zfssa = None
        # Stats cache filled by _update_volume_status().
        self._stats = None
||||
|
||||
def _get_target_alias(self):
|
||||
"""return target alias"""
|
||||
return self.configuration.zfssa_target_group
|
||||
|
||||
    def do_setup(self, context):
        """Setup - create multiple elements.

        Project, initiators, initiatorgroup, target and targetgroup.

        :param context: request context (unused here)
        """
        lcfg = self.configuration
        msg = (_('Connecting to host: %s.') % lcfg.san_ip)
        LOG.info(msg)
        self.zfssa = factory_zfssa()
        self.zfssa.set_host(lcfg.san_ip)
        # HTTP basic-auth credential; encodestring appends a trailing
        # newline, hence the [:-1].
        auth_str = base64.encodestring('%s:%s' %
                                       (lcfg.san_login,
                                        lcfg.san_password))[:-1]
        self.zfssa.login(auth_str)
        self.zfssa.create_project(lcfg.zfssa_pool, lcfg.zfssa_project,
                                  compression=lcfg.zfssa_lun_compression,
                                  logbias=lcfg.zfssa_lun_logbias)

        # Warn when initiators are configured but the (empty/default)
        # initiator group means the list won't be applied.
        if (lcfg.zfssa_initiator != '' and
            (lcfg.zfssa_initiator_group == '' or
             lcfg.zfssa_initiator_group == 'default')):
            msg = (_('zfssa_initiator: %(ini)s'
                     ' wont be used on '
                     'zfssa_initiator_group= %(inigrp)s.')
                   % {'ini': lcfg.zfssa_initiator,
                      'inigrp': lcfg.zfssa_initiator_group})

            LOG.warning(msg)
        # Setup initiator and initiator group
        if (lcfg.zfssa_initiator != '' and
                lcfg.zfssa_initiator_group != '' and
                lcfg.zfssa_initiator_group != 'default'):
            for initiator in lcfg.zfssa_initiator.split(','):
                # Initiator alias is '<group>-<iqn>'.
                self.zfssa.create_initiator(initiator,
                                            lcfg.zfssa_initiator_group + '-' +
                                            initiator,
                                            chapuser=
                                            lcfg.zfssa_initiator_user,
                                            chapsecret=
                                            lcfg.zfssa_initiator_password)
                self.zfssa.add_to_initiatorgroup(initiator,
                                                 lcfg.zfssa_initiator_group)
        # Parse interfaces
        interfaces = []
        for interface in lcfg.zfssa_target_interfaces.split(','):
            if interface == '':
                continue
            interfaces.append(interface)

        # Setup target and target group
        iqn = self.zfssa.create_target(
            self._get_target_alias(),
            interfaces,
            tchapuser=lcfg.zfssa_target_user,
            tchapsecret=lcfg.zfssa_target_password)

        self.zfssa.add_to_targetgroup(iqn, lcfg.zfssa_target_group)
||||
|
||||
    def check_for_setup_error(self):
        """Check that driver can login.

        Check also pool, project, initiators, initiatorgroup, target and
        targetgroup.
        """
        lcfg = self.configuration

        self.zfssa.verify_pool(lcfg.zfssa_pool)
        self.zfssa.verify_project(lcfg.zfssa_pool, lcfg.zfssa_project)

        # Initiators are only verified when a non-default initiator group
        # is configured, mirroring the condition used in do_setup().
        if (lcfg.zfssa_initiator != '' and
                lcfg.zfssa_initiator_group != '' and
                lcfg.zfssa_initiator_group != 'default'):
            for initiator in lcfg.zfssa_initiator.split(','):
                self.zfssa.verify_initiator(initiator)

        self.zfssa.verify_target(self._get_target_alias())
||||
|
||||
def _get_provider_info(self, volume):
|
||||
"""return provider information"""
|
||||
lcfg = self.configuration
|
||||
lun = self.zfssa.get_lun(lcfg.zfssa_pool,
|
||||
lcfg.zfssa_project, volume['name'])
|
||||
iqn = self.zfssa.get_target(self._get_target_alias())
|
||||
loc = "%s %s %s" % (lcfg.zfssa_target_portal, iqn, lun['number'])
|
||||
LOG.debug('_get_provider_info: provider_location: %s' % loc)
|
||||
provider = {'provider_location': loc}
|
||||
if lcfg.zfssa_target_user != '' and lcfg.zfssa_target_password != '':
|
||||
provider['provider_auth'] = ('CHAP %s %s' %
|
||||
lcfg.zfssa_target_user,
|
||||
lcfg.zfssa_target_password)
|
||||
|
||||
return provider
|
||||
|
||||
def create_volume(self, volume):
|
||||
"""Create a volume on ZFSSA"""
|
||||
LOG.debug('zfssa.create_volume: volume=' + volume['name'])
|
||||
lcfg = self.configuration
|
||||
volsize = str(volume['size']) + 'g'
|
||||
self.zfssa.create_lun(lcfg.zfssa_pool,
|
||||
lcfg.zfssa_project,
|
||||
volume['name'],
|
||||
volsize,
|
||||
targetgroup=lcfg.zfssa_target_group,
|
||||
volblocksize=lcfg.zfssa_lun_volblocksize,
|
||||
sparse=lcfg.zfssa_lun_sparse,
|
||||
compression=lcfg.zfssa_lun_compression,
|
||||
logbias=lcfg.zfssa_lun_logbias)
|
||||
|
||||
return self._get_provider_info(volume)
|
||||
|
||||
    def delete_volume(self, volume):
        """Deletes a volume with the given volume['name']."""
        LOG.debug('zfssa.delete_volume: name=' + volume['name'])
        lcfg = self.configuration
        lun2del = self.zfssa.get_lun(lcfg.zfssa_pool,
                                     lcfg.zfssa_project,
                                     volume['name'])
        # Delete clone temp snapshot. see create_cloned_volume()
        if 'origin' in lun2del and 'id' in volume:
            if lun2del['nodestroy']:
                # Clear the nodestroy guard so the LUN can be removed.
                self.zfssa.set_lun_props(lcfg.zfssa_pool,
                                         lcfg.zfssa_project,
                                         volume['name'],
                                         nodestroy=False)

            tmpsnap = 'tmp-snapshot-%s' % volume['id']
            if lun2del['origin']['snapshot'] == tmpsnap:
                # NOTE(review): deleting the origin temp snapshot appears
                # to stand in for deleting the clone LUN itself (the
                # delete_lun call below is skipped) — confirm against
                # ZFSSAApi.delete_snapshot semantics.
                self.zfssa.delete_snapshot(lcfg.zfssa_pool,
                                           lcfg.zfssa_project,
                                           lun2del['origin']['share'],
                                           lun2del['origin']['snapshot'])
                return

        self.zfssa.delete_lun(pool=lcfg.zfssa_pool,
                              project=lcfg.zfssa_project,
                              lun=volume['name'])
||||
|
||||
def create_snapshot(self, snapshot):
|
||||
"""Creates a snapshot with the given snapshot['name'] of the
|
||||
snapshot['volume_name']
|
||||
"""
|
||||
LOG.debug('zfssa.create_snapshot: snapshot=' + snapshot['name'])
|
||||
lcfg = self.configuration
|
||||
self.zfssa.create_snapshot(lcfg.zfssa_pool,
|
||||
lcfg.zfssa_project,
|
||||
snapshot['volume_name'],
|
||||
snapshot['name'])
|
||||
|
||||
def delete_snapshot(self, snapshot):
|
||||
"""Deletes a snapshot."""
|
||||
LOG.debug('zfssa.delete_snapshot: snapshot=' + snapshot['name'])
|
||||
lcfg = self.configuration
|
||||
has_clones = self.zfssa.has_clones(lcfg.zfssa_pool,
|
||||
lcfg.zfssa_project,
|
||||
snapshot['volume_name'],
|
||||
snapshot['name'])
|
||||
if has_clones:
|
||||
LOG.error(_('Snapshot %s: has clones') % snapshot['name'])
|
||||
raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
|
||||
|
||||
self.zfssa.delete_snapshot(lcfg.zfssa_pool,
|
||||
lcfg.zfssa_project,
|
||||
snapshot['volume_name'],
|
||||
snapshot['name'])
|
||||
|
||||
    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot - clone a snapshot"""
        LOG.debug('zfssa.create_volume_from_snapshot: volume=' +
                  volume['name'])
        LOG.debug('zfssa.create_volume_from_snapshot: snapshot=' +
                  snapshot['name'])
        # _verify_clone_size is defined elsewhere in this driver;
        # presumably it checks the requested size against the snapshot's
        # LUN size — confirm there.
        if not self._verify_clone_size(snapshot, volume['size'] * units.Gi):
            # NOTE(review): the concatenated message renders as
            # '...%(size)d onSnapshot: ...' (missing space).
            exception_msg = (_('Error verifying clone size on '
                               'Volume clone: %(clone)s '
                               'Size: %(size)d on'
                               'Snapshot: %(snapshot)s')
                             % {'clone': volume['name'],
                                'size': volume['size'],
                                'snapshot': snapshot['name']})
            LOG.error(exception_msg)
            raise exception.InvalidInput(reason=exception_msg)

        lcfg = self.configuration
        self.zfssa.clone_snapshot(lcfg.zfssa_pool,
                                  lcfg.zfssa_project,
                                  snapshot['volume_name'],
                                  snapshot['name'],
                                  volume['name'])
||||
|
||||
def _update_volume_status(self):
    """Refresh self._stats with capacity info from the backend pool."""
    LOG.debug("Updating volume status")
    self._stats = None
    data = {
        "volume_backend_name": self.__class__.__name__,
        "vendor_name": 'Oracle',
        "driver_version": self.VERSION,
        "storage_protocol": self.protocol,
    }

    lcfg = self.configuration
    avail, total = self.zfssa.get_pool_stats(lcfg.zfssa_pool)
    if avail is None or total is None:
        # Pool stats unavailable; leave self._stats as None.
        return

    data['total_capacity_gb'] = int(total) / units.Gi
    data['free_capacity_gb'] = int(avail) / units.Gi
    data['reserved_percentage'] = 0
    data['QoS_support'] = False
    self._stats = data
def get_volume_stats(self, refresh=False):
    """Get volume status.

    When *refresh* is true, re-query the backend before returning
    the cached stats.
    """
    if not refresh:
        return self._stats
    self._update_volume_status()
    return self._stats
def _export_volume(self, volume):
    """Export the volume by setting its initiatorgroup property.

    :param volume: volume reference providing 'name'.
    :returns: provider info for the exported LUN
              (via self._get_provider_info).
    """
    # Lazy %-style args instead of eager interpolation.
    LOG.debug('_export_volume: volume name: %s', volume['name'])
    lcfg = self.configuration

    self.zfssa.set_lun_initiatorgroup(lcfg.zfssa_pool,
                                      lcfg.zfssa_project,
                                      volume['name'],
                                      lcfg.zfssa_initiator_group)
    return self._get_provider_info(volume)
def create_export(self, context, volume):
    """Driver entry point to get the export info for a new volume."""
    # Lazy %-style args instead of eager interpolation.
    LOG.debug('create_export: volume name: %s', volume['name'])
    return self._export_volume(volume)
def remove_export(self, context, volume):
    """Driver entry point to remove an export for a volume.

    Passing an empty initiatorgroup masks the LUN (the REST proxy
    maps '' to the appliance's maskAll group).
    """
    # Lazy %-style args instead of eager interpolation.
    LOG.debug('remove_export: volume name: %s', volume['name'])
    lcfg = self.configuration
    self.zfssa.set_lun_initiatorgroup(lcfg.zfssa_pool,
                                      lcfg.zfssa_project,
                                      volume['name'],
                                      '')
def ensure_export(self, context, volume):
    """Driver entry point to get the export info for an existing volume."""
    # Lazy %-style args instead of eager interpolation.
    LOG.debug('ensure_export: volume name: %s', volume['name'])
    return self._export_volume(volume)
def copy_image_to_volume(self, context, volume, image_service, image_id):
    """Copy an image onto the volume.

    Ensures the LUN is exported first, then delegates the actual
    image transfer to the base driver implementation.
    """
    self.ensure_export(context, volume)
    super(ZFSSAISCSIDriver, self).copy_image_to_volume(
        context, volume, image_service, image_id)
def extend_volume(self, volume, new_size):
    """Driver entry point to extend volume size.

    :param volume: volume reference providing 'name'.
    :param new_size: new size in GB.
    """
    # Docstring typo fixed ('extent' -> 'extend'); lazy log args.
    LOG.debug('extend_volume: volume name: %s', volume['name'])
    lcfg = self.configuration
    self.zfssa.set_lun_props(lcfg.zfssa_pool,
                             lcfg.zfssa_project,
                             volume['name'],
                             volsize=new_size * units.Gi)
def create_cloned_volume(self, volume, src_vref):
    """Create a clone of the specified volume.

    A temporary snapshot of the source volume is created and then
    cloned into the new volume.
    """
    zfssa_snapshot = {'volume_name': src_vref['name'],
                      'name': 'tmp-snapshot-%s' % volume['id']}
    self.create_snapshot(zfssa_snapshot)
    try:
        self.create_volume_from_snapshot(volume, zfssa_snapshot)
    except exception.VolumeBackendAPIException:
        # Message spacing fixed; lazy %-style log args.
        LOG.error(_('Clone Volume: %(volume)s failed from source '
                    'volume: %(src_vref)s'),
                  {'volume': volume['name'],
                   'src_vref': src_vref['name']})
        # Cleanup the temporary snapshot on failure.
        self.delete_snapshot(zfssa_snapshot)
        # NOTE(review): the failure is swallowed here, so callers see
        # success even though no clone was created -- confirm whether
        # the exception should be re-raised instead.
def local_path(self, volume):
    """Not implemented for this driver; always returns None."""
    return None
def backup_volume(self, context, backup, backup_service):
    """Not implemented for this driver; always returns None."""
    return None
def restore_backup(self, context, backup, volume, backup_service):
    """Not implemented for this driver; always returns None."""
    return None
def _verify_clone_size(self, snapshot, size):
    """Return True when the parent LUN's size equals *size* (bytes)."""
    lcfg = self.configuration
    parent_lun = self.zfssa.get_lun(lcfg.zfssa_pool,
                                    lcfg.zfssa_project,
                                    snapshot['volume_name'])
    return parent_lun['size'] == size
|
613
cinder/volume/drivers/zfssa/zfssarest.py
Normal file
613
cinder/volume/drivers/zfssa/zfssarest.py
Normal file
@ -0,0 +1,613 @@
|
||||
# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
ZFS Storage Appliance Proxy
|
||||
"""
|
||||
import json
|
||||
|
||||
from cinder import exception
|
||||
from cinder.i18n import _
|
||||
from cinder.openstack.common import log
|
||||
from cinder.volume.drivers.zfssa import restclient
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class ZFSSAApi(object):
    """ZFSSA API proxy class.

    Wraps the appliance REST API calls used by the iSCSI driver:
    pool/project verification, initiator/target management and
    LUN/snapshot lifecycle operations.
    """

    def __init__(self):
        self.host = None       # appliance hostname, set via set_host()
        self.url = None        # base REST URL derived from host
        self.rclient = None    # restclient.RestClientURL instance

    def __del__(self):
        # Best-effort logout when the proxy is garbage collected.
        if self.rclient and self.rclient.islogin():
            self.rclient.logout()

    def _is_pool_owned(self, pdata):
        """Return True if the pool's owner is the same as this host.

        Compares the appliance serial number and node name against
        the pool's 'asn' and 'owner' fields.
        """
        svc = '/api/system/v1/version'
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            exception_msg = (_('Error getting version: '
                               'svc: %(svc)s.'
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'svc': svc,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)

        vdata = json.loads(ret.data)
        return vdata['version']['asn'] == pdata['pool']['asn'] and \
            vdata['version']['nodename'] == pdata['pool']['owner']

    def set_host(self, host):
        """Point the proxy at an appliance and build its REST client."""
        self.host = host
        self.url = "https://" + self.host + ":215"
        self.rclient = restclient.RestClientURL(self.url)

    def login(self, auth_str):
        """Login to the appliance (no-op if already logged in)."""
        if self.rclient and not self.rclient.islogin():
            self.rclient.login(auth_str)

    def get_pool_stats(self, pool):
        """Get space available and total properties of a pool.

        :returns: (avail, total) in bytes as reported by the pool.
        :raises: InvalidVolume when the pool cannot be queried,
                 InvalidInput when the pool is not owned by this host.
        """
        svc = '/api/storage/v1/pools/' + pool
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            exception_msg = (_('Error Getting Pool Stats: '
                               'Pool: %(pool)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'pool': pool,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.InvalidVolume(reason=exception_msg)

        val = json.loads(ret.data)

        if not self._is_pool_owned(val):
            exception_msg = (_('Error Pool ownership: '
                               'Pool %(pool)s is not owned '
                               'by %(host)s.')
                             % {'pool': pool,
                                'host': self.host})
            LOG.error(exception_msg)
            raise exception.InvalidInput(reason=pool)

        avail = val['pool']['usage']['available']
        total = val['pool']['usage']['total']

        return avail, total

    def create_project(self, pool, project, compression=None, logbias=None):
        """Create a project on a pool if it does not already exist.

        Verifies the pool first; a GET miss on the project triggers
        creation with the optional compression/logbias properties.
        """
        self.verify_pool(pool)
        svc = '/api/storage/v1/pools/' + pool + '/projects/' + project
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            svc = '/api/storage/v1/pools/' + pool + '/projects'
            arg = {
                'name': project
            }
            if compression and compression != '':
                arg.update({'compression': compression})
            if logbias and logbias != '':
                arg.update({'logbias': logbias})

            ret = self.rclient.post(svc, arg)
            if ret.status != restclient.Status.CREATED:
                exception_msg = (_('Error Creating Project: '
                                   '%(project)s on '
                                   'Pool: %(pool)s '
                                   'Return code: %(ret.status)d '
                                   'Message: %(ret.data)s .')
                                 % {'project': project,
                                    'pool': pool,
                                    'ret.status': ret.status,
                                    'ret.data': ret.data})
                LOG.error(exception_msg)
                raise exception.VolumeBackendAPIException(data=exception_msg)

    def create_initiator(self, initiator, alias, chapuser=None,
                         chapsecret=None):
        """Create an iSCSI initiator if its alias is not registered."""
        svc = '/api/san/v1/iscsi/initiators/alias=' + alias
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            svc = '/api/san/v1/iscsi/initiators'
            arg = {
                'initiator': initiator,
                'alias': alias
            }
            if chapuser and chapuser != '' and chapsecret and chapsecret != '':
                arg.update({'chapuser': chapuser,
                            'chapsecret': chapsecret})

            ret = self.rclient.post(svc, arg)
            if ret.status != restclient.Status.CREATED:
                # Typo fixed: 'Initator' -> 'Initiator'.
                exception_msg = (_('Error Creating Initiator: '
                                   '%(initiator)s on '
                                   'Alias: %(alias)s '
                                   'Return code: %(ret.status)d '
                                   'Message: %(ret.data)s .')
                                 % {'initiator': initiator,
                                    'alias': alias,
                                    'ret.status': ret.status,
                                    'ret.data': ret.data})
                LOG.error(exception_msg)
                raise exception.VolumeBackendAPIException(data=exception_msg)

    def add_to_initiatorgroup(self, initiator, initiatorgroup):
        """Add an iSCSI initiator to an initiatorgroup.

        Creates the group (with the initiator as sole member) when it
        does not exist; otherwise replaces the group's member list.
        """
        svc = '/api/san/v1/iscsi/initiator-groups/' + initiatorgroup
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            svc = '/api/san/v1/iscsi/initiator-groups'
            arg = {
                'name': initiatorgroup,
                'initiators': [initiator]
            }
            ret = self.rclient.post(svc, arg)
            if ret.status != restclient.Status.CREATED:
                # Typo fixed: 'Initator' -> 'Initiator'.
                exception_msg = (_('Error Adding Initiator: '
                                   '%(initiator)s on group'
                                   'InitiatorGroup: %(initiatorgroup)s '
                                   'Return code: %(ret.status)d '
                                   'Message: %(ret.data)s .')
                                 % {'initiator': initiator,
                                    'initiatorgroup': initiatorgroup,
                                    'ret.status': ret.status,
                                    'ret.data': ret.data})
                LOG.error(exception_msg)
                raise exception.VolumeBackendAPIException(data=exception_msg)
        else:
            svc = '/api/san/v1/iscsi/initiator-groups/' + initiatorgroup
            arg = {
                'initiators': [initiator]
            }
            ret = self.rclient.put(svc, arg)
            if ret.status != restclient.Status.ACCEPTED:
                # Typo fixed: 'Initator' -> 'Initiator'.
                exception_msg = (_('Error Adding Initiator: '
                                   '%(initiator)s on group'
                                   'InitiatorGroup: %(initiatorgroup)s '
                                   'Return code: %(ret.status)d '
                                   'Message: %(ret.data)s .')
                                 % {'initiator': initiator,
                                    'initiatorgroup': initiatorgroup,
                                    'ret.status': ret.status,
                                    'ret.data': ret.data})
                LOG.error(exception_msg)
                raise exception.VolumeBackendAPIException(data=exception_msg)

    def create_target(self, alias, interfaces=None, tchapuser=None,
                      tchapsecret=None):
        """Create an iSCSI target if its alias is not registered.

        :param interfaces: optional list of network interfaces.
        :param tchapuser: target CHAP user (enables CHAP auth
                          together with tchapsecret).
        :param tchapsecret: target CHAP secret.
        :returns: the target's IQN.
        """
        svc = '/api/san/v1/iscsi/targets/alias=' + alias
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            svc = '/api/san/v1/iscsi/targets'
            arg = {
                'alias': alias
            }

            if tchapuser and tchapuser != '' and tchapsecret and \
               tchapsecret != '':
                arg.update({'targetchapuser': tchapuser,
                            'targetchapsecret': tchapsecret,
                            'auth': 'chap'})

            if interfaces is not None and len(interfaces) > 0:
                arg.update({'interfaces': interfaces})

            ret = self.rclient.post(svc, arg)
            if ret.status != restclient.Status.CREATED:
                exception_msg = (_('Error Creating Target: '
                                   '%(alias)s'
                                   'Return code: %(ret.status)d '
                                   'Message: %(ret.data)s .')
                                 % {'alias': alias,
                                    'ret.status': ret.status,
                                    'ret.data': ret.data})
                LOG.error(exception_msg)
                raise exception.VolumeBackendAPIException(data=exception_msg)

        val = json.loads(ret.data)
        return val['target']['iqn']

    def get_target(self, alias):
        """Return the IQN of the iSCSI target registered under *alias*."""
        svc = '/api/san/v1/iscsi/targets/alias=' + alias
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            exception_msg = (_('Error Getting Target: '
                               '%(alias)s'
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s .')
                             % {'alias': alias,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)

        val = json.loads(ret.data)
        return val['target']['iqn']

    def add_to_targetgroup(self, iqn, targetgroup):
        """Add an iSCSI target to a targetgroup.

        Creates the group (with the target as sole member) when it
        does not exist; otherwise replaces the group's target list.
        """
        svc = '/api/san/v1/iscsi/target-groups/' + targetgroup
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            svccrt = '/api/san/v1/iscsi/target-groups'
            arg = {
                'name': targetgroup,
                'targets': [iqn]
            }

            ret = self.rclient.post(svccrt, arg)
            if ret.status != restclient.Status.CREATED:
                exception_msg = (_('Error Creating TargetGroup: '
                                   '%(targetgroup)s with'
                                   'IQN: %(iqn)s'
                                   'Return code: %(ret.status)d '
                                   'Message: %(ret.data)s .')
                                 % {'targetgroup': targetgroup,
                                    'iqn': iqn,
                                    'ret.status': ret.status,
                                    'ret.data': ret.data})
                LOG.error(exception_msg)
                raise exception.VolumeBackendAPIException(data=exception_msg)

            return

        arg = {
            'targets': [iqn]
        }

        ret = self.rclient.put(svc, arg)
        if ret.status != restclient.Status.ACCEPTED:
            exception_msg = (_('Error Adding to TargetGroup: '
                               '%(targetgroup)s with'
                               'IQN: %(iqn)s'
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'targetgroup': targetgroup,
                                'iqn': iqn,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)

    def verify_pool(self, pool):
        """Raise VolumeBackendAPIException unless *pool* exists."""
        svc = '/api/storage/v1/pools/' + pool
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            exception_msg = (_('Error Verifying Pool: '
                               '%(pool)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'pool': pool,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)

    def verify_project(self, pool, project):
        """Raise VolumeBackendAPIException unless *project* exists."""
        svc = '/api/storage/v1/pools/' + pool + '/projects/' + project
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            exception_msg = (_('Error Verifying '
                               'Project: %(project)s on '
                               'Pool: %(pool)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'project': project,
                                'pool': pool,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)

    def verify_initiator(self, iqn):
        """Raise VolumeBackendAPIException unless initiator *iqn* exists."""
        svc = '/api/san/v1/iscsi/initiators/' + iqn
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            # BUG FIX: the mapping key was 'initiator' while the format
            # string referenced %(iqn)s, so building this message raised
            # KeyError instead of the intended exception.
            exception_msg = (_('Error Verifying '
                               'Initiator: %(iqn)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'iqn': iqn,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)

    def verify_target(self, alias):
        """Raise VolumeBackendAPIException unless target *alias* exists."""
        svc = '/api/san/v1/iscsi/targets/alias=' + alias
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            exception_msg = (_('Error Verifying '
                               'Target: %(alias)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'alias': alias,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)

    def create_lun(self, pool, project, lun, volsize, targetgroup,
                   volblocksize='8k', sparse=False, compression=None,
                   logbias=None):
        """Create a LUN.

        required - pool, project, lun, volsize, targetgroup.
        optional - volblocksize, sparse, compression, logbias.
        The LUN is created masked (maskAll initiatorgroup) until it
        is explicitly exported.
        """
        svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
            project + '/luns'
        arg = {
            'name': lun,
            'volsize': volsize,
            'targetgroup': targetgroup,
            'initiatorgroup': 'com.sun.ms.vss.hg.maskAll',
            'volblocksize': volblocksize,
            'sparse': sparse
        }
        if compression and compression != '':
            arg.update({'compression': compression})
        if logbias and logbias != '':
            arg.update({'logbias': logbias})

        ret = self.rclient.post(svc, arg)
        if ret.status != restclient.Status.CREATED:
            exception_msg = (_('Error Creating '
                               'Volume: %(lun)s '
                               'Size: %(size)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'lun': lun,
                                'size': volsize,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)

    def get_lun(self, pool, project, lun):
        """Return selected iSCSI LUN properties as a dict.

        Keys: guid, number, initiatorgroup, size, nodestroy and,
        when the LUN is a clone, origin.
        """
        svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
            project + "/luns/" + lun
        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            exception_msg = (_('Error Getting '
                               'Volume: %(lun)s on '
                               'Pool: %(pool)s '
                               'Project: %(project)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'lun': lun,
                                'pool': pool,
                                'project': project,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)

        val = json.loads(ret.data)
        ret = {
            'guid': val['lun']['lunguid'],
            'number': val['lun']['assignednumber'],
            'initiatorgroup': val['lun']['initiatorgroup'],
            'size': val['lun']['volsize'],
            'nodestroy': val['lun']['nodestroy']
        }
        if 'origin' in val['lun']:
            ret.update({'origin': val['lun']['origin']})

        return ret

    def set_lun_initiatorgroup(self, pool, project, lun, initiatorgroup):
        """Set the initiatorgroup property of a LUN.

        An empty initiatorgroup is mapped to the appliance maskAll
        group, effectively hiding the LUN from all initiators.
        """
        if initiatorgroup == '':
            initiatorgroup = 'com.sun.ms.vss.hg.maskAll'

        svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
            project + '/luns/' + lun
        arg = {
            'initiatorgroup': initiatorgroup
        }

        ret = self.rclient.put(svc, arg)
        if ret.status != restclient.Status.ACCEPTED:
            exception_msg = (_('Error Setting '
                               'Volume: %(lun)s to '
                               'InitiatorGroup: %(initiatorgroup)s '
                               'Pool: %(pool)s '
                               'Project: %(project)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'lun': lun,
                                'initiatorgroup': initiatorgroup,
                                'pool': pool,
                                'project': project,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            # NOTE(review): failure is logged but not raised -- the
            # export path continues; confirm this is intentional.
            LOG.error(exception_msg)

    def delete_lun(self, pool, project, lun):
        """Delete an iSCSI LUN."""
        svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
            project + '/luns/' + lun

        ret = self.rclient.delete(svc)
        if ret.status != restclient.Status.NO_CONTENT:
            exception_msg = (_('Error Deleting '
                               'Volume: %(lun)s to '
                               'Pool: %(pool)s '
                               'Project: %(project)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'lun': lun,
                                'pool': pool,
                                'project': project,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            # NOTE(review): delete failures are logged but not raised
            # (best-effort delete); confirm this is intentional.
            LOG.error(exception_msg)

    def create_snapshot(self, pool, project, lun, snapshot):
        """Create a snapshot of a LUN."""
        svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
            project + '/luns/' + lun + '/snapshots'
        arg = {
            'name': snapshot
        }

        ret = self.rclient.post(svc, arg)
        if ret.status != restclient.Status.CREATED:
            exception_msg = (_('Error Creating '
                               'Snapshot: %(snapshot)s on'
                               'Volume: %(lun)s to '
                               'Pool: %(pool)s '
                               'Project: %(project)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'snapshot': snapshot,
                                'lun': lun,
                                'pool': pool,
                                'project': project,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)

    def delete_snapshot(self, pool, project, lun, snapshot):
        """Delete a snapshot of a LUN."""
        svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
            project + '/luns/' + lun + '/snapshots/' + snapshot

        ret = self.rclient.delete(svc)
        if ret.status != restclient.Status.NO_CONTENT:
            exception_msg = (_('Error Deleting '
                               'Snapshot: %(snapshot)s on '
                               'Volume: %(lun)s to '
                               'Pool: %(pool)s '
                               'Project: %(project)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'snapshot': snapshot,
                                'lun': lun,
                                'pool': pool,
                                'project': project,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)

    def clone_snapshot(self, pool, project, lun, snapshot, clone):
        """Clone a snapshot into a new LUN named *clone*."""
        svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
            project + '/luns/' + lun + '/snapshots/' + snapshot + '/clone'
        arg = {
            'project': project,
            'share': clone,
            'nodestroy': True
        }

        ret = self.rclient.put(svc, arg)
        if ret.status != restclient.Status.CREATED:
            exception_msg = (_('Error Cloning '
                               'Snapshot: %(snapshot)s on '
                               'Volume: %(lun)s of '
                               'Pool: %(pool)s '
                               'Project: %(project)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'snapshot': snapshot,
                                'lun': lun,
                                'pool': pool,
                                'project': project,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)

    def set_lun_props(self, pool, project, lun, **kargs):
        """Set arbitrary LUN properties passed as keyword arguments."""
        svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
            project + '/luns/' + lun
        # NOTE: **kargs is always a dict, so this guard only protects
        # against an explicit None; an empty call still issues a PUT.
        if kargs is None:
            return

        ret = self.rclient.put(svc, kargs)
        if ret.status != restclient.Status.ACCEPTED:
            exception_msg = (_('Error Setting props '
                               'Props: %(props)s on '
                               'Volume: %(lun)s of '
                               'Pool: %(pool)s '
                               'Project: %(project)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'props': kargs,
                                'lun': lun,
                                'pool': pool,
                                'project': project,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)

    def has_clones(self, pool, project, lun, snapshot):
        """Return True when the snapshot has one or more clones."""
        svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
            project + '/luns/' + lun + '/snapshots/' + snapshot

        ret = self.rclient.get(svc)
        if ret.status != restclient.Status.OK:
            exception_msg = (_('Error Getting '
                               'Snapshot: %(snapshot)s on '
                               'Volume: %(lun)s to '
                               'Pool: %(pool)s '
                               'Project: %(project)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'snapshot': snapshot,
                                'lun': lun,
                                'pool': pool,
                                'project': project,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)

        val = json.loads(ret.data)
        return val['snapshot']['numclones'] != 0
@ -1998,6 +1998,61 @@
|
||||
#zadara_vpsa_allow_nonexistent_delete=true
|
||||
|
||||
|
||||
#
|
||||
# Options defined in cinder.volume.drivers.zfssa.zfssaiscsi
|
||||
#
|
||||
|
||||
# Storage pool name. (string value)
|
||||
#zfssa_pool=<None>
|
||||
|
||||
# Project name. (string value)
|
||||
#zfssa_project=<None>
|
||||
|
||||
# Block size: 512, 1k, 2k, 4k, 8k, 16k, 32k, 64k, 128k.
|
||||
# (string value)
|
||||
#zfssa_lun_volblocksize=8k
|
||||
|
||||
# Flag to enable sparse (thin-provisioned): True, False.
|
||||
# (boolean value)
|
||||
#zfssa_lun_sparse=false
|
||||
|
||||
# Data compression: off, lzjb, gzip-2, gzip, gzip-9. (string
|
||||
# value)
|
||||
#zfssa_lun_compression=
|
||||
|
||||
# Synchronous write bias: latency, throughput. (string value)
|
||||
#zfssa_lun_logbias=
|
||||
|
||||
# iSCSI initiator group. (string value)
|
||||
#zfssa_initiator_group=
|
||||
|
||||
# iSCSI initiator IQNs. (comma separated) (string value)
|
||||
#zfssa_initiator=
|
||||
|
||||
# iSCSI initiator CHAP user. (string value)
|
||||
#zfssa_initiator_user=
|
||||
|
||||
# iSCSI initiator CHAP password. (string value)
|
||||
#zfssa_initiator_password=
|
||||
|
||||
# iSCSI target group name. (string value)
|
||||
#zfssa_target_group=tgt-grp
|
||||
|
||||
# iSCSI target CHAP user. (string value)
|
||||
#zfssa_target_user=
|
||||
|
||||
# iSCSI target CHAP password. (string value)
|
||||
#zfssa_target_password=
|
||||
|
||||
# iSCSI target portal (Data-IP:Port, w.x.y.z:3260). (string
|
||||
# value)
|
||||
#zfssa_target_portal=<None>
|
||||
|
||||
# Network interfaces of iSCSI targets. (comma separated)
|
||||
# (string value)
|
||||
#zfssa_target_interfaces=<None>
|
||||
|
||||
|
||||
#
|
||||
# Options defined in cinder.volume.manager
|
||||
#
|
||||
|
Loading…
x
Reference in New Issue
Block a user