From 747efde4709d7aa00f5d13d98d0036639e612945 Mon Sep 17 00:00:00 2001
From: Michael Latchmansingh <mlatchmansingh@datacore.com>
Date: Mon, 21 Nov 2016 16:07:46 +0300
Subject: [PATCH] Adds DataCore Volume Drivers
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The iSCSI and Fibre Channel volume drivers for DataCore’s
SANsymphony and Hyper-converged Virtual SAN storage
support the core/minimum feature set:
 - Volume Create/Delete
 - Volume Attach/Detach
 - Snapshot Create/Delete
 - Create Volume from Snapshot
 - Get Volume Stats
 - Copy Image to Volume
 - Copy Volume to Image
 - Clone Volume
 - Extend Volume

DocImpact
Implements: blueprint datacore-volume-driver

Change-Id: I3308bdedbfe3bb83d695d38667eaea6327fa4461
---
 cinder/opts.py                                |    6 +
 .../unit/volume/drivers/datacore/__init__.py  |    0
 .../drivers/datacore/test_datacore_api.py     |  728 +++++++++++
 .../drivers/datacore/test_datacore_driver.py  |  678 +++++++++++
 .../drivers/datacore/test_datacore_fc.py      |  256 ++++
 .../drivers/datacore/test_datacore_iscsi.py   |  515 ++++++++
 .../drivers/datacore/test_datacore_passwd.py  |  283 +++++
 .../drivers/datacore/test_datacore_utils.py   |   78 ++
 cinder/volume/drivers/datacore/__init__.py    |    0
 cinder/volume/drivers/datacore/api.py         | 1062 +++++++++++++++++
 cinder/volume/drivers/datacore/driver.py      |  742 ++++++++++++
 cinder/volume/drivers/datacore/exception.py   |   36 +
 cinder/volume/drivers/datacore/fc.py          |  183 +++
 cinder/volume/drivers/datacore/iscsi.py       |  440 +++++++
 cinder/volume/drivers/datacore/passwd.py      |  166 +++
 cinder/volume/drivers/datacore/utils.py       |   73 ++
 driver-requirements.txt                       |    2 +
 ...tacore-volume-driver-3775797b0515f538.yaml |    4 +
 18 files changed, 5252 insertions(+)
 create mode 100644 cinder/tests/unit/volume/drivers/datacore/__init__.py
 create mode 100644 cinder/tests/unit/volume/drivers/datacore/test_datacore_api.py
 create mode 100644 cinder/tests/unit/volume/drivers/datacore/test_datacore_driver.py
 create mode 100644 cinder/tests/unit/volume/drivers/datacore/test_datacore_fc.py
 create mode 100644 cinder/tests/unit/volume/drivers/datacore/test_datacore_iscsi.py
 create mode 100644 cinder/tests/unit/volume/drivers/datacore/test_datacore_passwd.py
 create mode 100644 cinder/tests/unit/volume/drivers/datacore/test_datacore_utils.py
 create mode 100644 cinder/volume/drivers/datacore/__init__.py
 create mode 100644 cinder/volume/drivers/datacore/api.py
 create mode 100644 cinder/volume/drivers/datacore/driver.py
 create mode 100644 cinder/volume/drivers/datacore/exception.py
 create mode 100644 cinder/volume/drivers/datacore/fc.py
 create mode 100644 cinder/volume/drivers/datacore/iscsi.py
 create mode 100644 cinder/volume/drivers/datacore/passwd.py
 create mode 100644 cinder/volume/drivers/datacore/utils.py
 create mode 100644 releasenotes/notes/add-datacore-volume-driver-3775797b0515f538.yaml

diff --git a/cinder/opts.py b/cinder/opts.py
index 20990687b09..51f349323e9 100644
--- a/cinder/opts.py
+++ b/cinder/opts.py
@@ -74,6 +74,10 @@ from cinder.volume.drivers.coprhd import common as \
     cinder_volume_drivers_coprhd_common
 from cinder.volume.drivers.coprhd import scaleio as \
     cinder_volume_drivers_coprhd_scaleio
+from cinder.volume.drivers.datacore import driver as \
+    cinder_volume_drivers_datacore_driver
+from cinder.volume.drivers.datacore import iscsi as \
+    cinder_volume_drivers_datacore_iscsi
 from cinder.volume.drivers.datera import datera_iscsi as \
     cinder_volume_drivers_datera_dateraiscsi
 from cinder.volume.drivers.dell_emc import ps as \
@@ -245,6 +249,8 @@ def list_opts():
                 [cinder_volume_api.az_cache_time_opt],
                 cinder_volume_driver.volume_opts,
                 cinder_volume_driver.iser_opts,
+                cinder_volume_drivers_datacore_driver.datacore_opts,
+                cinder_volume_drivers_datacore_iscsi.datacore_iscsi_opts,
                 cinder_volume_drivers_inspur_instorage_instoragecommon.
                 instorage_mcs_opts,
                 cinder_volume_drivers_inspur_instorage_instorageiscsi.
diff --git a/cinder/tests/unit/volume/drivers/datacore/__init__.py b/cinder/tests/unit/volume/drivers/datacore/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/cinder/tests/unit/volume/drivers/datacore/test_datacore_api.py b/cinder/tests/unit/volume/drivers/datacore/test_datacore_api.py
new file mode 100644
index 00000000000..52db51ec973
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/datacore/test_datacore_api.py
@@ -0,0 +1,728 @@
+# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Unit tests for classes that are used to invoke DataCore SANsymphony API."""
+
+import mock
+from oslo_utils import units
+import six
+import suds
+from suds.sax import parser
+from suds import wsdl
+
+from cinder import test
+from cinder.volume.drivers.datacore import api
+from cinder.volume.drivers.datacore import exception
+
+
+class FakeWebSocketException(Exception):
+    pass
+
+
+class DataCoreClientTestCase(test.TestCase):
+    """Tests for the DataCore SANsymphony client."""
+
+    def setUp(self):
+        super(DataCoreClientTestCase, self).setUp()
+        self.mock_storage_services = mock.MagicMock()
+        self.mock_executive_service = mock.MagicMock()
+
+        self.mock_suds_client = mock.MagicMock()
+        self.mock_object(
+            api.suds_client, 'Client', return_value=self.mock_suds_client)
+
+        self.mock_channel = mock.MagicMock()
+        mock_websocket = self.mock_object(api, 'websocket')
+        mock_websocket.WebSocketException = FakeWebSocketException
+        mock_websocket.create_connection.return_value = self.mock_channel
+
+        setattr(self.mock_suds_client.service.__getitem__,
+                'side_effect',
+                self._get_service_side_effect)
+
+        self.client = api.DataCoreClient('hostname', 'username', 'password', 1)
+
+    def _get_service_side_effect(self, service_name):
+        self.assertIn(service_name,
+                      [
+                          api.DataCoreClient.STORAGE_SERVICES_BINDING,
+                          api.DataCoreClient.EXECUTIVE_SERVICE_BINDING
+                      ])
+
+        if service_name is api.DataCoreClient.STORAGE_SERVICES_BINDING:
+            return self.mock_storage_services
+        else:
+            return self.mock_executive_service
+
+    def _assert_storage_services_method_called(self, method_name):
+        return self.mock_storage_services.__getitem__.assert_called_with(
+            method_name)
+
+    @property
+    def mock_storage_service_context(self):
+        return self.mock_storage_services.__getitem__()()
+
+    @property
+    def mock_executive_service_context(self):
+        return self.mock_executive_service.__getitem__()()
+
+    def test_process_request_failed(self):
+        def fail_with_socket_error():
+            raise FakeWebSocketException()
+
+        def fail_with_web_fault(message):
+            fault = mock.Mock()
+            fault.faultstring = "General error."
+            document = mock.Mock()
+            raise suds.WebFault(fault, document)
+
+        self.mock_channel.recv.side_effect = fail_with_socket_error
+        self.assertRaises(exception.DataCoreConnectionException,
+                          self.client.get_server_groups)
+        self.mock_channel.recv.side_effect = None
+
+        (self.mock_storage_service_context.process_reply
+         .side_effect) = fail_with_web_fault
+        self.assertRaises(exception.DataCoreFaultException,
+                          self.client.get_server_groups)
+
+    def test_channel_closing_failed(self):
+        def fail_with_socket_error():
+            raise FakeWebSocketException()
+
+        def fail_with_web_fault(message):
+            fault = mock.Mock()
+            fault.faultstring = "General error."
+            document = mock.Mock()
+            raise suds.WebFault(fault, document)
+
+        self.mock_channel.close.side_effect = fail_with_socket_error
+        (self.mock_storage_service_context.process_reply
+         .side_effect) = fail_with_web_fault
+        self.assertRaises(exception.DataCoreFaultException,
+                          self.client.get_server_groups)
+
+    def test_update_api_endpoints(self):
+        def fail_with_socket_error():
+            try:
+                raise FakeWebSocketException()
+            finally:
+                self.mock_channel.recv.side_effect = None
+
+        self.mock_channel.recv.side_effect = fail_with_socket_error
+
+        mock_executive_endpoints = [{
+            'network_address': '127.0.0.1:3794',
+            'http_endpoint': 'http://127.0.0.1:3794/',
+            'ws_endpoint': 'ws://127.0.0.1:3794/',
+        }]
+        self.mock_object(self.client,
+                         '_executive_service_endpoints',
+                         mock_executive_endpoints)
+
+        mock_storage_endpoint = {
+            'network_address': '127.0.0.1:3794',
+            'http_endpoint': 'http://127.0.0.1:3794/',
+            'ws_endpoint': 'ws://127.0.0.1:3794/',
+        }
+        self.mock_object(self.client,
+                         '_storage_services_endpoint',
+                         mock_storage_endpoint)
+
+        node = mock.Mock()
+        node.HostAddress = '127.0.0.1:3794'
+        reply = mock.MagicMock()
+        reply.RegionNodeData = [node]
+        self.mock_storage_service_context.process_reply.return_value = reply
+
+        result = self.client.get_server_groups()
+        self.assertIsNotNone(result)
+
+    def test_update_api_endpoints_failed(self):
+        def fail_with_socket_error():
+            try:
+                raise FakeWebSocketException()
+            finally:
+                self.mock_channel.recv.side_effect = None
+
+        self.mock_channel.recv.side_effect = fail_with_socket_error
+
+        mock_executive_endpoints = [{
+            'network_address': '127.0.0.1:3794',
+            'http_endpoint': 'http://127.0.0.1:3794/',
+            'ws_endpoint': 'ws://127.0.0.1:3794/',
+        }]
+        self.mock_object(self.client,
+                         '_executive_service_endpoints',
+                         mock_executive_endpoints)
+
+        reply = mock.MagicMock()
+        reply.RegionNodeData = []
+        self.mock_storage_service_context.process_reply.return_value = reply
+
+        self.mock_executive_service_context.process_reply.return_value = None
+
+        result = self.client.get_server_groups()
+        self.assertIsNotNone(result)
+
+    def test_get_server_groups(self):
+        self.client.get_server_groups()
+        self._assert_storage_services_method_called('GetServerGroups')
+
+    def test_get_servers(self):
+        self.client.get_servers()
+        self._assert_storage_services_method_called('GetServers')
+
+    def test_get_disk_pools(self):
+        self.client.get_disk_pools()
+        self._assert_storage_services_method_called('GetDiskPools')
+
+    def test_get_logical_disks(self):
+        self.client.get_logical_disks()
+        self._assert_storage_services_method_called('GetLogicalDisks')
+
+    def test_create_pool_logical_disk(self):
+        pool_id = 'pool_id'
+        pool_volume_type = 'Striped'
+        size = 1 * units.Gi
+        min_quota = 1
+        max_quota = 1 * units.Gi
+        self.client.create_pool_logical_disk(
+            pool_id, pool_volume_type, size, min_quota, max_quota)
+        self._assert_storage_services_method_called('CreatePoolLogicalDisk')
+
+    def test_delete_logical_disk(self):
+        logical_disk_id = 'disk_id'
+        self.client.delete_logical_disk(logical_disk_id)
+        self._assert_storage_services_method_called('DeleteLogicalDisk')
+
+    def test_get_logical_disk_chunk_allocation_map(self):
+        logical_disk_id = 'disk_id'
+        self.client.get_logical_disk_chunk_allocation_map(logical_disk_id)
+        self._assert_storage_services_method_called(
+            'GetLogicalDiskChunkAllocationMap')
+
+    def test_get_next_virtual_disk_alias(self):
+        base_alias = 'volume'
+        self.client.get_next_virtual_disk_alias(base_alias)
+        self._assert_storage_services_method_called('GetNextVirtualDiskAlias')
+
+    def test_get_virtual_disks(self):
+        self.client.get_virtual_disks()
+        self._assert_storage_services_method_called('GetVirtualDisks')
+
+    def test_build_virtual_disk_data(self):
+        disk_alias = 'alias'
+        disk_type = 'Mirrored'
+        size = 1 * units.Gi
+        description = 'description'
+        storage_profile_id = 'storage_profile_id'
+
+        vd_data = self.client.build_virtual_disk_data(
+            disk_alias, disk_type, size, description, storage_profile_id)
+
+        self.assertEqual(disk_alias, vd_data.Alias)
+        self.assertEqual(size, vd_data.Size.Value)
+        self.assertEqual(description, vd_data.Description)
+        self.assertEqual(storage_profile_id, vd_data.StorageProfileId)
+        self.assertTrue(hasattr(vd_data, 'Type'))
+        self.assertTrue(hasattr(vd_data, 'SubType'))
+        self.assertTrue(hasattr(vd_data, 'DiskStatus'))
+        self.assertTrue(hasattr(vd_data, 'RecoveryPriority'))
+
+    def test_create_virtual_disk_ex2(self):
+        disk_alias = 'alias'
+        disk_type = 'Mirrored'
+        size = 1 * units.Gi
+        description = 'description'
+        storage_profile_id = 'storage_profile_id'
+        first_disk_id = 'disk_id'
+        second_disk_id = 'disk_id'
+        add_redundancy = True
+        vd_data = self.client.build_virtual_disk_data(
+            disk_alias, disk_type, size, description, storage_profile_id)
+        self.client.create_virtual_disk_ex2(
+            vd_data, first_disk_id, second_disk_id, add_redundancy)
+        self._assert_storage_services_method_called('CreateVirtualDiskEx2')
+
+    def test_set_virtual_disk_size(self):
+        disk_id = 'disk_id'
+        size = 1 * units.Gi
+        self.client.set_virtual_disk_size(disk_id, size)
+        self._assert_storage_services_method_called('SetVirtualDiskSize')
+
+    def test_delete_virtual_disk(self):
+        virtual_disk_id = 'disk_id'
+        delete_logical_disks = True
+        self.client.delete_virtual_disk(virtual_disk_id, delete_logical_disks)
+        self._assert_storage_services_method_called('DeleteVirtualDisk')
+
+    def test_serve_virtual_disks_to_host(self):
+        host_id = 'host_id'
+        disks = ['disk_id']
+        self.client.serve_virtual_disks_to_host(host_id, disks)
+        self._assert_storage_services_method_called('ServeVirtualDisksToHost')
+
+    def test_unserve_virtual_disks_from_host(self):
+        host_id = 'host_id'
+        disks = ['disk_id']
+        self.client.unserve_virtual_disks_from_host(host_id, disks)
+        self._assert_storage_services_method_called(
+            'UnserveVirtualDisksFromHost')
+
+    def test_unserve_virtual_disks_from_port(self):
+        port_id = 'port_id'
+        disks = ['disk_id']
+        self.client.unserve_virtual_disks_from_port(port_id, disks)
+        self._assert_storage_services_method_called(
+            'UnserveVirtualDisksFromPort')
+
+    def test_bind_logical_disk(self):
+        disk_id = 'disk_id'
+        logical_disk_id = 'disk_id'
+        role = 'Second'
+        create_mirror_mappings = True
+        create_client_mappings = False
+        add_redundancy = True
+        self.client.bind_logical_disk(
+            disk_id, logical_disk_id, role, create_mirror_mappings,
+            create_client_mappings, add_redundancy)
+        self._assert_storage_services_method_called(
+            'BindLogicalDisk')
+
+    def test_get_snapshots(self):
+        self.client.get_snapshots()
+        self._assert_storage_services_method_called('GetSnapshots')
+
+    def test_create_snapshot(self):
+        disk_id = 'disk_id'
+        name = 'name'
+        description = 'description'
+        pool_id = 'pool_id'
+        snapshot_type = 'Full'
+        duplicate_disk_id = False
+        storage_profile_id = 'profile_id'
+        self.client.create_snapshot(
+            disk_id, name, description, pool_id, snapshot_type,
+            duplicate_disk_id, storage_profile_id)
+        self._assert_storage_services_method_called('CreateSnapshot')
+
+    def test_delete_snapshot(self):
+        snapshot_id = "snapshot_id"
+        self.client.delete_snapshot(snapshot_id)
+        self._assert_storage_services_method_called('DeleteSnapshot')
+
+    def test_get_storage_profiles(self):
+        self.client.get_storage_profiles()
+        self._assert_storage_services_method_called('GetStorageProfiles')
+
+    def test_designate_map_store(self):
+        pool_id = 'pool_id'
+        self.client.designate_map_store(pool_id)
+        self._assert_storage_services_method_called('DesignateMapStore')
+
+    def test_get_performance_by_type(self):
+        types = ['DiskPoolPerformance']
+        self.client.get_performance_by_type(types)
+        self._assert_storage_services_method_called('GetPerformanceByType')
+
+    def test_get_ports(self):
+        self.client.get_ports()
+        self._assert_storage_services_method_called('GetPorts')
+
+    def test_build_scsi_port_data(self):
+        host_id = 'host_id'
+        port_name = 'port_name'
+        port_mode = 'Initiator'
+        port_type = 'iSCSI'
+
+        port_data = self.client.build_scsi_port_data(
+            host_id, port_name, port_mode, port_type)
+
+        self.assertEqual(host_id, port_data.HostId)
+        self.assertEqual(port_name, port_data.PortName)
+        self.assertTrue(hasattr(port_data, 'PortMode'))
+        self.assertTrue(hasattr(port_data, 'PortType'))
+
+    def test_register_port(self):
+        port_data = self.client.build_scsi_port_data(
+            'host_id', 'port_name', 'initiator', 'iSCSI')
+        self.client.register_port(port_data)
+        self._assert_storage_services_method_called('RegisterPort')
+
+    def test_assign_port(self):
+        client_id = 'client_id'
+        port_id = 'port_id'
+        self.client.assign_port(client_id, port_id)
+        self._assert_storage_services_method_called('AssignPort')
+
+    def test_set_server_port_properties(self):
+        port_id = 'port_id'
+        port_properties = mock.MagicMock()
+        self.client.set_server_port_properties(port_id, port_properties)
+        self._assert_storage_services_method_called('SetServerPortProperties')
+
+    def test_build_access_token(self):
+        initiator_node_name = 'initiator'
+        initiator_username = 'initiator_username'
+        initiator_password = 'initiator_password'
+        mutual_authentication = True
+        target_username = 'target_username'
+        target_password = 'target_password'
+
+        access_token = self.client.build_access_token(
+            initiator_node_name, initiator_username, initiator_password,
+            mutual_authentication, target_username, target_password)
+
+        self.assertEqual(initiator_node_name, access_token.InitiatorNodeName)
+        self.assertEqual(initiator_username, access_token.InitiatorUsername)
+        self.assertEqual(initiator_password, access_token.InitiatorPassword)
+        self.assertEqual(mutual_authentication,
+                         access_token.MutualAuthentication)
+        self.assertEqual(target_username, access_token.TargetUsername)
+        self.assertEqual(target_password, access_token.TargetPassword)
+
+    def test_set_access_token(self):
+        port_id = 'port_id'
+        access_token = self.client.build_access_token(
+            'initiator_name', None, None, False, 'initiator_name', 'password')
+        self.client.set_access_token(port_id, access_token)
+        self._assert_storage_services_method_called('SetAccessToken')
+
+    def test_get_clients(self):
+        self.client.get_clients()
+        self._assert_storage_services_method_called('GetClients')
+
+    def test_register_client(self):
+        host_name = 'name'
+        description = 'description'
+        machine_type = 'Other'
+        mode = 'PreferredServer'
+        preferred_server_ids = None
+        self.client.register_client(
+            host_name, description, machine_type, mode, preferred_server_ids)
+        self._assert_storage_services_method_called('RegisterClient')
+
+    def test_set_client_capabilities(self):
+        client_id = 'client_id'
+        mpio = True
+        alua = True
+        self.client.set_client_capabilities(client_id, mpio, alua)
+        self._assert_storage_services_method_called('SetClientCapabilities')
+
+    def test_get_target_domains(self):
+        self.client.get_target_domains()
+        self._assert_storage_services_method_called('GetTargetDomains')
+
+    def test_create_target_domain(self):
+        initiator_host_id = 'host_id'
+        target_host_id = 'host_id'
+        self.client.create_target_domain(initiator_host_id, target_host_id)
+        self._assert_storage_services_method_called('CreateTargetDomain')
+
+    def test_delete_target_domain(self):
+        domain_id = 'domain_id'
+        self.client.delete_target_domain(domain_id)
+        self._assert_storage_services_method_called('DeleteTargetDomain')
+
+    def test_get_target_devices(self):
+        self.client.get_target_devices()
+        self._assert_storage_services_method_called('GetTargetDevices')
+
+    def test_build_scsi_port_nexus_data(self):
+        initiator_id = 'initiator_id'
+        target_id = 'target_id'
+
+        nexus = self.client.build_scsi_port_nexus_data(initiator_id, target_id)
+
+        self.assertEqual(initiator_id, nexus.InitiatorPortId)
+        self.assertEqual(target_id, nexus.TargetPortId)
+
+    def test_create_target_device(self):
+        domain_id = 'domain_id'
+        nexus = self.client.build_scsi_port_nexus_data('initiator_id',
+                                                       'target_id')
+        self.client.create_target_device(domain_id, nexus)
+        self._assert_storage_services_method_called('CreateTargetDevice')
+
+    def test_delete_target_device(self):
+        device_id = 'device_id'
+        self.client.delete_target_device(device_id)
+        self._assert_storage_services_method_called('DeleteTargetDevice')
+
+    def test_get_next_free_lun(self):
+        device_id = 'device_id'
+        self.client.get_next_free_lun(device_id)
+        self._assert_storage_services_method_called('GetNextFreeLun')
+
+    def test_get_logical_units(self):
+        self.client.get_logical_units()
+        self._assert_storage_services_method_called('GetLogicalUnits')
+
+    def test_map_logical_disk(self):
+        disk_id = 'disk_id'
+        lun = 0
+        host_id = 'host_id'
+        mapping_type = 'Client'
+        initiator_id = 'initiator_id'
+        target_id = 'target_id'
+        nexus = self.client.build_scsi_port_nexus_data(initiator_id, target_id)
+        self.client.map_logical_disk(
+            disk_id, nexus, lun, host_id, mapping_type)
+        self._assert_storage_services_method_called('MapLogicalDisk')
+
+    def test_unmap_logical_disk(self):
+        logical_disk_id = 'disk_id'
+        nexus = self.client.build_scsi_port_nexus_data('initiator_id',
+                                                       'target_id')
+        self.client.unmap_logical_disk(logical_disk_id, nexus)
+        self._assert_storage_services_method_called('UnmapLogicalDisk')
+
+
+FAKE_WSDL_DOCUMENT = """<?xml version="1.0" encoding="utf-8"?>
+<wsdl:definitions name="ExecutiveServices"
+                  targetNamespace="http://tempuri.org/"
+                  xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/"
+                  xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"
+                  xmlns:tns="http://tempuri.org/"
+                  xmlns:wsa10="http://www.w3.org/2005/08/addressing"
+                  xmlns:wsaw="http://www.w3.org/2006/05/addressing/wsdl">
+    <wsdl:types>
+        <xs:schema elementFormDefault="qualified"
+                   targetNamespace="http://tempuri.org/"
+                   xmlns:xs="http://www.w3.org/2001/XMLSchema">
+            <xs:import
+namespace="http://schemas.microsoft.com/2003/10/Serialization/Arrays"/>
+            <xs:import
+namespace="http://schemas.datacontract.org/2004/07/DataCore.Executive"/>
+            <xs:element name="StartExecutive">
+                <xs:complexType>
+                    <xs:sequence/>
+                </xs:complexType>
+            </xs:element>
+            <xs:element name="StartExecutiveResponse">
+                <xs:complexType>
+                    <xs:sequence/>
+                </xs:complexType>
+            </xs:element>
+            <xs:element name="StopExecutive">
+                <xs:complexType>
+                    <xs:sequence/>
+                </xs:complexType>
+            </xs:element>
+            <xs:element name="StopExecutiveResponse">
+                <xs:complexType>
+                    <xs:sequence/>
+                </xs:complexType>
+            </xs:element>
+            <xs:element name="ExecutiveStarted">
+                <xs:complexType>
+                    <xs:sequence/>
+                </xs:complexType>
+            </xs:element>
+            <xs:element name="ExecutiveStopped">
+                <xs:complexType>
+                    <xs:sequence/>
+                </xs:complexType>
+            </xs:element>
+        </xs:schema>
+    </wsdl:types>
+    <wsdl:message name="IExecutiveServiceEx_StartExecutive_InputMessage">
+        <wsdl:part name="parameters" element="tns:StartExecutive"/>
+    </wsdl:message>
+    <wsdl:message name="IExecutiveServiceEx_StartExecutive_OutputMessage">
+        <wsdl:part name="parameters" element="tns:StartExecutiveResponse"/>
+    </wsdl:message>
+    <wsdl:message
+name="IExecutiveServiceEx_StartExecutive_ExecutiveError_FaultMessage">
+        <wsdl:part name="detail" element="ExecutiveError"/>
+    </wsdl:message>
+    <wsdl:message name="IExecutiveServiceEx_StopExecutive_InputMessage">
+        <wsdl:part name="parameters" element="tns:StopExecutive"/>
+    </wsdl:message>
+    <wsdl:message name="IExecutiveServiceEx_StopExecutive_OutputMessage">
+        <wsdl:part name="parameters" element="tns:StopExecutiveResponse"/>
+    </wsdl:message>
+    <wsdl:message
+name="IExecutiveServiceEx_StopExecutive_ExecutiveError_FaultMessage">
+        <wsdl:part name="detail" element="ExecutiveError"/>
+    </wsdl:message>
+    <wsdl:message
+            name="IExecutiveServiceEx_ExecutiveStarted_OutputCallbackMessage">
+        <wsdl:part name="parameters" element="tns:ExecutiveStarted"/>
+    </wsdl:message>
+    <wsdl:message
+name="IExecutiveServiceEx_ExecutiveStopped_OutputCallbackMessage">
+        <wsdl:part name="parameters" element="tns:ExecutiveStopped"/>
+    </wsdl:message>
+    <wsdl:portType name="IExecutiveServiceEx">
+        <wsdl:operation name="StartExecutive">
+            <wsdl:input
+wsaw:Action="http://tempuri.org/IExecutiveService/StartExecutive"
+message="tns:IExecutiveServiceEx_StartExecutive_InputMessage"/>
+            <wsdl:output
+wsaw:Action="http://tempuri.org/IExecutiveService/StartExecutiveResponse"
+message="tns:IExecutiveServiceEx_StartExecutive_OutputMessage"/>
+            <wsdl:fault wsaw:Action="ExecutiveError" name="ExecutiveError"
+message="tns:IExecutiveServiceEx_StartExecutive_ExecutiveError_FaultMessage"/>
+        </wsdl:operation>
+        <wsdl:operation name="StopExecutive">
+            <wsdl:input
+wsaw:Action="http://tempuri.org/IExecutiveService/StopExecutive"
+message="tns:IExecutiveServiceEx_StopExecutive_InputMessage"/>
+            <wsdl:output
+wsaw:Action="http://tempuri.org/IExecutiveService/StopExecutiveResponse"
+message="tns:IExecutiveServiceEx_StopExecutive_OutputMessage"/>
+            <wsdl:fault wsaw:Action="ExecutiveError" name="ExecutiveError"
+message="tns:IExecutiveServiceEx_StopExecutive_ExecutiveError_FaultMessage"/>
+        </wsdl:operation>
+        <wsdl:operation name="ExecutiveStarted">
+            <wsdl:output
+wsaw:Action="http://tempuri.org/IExecutiveService/ExecutiveStarted"
+message="tns:IExecutiveServiceEx_ExecutiveStarted_OutputCallbackMessage"/>
+            <wsdl:fault wsaw:Action="ExecutiveError" name="ExecutiveError"
+                        message="tns:"/>
+        </wsdl:operation>
+        <wsdl:operation name="ExecutiveStopped">
+            <wsdl:output
+wsaw:Action="http://tempuri.org/IExecutiveService/ExecutiveStopped"
+message="tns:IExecutiveServiceEx_ExecutiveStopped_OutputCallbackMessage"/>
+            <wsdl:fault wsaw:Action="ExecutiveError" name="ExecutiveError"
+                        message="tns:"/>
+        </wsdl:operation>
+    </wsdl:portType>
+    <wsdl:binding name="CustomBinding_IExecutiveServiceEx"
+                  type="tns:IExecutiveServiceEx">
+        <soap:binding transport="http://schemas.microsoft.com/soap/websocket"/>
+        <wsdl:operation name="StartExecutive">
+            <soap:operation
+soapAction="http://tempuri.org/IExecutiveService/StartExecutive"
+                    style="document"/>
+            <wsdl:input>
+                <soap:body use="literal"/>
+            </wsdl:input>
+            <wsdl:output>
+                <soap:body use="literal"/>
+            </wsdl:output>
+            <wsdl:fault name="ExecutiveError">
+                <soap:fault use="literal" name="ExecutiveError" namespace=""/>
+            </wsdl:fault>
+        </wsdl:operation>
+        <wsdl:operation name="StopExecutive">
+            <soap:operation
+soapAction="http://tempuri.org/IExecutiveService/StopExecutive"
+                    style="document"/>
+            <wsdl:input>
+                <soap:body use="literal"/>
+            </wsdl:input>
+            <wsdl:output>
+                <soap:body use="literal"/>
+            </wsdl:output>
+            <wsdl:fault name="ExecutiveError">
+                <soap:fault use="literal" name="ExecutiveError" namespace=""/>
+            </wsdl:fault>
+        </wsdl:operation>
+        <wsdl:operation name="ExecutiveStarted">
+            <soap:operation
+soapAction="http://tempuri.org/IExecutiveService/ExecutiveStarted"
+                    style="document"/>
+            <wsdl:output>
+                <soap:body use="literal"/>
+            </wsdl:output>
+            <wsdl:fault name="ExecutiveError">
+                <soap:fault use="literal" name="ExecutiveError" namespace=""/>
+            </wsdl:fault>
+        </wsdl:operation>
+        <wsdl:operation name="ExecutiveStopped">
+            <soap:operation
+soapAction="http://tempuri.org/IExecutiveService/ExecutiveStopped"
+                    style="document"/>
+            <wsdl:output>
+                <soap:body use="literal"/>
+            </wsdl:output>
+            <wsdl:fault name="ExecutiveError">
+                <soap:fault use="literal" name="ExecutiveError" namespace=""/>
+            </wsdl:fault>
+        </wsdl:operation>
+    </wsdl:binding>
+    <wsdl:service name="ExecutiveServices">
+        <wsdl:port name="CustomBinding_IExecutiveServiceEx"
+                   binding="tns:CustomBinding_IExecutiveServiceEx">
+            <soap:address
+                    location="ws://mns-vsp-001:3794/IExecutiveServiceEx"/>
+            <wsa10:EndpointReference>
+                <wsa10:Address>ws://mns-vsp-001:3794/IExecutiveServiceEx
+                </wsa10:Address>
+            </wsa10:EndpointReference>
+        </wsdl:port>
+    </wsdl:service>
+</wsdl:definitions>"""
+
+
+class FaultDefinitionsFilterTestCase(test.TestCase):
+    """Tests for the plugin to process the DataCore API WSDL document."""
+
+    @staticmethod
+    def _binding_operation_has_fault(document, operation_name):
+        for binding in document.getChildren('binding', wsdl.wsdlns):
+            for operation in binding.getChildren('operation', wsdl.wsdlns):
+                if operation.get('name') == operation_name:
+                    fault = operation.getChildren('fault', wsdl.wsdlns)
+                    if fault:
+                        return True
+        return False
+
+    @staticmethod
+    def _port_type_operation_has_fault(document, operation_name):
+        for port_type in document.getChildren('portType', wsdl.wsdlns):
+            for operation in port_type.getChildren('operation', wsdl.wsdlns):
+                if operation.get('name') == operation_name:
+                    fault = operation.getChildren('fault', wsdl.wsdlns)
+                    if fault:
+                        return True
+        return False
+
+    def _operation_has_fault(self, document, operation_name):
+        _binding_has_fault = self._binding_operation_has_fault(
+            document, operation_name)
+        _port_type_has_fault = self._port_type_operation_has_fault(
+            document, operation_name)
+        self.assertEqual(_binding_has_fault, _port_type_has_fault)
+        return _binding_has_fault
+
+    def test_parsed(self):
+        context = mock.Mock()
+        sax = parser.Parser()
+        wsdl_document = FAKE_WSDL_DOCUMENT
+        if isinstance(wsdl_document, six.text_type):
+            wsdl_document = wsdl_document.encode('utf-8')
+        context.document = sax.parse(string=wsdl_document).root()
+        self.assertTrue(self._operation_has_fault(context.document,
+                                                  'StartExecutive'))
+        self.assertTrue(self._operation_has_fault(context.document,
+                                                  'StopExecutive'))
+        self.assertTrue(self._operation_has_fault(context.document,
+                                                  'ExecutiveStarted'))
+        self.assertTrue(self._operation_has_fault(context.document,
+                                                  'ExecutiveStopped'))
+        plugin = api.FaultDefinitionsFilter()
+        plugin.parsed(context)
+        self.assertTrue(self._operation_has_fault(context.document,
+                                                  'StartExecutive'))
+        self.assertTrue(self._operation_has_fault(context.document,
+                                                  'StopExecutive'))
+        self.assertFalse(self._operation_has_fault(context.document,
+                                                   'ExecutiveStarted'))
+        self.assertFalse(self._operation_has_fault(context.document,
+                                                   'ExecutiveStopped'))
diff --git a/cinder/tests/unit/volume/drivers/datacore/test_datacore_driver.py b/cinder/tests/unit/volume/drivers/datacore/test_datacore_driver.py
new file mode 100644
index 00000000000..d4c5ce1661f
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/datacore/test_datacore_driver.py
@@ -0,0 +1,678 @@
+# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Unit tests for the base Driver for DataCore SANsymphony storage array."""
+
+from __future__ import division
+
+import abc
+import mock
+from oslo_utils import units
+
+from cinder import exception as cinder_exception
+from cinder.tests.unit import fake_constants
+from cinder.volume import configuration as conf
+from cinder.volume.drivers.datacore import driver as datacore_driver
+from cinder.volume.drivers.datacore import exception as datacore_exception
+from cinder.volume.drivers.san import san
+
+
+# Shared test fixtures: mock.Mock stand-ins shaped like DataCore API
+# result objects.  Only the attributes the driver code reads are set.
+SERVER_GROUPS = [
+    mock.Mock(Id='server_group_id1',
+              OurGroup=True),
+    mock.Mock(Id='server_group_id2',
+              OurGroup=False),
+]
+
+SERVERS = [
+    mock.Mock(Id='server_id1',
+              State='Online'),
+    mock.Mock(Id='server_id2',
+              State='Online'),
+]
+
+# disk_pool3/disk_pool4 are deliberately not 'Running' so tests can
+# exercise the failed-pool code paths.
+DISK_POOLS = [
+    mock.Mock(Id='disk_pool_id1',
+              Caption='disk_pool1',
+              ServerId='server_id1',
+              PoolStatus='Running'),
+    mock.Mock(Id='disk_pool_id2',
+              Caption='disk_pool2',
+              ServerId='server_id2',
+              PoolStatus='Running'),
+    mock.Mock(Id='disk_pool_id3',
+              Caption='disk_pool3',
+              ServerId='server_id1',
+              PoolStatus='Offline'),
+    mock.Mock(Id='disk_pool_id4',
+              Caption='disk_pool4',
+              ServerId='server_id2',
+              PoolStatus='Unknown'),
+]
+
+# Pools 3 and 4 report no PerformanceData; test_get_volume_stats relies
+# on getattr defaults turning those into zero contributions.
+DISK_POOL_PERFORMANCE = [
+    mock.Mock(ObjectId='disk_pool_id1',
+              PerformanceData=mock.Mock(BytesTotal=5 * units.Gi,
+                                        BytesAllocated=2 * units.Gi,
+                                        BytesAvailable=3 * units.Gi,
+                                        BytesReserved=0)),
+    mock.Mock(ObjectId='disk_pool_id2',
+              PerformanceData=mock.Mock(BytesTotal=5 * units.Gi,
+                                        BytesAllocated=3 * units.Gi,
+                                        BytesAvailable=1 * units.Gi,
+                                        BytesReserved=1 * units.Gi)),
+    mock.Mock(ObjectId='disk_pool_id3',
+              PerformanceData=None),
+    mock.Mock(ObjectId='disk_pool_id4',
+              PerformanceData=None),
+]
+
+STORAGE_PROFILES = [
+    mock.Mock(Id='storage_profile_id1',
+              Caption='storage_profile1'),
+    mock.Mock(Id='storage_profile_id2',
+              Caption='storage_profile2'),
+    mock.Mock(Id='storage_profile_id3',
+              Caption='storage_profile3'),
+]
+
+# Disks with DiskStatus='Failed' (indexes 1 and 3) are used by the
+# await-online timeout tests below.
+VIRTUAL_DISKS = [
+    mock.Mock(Id='virtual_disk_id1',
+              DiskStatus='Online',
+              IsServed=False,
+              FirstHostId='server_id1'),
+    mock.Mock(Id='virtual_disk_id2',
+              DiskStatus='Failed',
+              IsServed=False,
+              FirstHostId='server_id2'),
+    mock.Mock(Id='virtual_disk_id3',
+              DiskStatus='Online',
+              IsServed=True,
+              FirstHostId='server_id1',
+              SecondHostId='server_id2'),
+    mock.Mock(Id='virtual_disk_id4',
+              DiskStatus='Failed',
+              IsServed=False,
+              FirstHostId='server_id1',
+              SecondHostId='server_id2'),
+]
+
+# snapshot_id2 is in a 'Failed' state and drives the await-migrated
+# timeout test.
+VIRTUAL_DISK_SNAPSHOTS = [
+    mock.Mock(Id='snapshot_id1',
+              State='Migrated',
+              Failure='NoFailure',
+              DestinationLogicalDiskId='logical_disk_id1'),
+    mock.Mock(Id='snapshot_id2',
+              State='Failed',
+              Failure='NotAccessible',
+              DestinationLogicalDiskId='logical_disk_id2'),
+    mock.Mock(Id='snapshot_id3',
+              State='Migrated',
+              Failure='NoFailure',
+              DestinationLogicalDiskId='logical_disk_id2'),
+]
+
+LOGICAL_DISKS = [
+    mock.Mock(Id='logical_disk_id1',
+              VirtualDiskId='virtual_disk_id1',
+              ServerHostId='server_id1',
+              PoolId='disk_pool_id1',
+              Size=mock.Mock(Value=1 * units.Gi)),
+    mock.Mock(Id='logical_disk_id2',
+              VirtualDiskId='virtual_disk_id2',
+              ServerHostId='server_id1',
+              PoolId='disk_pool_id3',
+              Size=mock.Mock(Value=1 * units.Gi)),
+    mock.Mock(Id='logical_disk_id3',
+              VirtualDiskId='virtual_disk_id3',
+              ServerHostId='server_id1',
+              PoolId='disk_pool_id1',
+              Size=mock.Mock(Value=1 * units.Gi)),
+    mock.Mock(Id='logical_disk_id4',
+              VirtualDiskId='virtual_disk_id3',
+              ServerHostId='server_id2',
+              PoolId='disk_pool_id2',
+              Size=mock.Mock(Value=1 * units.Gi)),
+    mock.Mock(Id='logical_disk_id5',
+              VirtualDiskId='virtual_disk_id4',
+              ServerHostId='server_id1',
+              PoolId='disk_pool_id3',
+              Size=mock.Mock(Value=1 * units.Gi)),
+    mock.Mock(Id='logical_disk_id6',
+              VirtualDiskId='virtual_disk_id4',
+              ServerHostId='server_id2',
+              PoolId='disk_pool_id4',
+              Size=mock.Mock(Value=1 * units.Gi)),
+]
+
+LOGICAL_UNITS = [
+    mock.Mock(VirtualTargetDeviceId='target_device_id1',
+              LogicalDiskId='logical_disk_id3'),
+    mock.Mock(VirtualTargetDeviceId='target_device_id2',
+              LogicalDiskId='logical_disk_id4'),
+]
+
+TARGET_DEVICES = [
+    mock.Mock(Id='target_device_id1',
+              InitiatorPortId='initiator_port_id1'),
+    mock.Mock(Id='target_device_id2',
+              InitiatorPortId='initiator_port_id1'),
+]
+
+CLIENTS = [
+    mock.Mock(Id='client_id1',
+              HostName='client_host_name1'),
+    mock.Mock(Id='client_id2',
+              HostName='client_host_name2'),
+]
+
+# Minimal volume/snapshot dicts; individual tests copy() and extend them.
+VOLUME = {
+    'id': fake_constants.VOLUME_ID,
+    'display_name': 'volume_1',
+    'volume_type_id': None,
+    'size': 1,
+}
+
+SNAPSHOT = {
+    'id': fake_constants.SNAPSHOT_ID,
+    'display_name': 'snapshot_1',
+}
+
+
+class DataCoreVolumeDriverTestCase(object):
+    """Tests for the base Driver for DataCore SANsymphony storage array."""
+
+    # NOTE(review): this class is a mixin -- it does not inherit from a
+    # TestCase itself.  ``self.mock_object``/``self.assert*`` are expected
+    # to be supplied by the concrete test class it is combined with, and
+    # the protocol-specific subclasses provide ``init_driver``.
+
+    def setUp(self):
+        super(DataCoreVolumeDriverTestCase, self).setUp()
+        # A single mocked DataCore API client, pre-loaded with the
+        # module-level fixtures, is injected into every driver instance
+        # by patching datacore_driver.api.DataCoreClient.
+        self.mock_client = mock.Mock()
+        self.mock_client.get_servers.return_value = SERVERS
+        self.mock_client.get_disk_pools.return_value = DISK_POOLS
+        (self.mock_client.get_performance_by_type
+         .return_value) = DISK_POOL_PERFORMANCE
+        self.mock_client.get_virtual_disks.return_value = VIRTUAL_DISKS
+        self.mock_client.get_storage_profiles.return_value = STORAGE_PROFILES
+        self.mock_client.get_snapshots.return_value = VIRTUAL_DISK_SNAPSHOTS
+        self.mock_client.get_logical_disks.return_value = LOGICAL_DISKS
+        self.mock_client.get_clients.return_value = CLIENTS
+        self.mock_client.get_server_groups.return_value = SERVER_GROUPS
+        self.mock_object(datacore_driver.api,
+                         'DataCoreClient',
+                         return_value=self.mock_client)
+
+    @staticmethod
+    @abc.abstractmethod
+    def init_driver(config):
+        # Implemented by protocol-specific subclasses: build, set up and
+        # return a driver instance for the given configuration.
+        raise NotImplementedError()
+
+    @staticmethod
+    def create_configuration():
+        # Fresh Configuration with both the generic SAN options and the
+        # DataCore-specific options registered.
+        config = conf.Configuration(None)
+        config.append_config_values(san.san_opts)
+        config.append_config_values(datacore_driver.datacore_opts)
+        return config
+
+    def setup_default_configuration(self):
+        # Minimal valid configuration; individual tests override fields.
+        config = self.create_configuration()
+        config.volume_backend_name = 'DataCore'
+        config.san_ip = '127.0.0.1'
+        config.san_login = 'dcsadmin'
+        config.san_password = 'password'
+        config.datacore_api_timeout = 300
+        return config
+
+    def test_do_setup(self):
+        config = self.setup_default_configuration()
+        self.init_driver(config)
+
+    def test_do_setup_failed(self):
+        # Each of san_ip, san_login and san_password is mandatory;
+        # blanking any one of them must fail driver setup.
+        config = self.setup_default_configuration()
+        config.san_ip = None
+        self.assertRaises(cinder_exception.InvalidInput,
+                          self.init_driver,
+                          config)
+
+        config = self.setup_default_configuration()
+        config.san_login = None
+        self.assertRaises(cinder_exception.InvalidInput,
+                          self.init_driver,
+                          config)
+
+        config = self.setup_default_configuration()
+        config.san_password = None
+        self.assertRaises(cinder_exception.InvalidInput,
+                          self.init_driver,
+                          config)
+
+    def test_get_volume_stats(self):
+        # Recompute the expected capacity figures straight from the
+        # DISK_POOL_PERFORMANCE fixture.  Pools whose PerformanceData is
+        # None contribute zeros via the getattr defaults.
+        aggregation = [(getattr(perf.PerformanceData, 'BytesTotal', 0),
+                        getattr(perf.PerformanceData, 'BytesAvailable', 0),
+                        getattr(perf.PerformanceData, 'BytesReserved', 0),)
+                       for perf in DISK_POOL_PERFORMANCE]
+
+        total, available, reserved = map(sum, zip(*aggregation))
+        # free = available + reserved bytes, converted to GiB;
+        # reserved is then re-expressed as a percentage of total.
+        free = (available + reserved) / units.Gi
+        reserved = 100.0 * reserved / total
+        total /= units.Gi
+        provisioned = sum(disk.Size.Value for disk in LOGICAL_DISKS)
+        provisioned /= units.Gi
+        ratio = 2.0
+
+        config = self.setup_default_configuration()
+        config.max_over_subscription_ratio = ratio
+        driver = self.init_driver(config)
+        expected_volume_stats = {
+            'vendor_name': 'DataCore',
+            'QoS_support': False,
+            'total_capacity_gb': total,
+            'free_capacity_gb': free,
+            'provisioned_capacity_gb': provisioned,
+            'reserved_percentage': reserved,
+            'max_over_subscription_ratio': ratio,
+            'thin_provisioning_support': True,
+            'thick_provisioning_support': False,
+            'volume_backend_name': driver.get_volume_backend_name(),
+            'driver_version': driver.get_version(),
+            'storage_protocol': driver.get_storage_protocol(),
+        }
+        volume_stats = driver.get_volume_stats(refresh=True)
+        self.assertDictEqual(expected_volume_stats, volume_stats)
+        # With refresh=False the cached stats must be returned unchanged.
+        volume_stats_cached = driver.get_volume_stats(refresh=False)
+        self.assertEqual(volume_stats, volume_stats_cached)
+
+    def test_create_volume(self):
+        virtual_disk = VIRTUAL_DISKS[0]
+        self.mock_client.create_virtual_disk_ex2.return_value = virtual_disk
+
+        driver = self.init_driver(self.setup_default_configuration())
+        volume = VOLUME.copy()
+        result = driver.create_volume(volume)
+        # The driver stores the backend disk Id as provider_location.
+        self.assertIn('provider_location', result)
+        self.assertEqual(virtual_disk.Id, result['provider_location'])
+
+    def test_create_volume_mirrored_disk_type_specified(self):
+        virtual_disk = VIRTUAL_DISKS[2]
+        self.mock_client.create_virtual_disk_ex2.return_value = virtual_disk
+
+        # First via the backend configuration option...
+        config = self.setup_default_configuration()
+        config.datacore_disk_type = 'mirrored'
+        driver = self.init_driver(config)
+        volume = VOLUME.copy()
+        result = driver.create_volume(volume)
+        self.assertIn('provider_location', result)
+        self.assertEqual(virtual_disk.Id, result['provider_location'])
+
+        # ...then via a volume-type extra spec.
+        driver = self.init_driver(self.setup_default_configuration())
+        volume_type = {
+            'extra_specs': {driver.DATACORE_DISK_TYPE_KEY: 'mirrored'}
+        }
+        get_volume_type = self.mock_object(datacore_driver.volume_types,
+                                           'get_volume_type')
+        get_volume_type.return_value = volume_type
+        volume = VOLUME.copy()
+        volume['volume_type_id'] = 'volume_type_id'
+        result = driver.create_volume(volume)
+        self.assertIn('provider_location', result)
+        self.assertEqual(virtual_disk.Id, result['provider_location'])
+
+    def test_create_volume_profile_specified(self):
+        virtual_disk = VIRTUAL_DISKS[0]
+        self.mock_client.create_virtual_disk_ex2.return_value = virtual_disk
+
+        # Storage profile set through configuration...
+        config = self.setup_default_configuration()
+        config.datacore_storage_profile = 'storage_profile1'
+        driver = self.init_driver(config)
+        volume = VOLUME.copy()
+        result = driver.create_volume(volume)
+        self.assertIn('provider_location', result)
+        self.assertEqual(virtual_disk.Id, result['provider_location'])
+
+        # ...and through a volume-type extra spec.
+        volume_type = {
+            'extra_specs': {
+                driver.DATACORE_STORAGE_PROFILE_KEY: 'storage_profile2'
+            }
+        }
+        get_volume_type = self.mock_object(datacore_driver.volume_types,
+                                           'get_volume_type')
+        get_volume_type.return_value = volume_type
+        volume = VOLUME.copy()
+        volume['volume_type_id'] = 'volume_type_id'
+        result = driver.create_volume(volume)
+        self.assertIn('provider_location', result)
+        self.assertEqual(virtual_disk.Id, result['provider_location'])
+
+    def test_create_volume_pool_specified(self):
+        virtual_disk = VIRTUAL_DISKS[0]
+        self.mock_client.create_virtual_disk_ex2.return_value = virtual_disk
+
+        # Disk pool restricted through configuration...
+        config = self.setup_default_configuration()
+        config.datacore_disk_pools = ['disk_pool1']
+        driver = self.init_driver(config)
+        volume = VOLUME.copy()
+        result = driver.create_volume(volume)
+        self.assertIn('provider_location', result)
+        self.assertEqual(virtual_disk.Id, result['provider_location'])
+
+        # ...and through a volume-type extra spec.
+        volume_type = {
+            'extra_specs': {driver.DATACORE_DISK_POOLS_KEY: 'disk_pool2'}
+        }
+        get_volume_type = self.mock_object(datacore_driver.volume_types,
+                                           'get_volume_type')
+        get_volume_type.return_value = volume_type
+        volume = VOLUME.copy()
+        volume['volume_type_id'] = 'volume_type_id'
+        result = driver.create_volume(volume)
+        self.assertIn('provider_location', result)
+        self.assertEqual(virtual_disk.Id, result['provider_location'])
+
+    def test_create_volume_failed(self):
+        # Backend faults raised by the API client must propagate.
+        def fail_with_datacore_fault(*args):
+            raise datacore_exception.DataCoreFaultException(
+                reason="General error.")
+
+        (self.mock_client.create_virtual_disk_ex2
+         .side_effect) = fail_with_datacore_fault
+
+        driver = self.init_driver(self.setup_default_configuration())
+        volume = VOLUME.copy()
+        self.assertRaises(datacore_exception.DataCoreFaultException,
+                          driver.create_volume,
+                          volume)
+
+    def test_create_volume_unknown_disk_type_specified(self):
+        config = self.setup_default_configuration()
+        config.datacore_disk_type = 'unknown'
+        driver = self.init_driver(config)
+        volume = VOLUME.copy()
+        self.assertRaises(cinder_exception.VolumeDriverException,
+                          driver.create_volume,
+                          volume)
+
+        driver = self.init_driver(self.setup_default_configuration())
+        volume_type = {
+            'extra_specs': {driver.DATACORE_DISK_TYPE_KEY: 'unknown'}
+        }
+        get_volume_type = self.mock_object(datacore_driver.volume_types,
+                                           'get_volume_type')
+        get_volume_type.return_value = volume_type
+        volume = VOLUME.copy()
+        volume['volume_type_id'] = 'volume_type_id'
+        self.assertRaises(cinder_exception.VolumeDriverException,
+                          driver.create_volume,
+                          volume)
+
+    def test_create_volume_unknown_profile_specified(self):
+        config = self.setup_default_configuration()
+        config.datacore_storage_profile = 'unknown'
+        driver = self.init_driver(config)
+        volume = VOLUME.copy()
+        self.assertRaises(cinder_exception.VolumeDriverException,
+                          driver.create_volume,
+                          volume)
+
+        driver = self.init_driver(self.setup_default_configuration())
+        volume_type = {
+            'extra_specs': {driver.DATACORE_STORAGE_PROFILE_KEY: 'unknown'}
+        }
+        get_volume_type = self.mock_object(datacore_driver.volume_types,
+                                           'get_volume_type')
+        get_volume_type.return_value = volume_type
+        volume = VOLUME.copy()
+        volume['volume_type_id'] = 'volume_type_id'
+        self.assertRaises(cinder_exception.VolumeDriverException,
+                          driver.create_volume,
+                          volume)
+
+    def test_create_volume_on_failed_pool(self):
+        # disk_pool3/disk_pool4 are Offline/Unknown in the fixtures.
+        config = self.setup_default_configuration()
+        config.datacore_disk_pools = ['disk_pool3', 'disk_pool4']
+        driver = self.init_driver(config)
+        volume = VOLUME.copy()
+        self.assertRaises(cinder_exception.VolumeDriverException,
+                          driver.create_volume,
+                          volume)
+
+    def test_create_volume_await_online_timed_out(self):
+        # VIRTUAL_DISKS[1] stays in DiskStatus='Failed', so waiting for
+        # it to come online must time out (delay shortened to 1).
+        virtual_disk = VIRTUAL_DISKS[1]
+        self.mock_client.create_virtual_disk_ex2.return_value = virtual_disk
+
+        config = self.setup_default_configuration()
+        config.datacore_disk_failed_delay = 1
+        driver = self.init_driver(config)
+        volume = VOLUME.copy()
+        self.assertRaises(cinder_exception.VolumeDriverException,
+                          driver.create_volume,
+                          volume)
+
+    def test_extend_volume(self):
+        virtual_disk = VIRTUAL_DISKS[0]
+        driver = self.init_driver(self.setup_default_configuration())
+        volume = VOLUME.copy()
+        volume['provider_location'] = virtual_disk.Id
+        # NOTE(review): 2147483648 is 2 GiB expressed in bytes, but
+        # Cinder normally passes new_size in GiB -- confirm which unit
+        # the driver's extend_volume actually expects.
+        driver.extend_volume(volume, 2147483648)
+
+    def test_extend_volume_failed_not_found(self):
+        driver = self.init_driver(self.setup_default_configuration())
+        volume = VOLUME.copy()
+        volume['provider_location'] = 'wrong_virtual_disk_id'
+        self.assertRaises(cinder_exception.VolumeDriverException,
+                          driver.extend_volume,
+                          volume,
+                          2147483648)
+
+    def test_delete_volume(self):
+        virtual_disk = VIRTUAL_DISKS[0]
+        driver = self.init_driver(self.setup_default_configuration())
+        volume = VOLUME.copy()
+        volume['provider_location'] = virtual_disk.Id
+        driver.delete_volume(volume)
+
+    def test_delete_volume_assigned(self):
+        # NOTE(review): get_logical_disks is already stubbed in setUp;
+        # re-assigning it here is redundant but harmless.
+        self.mock_client.get_logical_disks.return_value = LOGICAL_DISKS
+        self.mock_client.get_logical_units.return_value = LOGICAL_UNITS
+        self.mock_client.get_target_devices.return_value = TARGET_DEVICES
+
+        driver = self.init_driver(self.setup_default_configuration())
+        volume = VOLUME.copy()
+        # VIRTUAL_DISKS[2] is the served (IsServed=True) disk.
+        virtual_disk = VIRTUAL_DISKS[2]
+        volume['provider_location'] = virtual_disk.Id
+        driver.delete_volume(volume)
+
+    def test_create_snapshot(self):
+        virtual_disk = VIRTUAL_DISKS[0]
+        virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[0]
+        self.mock_client.create_snapshot.return_value = virtual_disk_snapshot
+
+        driver = self.init_driver(self.setup_default_configuration())
+        volume = VOLUME.copy()
+        volume['provider_location'] = virtual_disk.Id
+        snapshot = SNAPSHOT.copy()
+        snapshot['volume'] = volume
+        result = driver.create_snapshot(snapshot)
+        self.assertIn('provider_location', result)
+
+    def test_create_snapshot_on_failed_pool(self):
+        virtual_disk = VIRTUAL_DISKS[0]
+        config = self.setup_default_configuration()
+        config.datacore_disk_pools = ['disk_pool3', 'disk_pool4']
+        driver = self.init_driver(config)
+        volume = VOLUME.copy()
+        volume['provider_location'] = virtual_disk.Id
+        snapshot = SNAPSHOT.copy()
+        snapshot['volume'] = volume
+        self.assertRaises(cinder_exception.VolumeDriverException,
+                          driver.create_snapshot,
+                          snapshot)
+
+    def test_create_snapshot_await_migrated_timed_out(self):
+        virtual_disk = VIRTUAL_DISKS[0]
+        # VIRTUAL_DISK_SNAPSHOTS[1] stays in State='Failed'.
+        virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[1]
+        self.mock_client.create_snapshot.return_value = virtual_disk_snapshot
+
+        driver = self.init_driver(self.setup_default_configuration())
+        volume = VOLUME.copy()
+        volume['provider_location'] = virtual_disk.Id
+        snapshot = SNAPSHOT.copy()
+        snapshot['volume'] = volume
+        self.assertRaises(cinder_exception.VolumeDriverException,
+                          driver.create_snapshot,
+                          snapshot)
+
+    def test_delete_snapshot(self):
+        virtual_disk = VIRTUAL_DISKS[0]
+        driver = self.init_driver(self.setup_default_configuration())
+        snapshot = SNAPSHOT.copy()
+        snapshot['provider_location'] = virtual_disk.Id
+        driver.delete_snapshot(snapshot)
+
+    def test_create_volume_from_snapshot(self):
+        virtual_disk = VIRTUAL_DISKS[0]
+        self.mock_client.set_virtual_disk_size.return_value = virtual_disk
+        virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[0]
+        self.mock_client.create_snapshot.return_value = virtual_disk_snapshot
+
+        driver = self.init_driver(self.setup_default_configuration())
+        volume = VOLUME.copy()
+        snapshot = SNAPSHOT.copy()
+        snapshot['provider_location'] = virtual_disk.Id
+        result = driver.create_volume_from_snapshot(volume, snapshot)
+        self.assertIn('provider_location', result)
+
+    def test_create_volume_from_snapshot_mirrored_disk_type_specified(self):
+        virtual_disk = VIRTUAL_DISKS[0]
+        self.mock_client.set_virtual_disk_size.return_value = virtual_disk
+        virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[0]
+        self.mock_client.create_snapshot.return_value = virtual_disk_snapshot
+
+        config = self.setup_default_configuration()
+        config.datacore_disk_type = 'mirrored'
+        driver = self.init_driver(config)
+        volume = VOLUME.copy()
+        snapshot = SNAPSHOT.copy()
+        snapshot['provider_location'] = virtual_disk.Id
+        result = driver.create_volume_from_snapshot(volume, snapshot)
+        self.assertIn('provider_location', result)
+
+    def test_create_volume_from_snapshot_on_failed_pool(self):
+        virtual_disk = VIRTUAL_DISKS[0]
+        self.mock_client.set_virtual_disk_size.return_value = virtual_disk
+        virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[0]
+        self.mock_client.create_snapshot.return_value = virtual_disk_snapshot
+
+        # A mirrored disk needs two healthy pools; disk_pool4 is not
+        # Running, so creation must fail.
+        config = self.setup_default_configuration()
+        config.datacore_disk_type = 'mirrored'
+        config.datacore_disk_pools = ['disk_pool1', 'disk_pool4']
+        driver = self.init_driver(config)
+        volume = VOLUME.copy()
+        snapshot = SNAPSHOT.copy()
+        snapshot['provider_location'] = virtual_disk.Id
+        self.assertRaises(cinder_exception.VolumeDriverException,
+                          driver.create_volume_from_snapshot,
+                          volume,
+                          snapshot)
+
+    def test_create_volume_from_snapshot_await_online_timed_out(self):
+        virtual_disk = VIRTUAL_DISKS[0]
+        # The resized disk stays 'Failed', so waiting for it times out.
+        snapshot_virtual_disk = VIRTUAL_DISKS[1]
+        (self.mock_client.set_virtual_disk_size
+         .return_value) = snapshot_virtual_disk
+        virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[2]
+        self.mock_client.create_snapshot.return_value = virtual_disk_snapshot
+
+        driver = self.init_driver(self.setup_default_configuration())
+        volume = VOLUME.copy()
+        snapshot = SNAPSHOT.copy()
+        snapshot['provider_location'] = virtual_disk.Id
+        self.assertRaises(cinder_exception.VolumeDriverException,
+                          driver.create_volume_from_snapshot,
+                          volume,
+                          snapshot)
+
+    def test_create_cloned_volume(self):
+        virtual_disk = VIRTUAL_DISKS[0]
+        self.mock_client.set_virtual_disk_size.return_value = virtual_disk
+        virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[0]
+        self.mock_client.create_snapshot.return_value = virtual_disk_snapshot
+
+        driver = self.init_driver(self.setup_default_configuration())
+        volume = VOLUME.copy()
+        src_vref = VOLUME.copy()
+        src_vref['provider_location'] = virtual_disk.Id
+        result = driver.create_cloned_volume(volume, src_vref)
+        self.assertIn('provider_location', result)
+
+    def test_create_cloned_volume_mirrored_disk_type_specified(self):
+        virtual_disk = VIRTUAL_DISKS[0]
+        self.mock_client.set_virtual_disk_size.return_value = virtual_disk
+        virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[0]
+        self.mock_client.create_snapshot.return_value = virtual_disk_snapshot
+
+        config = self.setup_default_configuration()
+        config.datacore_disk_type = 'mirrored'
+        driver = self.init_driver(config)
+        volume = VOLUME.copy()
+        src_vref = VOLUME.copy()
+        src_vref['provider_location'] = virtual_disk.Id
+        result = driver.create_cloned_volume(volume, src_vref)
+        self.assertIn('provider_location', result)
+
+    def test_create_cloned_volume_on_failed_pool(self):
+        virtual_disk = VIRTUAL_DISKS[0]
+        self.mock_client.set_virtual_disk_size.return_value = virtual_disk
+        virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[0]
+        self.mock_client.create_snapshot.return_value = virtual_disk_snapshot
+
+        config = self.setup_default_configuration()
+        config.datacore_disk_type = 'mirrored'
+        config.datacore_disk_pools = ['disk_pool1', 'disk_pool4']
+        driver = self.init_driver(config)
+        volume = VOLUME.copy()
+        src_vref = VOLUME.copy()
+        src_vref['provider_location'] = virtual_disk.Id
+        self.assertRaises(cinder_exception.VolumeDriverException,
+                          driver.create_cloned_volume,
+                          volume,
+                          src_vref)
+
+    def test_create_cloned_volume_await_online_timed_out(self):
+        virtual_disk = VIRTUAL_DISKS[0]
+        snapshot_virtual_disk = VIRTUAL_DISKS[1]
+        (self.mock_client.set_virtual_disk_size
+         .return_value) = snapshot_virtual_disk
+        virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[2]
+        self.mock_client.create_snapshot.return_value = virtual_disk_snapshot
+
+        driver = self.init_driver(self.setup_default_configuration())
+        volume = VOLUME.copy()
+        src_vref = VOLUME.copy()
+        src_vref['provider_location'] = virtual_disk.Id
+        self.assertRaises(cinder_exception.VolumeDriverException,
+                          driver.create_cloned_volume,
+                          volume,
+                          src_vref)
+
+    def test_terminate_connection(self):
+        virtual_disk = VIRTUAL_DISKS[0]
+        client = CLIENTS[0]
+        driver = self.init_driver(self.setup_default_configuration())
+        volume = VOLUME.copy()
+        volume['provider_location'] = virtual_disk.Id
+        connector = {'host': client.HostName}
+        driver.terminate_connection(volume, connector)
+
+    def test_terminate_connection_connector_is_none(self):
+        # A None connector (force-detach) must be tolerated.
+        virtual_disk = VIRTUAL_DISKS[0]
+        driver = self.init_driver(self.setup_default_configuration())
+        volume = VOLUME.copy()
+        volume['provider_location'] = virtual_disk.Id
+        driver.terminate_connection(volume, None)
diff --git a/cinder/tests/unit/volume/drivers/datacore/test_datacore_fc.py b/cinder/tests/unit/volume/drivers/datacore/test_datacore_fc.py
new file mode 100644
index 00000000000..b6861069d6e
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/datacore/test_datacore_fc.py
@@ -0,0 +1,256 @@
+# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Unit tests for the Fibre Channel Driver for DataCore SANsymphony
+storage array.
+"""
+
+import mock
+
+from cinder import exception as cinder_exception
+from cinder import test
+from cinder.tests.unit.volume.drivers.datacore import test_datacore_driver
+from cinder.volume.drivers.datacore import fc
+
+
+# Mocked Fibre Channel ports: two initiator ports (the first is bound to a
+# client host via HostId) and two target ports on the same server.
+PORTS = [
+    mock.Mock(Id='initiator_port_id1',
+              PortType='FibreChannel',
+              PortMode='Initiator',
+              PortName='AA-AA-AA-AA-AA-AA-AA-AA',
+              HostId='client_id1'),
+    mock.Mock(Id='initiator_port_id2',
+              PortType='FibreChannel',
+              PortMode='Initiator',
+              PortName='BB-BB-BB-BB-BB-BB-BB-BB'),
+    mock.Mock(Id='target_port_id1',
+              PortMode='Target',
+              PortName='CC-CC-CC-CC-CC-CC-CC-CC',
+              HostId='server_id1'),
+    mock.Mock(Id='target_port_id2',
+              PortMode='Target',
+              PortName='DD-DD-DD-DD-DD-DD-DD-DD',
+              HostId='server_id1'),
+]
+
+# Mocked logical units, one per virtual target device, each with a distinct
+# LUN number so tests can verify the reported target_lun.
+LOGICAL_UNITS = [
+    mock.Mock(VirtualTargetDeviceId='target_device_id1',
+              Lun=mock.Mock(Quad=4)),
+    mock.Mock(VirtualTargetDeviceId='target_device_id2',
+              Lun=mock.Mock(Quad=3)),
+    mock.Mock(VirtualTargetDeviceId='target_device_id3',
+              Lun=mock.Mock(Quad=2)),
+    mock.Mock(VirtualTargetDeviceId='target_device_id4',
+              Lun=mock.Mock(Quad=1)),
+]
+
+# Mocked virtual target devices linking target ports to initiator ports.
+TARGET_DEVICES = [
+    mock.Mock(Id='target_device_id1',
+              TargetPortId='target_port_id1',
+              InitiatorPortId='initiator_port_id1'),
+    mock.Mock(Id='target_device_id2',
+              TargetPortId='target_port_id2',
+              InitiatorPortId='initiator_port_id1'),
+    mock.Mock(Id='target_device_id3',
+              TargetPortId='target_port_id2',
+              InitiatorPortId='initiator_port_id1'),
+    mock.Mock(Id='target_device_id4',
+              TargetPortId='target_port_id2',
+              InitiatorPortId='initiator_port_id2'),
+]
+
+
+class FibreChannelVolumeDriverTestCase(
+        test_datacore_driver.DataCoreVolumeDriverTestCase, test.TestCase):
+    """Tests for the FC Driver for DataCore SANsymphony storage array.
+
+    Inherits the protocol-independent tests from
+    DataCoreVolumeDriverTestCase and adds FC-specific connection tests.
+    """
+
+    def setUp(self):
+        super(FibreChannelVolumeDriverTestCase, self).setUp()
+        # Default mock topology shared by all tests in this class.
+        self.mock_client.get_ports.return_value = PORTS
+        self.mock_client.get_target_devices.return_value = TARGET_DEVICES
+
+    @staticmethod
+    def init_driver(config):
+        """Instantiate the FC driver and run its setup against the mocks."""
+        driver = fc.FibreChannelVolumeDriver(configuration=config)
+        driver.do_setup(None)
+        return driver
+
+    def test_validate_connector(self):
+        """A connector with both host and wwpns validates cleanly."""
+        driver = self.init_driver(self.setup_default_configuration())
+        connector = {
+            'host': 'host_name',
+            'wwpns': ['AA-AA-AA-AA-AA-AA-AA-AA'],
+        }
+        driver.validate_connector(connector)
+
+    def test_validate_connector_failed(self):
+        """Connectors missing host and/or wwpns are rejected."""
+        driver = self.init_driver(self.setup_default_configuration())
+        connector = {}
+        self.assertRaises(cinder_exception.InvalidConnectorException,
+                          driver.validate_connector,
+                          connector)
+
+        connector = {'host': 'host_name'}
+        self.assertRaises(cinder_exception.InvalidConnectorException,
+                          driver.validate_connector,
+                          connector)
+
+        connector = {'wwpns': ['AA-AA-AA-AA-AA-AA-AA-AA']}
+        self.assertRaises(cinder_exception.InvalidConnectorException,
+                          driver.validate_connector,
+                          connector)
+
+    def test_initialize_connection(self):
+        """Happy path: connection info matches the mocked FC topology."""
+        (self.mock_client.serve_virtual_disks_to_host
+         .return_value) = LOGICAL_UNITS
+
+        virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
+        client = test_datacore_driver.CLIENTS[0]
+        driver = self.init_driver(self.setup_default_configuration())
+        volume = test_datacore_driver.VOLUME.copy()
+        volume['provider_location'] = virtual_disk.Id
+        # WWPNs in the connector are lowercase, no dashes, matching the
+        # os-brick connector format.
+        initiator_wwpns = [port.PortName.replace('-', '').lower() for port
+                           in PORTS
+                           if port.PortMode == 'Initiator']
+        connector = {
+            'host': client.HostName,
+            'wwpns': initiator_wwpns,
+        }
+        result = driver.initialize_connection(volume, connector)
+        self.assertEqual('fibre_channel', result['driver_volume_type'])
+
+        target_wwns = [port.PortName.replace('-', '').lower() for port
+                       in PORTS
+                       if port.PortMode == 'Target']
+        self.assertIn(result['data']['target_wwn'], target_wwns)
+
+        # Walk wwn -> target port -> target device -> logical unit to derive
+        # the LUN the driver should have reported.
+        target_wwn = result['data']['target_wwn']
+        target_port_id = next((
+            port.Id for port
+            in PORTS
+            if port.PortName.replace('-', '').lower() == target_wwn), None)
+        target_device_id = next((
+            device.Id for device
+            in TARGET_DEVICES
+            if device.TargetPortId == target_port_id), None)
+        target_lun = next((
+            unit.Lun.Quad for unit
+            in LOGICAL_UNITS
+            if unit.VirtualTargetDeviceId == target_device_id), None)
+        self.assertEqual(target_lun, result['data']['target_lun'])
+
+        self.assertFalse(result['data']['target_discovered'])
+        self.assertEqual(volume['id'], result['data']['volume_id'])
+        self.assertEqual('rw', result['data']['access_mode'])
+
+    def test_initialize_connection_unknown_client(self):
+        """Connection succeeds when the host must first be registered."""
+        client = test_datacore_driver.CLIENTS[0]
+        # The connecting host is absent from get_clients, so the driver is
+        # expected to register it.
+        self.mock_client.register_client.return_value = client
+        (self.mock_client.get_clients
+         .return_value) = test_datacore_driver.CLIENTS[1:]
+        (self.mock_client.serve_virtual_disks_to_host
+         .return_value) = LOGICAL_UNITS
+
+        virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
+        driver = self.init_driver(self.setup_default_configuration())
+        volume = test_datacore_driver.VOLUME.copy()
+        volume['provider_location'] = virtual_disk.Id
+        initiator_wwpns = [port.PortName.replace('-', '').lower() for port
+                           in PORTS
+                           if port.PortMode == 'Initiator']
+        connector = {
+            'host': client.HostName,
+            'wwpns': initiator_wwpns,
+        }
+        result = driver.initialize_connection(volume, connector)
+        self.assertEqual('fibre_channel', result['driver_volume_type'])
+
+        target_wwns = [port.PortName.replace('-', '').lower() for port
+                       in PORTS
+                       if port.PortMode == 'Target']
+        self.assertIn(result['data']['target_wwn'], target_wwns)
+
+        target_wwn = result['data']['target_wwn']
+        target_port_id = next((
+            port.Id for port
+            in PORTS
+            if port.PortName.replace('-', '').lower() == target_wwn), None)
+        target_device_id = next((
+            device.Id for device
+            in TARGET_DEVICES
+            if device.TargetPortId == target_port_id), None)
+        target_lun = next((
+            unit.Lun.Quad for unit
+            in LOGICAL_UNITS
+            if unit.VirtualTargetDeviceId == target_device_id), None)
+        self.assertEqual(target_lun, result['data']['target_lun'])
+
+        self.assertFalse(result['data']['target_discovered'])
+        self.assertEqual(volume['id'], result['data']['volume_id'])
+        self.assertEqual('rw', result['data']['access_mode'])
+
+    def test_initialize_connection_failed_not_found(self):
+        """Connection fails when provider_location is an unknown disk id."""
+        client = test_datacore_driver.CLIENTS[0]
+        driver = self.init_driver(self.setup_default_configuration())
+        volume = test_datacore_driver.VOLUME.copy()
+        volume['provider_location'] = 'wrong_virtual_disk_id'
+        initiator_wwpns = [port.PortName.replace('-', '').lower() for port
+                           in PORTS
+                           if port.PortMode == 'Initiator']
+        connector = {
+            'host': client.HostName,
+            'wwpns': initiator_wwpns,
+        }
+        self.assertRaises(cinder_exception.VolumeDriverException,
+                          driver.initialize_connection,
+                          volume,
+                          connector)
+
+    def test_initialize_connection_failed_initiator_not_found(self):
+        """Connection fails when the connector's WWPNs match no port."""
+        (self.mock_client.serve_virtual_disks_to_host
+         .return_value) = LOGICAL_UNITS
+
+        virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
+        client = test_datacore_driver.CLIENTS[0]
+        driver = self.init_driver(self.setup_default_configuration())
+        volume = test_datacore_driver.VOLUME.copy()
+        volume['provider_location'] = virtual_disk.Id
+        connector = {
+            'host': client.HostName,
+            'wwpns': ['0000000000000000'],
+        }
+        self.assertRaises(cinder_exception.VolumeDriverException,
+                          driver.initialize_connection,
+                          volume,
+                          connector)
+
+    def test_initialize_connection_failed_on_serve(self):
+        """Connection fails when serving the disk yields no logical units."""
+        self.mock_client.serve_virtual_disks_to_host.return_value = []
+
+        virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
+        client = test_datacore_driver.CLIENTS[0]
+        driver = self.init_driver(self.setup_default_configuration())
+        volume = test_datacore_driver.VOLUME.copy()
+        volume['provider_location'] = virtual_disk.Id
+        initiator_wwpns = [port.PortName.replace('-', '').lower() for port
+                           in PORTS
+                           if port.PortMode == 'Initiator']
+        connector = {
+            'host': client.HostName,
+            'wwpns': initiator_wwpns,
+        }
+        self.assertRaises(cinder_exception.VolumeDriverException,
+                          driver.initialize_connection,
+                          volume,
+                          connector)
diff --git a/cinder/tests/unit/volume/drivers/datacore/test_datacore_iscsi.py b/cinder/tests/unit/volume/drivers/datacore/test_datacore_iscsi.py
new file mode 100644
index 00000000000..4530be3eb5f
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/datacore/test_datacore_iscsi.py
@@ -0,0 +1,515 @@
+# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Unit tests for the iSCSI Driver for DataCore SANsymphony storage array."""
+
+import mock
+
+from cinder import exception as cinder_exception
+from cinder import test
+from cinder.tests.unit.volume.drivers.datacore import test_datacore_driver
+from cinder.tests.unit.volume.drivers.datacore import test_datacore_passwd
+from cinder.volume.drivers.datacore import exception as datacore_exception
+from cinder.volume.drivers.datacore import iscsi
+
+
+# Portal state fixture reporting a single portal in the 'Ready' state.
+ISCSI_PORT_STATE_INFO_READY = mock.Mock(
+    PortalsState=mock.Mock(
+        PortalStateInfo=[mock.Mock(State='Ready')]
+    )
+)
+
+# Portal configuration fixture: one portal at 127.0.0.1:3260, which tests
+# later assert appears as the target_portal.
+ISCSI_PORT_CONFIG_INFO = mock.Mock(
+    PortalsConfig=mock.Mock(
+        iScsiPortalConfigInfo=[mock.Mock(
+            Address=mock.Mock(Address='127.0.0.1'), TcpPort='3260')]
+    )
+)
+
+# Mocked iSCSI ports: two initiators (the first bound to a client host) and
+# two frontend target ports without CHAP.  The third entry fakes its class
+# __name__ as 'ServeriScsiPortData' because the driver presumably dispatches
+# on the SOAP type name.  # TODO confirm against driver code
+PORTS = [
+    mock.Mock(Id='initiator_port_id1',
+              PortType='iSCSI',
+              PortMode='Initiator',
+              PortName='iqn.1993-08.org.debian:1:1',
+              HostId='client_id1'),
+    mock.Mock(Id='initiator_port_id2',
+              PortType='iSCSI',
+              PortMode='Initiator',
+              PortName='iqn.1993-08.org.debian:1:2'),
+    mock.Mock(__class__=mock.Mock(__name__='ServeriScsiPortData'),
+              Id='target_port_id1',
+              PortType='iSCSI',
+              PortMode='Target',
+              PortName='iqn.2000-08.com.datacore:server-1-1',
+              HostId='server_id1',
+              PresenceStatus='Present',
+              ServerPortProperties=mock.Mock(Role="Frontend",
+                                             Authentication='None'),
+              IScsiPortStateInfo=ISCSI_PORT_STATE_INFO_READY,
+              PortConfigInfo=ISCSI_PORT_CONFIG_INFO),
+    mock.Mock(Id='target_port_id2',
+              PortType='iSCSI',
+              PortMode='Target',
+              PortName='iqn.2000-08.com.datacore:server-1-2',
+              HostId='server_id1',
+              PresenceStatus='Present',
+              ServerPortProperties=mock.Mock(Role="Frontend",
+                                             Authentication='None'),
+              IScsiPortStateInfo=ISCSI_PORT_STATE_INFO_READY,
+              PortConfigInfo=ISCSI_PORT_CONFIG_INFO),
+]
+
+# Mocked logical units, one per virtual target device, each with a distinct
+# LUN number so tests can verify the reported target_lun.
+LOGICAL_UNITS = [
+    mock.Mock(VirtualTargetDeviceId='target_device_id1',
+              Lun=mock.Mock(Quad=4)),
+    mock.Mock(VirtualTargetDeviceId='target_device_id2',
+              Lun=mock.Mock(Quad=3)),
+    mock.Mock(VirtualTargetDeviceId='target_device_id3',
+              Lun=mock.Mock(Quad=2)),
+    mock.Mock(VirtualTargetDeviceId='target_device_id4',
+              Lun=mock.Mock(Quad=1)),
+]
+
+# Mocked virtual target devices linking target ports to initiator ports.
+TARGET_DEVICES = [
+    mock.Mock(Id='target_device_id1',
+              TargetPortId='target_port_id1',
+              InitiatorPortId='initiator_port_id1'),
+    mock.Mock(Id='target_device_id2',
+              TargetPortId='target_port_id2',
+              InitiatorPortId='initiator_port_id1'),
+    mock.Mock(Id='target_device_id3',
+              TargetPortId='target_port_id2',
+              InitiatorPortId='initiator_port_id1'),
+    mock.Mock(Id='target_device_id4',
+              TargetPortId='target_port_id2',
+              InitiatorPortId='initiator_port_id2'),
+]
+
+
+class ISCSIVolumeDriverTestCase(
+        test_datacore_driver.DataCoreVolumeDriverTestCase, test.TestCase):
+    """Tests for the iSCSI Driver for DataCore SANsymphony storage array.
+
+    Inherits the protocol-independent tests from
+    DataCoreVolumeDriverTestCase and adds iSCSI/CHAP connection tests.
+    """
+
+    def setUp(self):
+        super(ISCSIVolumeDriverTestCase, self).setUp()
+        # Default mock topology; nexus building and mapping are emulated so
+        # the driver resolves LUNs consistently with TARGET_DEVICES.
+        self.mock_client.get_ports.return_value = PORTS
+        (self.mock_client.build_scsi_port_nexus_data
+         .side_effect) = self._build_nexus_data
+        self.mock_client.map_logical_disk.side_effect = self._map_logical_disk
+
+    @staticmethod
+    def _build_nexus_data(initiator_port_id, target_port_id):
+        """Return a fake nexus object pairing initiator and target ports."""
+        return mock.Mock(InitiatorPortId=initiator_port_id,
+                         TargetPortId=target_port_id)
+
+    @staticmethod
+    def _map_logical_disk(logical_disk_id, nexus, *args):
+        """Return the logical unit that the given nexus would map to."""
+        target_device_id = next((
+            device.Id for device in TARGET_DEVICES
+            if device.TargetPortId == nexus.TargetPortId
+            and device.InitiatorPortId == nexus.InitiatorPortId), None)
+        return next(unit for unit in LOGICAL_UNITS
+                    if unit.VirtualTargetDeviceId == target_device_id)
+
+    @staticmethod
+    def init_driver(config):
+        """Instantiate the iSCSI driver and run its setup against the mocks."""
+        driver = iscsi.ISCSIVolumeDriver(configuration=config)
+        driver.do_setup(None)
+        return driver
+
+    @staticmethod
+    def create_configuration():
+        """Extend the base configuration with the iSCSI driver options."""
+        # Two-argument super() form because this is a staticmethod with no
+        # instance/class reference (Python 2 compatible).
+        config = super(ISCSIVolumeDriverTestCase,
+                       ISCSIVolumeDriverTestCase).create_configuration()
+        config.append_config_values(iscsi.datacore_iscsi_opts)
+        return config
+
+    def test_do_setup_failed(self):
+        """Setup fails when CHAP is enabled without a credential store."""
+        super(ISCSIVolumeDriverTestCase, self).test_do_setup_failed()
+
+        config = self.setup_default_configuration()
+        config.datacore_iscsi_chap_enabled = True
+        config.datacore_iscsi_chap_storage = None
+        self.assertRaises(cinder_exception.InvalidInput,
+                          self.init_driver,
+                          config)
+
+    def test_validate_connector(self):
+        """A connector with both host and initiator IQN validates cleanly."""
+        driver = self.init_driver(self.setup_default_configuration())
+        connector = {
+            'host': 'host_name',
+            'initiator': 'iqn.1993-08.org.debian:1:1',
+        }
+        driver.validate_connector(connector)
+
+    def test_validate_connector_failed(self):
+        """Connectors missing host and/or initiator are rejected."""
+        driver = self.init_driver(self.setup_default_configuration())
+        connector = {}
+        self.assertRaises(cinder_exception.InvalidConnectorException,
+                          driver.validate_connector,
+                          connector)
+
+        connector = {'host': 'host_name'}
+        self.assertRaises(cinder_exception.InvalidConnectorException,
+                          driver.validate_connector,
+                          connector)
+
+        connector = {'initiator': 'iqn.1993-08.org.debian:1:1'}
+        self.assertRaises(cinder_exception.InvalidConnectorException,
+                          driver.validate_connector,
+                          connector)
+
+    def test_initialize_connection(self):
+        """Happy path: connection info matches the mocked iSCSI topology."""
+        self.mock_client.get_logical_units.return_value = []
+        self.mock_client.get_target_domains.return_value = []
+        self.mock_client.get_target_devices.return_value = TARGET_DEVICES
+
+        virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
+        client = test_datacore_driver.CLIENTS[0]
+        driver = self.init_driver(self.setup_default_configuration())
+        volume = test_datacore_driver.VOLUME.copy()
+        volume['provider_location'] = virtual_disk.Id
+        initiator_iqn = PORTS[0].PortName
+        connector = {
+            'host': client.HostName,
+            'initiator': initiator_iqn
+        }
+        result = driver.initialize_connection(volume, connector)
+        self.assertEqual('iscsi', result['driver_volume_type'])
+
+        target_iqn = [port.PortName for port
+                      in PORTS
+                      if port.PortMode == 'Target']
+        self.assertIn(result['data']['target_iqn'], target_iqn)
+
+        # Walk iqn -> target port -> target device -> logical unit to derive
+        # the LUN the driver should have reported.
+        target_iqn = result['data']['target_iqn']
+        target_port = next((
+            port for port
+            in PORTS
+            if port.PortName == target_iqn), None)
+        target_device_id = next((
+            device.Id for device
+            in TARGET_DEVICES
+            if device.TargetPortId == target_port.Id), None)
+        target_lun = next((
+            unit.Lun.Quad for unit
+            in LOGICAL_UNITS
+            if unit.VirtualTargetDeviceId == target_device_id), None)
+        self.assertEqual(target_lun, result['data']['target_lun'])
+
+        self.assertEqual('127.0.0.1:3260', result['data']['target_portal'])
+        self.assertFalse(result['data']['target_discovered'])
+        self.assertEqual(volume['id'], result['data']['volume_id'])
+        self.assertEqual('rw', result['data']['access_mode'])
+
+    def test_initialize_connection_unknown_client(self):
+        """Connection succeeds when the host must first be registered."""
+        client = test_datacore_driver.CLIENTS[0]
+        # The connecting host is absent from get_clients, so the driver is
+        # expected to register it.
+        self.mock_client.register_client.return_value = client
+        (self.mock_client.get_clients
+         .return_value) = test_datacore_driver.CLIENTS[1:]
+        self.mock_client.get_logical_units.return_value = []
+        self.mock_client.get_target_domains.return_value = []
+        self.mock_client.get_target_devices.return_value = TARGET_DEVICES
+
+        virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
+        client = test_datacore_driver.CLIENTS[0]
+        driver = self.init_driver(self.setup_default_configuration())
+        volume = test_datacore_driver.VOLUME.copy()
+        volume['provider_location'] = virtual_disk.Id
+        initiator_iqn = PORTS[0].PortName
+        connector = {
+            'host': client.HostName,
+            'initiator': initiator_iqn
+        }
+        result = driver.initialize_connection(volume, connector)
+        self.assertEqual('iscsi', result['driver_volume_type'])
+
+        target_iqn = [port.PortName for port
+                      in PORTS
+                      if port.PortMode == 'Target']
+        self.assertIn(result['data']['target_iqn'], target_iqn)
+
+        target_iqn = result['data']['target_iqn']
+        target_port = next((
+            port for port
+            in PORTS
+            if port.PortName == target_iqn), None)
+        target_device_id = next((
+            device.Id for device
+            in TARGET_DEVICES
+            if device.TargetPortId == target_port.Id), None)
+        target_lun = next((
+            unit.Lun.Quad for unit
+            in LOGICAL_UNITS
+            if unit.VirtualTargetDeviceId == target_device_id), None)
+        self.assertEqual(target_lun, result['data']['target_lun'])
+
+        self.assertEqual('127.0.0.1:3260', result['data']['target_portal'])
+        self.assertFalse(result['data']['target_discovered'])
+        self.assertEqual(volume['id'], result['data']['volume_id'])
+        self.assertEqual('rw', result['data']['access_mode'])
+
+    def test_initialize_connection_unknown_initiator(self):
+        """Connection succeeds when the initiator port must be registered."""
+        # The connector's IQN is absent from get_ports, so the driver is
+        # expected to register the initiator port.
+        self.mock_client.register_port.return_value = PORTS[0]
+        self.mock_client.get_ports.return_value = PORTS[1:]
+        self.mock_client.get_logical_units.return_value = []
+        self.mock_client.get_target_domains.return_value = []
+        self.mock_client.get_target_devices.return_value = TARGET_DEVICES
+
+        virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
+        client = test_datacore_driver.CLIENTS[0]
+        driver = self.init_driver(self.setup_default_configuration())
+        volume = test_datacore_driver.VOLUME.copy()
+        volume['provider_location'] = virtual_disk.Id
+        initiator_iqn = PORTS[0].PortName
+        connector = {
+            'host': client.HostName,
+            'initiator': initiator_iqn
+        }
+        result = driver.initialize_connection(volume, connector)
+        self.assertEqual('iscsi', result['driver_volume_type'])
+
+        target_iqn = [port.PortName for port
+                      in PORTS
+                      if port.PortMode == 'Target']
+        self.assertIn(result['data']['target_iqn'], target_iqn)
+
+        target_iqn = result['data']['target_iqn']
+        target_port = next((
+            port for port
+            in PORTS
+            if port.PortName == target_iqn), None)
+        target_device_id = next((
+            device.Id for device
+            in TARGET_DEVICES
+            if device.TargetPortId == target_port.Id), None)
+        target_lun = next((
+            unit.Lun.Quad for unit
+            in LOGICAL_UNITS
+            if unit.VirtualTargetDeviceId == target_device_id), None)
+        self.assertEqual(target_lun, result['data']['target_lun'])
+
+        self.assertEqual('127.0.0.1:3260', result['data']['target_portal'])
+        self.assertFalse(result['data']['target_discovered'])
+        self.assertEqual(volume['id'], result['data']['volume_id'])
+        self.assertEqual('rw', result['data']['access_mode'])
+
+    def test_initialize_connection_failed_not_found(self):
+        """Connection fails when provider_location is an unknown disk id."""
+        client = test_datacore_driver.CLIENTS[0]
+        driver = self.init_driver(self.setup_default_configuration())
+        volume = test_datacore_driver.VOLUME.copy()
+        volume['provider_location'] = 'wrong_virtual_disk_id'
+        initiator_iqn = PORTS[0].PortName
+        connector = {
+            'host': client.HostName,
+            'initiator': initiator_iqn
+        }
+        self.assertRaises(cinder_exception.VolumeDriverException,
+                          driver.initialize_connection,
+                          volume,
+                          connector)
+
+    def test_initialize_connection_failed_target_not_found(self):
+        """Connection fails when every target port is blacklisted."""
+        virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
+        client = test_datacore_driver.CLIENTS[0]
+        config = self.setup_default_configuration()
+        # Disallow all targets so no eligible port remains.
+        config.datacore_iscsi_unallowed_targets = [
+            port.PortName for port in PORTS if port.PortMode == 'Target'
+        ]
+        driver = self.init_driver(config)
+        volume = test_datacore_driver.VOLUME.copy()
+        volume['provider_location'] = virtual_disk.Id
+        initiator_iqn = PORTS[0].PortName
+        connector = {
+            'host': client.HostName,
+            'initiator': initiator_iqn
+        }
+        self.assertRaises(cinder_exception.VolumeDriverException,
+                          driver.initialize_connection,
+                          volume,
+                          connector)
+
+    def test_initialize_connection_failed_on_map(self):
+        """A backend fault during mapping propagates to the caller."""
+        def fail_with_datacore_fault(*args):
+            raise datacore_exception.DataCoreFaultException(
+                reason="General error.")
+
+        (self.mock_client.map_logical_disk
+         .side_effect) = fail_with_datacore_fault
+        self.mock_client.get_logical_units.return_value = []
+        self.mock_client.get_target_domains.return_value = []
+        self.mock_client.get_target_devices.return_value = TARGET_DEVICES
+
+        virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
+        client = test_datacore_driver.CLIENTS[0]
+        driver = self.init_driver(self.setup_default_configuration())
+        volume = test_datacore_driver.VOLUME.copy()
+        volume['provider_location'] = virtual_disk.Id
+        initiator_iqn = PORTS[0].PortName
+        connector = {
+            'host': client.HostName,
+            'initiator': initiator_iqn
+        }
+        self.assertRaises(datacore_exception.DataCoreFaultException,
+                          driver.initialize_connection,
+                          volume,
+                          connector)
+
+    def test_initialize_connection_chap(self):
+        """With CHAP enabled, auth data is included in the connection info."""
+        mock_file_storage = self.mock_object(iscsi.passwd, 'FileStorage')
+        mock_file_storage.return_value = test_datacore_passwd.FakeFileStorage()
+        # Target port with an empty iSCSINodes list so CHAP can be set up.
+        target_port = mock.Mock(
+            Id='target_port_id1',
+            PortType='iSCSI',
+            PortMode='Target',
+            PortName='iqn.2000-08.com.datacore:server-1-1',
+            HostId='server_id1',
+            PresenceStatus='Present',
+            ServerPortProperties=mock.Mock(Role="Frontend",
+                                           Authentication='None'),
+            IScsiPortStateInfo=ISCSI_PORT_STATE_INFO_READY,
+            PortConfigInfo=ISCSI_PORT_CONFIG_INFO,
+            iSCSINodes=mock.Mock(Node=[]))
+        ports = PORTS[:2]
+        ports.append(target_port)
+        self.mock_client.get_ports.return_value = ports
+        self.mock_client.get_logical_units.return_value = []
+        self.mock_client.get_target_domains.return_value = []
+        self.mock_client.get_target_devices.return_value = TARGET_DEVICES
+
+        virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
+        client = test_datacore_driver.CLIENTS[0]
+        config = self.setup_default_configuration()
+        config.datacore_iscsi_chap_enabled = True
+        config.datacore_iscsi_chap_storage = 'fake_file_path'
+        driver = self.init_driver(config)
+        volume = test_datacore_driver.VOLUME.copy()
+        volume['provider_location'] = virtual_disk.Id
+        initiator_iqn = PORTS[0].PortName
+        connector = {
+            'host': client.HostName,
+            'initiator': initiator_iqn
+        }
+        result = driver.initialize_connection(volume, connector)
+        self.assertEqual('iscsi', result['driver_volume_type'])
+
+        target_iqn = [port.PortName for port
+                      in PORTS
+                      if port.PortMode == 'Target']
+        self.assertIn(result['data']['target_iqn'], target_iqn)
+
+        target_iqn = result['data']['target_iqn']
+        target_port = next((
+            port for port
+            in PORTS
+            if port.PortName == target_iqn), None)
+        target_device_id = next((
+            device.Id for device
+            in TARGET_DEVICES
+            if device.TargetPortId == target_port.Id), None)
+        target_lun = next((
+            unit.Lun.Quad for unit
+            in LOGICAL_UNITS
+            if unit.VirtualTargetDeviceId == target_device_id), None)
+        self.assertEqual(target_lun, result['data']['target_lun'])
+
+        self.assertEqual('127.0.0.1:3260', result['data']['target_portal'])
+        self.assertFalse(result['data']['target_discovered'])
+        self.assertEqual(volume['id'], result['data']['volume_id'])
+        self.assertEqual('rw', result['data']['access_mode'])
+        self.assertEqual('CHAP', result['data']['auth_method'])
+        self.assertEqual(initiator_iqn, result['data']['auth_username'])
+        # The generated password is random; only assert it exists.
+        self.assertIsNotNone(result['data']['auth_password'])
+
+    def test_initialize_connection_chap_failed_check(self):
+        """Fails when the target already requires CHAP but it is disabled."""
+        target_port = mock.Mock(
+            __class__=mock.Mock(__name__='ServeriScsiPortData'),
+            Id='target_port_id2',
+            PortType='iSCSI',
+            PortMode='Target',
+            PortName='iqn.2000-08.com.datacore:server-1-2',
+            HostId='server_id1',
+            PresenceStatus='Present',
+            ServerPortProperties=mock.Mock(Role="Frontend",
+                                           Authentication='CHAP'),
+            IScsiPortStateInfo=ISCSI_PORT_STATE_INFO_READY,
+            PortConfigInfo=ISCSI_PORT_CONFIG_INFO)
+        ports = PORTS[:2]
+        ports.append(target_port)
+        self.mock_client.get_ports.return_value = ports
+        self.mock_client.get_target_devices.return_value = TARGET_DEVICES
+        self.mock_client.get_logical_units.return_value = LOGICAL_UNITS
+        self.mock_client.get_target_domains.return_value = []
+
+        virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
+        client = test_datacore_driver.CLIENTS[0]
+        driver = self.init_driver(self.setup_default_configuration())
+        volume = test_datacore_driver.VOLUME.copy()
+        volume['provider_location'] = virtual_disk.Id
+        initiator_iqn = PORTS[0].PortName
+        connector = {
+            'host': client.HostName,
+            'initiator': initiator_iqn
+        }
+        self.assertRaises(cinder_exception.VolumeDriverException,
+                          driver.initialize_connection,
+                          volume,
+                          connector)
+
+    def test_initialize_connection_chap_failed_on_set_port_properties(self):
+        """A backend fault while enabling CHAP on the port propagates."""
+        def fail_with_datacore_fault(*args):
+            raise datacore_exception.DataCoreFaultException(
+                reason="General error.")
+
+        mock_file_storage = self.mock_object(iscsi.passwd, 'FileStorage')
+        mock_file_storage.return_value = test_datacore_passwd.FakeFileStorage()
+        target_port = mock.Mock(
+            __class__=mock.Mock(__name__='ServeriScsiPortData'),
+            Id='target_port_id1',
+            PortType='iSCSI',
+            PortMode='Target',
+            PortName='iqn.2000-08.com.datacore:server-1-1',
+            HostId='server_id1',
+            PresenceStatus='Present',
+            ServerPortProperties=mock.Mock(Role="Frontend",
+                                           Authentication='None'),
+            IScsiPortStateInfo=ISCSI_PORT_STATE_INFO_READY,
+            PortConfigInfo=ISCSI_PORT_CONFIG_INFO,
+            iSCSINodes=mock.Mock(Node=[]))
+        ports = PORTS[:2]
+        ports.append(target_port)
+        self.mock_client.get_ports.return_value = ports
+        (self.mock_client.set_server_port_properties
+         .side_effect) = fail_with_datacore_fault
+        self.mock_client.get_logical_units.return_value = []
+        self.mock_client.get_target_domains.return_value = []
+        self.mock_client.get_target_devices.return_value = TARGET_DEVICES
+
+        virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0]
+        client = test_datacore_driver.CLIENTS[0]
+        config = self.setup_default_configuration()
+        config.datacore_iscsi_chap_enabled = True
+        config.datacore_iscsi_chap_storage = 'fake_file_path'
+        driver = self.init_driver(config)
+        volume = test_datacore_driver.VOLUME.copy()
+        volume['provider_location'] = virtual_disk.Id
+        initiator_iqn = PORTS[0].PortName
+        connector = {
+            'host': client.HostName,
+            'initiator': initiator_iqn
+        }
+        self.assertRaises(datacore_exception.DataCoreFaultException,
+                          driver.initialize_connection,
+                          volume,
+                          connector)
diff --git a/cinder/tests/unit/volume/drivers/datacore/test_datacore_passwd.py b/cinder/tests/unit/volume/drivers/datacore/test_datacore_passwd.py
new file mode 100644
index 00000000000..62978ffcf49
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/datacore/test_datacore_passwd.py
@@ -0,0 +1,283 @@
+# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Unit tests for the password storage."""
+
+import collections
+import json
+import os
+import stat
+
+import mock
+import six
+
+from cinder import test
+from cinder.volume.drivers.datacore import passwd
+
+
class FakeFileStorage(object):
    """In-memory stand-in for ``passwd.FileStorage`` used by the tests.

    Mimics the open/load/save/close protocol of the real file-backed
    storage while keeping all data in a plain dict.
    """

    def __init__(self):
        # Pre-seeded credentials: resource name -> {user name -> password}.
        self._storage = {
            'resource1': {
                'user1': 'resource1-user1',
                'user2': 'resource1-user2',
            },
            'resource2': {
                'user1': 'resource2-user1',
            }
        }

    def open(self):
        # The fake is its own "opened" handle.
        return self

    def load(self):
        # Hand back the live dict so tests can inspect mutations.
        return self._storage

    def save(self, storage):
        # Replace the stored mapping wholesale, as the real storage does.
        self._storage = storage

    def close(self):
        # Nothing to release for the in-memory fake.
        pass
+
+
class PasswordFileStorageTestCase(test.TestCase):
    """Tests for the password storage."""

    def test_get_password(self):
        fake_file_storage = FakeFileStorage()
        passwords = fake_file_storage.load()
        resource = next(iter(passwords))
        user = next(iter(passwords[resource]))
        expected = passwords[resource][user]

        self._mock_file_storage(fake_file_storage)
        password_storage = passwd.PasswordFileStorage('fake_file_path')

        self.assertEqual(expected,
                         password_storage.get_password(resource, user))
        # Resource lookup is case sensitive, so an upper-cased name misses.
        self.assertIsNone(
            password_storage.get_password(resource.upper(), user))

    def test_set_password(self):
        fake_file_storage = FakeFileStorage()
        self._mock_file_storage(fake_file_storage)
        password_storage = passwd.PasswordFileStorage('fake_file_path')

        user = 'user3'
        # First entry extends an existing resource; second creates a new one.
        cases = [
            ('resource2', 'resource2-user3'),
            ('resource3', 'resource3-user3'),
        ]
        for resource, password in cases:
            password_storage.set_password(resource, user, password)
            passwords = fake_file_storage.load()
            self.assertIn(resource, passwords)
            self.assertIn(user, passwords[resource])
            self.assertEqual(password, passwords[resource][user])

    def test_delete_password(self):
        fake_file_storage = FakeFileStorage()
        passwords = fake_file_storage.load()
        resource1, resource2 = 'resource1', 'resource2'
        user1 = next(iter(passwords[resource1]))
        user2 = next(iter(passwords[resource2]))

        self._mock_file_storage(fake_file_storage)
        password_storage = passwd.PasswordFileStorage('fake_file_path')

        # Removing one of several users keeps the resource entry around.
        password_storage.delete_password(resource1, user1)
        passwords = fake_file_storage.load()
        self.assertIn(resource1, passwords)
        self.assertNotIn(user1, passwords[resource1])

        # Removing the last user drops the whole resource entry.
        password_storage.delete_password(resource2, user2)
        passwords = fake_file_storage.load()
        self.assertNotIn(resource2, passwords)

    def _mock_file_storage(self, fake_file_storage):
        # Route every FileStorage construction inside passwd to the fake.
        self.mock_object(passwd, 'FileStorage',
                         return_value=fake_file_storage)
+
+
class FileStorageTestCase(test.TestCase):
    """Test for the file storage."""

    def test_open(self):
        """An existing file is opened in read/update mode."""
        fake_file_path = 'file_storage.data'
        self.mock_object(passwd.os.path, 'isfile', return_value=True)
        self.mock_object(passwd.os.path, 'isdir', return_value=True)
        mock_open = self.mock_object(passwd, 'open', mock.mock_open())

        file_storage = passwd.FileStorage(fake_file_path)
        file_storage.open()
        mock_open.assert_called_once_with(fake_file_path, 'r+')

    def test_open_not_existing(self):
        """A missing file (and directory) is created with tight permissions."""
        fake_file_path = '/fake_path/file_storage.data'
        fake_dir_name = os.path.dirname(fake_file_path)
        # Directory: rwx for owner, rx for group; file: rw for owner only.
        mock_chmod_calls = [
            mock.call(fake_dir_name,
                      stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP),
            mock.call(fake_file_path, stat.S_IRUSR | stat.S_IWUSR)
        ]
        # The file is first created ('w'), then reopened for use ('r+').
        mock_open_calls = [
            mock.call(fake_file_path, 'w'),
            mock.call(fake_file_path, 'r+'),
        ]

        self.mock_object(passwd.os.path, 'isfile', return_value=False)
        self.mock_object(passwd.os.path, 'isdir', return_value=False)
        mock_makedirs = self.mock_object(passwd.os, 'makedirs')
        mock_chmod = self.mock_object(passwd.os, 'chmod')
        mock_open = self.mock_object(
            passwd, 'open', return_value=mock.MagicMock())

        file_storage = passwd.FileStorage(fake_file_path)
        file_storage.open()
        mock_makedirs.assert_called_with(fake_dir_name)
        mock_chmod.assert_has_calls(mock_chmod_calls, any_order=True)
        mock_open.assert_has_calls(mock_open_calls, any_order=True)

    def test_open_not_closed(self):
        """Reopening an already-open storage closes the previous handle."""
        fake_file_path = 'file_storage.data'
        fake_file = mock.MagicMock()
        mock_open_calls = [
            mock.call(fake_file_path, 'r+'),
            mock.call(fake_file_path, 'r+'),
        ]
        self.mock_object(passwd.os.path, 'isfile', return_value=True)
        self.mock_object(passwd.os.path, 'isdir', return_value=True)
        mock_open = self.mock_object(passwd, 'open', return_value=fake_file)

        file_storage = passwd.FileStorage(fake_file_path)
        file_storage.open()
        file_storage.open()
        mock_open.assert_has_calls(mock_open_calls)
        fake_file.close.assert_called_once_with()

    def test_load(self):
        """Stored JSON content deserializes back to the password dict."""
        passwords = {
            'resource1': {
                'user1': 'resource1-user1',
                'user2': 'resource1-user2',
            },
            'resource2': {
                'user1': 'resource2-user1',
                'user2': 'resource2-user2'
            }
        }
        fake_file_name = 'file_storage.data'
        fake_file_content = json.dumps(passwords)
        fake_file = self._get_fake_file(fake_file_content)
        fake_os_stat = self._get_fake_os_stat(1)

        self._mock_file_open(fake_file, fake_os_stat)

        file_storage = passwd.FileStorage(fake_file_name)
        file_storage.open()
        result = file_storage.load()
        self.assertEqual(passwords, result)

    def test_load_empty_file(self):
        """A zero-length file loads as an empty dict, not a JSON error."""
        fake_file_name = 'file_storage.data'
        fake_file = self._get_fake_file()
        fake_os_stat = self._get_fake_os_stat(0)

        self._mock_file_open(fake_file, fake_os_stat)

        file_storage = passwd.FileStorage(fake_file_name)
        file_storage.open()
        result = file_storage.load()
        expected = {}
        self.assertEqual(expected, result)

    def test_load_malformed_file(self):
        """Valid JSON that is not a dict is rejected."""
        fake_file_name = 'file_storage.data'
        fake_file = self._get_fake_file('[1, 2, 3]')
        fake_os_stat = self._get_fake_os_stat(1)

        self._mock_file_open(fake_file, fake_os_stat)

        file_storage = passwd.FileStorage(fake_file_name)
        file_storage.open()
        self.assertRaises(ValueError, file_storage.load)

    def test_save(self):
        """Saving serializes the password dict as JSON into the file."""
        fake_file_name = 'file_storage.data'
        fake_file = self._get_fake_file('')
        fake_os_stat = self._get_fake_os_stat(0)

        self._mock_file_open(fake_file, fake_os_stat)

        passwords = {
            'resource1': {
                'user1': 'resource1-user1',
                'user2': 'resource1-user2',
            },
            'resource2': {
                'user1': 'resource2-user1',
                'user2': 'resource2-user2'
            }
        }
        fake_file_content = json.dumps(passwords)
        file_storage = passwd.FileStorage(fake_file_name)
        file_storage.open()
        file_storage.save(passwords)
        self.assertEqual(fake_file_content, fake_file.getvalue())

    def test_save_not_dictionary(self):
        """Only dict payloads are accepted by save()."""
        fake_file_name = 'file_storage.data'
        fake_file = self._get_fake_file('')
        fake_os_stat = self._get_fake_os_stat(0)

        self._mock_file_open(fake_file, fake_os_stat)

        file_storage = passwd.FileStorage(fake_file_name)
        file_storage.open()
        self.assertRaises(TypeError, file_storage.save, [])

    def test_close(self):
        """Closing the storage closes the underlying file handle."""
        fake_file_name = 'file_storage.data'
        fake_file = mock.MagicMock()

        self.mock_object(passwd.os.path, 'isfile', return_value=True)
        self.mock_object(passwd.os.path, 'isdir', return_value=True)
        self.mock_object(passwd, 'open', return_value=fake_file)

        file_storage = passwd.FileStorage(fake_file_name)
        file_storage.open()
        file_storage.close()
        fake_file.close.assert_called_once_with()

    def _mock_file_open(self, fake_file, fake_os_stat):
        # Common mocks for tests that exercise load()/save(): the path
        # exists, os.stat reports the given size, open yields fake_file.
        self.mock_object(passwd.os.path, 'isfile', return_value=True)
        self.mock_object(passwd.os.path, 'isdir', return_value=True)
        self.mock_object(passwd.os, 'stat', return_value=fake_os_stat)
        self.mock_object(passwd, 'open', return_value=fake_file)

    @staticmethod
    def _get_fake_file(content=None):
        """Return an in-memory text file seeded with *content*."""
        return six.StringIO(content)

    @staticmethod
    def _get_fake_os_stat(st_size):
        """Build a stat-like value exposing only ``st_size``.

        Bug fix: the original assigned ``st_size`` onto the namedtuple
        *class* and returned the class itself, which satisfied attribute
        access only by accident. Instantiate the namedtuple instead.
        """
        fake_os_stat = collections.namedtuple('fake_os_stat', ['st_size'])
        return fake_os_stat(st_size=st_size)
diff --git a/cinder/tests/unit/volume/drivers/datacore/test_datacore_utils.py b/cinder/tests/unit/volume/drivers/datacore/test_datacore_utils.py
new file mode 100644
index 00000000000..e8c2c597c8e
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/datacore/test_datacore_utils.py
@@ -0,0 +1,78 @@
+# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Unit tests for utilities and helper functions."""
+
+from cinder import test
+from cinder.volume.drivers.datacore import utils
+
+
class GenericUtilsTestCase(test.TestCase):
    """Tests for the generic utilities and helper functions."""

    @staticmethod
    def _make_disks():
        """Return the four-disk fixture shared by the lookup tests."""
        disk_a = {'id': 'disk-a', 'type': 'Single', 'size': 5}
        disk_b = {'id': 'disk-b', 'type': 'Single', 'size': 1}
        disk_c = {'id': 'disk-c', 'type': 'Mirrored', 'size': 5}
        disk_d = {'id': 'disk-d', 'type': 'Single', 'size': 10}
        return disk_a, disk_b, disk_c, disk_d

    def test_build_network_address(self):
        port = 3498
        # IPv6 literals must be bracketed; IPv4 and hostnames must not be.
        cases = [
            ('127.0.0.1', '127.0.0.1:3498'),
            ('::1', '[::1]:3498'),
            ('localhost', 'localhost:3498'),
        ]
        for address, expected in cases:
            self.assertEqual(expected,
                             utils.build_network_address(address, port))

    def test_get_first(self):
        disk_a, disk_b, disk_c, disk_d = self._make_disks()
        test_source = [disk_a, disk_b, disk_c, disk_d]

        first = utils.get_first(lambda item: item['id'] == 'disk-c',
                                test_source)
        self.assertEqual(disk_c, first)

        # No match propagates StopIteration to the caller.
        self.assertRaises(StopIteration,
                          utils.get_first,
                          lambda item: item['type'] == 'Dual',
                          test_source)

    def test_get_first_or_default(self):
        disk_a, disk_b, disk_c, disk_d = self._make_disks()
        test_source = [disk_a, disk_b, disk_c, disk_d]

        first = utils.get_first_or_default(lambda item: item['size'] == 1,
                                           test_source,
                                           None)
        self.assertEqual(disk_b, first)

        # No match yields the supplied default instead of raising.
        default = utils.get_first_or_default(lambda item: item['size'] == 15,
                                             test_source,
                                             None)
        self.assertIsNone(default)

    def test_get_distinct_by(self):
        disk_a, disk_b, disk_c, disk_d = self._make_disks()
        test_source = [disk_a, disk_b, disk_c, disk_d]

        # One representative per distinct key, in first-seen order.
        distinct_values = utils.get_distinct_by(lambda item: item['type'],
                                                test_source)
        self.assertEqual([disk_a, disk_c], distinct_values)
diff --git a/cinder/volume/drivers/datacore/__init__.py b/cinder/volume/drivers/datacore/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/cinder/volume/drivers/datacore/api.py b/cinder/volume/drivers/datacore/api.py
new file mode 100644
index 00000000000..e0bc028bfbf
--- /dev/null
+++ b/cinder/volume/drivers/datacore/api.py
@@ -0,0 +1,1062 @@
+# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Classes to invoke DataCore SANsymphony API."""
+
+import copy
+import sys
+import uuid
+
+from oslo_log import log as logging
+from oslo_utils import excutils
+from oslo_utils import importutils
+import retrying
+import six
+import socket
+import suds
+from suds import client as suds_client
+from suds import plugin
+from suds.sax import attribute
+from suds.sax import element
+from suds import wsdl
+from suds import wsse
+from suds import xsd
+
+from cinder.i18n import _
+from cinder import utils as cinder_utils
+from cinder.volume.drivers.datacore import exception as datacore_exceptions
+from cinder.volume.drivers.datacore import utils as datacore_utils
+
+websocket = importutils.try_import('websocket')
+
+
+LOG = logging.getLogger(__name__)
+
+
class FaultDefinitionsFilter(plugin.DocumentPlugin):
    """Plugin to process the DataCore API WSDL document.

    The document plugin removes fault definitions for callback operations
    from the DataCore API WSDL.
    """

    def parsed(self, context):
        # suds invokes this hook after the WSDL XML has been parsed but
        # before it is interpreted, so elements can still be pruned.
        document = context.document
        tns = self._get_tns(document)

        # Qualified names of every <message> actually defined in this
        # document; faults referencing anything else are dangling.
        message_qrefs = set()
        for message in self._get_wsdl_messages(document):
            message_qrefs.add((message.get('name'), tns[1]))

        bindings = self._get_wsdl_operation_bindings(document)

        for port_type in self._get_wsdl_port_types(document):
            for operation in self._get_wsdl_operations(port_type):
                self._filter_faults(
                    document, operation, bindings, message_qrefs, tns)

    @staticmethod
    def _get_tns(document):
        # Return the (prefix, namespace) pair for the document's target
        # namespace, defaulting the prefix to 'tns'.
        target_namespace = document.get('targetNamespace')
        prefix = document.findPrefix(target_namespace) or 'tns'
        return prefix, target_namespace

    @staticmethod
    def _get_wsdl_port_types(document):
        # All <portType> children in the WSDL namespace.
        return document.getChildren('portType', wsdl.wsdlns)

    @staticmethod
    def _get_wsdl_operations(port_type):
        # All <operation> children of a port type.
        return port_type.getChildren('operation', wsdl.wsdlns)

    @staticmethod
    def _get_wsdl_messages(document):
        # All top-level <message> definitions.
        return document.getChildren('message', wsdl.wsdlns)

    @staticmethod
    def _get_wsdl_operation_bindings(document):
        # One dict per <binding>: operation name -> binding element.
        bindings = []
        for binding in document.getChildren('binding', wsdl.wsdlns):
            operations = {}
            for operation in binding.getChildren('operation', wsdl.wsdlns):
                operations[operation.get('name')] = operation
            bindings.append(operations)
        return bindings

    @staticmethod
    def _filter_faults(document, operation, operation_bindings,
                       message_qrefs, tns):
        # Collect faults whose message is not defined in this document
        # and remove them from the port type operation.
        filtered_faults = {}
        for fault in operation.getChildren('fault', wsdl.wsdlns):
            fault_message = fault.get('message')
            qref = xsd.qualify(fault_message, document, tns)
            if qref not in message_qrefs:
                filtered_faults[fault.get('name')] = fault
        for fault in filtered_faults.values():
            operation.remove(fault)
        # Drop the matching fault elements from every binding as well, so
        # the port types and bindings stay mutually consistent.
        if filtered_faults:
            for binding in operation_bindings:
                filtered_binding_faults = []
                faults = binding[operation.get('name')].getChildren(
                    'fault', wsdl.wsdlns)
                for binding_fault in faults:
                    if binding_fault.get('name') in filtered_faults:
                        filtered_binding_faults.append(binding_fault)
                for binding_fault in filtered_binding_faults:
                    binding[operation.get('name')].remove(binding_fault)
+
+
class DataCoreClient(object):
    """DataCore SANsymphony client.

    Thin wrapper over the DataCore server group API: SOAP requests are
    built with suds and exchanged over a WebSocket channel.
    """

    # Seconds to wait between retries of a failed storage-services call.
    API_RETRY_INTERVAL = 10

    # TCP port the DataCore executive service listens on.
    DATACORE_EXECUTIVE_PORT = '3794'

    # Service paths and the suds binding names used to invoke them.
    STORAGE_SERVICES = 'IStorageServices'
    STORAGE_SERVICES_BINDING = 'CustomBinding_IStorageServices'

    EXECUTIVE_SERVICE = 'IExecutiveServiceEx'
    EXECUTIVE_SERVICE_BINDING = 'CustomBinding_IExecutiveServiceEx'

    # WS-Addressing namespace and constants used to build SOAP headers.
    NS_WSA = ('wsa', 'http://www.w3.org/2005/08/addressing')
    WSA_ANONYMOUS = 'http://www.w3.org/2005/08/addressing/anonymous'
    MUST_UNDERSTAND = attribute.Attribute('SOAP-ENV:mustUnderstand', '1')

    # Namespaces that are defined within DataCore API WSDL
    NS_DATACORE_EXECUTIVE = ('http://schemas.datacontract.org/2004/07/'
                             'DataCore.Executive')
    NS_DATACORE_EXECUTIVE_SCSI = ('http://schemas.datacontract.org/2004/07/'
                                  'DataCore.Executive.Scsi')
    NS_DATACORE_EXECUTIVE_ISCSI = ('http://schemas.datacontract.org/2004/07/'
                                   'DataCore.Executive.iSCSI')
    NS_SERIALIZATION_ARRAYS = ('http://schemas.microsoft.com/2003/10/'
                               'Serialization/Arrays')

    # Fully qualified names of objects that are defined within
    # DataCore API WSDL
    O_ACCESS_TOKEN = '{%s}AccessToken' % NS_DATACORE_EXECUTIVE_ISCSI
    O_ARRAY_OF_PERFORMANCE_TYPE = ('{%s}ArrayOfPerformanceType'
                                   % NS_DATACORE_EXECUTIVE)
    O_ARRAY_OF_STRING = '{%s}ArrayOfstring' % NS_SERIALIZATION_ARRAYS
    O_CLIENT_MACHINE_TYPE = '{%s}ClientMachineType' % NS_DATACORE_EXECUTIVE
    O_DATA_SIZE = '{%s}DataSize' % NS_DATACORE_EXECUTIVE
    O_LOGICAL_DISK_ROLE = '{%s}LogicalDiskRole' % NS_DATACORE_EXECUTIVE
    O_LOGICAL_UNIT_TYPE = '{%s}LogicalUnitType' % NS_DATACORE_EXECUTIVE
    O_MIRROR_RECOVERY_PRIORITY = ('{%s}MirrorRecoveryPriority'
                                  % NS_DATACORE_EXECUTIVE)
    O_PATH_POLICY = '{%s}PathPolicy' % NS_DATACORE_EXECUTIVE
    O_PERFORMANCE_TYPE = '{%s}PerformanceType' % NS_DATACORE_EXECUTIVE
    O_POOL_VOLUME_TYPE = '{%s}PoolVolumeType' % NS_DATACORE_EXECUTIVE
    O_SNAPSHOT_TYPE = '{%s}SnapshotType' % NS_DATACORE_EXECUTIVE
    O_SCSI_MODE = '{%s}ScsiMode' % NS_DATACORE_EXECUTIVE_SCSI
    O_SCSI_PORT_DATA = '{%s}ScsiPortData' % NS_DATACORE_EXECUTIVE
    O_SCSI_PORT_NEXUS_DATA = '{%s}ScsiPortNexusData' % NS_DATACORE_EXECUTIVE
    O_SCSI_PORT_TYPE = '{%s}ScsiPortType' % NS_DATACORE_EXECUTIVE_SCSI
    O_VIRTUAL_DISK_DATA = '{%s}VirtualDiskData' % NS_DATACORE_EXECUTIVE
    O_VIRTUAL_DISK_STATUS = '{%s}VirtualDiskStatus' % NS_DATACORE_EXECUTIVE
    O_VIRTUAL_DISK_SUB_TYPE = '{%s}VirtualDiskSubType' % NS_DATACORE_EXECUTIVE
    O_VIRTUAL_DISK_TYPE = '{%s}VirtualDiskType' % NS_DATACORE_EXECUTIVE
+
    def __init__(self, host, username, password, timeout):
        """Initialize the client and discover the API endpoints.

        :param host: Hostname or IP address of a DataCore server
        :param username: DataCore administrator user name
        :param password: DataCore administrator password
        :param timeout: Timeout, in seconds, for API requests
        :raises DataCoreException: if the optional websocket-client
            module is not installed
        """

        # The WebSocket transport is mandatory for every API call, so
        # fail fast if the optional dependency is missing.
        if websocket is None:
            msg = _("Failed to import websocket-client python module."
                    " Please, ensure the module is installed.")
            raise datacore_exceptions.DataCoreException(msg)

        self.timeout = timeout

        executive_service_net_addr = datacore_utils.build_network_address(
            host, self.DATACORE_EXECUTIVE_PORT)
        executive_service_endpoint = self._build_service_endpoint(
            executive_service_net_addr, self.EXECUTIVE_SERVICE)

        # WS-Security UsernameToken used for both suds clients.
        security_options = wsse.Security()
        username_token = wsse.UsernameToken(username, password)
        security_options.tokens.append(username_token)

        # nosend=True: suds only builds/parses messages; the transport is
        # handled manually over WebSocket in _process_request().
        self._executive_service_client = suds_client.Client(
            executive_service_endpoint['http_endpoint'] + '?singlewsdl',
            nosend=True,
            timeout=self.timeout,
            wsse=security_options,
            plugins=[FaultDefinitionsFilter()])

        # Ask the configured host's executive for the controller node and
        # cache the storage-services endpoint it serves.
        self._update_storage_services_endpoint(executive_service_endpoint)

        storage_services_endpoint = self._get_storage_services_endpoint()

        self._storage_services_client = suds_client.Client(
            storage_services_endpoint['http_endpoint'] + '?singlewsdl',
            nosend=True,
            timeout=self.timeout,
            wsse=security_options,
            plugins=[FaultDefinitionsFilter()])

        # Finally learn all executive nodes for later endpoint failover.
        self._update_executive_service_endpoints(storage_services_endpoint)
+
+    @staticmethod
+    def _get_list_data(obj, attribute_name):
+        return getattr(obj, attribute_name, [])
+
+    @staticmethod
+    def _build_service_endpoint(network_address, path):
+        return {
+            'network_address': network_address,
+            'http_endpoint': '%s://%s/%s' % ('http', network_address, path),
+            'ws_endpoint': '%s://%s/%s' % ('ws', network_address, path),
+        }
+
    @cinder_utils.synchronized('datacore-api-request_context')
    def _get_soap_context(self, service_client, service_binding, method,
                          message_id, *args, **kwargs):
        """Build the SOAP request context for one method invocation.

        The suds clients are created with ``nosend=True``, so invoking a
        service method returns a request context (envelope plus reply
        processor) instead of performing I/O. Serialized because the
        WS-Addressing headers are set as options on the shared client
        object before the context is built.

        :param service_client: suds client of the target service
        :param service_binding: Name of the service binding to use
        :param method: API method name
        :param message_id: Unique WS-Addressing message id (URN)
        :return: suds request context
        """

        soap_action = (service_client.wsdl.services[0].port(service_binding)
                       .methods[method].soap.action)

        soap_headers = self._get_soap_headers(soap_action, message_id)

        service_client.set_options(soapheaders=soap_headers)
        context = service_client.service[service_binding][method](
            *args, **kwargs)

        return context
+
    def _get_soap_headers(self, soap_action, message_id):
        """Build the WS-Addressing SOAP headers for a request.

        :param soap_action: SOAPAction value (surrounding quotes removed)
        :param message_id: Unique message id for the MessageID header
        :return: List of suds header elements
        """

        headers = [
            # Action and To carry mustUnderstand='1'.
            element.Element('Action', ns=self.NS_WSA)
            .setText(soap_action.replace('"', ''))
            .append(self.MUST_UNDERSTAND),

            element.Element('To', ns=self.NS_WSA)
            .setText(self.WSA_ANONYMOUS)
            .append(self.MUST_UNDERSTAND),

            element.Element('MessageID', ns=self.NS_WSA)
            .setText(message_id),

            # ReplyTo is the WS-Addressing anonymous address: the reply is
            # read from the same WebSocket channel the request went out on.
            element.Element('ReplyTo', ns=self.NS_WSA)
            .insert(element.Element('Address', ns=self.NS_WSA)
                    .setText(self.WSA_ANONYMOUS)),
        ]
        return headers
+
    def _process_request(self, service_client, service_binding,
                         service_endpoint, method, *args, **kwargs):
        """Send one SOAP request over a WebSocket and parse the reply.

        :param service_client: suds client used to build/parse messages
        :param service_binding: Name of the service binding
        :param service_endpoint: ws:// endpoint to connect to
        :param method: API method name
        :return: Deserialized reply object
        :raises DataCoreConnectionException: on socket/WebSocket errors
        :raises DataCoreFaultException: on a SOAP fault from the server
        """

        message_id = uuid.uuid4().urn

        context = self._get_soap_context(
            service_client, service_binding,
            method, message_id, *args, **kwargs)

        channel = None
        try:
            channel = websocket.create_connection(
                service_endpoint,
                timeout=self.timeout,
                subprotocols=['soap'],
                header=['soap-content-type: text/xml'])
            channel.send(context.envelope)
            response = channel.recv()
            # The reply processor expects bytes; recv() may yield text.
            if isinstance(response, six.text_type):
                response = response.encode('utf-8')
            return context.process_reply(response)
        except (socket.error, websocket.WebSocketException) as e:
            # Re-raise as a driver exception while preserving the original
            # traceback for debugging.
            traceback = sys.exc_info()[2]
            error = datacore_exceptions.DataCoreConnectionException(reason=e)
            six.reraise(datacore_exceptions.DataCoreConnectionException,
                        error,
                        traceback)
        except suds.WebFault as e:
            traceback = sys.exc_info()[2]
            fault = datacore_exceptions.DataCoreFaultException(reason=e)
            six.reraise(datacore_exceptions.DataCoreFaultException,
                        fault,
                        traceback)
        finally:
            # Best-effort close; a failure here must not mask the result
            # or the exception already being propagated.
            if channel and channel.connected:
                try:
                    channel.close()
                except (socket.error, websocket.WebSocketException) as e:
                    LOG.debug("Closing a connection to "
                              "DataCore server failed. %s", e)
+
    def _invoke_storage_services(self, method, *args, **kwargs):
        """Call a storage-services API method with connection retries.

        Connection failures are retried every API_RETRY_INTERVAL seconds
        for at most ``self.timeout`` seconds overall; before each retry
        the cached endpoints are refreshed so the call can fail over to
        the current controller node.

        :param method: Storage services API method name
        :return: Whatever the API method returns
        :raises DataCoreConnectionException: if all retries are exhausted
        """

        @retrying.retry(
            retry_on_exception=lambda e:
                isinstance(e, datacore_exceptions.DataCoreConnectionException),
            wait_fixed=self.API_RETRY_INTERVAL * 1000,
            stop_max_delay=self.timeout * 1000)
        def retry_call():
            storage_services_endpoint = self._get_storage_services_endpoint()
            try:
                result = self._process_request(
                    self._storage_services_client,
                    self.STORAGE_SERVICES_BINDING,
                    storage_services_endpoint['ws_endpoint'],
                    method, *args, **kwargs)
                return result
            except datacore_exceptions.DataCoreConnectionException:
                # Refresh endpoints before the retry decorator re-invokes
                # us, then re-raise so the retry actually happens.
                with excutils.save_and_reraise_exception():
                    self._update_api_endpoints()

        return retry_call()
+
+    def _update_api_endpoints(self):
+        executive_service_endpoints = self._get_executive_service_endpoints()
+        for endpoint in executive_service_endpoints:
+            try:
+                self._update_storage_services_endpoint(endpoint)
+                break
+            except datacore_exceptions.DataCoreConnectionException as e:
+                LOG.warning("Failed to update DataCore Server Group "
+                            "endpoints. %s.", e)
+
+        storage_services_endpoint = self._get_storage_services_endpoint()
+        try:
+            self._update_executive_service_endpoints(
+                storage_services_endpoint)
+        except datacore_exceptions.DataCoreConnectionException as e:
+            LOG.warning("Failed to update DataCore Server Group "
+                        "endpoints. %s.", e)
+
+    @cinder_utils.synchronized('datacore-api-storage_services_endpoint')
+    def _get_storage_services_endpoint(self):
+        if self._storage_services_endpoint:
+            return copy.copy(self._storage_services_endpoint)
+        return None
+
    @cinder_utils.synchronized('datacore-api-storage_services_endpoint')
    def _update_storage_services_endpoint(self, executive_service_endpoint):
        """Resolve and cache the storage-services (controller) endpoint.

        Asks the given executive node for the current controller address
        and rebuilds the cached storage-services endpoint from it.

        :param executive_service_endpoint: Endpoint dict of the executive
            node to query
        :raises DataCoreConnectionException: if no controller address is
            returned
        """

        controller_address = self._process_request(
            self._executive_service_client,
            self.EXECUTIVE_SERVICE_BINDING,
            executive_service_endpoint['ws_endpoint'],
            'GetControllerAddress')

        if not controller_address:
            msg = _("Could not determine controller node.")
            raise datacore_exceptions.DataCoreConnectionException(reason=msg)

        # The address is 'host:port' (or '[ipv6]:port'); keep only the
        # host part and strip any IPv6 brackets before rebuilding it with
        # the executive port.
        controller_host = controller_address.rsplit(':', 1)[0].strip('[]')
        controller_net_addr = datacore_utils.build_network_address(
            controller_host,
            self.DATACORE_EXECUTIVE_PORT)

        self._storage_services_endpoint = self._build_service_endpoint(
            controller_net_addr,
            self.STORAGE_SERVICES)
+
+    @cinder_utils.synchronized('datacore-api-executive_service_endpoints')
+    def _get_executive_service_endpoints(self):
+        if self._executive_service_endpoints:
+            return self._executive_service_endpoints[:]
+        return []
+
    @cinder_utils.synchronized('datacore-api-executive_service_endpoints')
    def _update_executive_service_endpoints(self, storage_services_endpoint):
        """Discover and cache the endpoints of all executive nodes.

        :param storage_services_endpoint: Controller endpoint dict to
            query for the node list
        :raises DataCoreConnectionException: if no nodes are returned
        """

        endpoints = []
        nodes = self._get_list_data(
            self._process_request(self._storage_services_client,
                                  self.STORAGE_SERVICES_BINDING,
                                  storage_services_endpoint['ws_endpoint'],
                                  'GetNodes'),
            'RegionNodeData')

        if not nodes:
            msg = _("Could not determine executive nodes.")
            raise datacore_exceptions.DataCoreConnectionException(reason=msg)

        for node in nodes:
            # HostAddress is 'host:port' (or '[ipv6]:port'); strip the
            # port and brackets, then rebuild with the executive port.
            host = node.HostAddress.rsplit(':', 1)[0].strip('[]')
            endpoint = self._build_service_endpoint(
                datacore_utils.build_network_address(
                    host, self.DATACORE_EXECUTIVE_PORT),
                self.EXECUTIVE_SERVICE)
            endpoints.append(endpoint)

        self._executive_service_endpoints = endpoints
+
+    def get_server_groups(self):
+        """Get all the server groups in the configuration.
+
+        :return: A list of server group data.
+        """
+
+        return self._get_list_data(
+            self._invoke_storage_services('GetServerGroups'),
+            'ServerHostGroupData')
+
+    def get_servers(self):
+        """Get all the server hosts in the configuration.
+
+        :return: A list of server host data
+        """
+
+        return self._get_list_data(
+            self._invoke_storage_services('GetServers'),
+            'ServerHostData')
+
+    def get_disk_pools(self):
+        """Get all the pools in the server group.
+
+        :return: A list of disk pool data
+        """
+
+        return self._get_list_data(
+            self._invoke_storage_services('GetDiskPools'),
+            'DiskPoolData')
+
+    def get_logical_disks(self):
+        """Get all the logical disks defined in the system.
+
+        :return: A list of logical disks
+        """
+
+        return self._get_list_data(
+            self._invoke_storage_services('GetLogicalDisks'),
+            'LogicalDiskData')
+
+    def create_pool_logical_disk(self, pool_id, pool_volume_type, size,
+                                 min_quota=None, max_quota=None):
+        """Create the pool logical disk.
+
+        :param pool_id: Pool id
+        :param pool_volume_type: Type, either striped or spanned
+        :param size: Size
+        :param min_quota: Min quota
+        :param max_quota: Max quota
+        :return: New logical disk data
+        """
+
+        volume_type = getattr(self._storage_services_client.factory
+                              .create(self.O_POOL_VOLUME_TYPE),
+                              pool_volume_type)
+
+        data_size = (self._storage_services_client.factory
+                     .create(self.O_DATA_SIZE))
+        data_size.Value = size
+
+        data_size_min_quota = None
+        if min_quota:
+            data_size_min_quota = (self._storage_services_client.factory
+                                   .create(self.O_DATA_SIZE))
+            data_size_min_quota.Value = min_quota
+
+        data_size_max_quota = None
+        if max_quota:
+            data_size_max_quota = (self._storage_services_client.factory
+                                   .create(self.O_DATA_SIZE))
+            data_size_max_quota.Value = max_quota
+
+        return self._invoke_storage_services('CreatePoolLogicalDisk',
+                                             poolId=pool_id,
+                                             type=volume_type,
+                                             size=data_size,
+                                             minQuota=data_size_min_quota,
+                                             maxQuota=data_size_max_quota)
+
+    def delete_logical_disk(self, logical_disk_id):
+        """Delete the logical disk.
+
+        :param logical_disk_id: Logical disk id
+        """
+
+        self._invoke_storage_services('DeleteLogicalDisk',
+                                      logicalDiskId=logical_disk_id)
+
+    def get_logical_disk_chunk_allocation_map(self, logical_disk_id):
+        """Get the logical disk chunk allocation map.
+
+        The logical disk allocation map details all the physical disk chunks
+        that are currently allocated to this logical disk.
+
+        :param logical_disk_id: Logical disk id
+        :return: A list of member allocation maps, restricted to chunks
+                 allocated on to this logical disk
+        """
+
+        return self._get_list_data(
+            self._invoke_storage_services('GetLogicalDiskChunkAllocationMap',
+                                          logicalDiskId=logical_disk_id),
+            'MemberAllocationInfoData')
+
+    def get_next_virtual_disk_alias(self, base_alias):
+        """Get the next available (unused) virtual disk alias.
+
+        :param base_alias: Base string of the new alias
+        :return: New alias
+        """
+
+        return self._invoke_storage_services('GetNextVirtualDiskAlias',
+                                             baseAlias=base_alias)
+
+    def get_virtual_disks(self):
+        """Get all the virtual disks in the configuration.
+
+        :return: A list of virtual disk's data
+        """
+
+        return self._get_list_data(
+            self._invoke_storage_services('GetVirtualDisks'),
+            'VirtualDiskData')
+
+    def build_virtual_disk_data(self, virtual_disk_alias, virtual_disk_type,
+                                size, description, storage_profile_id):
+        """Create VirtualDiskData object.
+
+        :param virtual_disk_alias: User-visible alias of the virtual disk,
+                                   which must be unique
+        :param virtual_disk_type: Virtual disk type
+        :param size: Virtual disk size
+        :param description: A user-readable description of the virtual disk
+        :param storage_profile_id: Virtual disk storage profile
+        :return: VirtualDiskData object
+        """
+
+        vd_data = (self._storage_services_client.factory
+                   .create(self.O_VIRTUAL_DISK_DATA))
+        vd_data.Size = (self._storage_services_client.factory
+                        .create(self.O_DATA_SIZE))
+        vd_data.Size.Value = size
+        vd_data.Alias = virtual_disk_alias
+        vd_data.Description = description
+        vd_data.Type = getattr(self._storage_services_client.factory
+                               .create(self.O_VIRTUAL_DISK_TYPE),
+                               virtual_disk_type)
+        vd_data.SubType = getattr(self._storage_services_client.factory
+                                  .create(self.O_VIRTUAL_DISK_SUB_TYPE),
+                                  'Standard')
+        vd_data.DiskStatus = getattr(self._storage_services_client.factory
+                                     .create(self.O_VIRTUAL_DISK_STATUS),
+                                     'Online')
+        vd_data.RecoveryPriority = getattr(
+            self._storage_services_client.factory
+            .create(self.O_MIRROR_RECOVERY_PRIORITY),
+            'Unset')
+        vd_data.StorageProfileId = storage_profile_id
+
+        return vd_data
+
+    def create_virtual_disk_ex2(self, virtual_disk_data, first_logical_disk_id,
+                                second_logical_disk_id, add_redundancy):
+        """Create a virtual disk specifying both logical disks.
+
+        :param virtual_disk_data: Virtual disk's properties
+        :param first_logical_disk_id: Id of the logical disk to use
+        :param second_logical_disk_id: Id of the second logical disk to use
+        :param add_redundancy: If True, the mirror has redundant mirror paths
+        :return: New virtual disk's data
+        """
+
+        return self._invoke_storage_services(
+            'CreateVirtualDiskEx2',
+            virtualDisk=virtual_disk_data,
+            firstLogicalDiskId=first_logical_disk_id,
+            secondLogicalDiskId=second_logical_disk_id,
+            addRedundancy=add_redundancy)
+
+    def set_virtual_disk_size(self, virtual_disk_id, size):
+        """Change the size of a virtual disk.
+
+        :param virtual_disk_id: Id of the virtual disk
+        :param size: New size
+        :return: Virtual disk's data
+        """
+
+        data_size = (self._storage_services_client.factory
+                     .create(self.O_DATA_SIZE))
+        data_size.Value = size
+
+        return self._invoke_storage_services('SetVirtualDiskSize',
+                                             virtualDiskId=virtual_disk_id,
+                                             size=data_size)
+
+    def delete_virtual_disk(self, virtual_disk_id, delete_logical_disks):
+        """Delete a virtual disk.
+
+        :param virtual_disk_id: Id of the virtual disk
+        :param delete_logical_disks: If True, delete the associated
+                                     logical disks
+        """
+
+        self._invoke_storage_services('DeleteVirtualDisk',
+                                      virtualDiskId=virtual_disk_id,
+                                      deleteLogicalDisks=delete_logical_disks)
+
+    def serve_virtual_disks_to_host(self, host_id, virtual_disks):
+        """Serve multiple virtual disks to a specified host.
+
+        :param host_id: Id of the host machine
+        :param virtual_disks: A list of virtual disks to serve
+        :return: A list of the virtual disks actually served to the host
+        """
+
+        virtual_disk_array = (self._storage_services_client.factory
+                              .create(self.O_ARRAY_OF_STRING))
+        virtual_disk_array.string = virtual_disks
+
+        return self._get_list_data(
+            self._invoke_storage_services('ServeVirtualDisksToHost',
+                                          hostId=host_id,
+                                          virtualDisks=virtual_disk_array),
+            'VirtualLogicalUnitData')
+
+    def unserve_virtual_disks_from_host(self, host_id, virtual_disks):
+        """Unserve multiple virtual disks from a specified host.
+
+        :param host_id: Id of the host machine
+        :param virtual_disks: A list of virtual disks to unserve
+        """
+
+        virtual_disk_array = (self._storage_services_client.factory
+                              .create(self.O_ARRAY_OF_STRING))
+        virtual_disk_array.string = virtual_disks
+
+        self._invoke_storage_services('UnserveVirtualDisksFromHost',
+                                      hostId=host_id,
+                                      virtualDisks=virtual_disk_array)
+
+    def unserve_virtual_disks_from_port(self, port_id, virtual_disks):
+        """Unserve multiple virtual disks from a specified initiator port.
+
+        :param port_id: Id of the initiator port
+        :param virtual_disks: A list of virtual disks to unserve
+        """
+
+        virtual_disk_array = (self._storage_services_client.factory
+                              .create(self.O_ARRAY_OF_STRING))
+        virtual_disk_array.string = virtual_disks
+
+        self._invoke_storage_services('UnserveVirtualDisksFromPort',
+                                      portId=port_id,
+                                      virtualDisks=virtual_disk_array)
+
+    def bind_logical_disk(self, virtual_disk_id, logical_disk_id, role,
+                          create_mirror_mappings, create_client_mappings,
+                          add_redundancy):
+        """Bind (add) a logical disk to a virtual disk.
+
+        :param virtual_disk_id: Id of the virtual disk to bind to
+        :param logical_disk_id: Id of the logical disk being bound
+        :param role: logical disk's role
+        :param create_mirror_mappings: If True, automatically create the
+                                       mirror mappings to this disk, assuming
+                                       there is already another logical disk
+                                       bound
+        :param create_client_mappings: If True, automatically create mappings
+                                       from mapped hosts to the new disk
+        :param add_redundancy: If True, the mirror has redundant mirror paths
+        :return: Updated virtual disk data
+        """
+
+        logical_disk_role = getattr(self._storage_services_client.factory
+                                    .create(self.O_LOGICAL_DISK_ROLE),
+                                    role)
+
+        return self._invoke_storage_services(
+            'BindLogicalDisk',
+            virtualDiskId=virtual_disk_id,
+            logicalDiskId=logical_disk_id,
+            role=logical_disk_role,
+            createMirrorMappings=create_mirror_mappings,
+            createClientMappings=create_client_mappings,
+            addRedundancy=add_redundancy)
+
+    def get_snapshots(self):
+        """Get all the snapshots on all the servers in the region.
+
+        :return: A list of snapshot data.
+        """
+
+        return self._get_list_data(
+            self._invoke_storage_services('GetSnapshots'),
+            'SnapshotData')
+
+    def create_snapshot(self, virtual_disk_id, name, description,
+                        destination_pool_id, snapshot_type,
+                        duplicate_disk_id, storage_profile_id):
+        """Create a snapshot relationship.
+
+        :param virtual_disk_id: Virtual disk id
+        :param name: Name of snapshot
+        :param description: Description
+        :param destination_pool_id: Destination pool id
+        :param snapshot_type: Type of snapshot
+        :param duplicate_disk_id: If set to True then the destination virtual
+                                  disk's SCSI id will be a duplicate of the
+                                  source's
+        :param storage_profile_id: Specifies the destination virtual disk's
+                                   storage profile
+        :return: New snapshot data
+        """
+
+        st_type = getattr(self._storage_services_client.factory
+                          .create(self.O_SNAPSHOT_TYPE),
+                          snapshot_type)
+
+        return self._invoke_storage_services(
+            'CreateSnapshot',
+            virtualDiskId=virtual_disk_id,
+            name=name,
+            description=description,
+            destinationPoolId=destination_pool_id,
+            type=st_type,
+            duplicateDiskId=duplicate_disk_id,
+            storageProfileId=storage_profile_id)
+
+    def delete_snapshot(self, snapshot_id):
+        """Delete the snapshot.
+
+        :param snapshot_id: Snapshot id
+        """
+
+        self._invoke_storage_services('DeleteSnapshot', snapshotId=snapshot_id)
+
+    def get_storage_profiles(self):
+        """Get all the defined storage profiles.
+
+        :return: A list of storage profiles
+        """
+
+        return self._get_list_data(
+            self._invoke_storage_services('GetStorageProfiles'),
+            'StorageProfileData')
+
+    def designate_map_store(self, pool_id):
+        """Designate which pool the snapshot mapstore will be allocated from.
+
+        :param pool_id: Pool id
+        :return: Updated server host data, which includes the mapstore pool id
+        """
+
+        return self._invoke_storage_services('DesignateMapStore',
+                                             poolId=pool_id)
+
+    def get_performance_by_type(self, performance_types):
+        """Get performance data for specific types of performance counters.
+
+        :param performance_types: A list of performance counter types
+        :return: A list of performance data points
+        """
+
+        prfm_type_array = (self._storage_services_client.factory
+                           .create(self.O_ARRAY_OF_PERFORMANCE_TYPE))
+        prfm_type_array.PerformanceType = list(
+            getattr(self._storage_services_client.factory
+                    .create(self.O_PERFORMANCE_TYPE),
+                    performance_type)
+            for performance_type in performance_types)
+
+        return self._get_list_data(
+            self._invoke_storage_services('GetPerformanceByType',
+                                          types=prfm_type_array),
+            'CollectionPointData')
+
+    def get_ports(self):
+        """Get all ports in the configuration.
+
+        :return: A list of SCSI ports
+        """
+
+        return self._get_list_data(
+            self._invoke_storage_services('GetPorts'),
+            'ScsiPortData')
+
+    def build_scsi_port_data(self, host_id, port_name, port_mode, port_type):
+        """Create ScsiPortData object that represents SCSI port, of any type.
+
+        :param host_id: Id of the port's host computer
+        :param port_name: Unique name of the port.
+        :param port_mode: Mode of port: initiator or target
+        :param port_type: Type of port, Fc, iSCSI or loopback
+        :return: ScsiPortData object
+        """
+
+        scsi_port_data = (self._storage_services_client.factory
+                          .create(self.O_SCSI_PORT_DATA))
+        scsi_port_data.HostId = host_id
+        scsi_port_data.PortName = port_name
+        scsi_port_data.PortMode = getattr(self._storage_services_client.factory
+                                          .create(self.O_SCSI_MODE),
+                                          port_mode)
+        scsi_port_data.PortType = getattr(self._storage_services_client.factory
+                                          .create(self.O_SCSI_PORT_TYPE),
+                                          port_type)
+
+        return scsi_port_data
+
+    def register_port(self, scsi_port_data):
+        """Register a port in the configuration.
+
+        :param scsi_port_data: Port data
+        :return: Updated port data
+        """
+
+        return self._invoke_storage_services('RegisterPort',
+                                             port=scsi_port_data)
+
+    def assign_port(self, client_id, port_id):
+        """Assign a port to a client.
+
+        :param client_id: Client id
+        :param port_id: Port id
+        :return: Updated port data,
+                 which will now have its host id set to the client id
+        """
+
+        return self._invoke_storage_services('AssignPort',
+                                             clientId=client_id,
+                                             portId=port_id)
+
+    def set_server_port_properties(self, port_id, properties):
+        """Set a server port's properties.
+
+        :param port_id: Port id
+        :param properties: New properties
+        :return: Updated port data
+        """
+
+        return self._invoke_storage_services('SetServerPortProperties',
+                                             portId=port_id,
+                                             properties=properties)
+
+    def build_access_token(self, initiator_node_name, initiator_username,
+                           initiator_password, mutual_authentication,
+                           target_username, target_password):
+        """Create an AccessToken object.
+
+        :param initiator_node_name: Initiator node name
+        :param initiator_username: Initiator user name
+        :param initiator_password: Initiator password
+        :param mutual_authentication: If True the target and the initiator
+                                      authenticate each other.
+                                      A separate secret is set for each target
+                                      and for each initiator in the storage
+                                      area network (SAN).
+        :param target_username: Target user name
+        :param target_password: Target password
+        :return: AccessToken object
+        """
+
+        access_token = (self._storage_services_client.factory
+                        .create(self.O_ACCESS_TOKEN))
+        access_token.InitiatorNodeName = initiator_node_name
+        access_token.InitiatorUsername = initiator_username
+        access_token.InitiatorPassword = initiator_password
+        access_token.MutualAuthentication = mutual_authentication
+        access_token.TargetUsername = target_username
+        access_token.TargetPassword = target_password
+
+        return access_token
+
+    def set_access_token(self, iscsi_port_id, access_token):
+        """Set the access token.
+
+        The access token allows access to a specific network node
+        from a specific iSCSI port.
+
+        :param iscsi_port_id: Id of the initiator iSCSI port
+        :param access_token: Access token to be validated
+        :return: Port data
+        """
+
+        return self._invoke_storage_services('SetAccessToken',
+                                             iScsiPortId=iscsi_port_id,
+                                             inputToken=access_token)
+
+    def get_clients(self):
+        """Get all the clients in the configuration.
+
+        :return: A list of client data
+        """
+
+        return self._get_list_data(
+            self._invoke_storage_services('GetClients'),
+            'ClientHostData')
+
+    def register_client(self, host_name, description, machine_type,
+                        mode, preferred_server_ids):
+        """Register the client, creating a client object in the configuration.
+
+        :param host_name: Name of the client
+        :param description: Description
+        :param machine_type: Type of client
+        :param mode: Path policy mode of the client
+        :param preferred_server_ids: Preferred server ids
+        :return: New client data
+        """
+
+        client_machine_type = getattr(self._storage_services_client.factory
+                                      .create(self.O_CLIENT_MACHINE_TYPE),
+                                      machine_type)
+        client_mode = getattr(self._storage_services_client.factory
+                              .create(self.O_PATH_POLICY),
+                              mode)
+
+        return self._invoke_storage_services(
+            'RegisterClient',
+            hostName=host_name,
+            description=description,
+            type=client_machine_type,
+            mode=client_mode,
+            preferredServerIds=preferred_server_ids)
+
+    def set_client_capabilities(self, client_id, mpio, alua):
+        """Set the client capabilities for MPIO and ALUA.
+
+        :param client_id: Client id
+        :param mpio: If set to True then MPIO-capable
+        :param alua: If set to True then ALUA-capable
+        :return: Updated client data
+        """
+
+        return self._invoke_storage_services('SetClientCapabilities',
+                                             clientId=client_id,
+                                             mpio=mpio,
+                                             alua=alua)
+
+    def get_target_domains(self):
+        """Get all the target domains in the configuration.
+
+        :return: A list of target domains
+        """
+
+        return self._get_list_data(
+            self._invoke_storage_services('GetTargetDomains'),
+            'VirtualTargetDomainData')
+
+    def create_target_domain(self, initiator_host_id, target_host_id):
+        """Create a target domain given a pair of hosts, target and initiator.
+
+        :param initiator_host_id: Id of the initiator host machine
+        :param target_host_id: Id of the target host server
+        :return: New target domain
+        """
+
+        return self._invoke_storage_services('CreateTargetDomain',
+                                             initiatorHostId=initiator_host_id,
+                                             targetHostId=target_host_id)
+
+    def delete_target_domain(self, target_domain_id):
+        """Delete a target domain.
+
+        :param target_domain_id: Target domain id
+        """
+
+        self._invoke_storage_services('DeleteTargetDomain',
+                                      targetDomainId=target_domain_id)
+
+    def get_target_devices(self):
+        """Get all the target devices in the configuration.
+
+        :return: A list of target devices
+        """
+
+        return self._get_list_data(
+            self._invoke_storage_services('GetTargetDevices'),
+            'VirtualTargetDeviceData')
+
+    def build_scsi_port_nexus_data(self, initiator_port_id, target_port_id):
+        """Create a ScsiPortNexusData object.
+
+        Nexus is a pair of ports that can communicate, one being the initiator,
+        the other the target
+
+        :param initiator_port_id: Id of the initiator port
+        :param target_port_id: Id of the target port
+        :return: ScsiPortNexusData object
+        """
+
+        scsi_port_nexus_data = (self._storage_services_client.factory
+                                .create(self.O_SCSI_PORT_NEXUS_DATA))
+        scsi_port_nexus_data.InitiatorPortId = initiator_port_id
+        scsi_port_nexus_data.TargetPortId = target_port_id
+
+        return scsi_port_nexus_data
+
+    def create_target_device(self, target_domain_id, nexus):
+        """Create a target device, given a target domain and a nexus.
+
+        :param target_domain_id: Target domain id
+        :param nexus: Nexus, or pair of ports
+        :return: New target device
+        """
+
+        return self._invoke_storage_services('CreateTargetDevice',
+                                             targetDomainId=target_domain_id,
+                                             nexus=nexus)
+
+    def delete_target_device(self, target_device_id):
+        """Delete a target device.
+
+        :param target_device_id: Target device id
+        """
+
+        self._invoke_storage_services('DeleteTargetDevice',
+                                      targetDeviceId=target_device_id)
+
+    def get_next_free_lun(self, target_device_id):
+        """Find the next unused LUN number for a specified target device.
+
+        :param target_device_id: Target device id
+        :return: LUN number
+        """
+
+        return self._invoke_storage_services('GetNextFreeLun',
+                                             targetDeviceId=target_device_id)
+
+    def get_logical_units(self):
+        """Get all the mappings configured in the system.
+
+        :return: A list of mappings
+        """
+
+        return self._get_list_data(
+            self._invoke_storage_services('GetLogicalUnits'),
+            'VirtualLogicalUnitData')
+
+    def map_logical_disk(self, logical_disk_id, nexus, lun,
+                         initiator_host_id, mapping_type):
+        """Map a logical disk to a host.
+
+        :param logical_disk_id: Id of the logical disk
+        :param nexus: Nexus, or pair of ports
+        :param lun: Logical Unit Number
+        :param initiator_host_id: Id of the initiator host machine
+        :param mapping_type: Type of mapping
+        :return: New mapping
+        """
+
+        logical_unit_type = getattr(self._storage_services_client.factory
+                                    .create(self.O_LOGICAL_UNIT_TYPE),
+                                    mapping_type)
+
+        return self._invoke_storage_services('MapLogicalDisk',
+                                             logicalDiskId=logical_disk_id,
+                                             nexus=nexus,
+                                             lun=lun,
+                                             initiatorHostId=initiator_host_id,
+                                             mappingType=logical_unit_type)
+
+    def unmap_logical_disk(self, logical_disk_id, nexus):
+        """Unmap a logical disk mapped with a specified nexus.
+
+        :param logical_disk_id: Id of the logical disk
+        :param nexus: Nexus, or pair of ports
+        """
+
+        self._invoke_storage_services('UnmapLogicalDisk',
+                                      logicalDiskId=logical_disk_id,
+                                      nexusData=nexus)
diff --git a/cinder/volume/drivers/datacore/driver.py b/cinder/volume/drivers/datacore/driver.py
new file mode 100644
index 00000000000..1bacd0a4caa
--- /dev/null
+++ b/cinder/volume/drivers/datacore/driver.py
@@ -0,0 +1,742 @@
+# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Base Driver for DataCore SANsymphony storage array."""
+
+import time
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_service import loopingcall
+from oslo_utils import excutils
+from oslo_utils import units
+import six
+
+from cinder import context as cinder_context
+from cinder import exception as cinder_exception
+from cinder.i18n import _
+from cinder import utils as cinder_utils
+from cinder.volume import driver
+from cinder.volume.drivers.datacore import api
+from cinder.volume.drivers.datacore import exception as datacore_exception
+from cinder.volume.drivers.datacore import utils as datacore_utils
+from cinder.volume.drivers.san import san
+from cinder.volume import volume_types
+
+
+LOG = logging.getLogger(__name__)
+
+# Driver-specific configuration options; registered globally below and also
+# appended to the per-backend configuration in DataCoreVolumeDriver.__init__.
+datacore_opts = [
+    cfg.StrOpt('datacore_disk_type',
+               default='single',
+               choices=['single', 'mirrored'],
+               help='DataCore virtual disk type (single/mirrored). '
+                    'Mirrored virtual disks require two storage servers in '
+                    'the server group.'),
+    cfg.StrOpt('datacore_storage_profile',
+               default=None,
+               help='DataCore virtual disk storage profile.'),
+    cfg.ListOpt('datacore_disk_pools',
+                default=[],
+                help='List of DataCore disk pools that can be used '
+                     'by volume driver.'),
+    cfg.IntOpt('datacore_api_timeout',
+               default=300,
+               min=1,
+               help='Seconds to wait for a response from a '
+                    'DataCore API call.'),
+    cfg.IntOpt('datacore_disk_failed_delay',
+               default=15,
+               min=0,
+               help='Seconds to wait for DataCore virtual '
+                    'disk to come out of the "Failed" state.'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(datacore_opts)
+
+
+class DataCoreVolumeDriver(driver.BaseVD):
+    """DataCore SANsymphony base volume driver."""
+
+    STORAGE_PROTOCOL = 'N/A'
+
+    AWAIT_DISK_ONLINE_INTERVAL = 10
+    AWAIT_SNAPSHOT_ONLINE_INTERVAL = 10
+    AWAIT_SNAPSHOT_ONLINE_INITIAL_DELAY = 5
+
+    DATACORE_SINGLE_DISK = 'single'
+    DATACORE_MIRRORED_DISK = 'mirrored'
+
+    DATACORE_DISK_TYPE_KEY = 'datacore:disk_type'
+    DATACORE_STORAGE_PROFILE_KEY = 'datacore:storage_profile'
+    DATACORE_DISK_POOLS_KEY = 'datacore:disk_pools'
+
+    VALID_VOLUME_TYPE_KEYS = (DATACORE_DISK_TYPE_KEY,
+                              DATACORE_STORAGE_PROFILE_KEY,
+                              DATACORE_DISK_POOLS_KEY,)
+
+    def __init__(self, *args, **kwargs):
+        """Initialize the driver and register its configuration options.
+
+        :param args: Positional arguments forwarded to the base driver
+        :param kwargs: Keyword arguments forwarded to the base driver
+        """
+        super(DataCoreVolumeDriver, self).__init__(*args, **kwargs)
+        self.configuration.append_config_values(san.san_opts)
+        self.configuration.append_config_values(datacore_opts)
+        # Both are populated later, in do_setup().
+        self._api = None
+        self._default_volume_options = None
+
+    def do_setup(self, context):
+        """Perform validations and establish connection to server.
+
+        :param context: Context information
+        """
+
+        required_params = [
+            'san_ip',
+            'san_login',
+            'san_password',
+        ]
+        for param in required_params:
+            if not getattr(self.configuration, param, None):
+                raise cinder_exception.InvalidInput(_("%s not set.") % param)
+
+        self._api = api.DataCoreClient(
+            self.configuration.san_ip,
+            self.configuration.san_login,
+            self.configuration.san_password,
+            self.configuration.datacore_api_timeout)
+
+        disk_type = self.configuration.datacore_disk_type
+        if disk_type:
+            disk_type = disk_type.lower()
+        storage_profile = self.configuration.datacore_storage_profile
+        if storage_profile:
+            storage_profile = storage_profile.lower()
+        disk_pools = self.configuration.datacore_disk_pools
+        if disk_pools:
+            disk_pools = [pool.lower() for pool in disk_pools]
+
+        self._default_volume_options = {
+            self.DATACORE_DISK_TYPE_KEY: disk_type,
+            self.DATACORE_STORAGE_PROFILE_KEY: storage_profile,
+            self.DATACORE_DISK_POOLS_KEY: disk_pools,
+        }
+
+    def check_for_setup_error(self):
+        """No-op: all setup validation is performed in do_setup()."""
+        pass
+
+    def get_volume_backend_name(self):
+        """Get volume backend name of the volume service.
+
+        :return: Volume backend name
+        """
+
+        backend_name = self.configuration.safe_get('volume_backend_name')
+        return (backend_name or
+                'datacore_' + self.get_storage_protocol().lower())
+
+    def get_storage_protocol(self):
+        """Get storage protocol of the volume backend.
+
+        Subclasses are expected to override STORAGE_PROTOCOL (the base
+        class default is 'N/A').
+
+        :return: Storage protocol
+        """
+
+        return self.STORAGE_PROTOCOL
+
+    def get_volume_stats(self, refresh=False):
+        """Obtain status of the volume service.
+
+        :param refresh: Whether to get refreshed information
+        """
+
+        if refresh:
+            self._update_volume_stats()
+        return self._stats
+
    def create_volume(self, volume):
        """Creates a volume.

        :param volume: Volume object
        :return: Dictionary of changes to the volume object to be persisted
        :raises VolumeDriverException: if the requested disk type is invalid
            or no suitable disk pools are available
        """

        volume_options = self._get_volume_options(volume)

        disk_type = volume_options[self.DATACORE_DISK_TYPE_KEY]
        # A mirrored disk needs one logical disk on each of two servers;
        # a single disk needs just one.
        if disk_type == self.DATACORE_MIRRORED_DISK:
            logical_disk_count = 2
            virtual_disk_type = 'MultiPathMirrored'
        elif disk_type == self.DATACORE_SINGLE_DISK:
            logical_disk_count = 1
            virtual_disk_type = 'NonMirrored'
        else:
            msg = _("Virtual disk type '%s' is not valid.") % disk_type
            LOG.error(msg)
            raise cinder_exception.VolumeDriverException(message=msg)

        profile_id = self._get_storage_profile_id(
            volume_options[self.DATACORE_STORAGE_PROFILE_KEY])

        # Keep at most one pool per server so mirror halves land on
        # different servers.
        pools = datacore_utils.get_distinct_by(
            lambda pool: pool.ServerId,
            self._get_available_disk_pools(
                volume_options[self.DATACORE_DISK_POOLS_KEY]))

        if len(pools) < logical_disk_count:
            msg = _("Suitable disk pools were not found for "
                    "creating virtual disk.")
            LOG.error(msg)
            raise cinder_exception.VolumeDriverException(message=msg)

        disk_size = self._get_size_in_bytes(volume['size'])

        logical_disks = []
        virtual_disk = None
        try:
            for logical_disk_pool in pools[:logical_disk_count]:
                logical_disks.append(
                    self._api.create_pool_logical_disk(
                        logical_disk_pool.Id, 'Striped', disk_size))

            virtual_disk_data = self._api.build_virtual_disk_data(
                volume['id'],
                virtual_disk_type,
                disk_size,
                volume['display_name'],
                profile_id)

            virtual_disk = self._api.create_virtual_disk_ex2(
                virtual_disk_data,
                logical_disks[0].Id,
                logical_disks[1].Id if logical_disk_count == 2 else None,
                True)

            # Block until the disk leaves its transient state; raises on
            # failure or timeout.
            virtual_disk = self._await_virtual_disk_online(virtual_disk.Id)

        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception("Creation of volume %(volume)s failed.",
                              {'volume': volume['id']})
                # Best-effort cleanup: delete whatever was created before the
                # failure; cleanup errors are logged, not raised, so the
                # original exception propagates.
                try:
                    if virtual_disk:
                        self._api.delete_virtual_disk(virtual_disk.Id, True)
                    else:
                        for logical_disk in logical_disks:
                            self._api.delete_logical_disk(logical_disk.Id)
                except datacore_exception.DataCoreException as e:
                    LOG.warning("An error occurred on a cleanup after failed "
                                "creation of volume %(volume)s: %(error)s.",
                                {'volume': volume['id'], 'error': e})

        return {'provider_location': virtual_disk.Id}
+
    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot.

        :param volume: Volume object
        :param snapshot: Snapshot object
        :return: Dictionary of changes to the volume object to be persisted
        """

        # Snapshots and volumes are both backed by virtual disks, so both
        # creation paths share _create_volume_from.
        return self._create_volume_from(volume, snapshot)
+
    def create_cloned_volume(self, volume, src_vref):
        """Creates volume clone.

        :param volume: New Volume object
        :param src_vref: Volume object that must be cloned
        :return: Dictionary of changes to the volume object to be persisted
        """

        # Cloning uses the same copy-via-snapshot path as creating a volume
        # from a snapshot.
        return self._create_volume_from(volume, src_vref)
+
+    def extend_volume(self, volume, new_size):
+        """Extend an existing volume's size.
+
+        :param volume: Volume object
+        :param new_size: new size in GB to extend this volume to
+        """
+
+        virtual_disk = self._get_virtual_disk_for(volume, raise_not_found=True)
+        self._set_virtual_disk_size(virtual_disk,
+                                    self._get_size_in_bytes(new_size))
+
+    def delete_volume(self, volume):
+        """Deletes a volume.
+
+        :param volume: Volume object
+        """
+
+        virtual_disk = self._get_virtual_disk_for(volume)
+        if virtual_disk:
+            if virtual_disk.IsServed:
+                logical_disks = self._api.get_logical_disks()
+                logical_units = self._api.get_logical_units()
+                target_devices = self._api.get_target_devices()
+                logical_disks = [disk.Id for disk in logical_disks
+                                 if disk.VirtualDiskId == virtual_disk.Id]
+                logical_unit_devices = [unit.VirtualTargetDeviceId
+                                        for unit in logical_units
+                                        if unit.LogicalDiskId in logical_disks]
+                initiator_ports = set(device.InitiatorPortId
+                                      for device in target_devices
+                                      if device.Id in logical_unit_devices)
+                for port in initiator_ports:
+                    self._api.unserve_virtual_disks_from_port(
+                        port, [virtual_disk.Id])
+            self._api.delete_virtual_disk(virtual_disk.Id, True)
+
+    def create_snapshot(self, snapshot):
+        """Creates a snapshot.
+
+        :param snapshot: Snapshot object
+        :return: Dictionary of changes to the snapshot object to be persisted
+        """
+
+        src_virtual_disk = self._get_virtual_disk_for(snapshot['volume'],
+                                                      raise_not_found=True)
+
+        volume_options = self._get_volume_options(snapshot['volume'])
+        profile_name = volume_options[self.DATACORE_STORAGE_PROFILE_KEY]
+        profile_id = self._get_storage_profile_id(profile_name)
+        pool_names = volume_options[self.DATACORE_DISK_POOLS_KEY]
+
+        if src_virtual_disk.DiskStatus != 'Online':
+            LOG.warning("Attempting to make a snapshot from virtual disk "
+                        "%(disk)s that is in %(state)s state.",
+                        {'disk': src_virtual_disk.Id,
+                         'state': src_virtual_disk.DiskStatus})
+
+        snapshot_virtual_disk = self._create_virtual_disk_copy(
+            src_virtual_disk,
+            snapshot['id'],
+            snapshot['display_name'],
+            profile_id=profile_id,
+            pool_names=pool_names)
+
+        return {'provider_location': snapshot_virtual_disk.Id}
+
+    def delete_snapshot(self, snapshot):
+        """Deletes a snapshot.
+
+        :param snapshot: Snapshot object
+        """
+
+        snapshot_virtual_disk = self._get_virtual_disk_for(snapshot)
+        if snapshot_virtual_disk:
+            self._api.delete_virtual_disk(snapshot_virtual_disk.Id, True)
+
    def ensure_export(self, context, volume):
        """Export management is not needed by this driver; no-op."""
        pass
+
    def create_export(self, context, volume, connector):
        """Export management is not needed by this driver; no-op."""
        pass
+
    def remove_export(self, context, volume):
        """Export management is not needed by this driver; no-op."""
        pass
+
    def terminate_connection(self, volume, connector, **kwargs):
        """Disallow connection from connector.

        :param volume: Volume object
        :param connector: Connector information
        """

        virtual_disk = self._get_virtual_disk_for(volume)
        if virtual_disk:
            if connector:
                clients = [self._get_client(connector['host'],
                                            create_new=False)]
            else:
                # No connector given: unserve the disk from every known
                # client host.
                clients = self._api.get_clients()

            server_group = self._get_our_server_group()

            # Serialize serve/unserve operations per server group;
            # external=True makes the lock work across processes.
            @cinder_utils.synchronized(
                'datacore-backend-%s' % server_group.Id, external=True)
            def unserve_virtual_disk(client_id):
                self._api.unserve_virtual_disks_from_host(
                    client_id, [virtual_disk.Id])

            for client in clients:
                unserve_virtual_disk(client.Id)
+
+    def _update_volume_stats(self):
+        performance_data = self._api.get_performance_by_type(
+            ['DiskPoolPerformance'])
+        total = 0
+        available = 0
+        reserved = 0
+        for performance in performance_data:
+            missing_perf_data = []
+
+            if hasattr(performance.PerformanceData, 'BytesTotal'):
+                total += performance.PerformanceData.BytesTotal
+            else:
+                missing_perf_data.append('BytesTotal')
+
+            if hasattr(performance.PerformanceData, 'BytesAvailable'):
+                available += performance.PerformanceData.BytesAvailable
+            else:
+                missing_perf_data.append('BytesAvailable')
+
+            if hasattr(performance.PerformanceData, 'BytesReserved'):
+                reserved += performance.PerformanceData.BytesReserved
+            else:
+                missing_perf_data.append('BytesReserved')
+
+            if missing_perf_data:
+                LOG.warning("Performance data %(data)s is missing for "
+                            "disk pool %(pool)s",
+                            {'data': missing_perf_data,
+                             'pool': performance.ObjectId})
+        provisioned = 0
+        logical_disks = self._api.get_logical_disks()
+        for disk in logical_disks:
+            if getattr(disk, 'PoolId', None):
+                provisioned += disk.Size.Value
+        total_capacity_gb = self._get_size_in_gigabytes(total)
+        free = available + reserved
+        free_capacity_gb = self._get_size_in_gigabytes(free)
+        provisioned_capacity_gb = self._get_size_in_gigabytes(provisioned)
+        reserved_percentage = 100.0 * reserved / total if total else 0.0
+        ratio = self.configuration.max_over_subscription_ratio
+        stats_data = {
+            'vendor_name': 'DataCore',
+            'QoS_support': False,
+            'volume_backend_name': self.get_volume_backend_name(),
+            'driver_version': self.get_version(),
+            'storage_protocol': self.get_storage_protocol(),
+            'total_capacity_gb': total_capacity_gb,
+            'free_capacity_gb': free_capacity_gb,
+            'provisioned_capacity_gb': provisioned_capacity_gb,
+            'reserved_percentage': reserved_percentage,
+            'max_over_subscription_ratio': ratio,
+            'thin_provisioning_support': True,
+            'thick_provisioning_support': False,
+        }
+        self._stats = stats_data
+
+    def _get_our_server_group(self):
+        server_group = datacore_utils.get_first(lambda group: group.OurGroup,
+                                                self._api.get_server_groups())
+
+        return server_group
+
+    def _get_volume_options_from_type(self, type_id, default_options):
+        options = dict(default_options.items())
+        if type_id:
+            admin_context = cinder_context.get_admin_context()
+            volume_type = volume_types.get_volume_type(admin_context, type_id)
+            specs = dict(volume_type).get('extra_specs')
+
+            for key, value in six.iteritems(specs):
+                if key in self.VALID_VOLUME_TYPE_KEYS:
+                    if key == self.DATACORE_DISK_POOLS_KEY:
+                        options[key] = [v.strip().lower()
+                                        for v in value.split(',')]
+                    else:
+                        options[key] = value.lower()
+
+        return options
+
+    def _get_volume_options(self, volume):
+        type_id = volume['volume_type_id']
+
+        volume_options = self._get_volume_options_from_type(
+            type_id, self._default_volume_options)
+
+        return volume_options
+
+    def _get_online_servers(self):
+        servers = self._api.get_servers()
+        online_servers = [server for server in servers
+                          if server.State == 'Online']
+        return online_servers
+
+    def _get_available_disk_pools(self, disk_pool_names=None):
+        online_servers = [server.Id for server in self._get_online_servers()]
+
+        pool_performance = {
+            performance.ObjectId: performance.PerformanceData for performance
+            in self._api.get_performance_by_type(['DiskPoolPerformance'])}
+
+        disk_pools = self._api.get_disk_pools()
+
+        lower_disk_pool_names = ([name.lower() for name in disk_pool_names]
+                                 if disk_pool_names else [])
+
+        available_disk_pools = [
+            pool for pool in disk_pools
+            if (self._is_pool_healthy(pool, pool_performance, online_servers)
+                and (not lower_disk_pool_names
+                     or pool.Caption.lower() in lower_disk_pool_names))]
+
+        available_disk_pools.sort(
+            key=lambda p: pool_performance[p.Id].BytesAvailable, reverse=True)
+
+        return available_disk_pools
+
+    def _get_virtual_disk_for(self, obj, raise_not_found=False):
+        disk_id = obj.get('provider_location')
+
+        virtual_disk = datacore_utils.get_first_or_default(
+            lambda disk: disk.Id == disk_id,
+            self._api.get_virtual_disks(),
+            None)
+        if not virtual_disk:
+            msg = (_("Virtual disk not found for %(object)s %(object_id)s.")
+                   % {'object': obj.__class__.__name__.lower(),
+                      'object_id': obj['id']})
+            if raise_not_found:
+                LOG.error(msg)
+                raise cinder_exception.VolumeDriverException(message=msg)
+            else:
+                LOG.warning(msg)
+
+        return virtual_disk
+
    def _set_virtual_disk_size(self, virtual_disk, new_size):
        # Returns the updated virtual disk object from the backend.
        return self._api.set_virtual_disk_size(virtual_disk.Id, new_size)
+
+    def _get_storage_profile(self, profile_name, raise_not_found=False):
+        profiles = self._api.get_storage_profiles()
+        profile = datacore_utils.get_first_or_default(
+            lambda p: p.Caption.lower() == profile_name.lower(),
+            profiles,
+            None)
+        if not profile and raise_not_found:
+            msg = (_("Specified storage profile %s not found.")
+                   % profile_name)
+            LOG.error(msg)
+            raise cinder_exception.VolumeDriverException(message=msg)
+
+        return profile
+
+    def _get_storage_profile_id(self, profile_name):
+        profile_id = None
+        if profile_name:
+            profile = self._get_storage_profile(profile_name,
+                                                raise_not_found=True)
+            profile_id = profile.Id
+        return profile_id
+
    def _await_virtual_disk_online(self, virtual_disk_id):
        """Poll until the virtual disk reaches the 'Online' state.

        :param virtual_disk_id: Id of the virtual disk to wait for
        :return: The virtual disk object once it is online
        :raises VolumeDriverException: if the disk stays in a non-online,
            non-'FailedRedundancy' state past datacore_disk_failed_delay
        """

        def inner(start_time):
            disk_failed_delay = self.configuration.datacore_disk_failed_delay
            virtual_disk = datacore_utils.get_first(
                lambda disk: disk.Id == virtual_disk_id,
                self._api.get_virtual_disks())
            if virtual_disk.DiskStatus == 'Online':
                raise loopingcall.LoopingCallDone(virtual_disk)
            # 'FailedRedundancy' is tolerated indefinitely; any other
            # non-online state is only allowed for disk_failed_delay seconds.
            elif (virtual_disk.DiskStatus != 'FailedRedundancy'
                  and time.time() - start_time >= disk_failed_delay):
                msg = (_("Virtual disk %(disk)s did not come out of the "
                         "%(state)s state after %(timeout)s seconds.")
                       % {'disk': virtual_disk.Id,
                          'state': virtual_disk.DiskStatus,
                          'timeout': disk_failed_delay})
                LOG.error(msg)
                raise cinder_exception.VolumeDriverException(message=msg)

        inner_loop = loopingcall.FixedIntervalLoopingCall(inner, time.time())
        return inner_loop.start(self.AWAIT_DISK_ONLINE_INTERVAL).wait()
+
    def _create_volume_from(self, volume, src_obj):
        """Create a new volume as a copy of a volume or snapshot.

        :param volume: New Volume object
        :param src_obj: Source Volume or Snapshot object
        :return: Dictionary of changes to the volume object to be persisted
        :raises VolumeDriverException: if the copy cannot be resized or
            mirrored as requested
        """

        src_virtual_disk = self._get_virtual_disk_for(src_obj,
                                                      raise_not_found=True)

        if src_virtual_disk.DiskStatus != 'Online':
            LOG.warning("Attempting to create a volume from virtual disk "
                        "%(disk)s that is in %(state)s state.",
                        {'disk': src_virtual_disk.Id,
                         'state': src_virtual_disk.DiskStatus})

        volume_options = self._get_volume_options(volume)
        profile_id = self._get_storage_profile_id(
            volume_options[self.DATACORE_STORAGE_PROFILE_KEY])
        pool_names = volume_options[self.DATACORE_DISK_POOLS_KEY]

        # The copy is made by taking a full snapshot and splitting it from
        # its source (see _create_virtual_disk_copy).
        volume_virtual_disk = self._create_virtual_disk_copy(
            src_virtual_disk,
            volume['id'],
            volume['display_name'],
            profile_id=profile_id,
            pool_names=pool_names)

        volume_logical_disk = datacore_utils.get_first(
            lambda disk: disk.VirtualDiskId == volume_virtual_disk.Id,
            self._api.get_logical_disks())

        try:
            # Grow the copy if the new volume is larger than the source.
            volume_virtual_disk = self._set_virtual_disk_size(
                volume_virtual_disk,
                self._get_size_in_bytes(volume['size']))

            disk_type = volume_options[self.DATACORE_DISK_TYPE_KEY]
            if disk_type == self.DATACORE_MIRRORED_DISK:
                # Bind a second logical disk on a different server and pool
                # to turn the copy into a mirrored virtual disk.
                pools = self._get_available_disk_pools(pool_names)
                selected_pool = datacore_utils.get_first_or_default(
                    lambda pool: (
                        pool.ServerId != volume_logical_disk.ServerHostId
                        and pool.Id != volume_logical_disk.PoolId),
                    pools,
                    None)
                if selected_pool:
                    logical_disk = self._api.create_pool_logical_disk(
                        selected_pool.Id,
                        'Striped',
                        volume_virtual_disk.Size.Value)
                    self._api.bind_logical_disk(volume_virtual_disk.Id,
                                                logical_disk.Id,
                                                'Second',
                                                True,
                                                False,
                                                True)
                else:
                    msg = _("Can not create mirrored virtual disk. "
                            "Suitable disk pools not found.")
                    LOG.error(msg)
                    raise cinder_exception.VolumeDriverException(message=msg)

            volume_virtual_disk = self._await_virtual_disk_online(
                volume_virtual_disk.Id)

        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception("Creation of volume %(volume)s failed.",
                              {'volume': volume['id']})
                # Best-effort cleanup; errors here are logged, not raised.
                try:
                    self._api.delete_virtual_disk(volume_virtual_disk.Id, True)
                except datacore_exception.DataCoreException as e:
                    LOG.warning("An error occurred on a cleanup after failed "
                                "creation of volume %(volume)s: %(error)s.",
                                {'volume': volume['id'], 'error': e})

        return {'provider_location': volume_virtual_disk.Id}
+
    def _create_full_snapshot(self, description, name, pool_names, profile_id,
                              src_virtual_disk):
        """Start a full snapshot of a virtual disk on a suitable pool.

        :param description: Snapshot description
        :param name: Snapshot name
        :param pool_names: Names of disk pools allowed as destination
        :param profile_id: Storage profile id, or None
        :param src_virtual_disk: Source virtual disk object
        :return: The newly created snapshot object
        :raises VolumeDriverException: if no suitable destination pool exists
        """

        pools = self._get_available_disk_pools(pool_names)
        # The destination pool must live on one of the servers hosting the
        # source disk.
        destination_pool = datacore_utils.get_first_or_default(
            lambda pool: (pool.ServerId == src_virtual_disk.FirstHostId
                          or pool.ServerId == src_virtual_disk.SecondHostId),
            pools,
            None)

        if not destination_pool:
            msg = _("Suitable snapshot destination disk pool not found for "
                    "virtual disk %s.") % src_virtual_disk.Id
            LOG.error(msg)
            raise cinder_exception.VolumeDriverException(message=msg)
        server = datacore_utils.get_first(
            lambda srv: srv.Id == destination_pool.ServerId,
            self._api.get_servers())
        # The server needs a designated snapshot map store before it can
        # host snapshots.
        if not server.SnapshotMapStorePoolId:
            self._api.designate_map_store(destination_pool.Id)
        snapshot = self._api.create_snapshot(src_virtual_disk.Id,
                                             name,
                                             description,
                                             destination_pool.Id,
                                             'Full',
                                             False,
                                             profile_id)
        return snapshot
+
    def _await_snapshot_migrated(self, snapshot_id):
        """Poll until the snapshot has fully migrated its data.

        :param snapshot_id: Id of the snapshot to wait for
        :return: The snapshot object once its state is 'Migrated'
        :raises VolumeDriverException: if the snapshot reports a failure
        """

        def inner():
            snapshot_data = datacore_utils.get_first(
                lambda snapshot: snapshot.Id == snapshot_id,
                self._api.get_snapshots())
            if snapshot_data.State == 'Migrated':
                raise loopingcall.LoopingCallDone(snapshot_data)
            # A non-healthy state combined with a reported failure means
            # the migration went wrong; keep polling otherwise.
            elif (snapshot_data.State != 'Healthy'
                  and snapshot_data.Failure != 'NoFailure'):
                msg = (_("Full migration of snapshot %(snapshot)s failed. "
                         "Snapshot is in %(state)s state.")
                       % {'snapshot': snapshot_data.Id,
                          'state': snapshot_data.State})
                LOG.error(msg)
                raise cinder_exception.VolumeDriverException(message=msg)

        loop = loopingcall.FixedIntervalLoopingCall(inner)
        return loop.start(self.AWAIT_SNAPSHOT_ONLINE_INTERVAL,
                          self.AWAIT_SNAPSHOT_ONLINE_INITIAL_DELAY).wait()
+
    def _create_virtual_disk_copy(self, src_virtual_disk, name, description,
                                  profile_id=None, pool_names=None):
        """Create an independent copy of a virtual disk.

        Takes a full snapshot of the source disk, waits until all data has
        migrated, then deletes the snapshot relationship so the copy becomes
        a stand-alone virtual disk.

        :param src_virtual_disk: Source virtual disk object
        :param name: Name for the copy
        :param description: Description for the copy
        :param profile_id: Storage profile id, or None
        :param pool_names: Names of disk pools allowed as destination
        :return: The virtual disk object backing the copy
        """

        snapshot = self._create_full_snapshot(
            description, name, pool_names, profile_id, src_virtual_disk)

        try:
            snapshot = self._await_snapshot_migrated(snapshot.Id)
            # Deleting the snapshot splits the copy from its source; the
            # destination logical disk remains.
            self._api.delete_snapshot(snapshot.Id)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception("Split operation failed for snapshot "
                              "%(snapshot)s.", {'snapshot': snapshot.Id})
                # Best-effort cleanup of the partially created copy.
                try:
                    logical_disk_copy = datacore_utils.get_first(
                        lambda disk: (
                            disk.Id == snapshot.DestinationLogicalDiskId),
                        self._api.get_logical_disks())

                    virtual_disk_copy = datacore_utils.get_first(
                        lambda disk: (
                            disk.Id == logical_disk_copy.VirtualDiskId),
                        self._api.get_virtual_disks())

                    self._api.delete_virtual_disk(virtual_disk_copy.Id, True)
                except datacore_exception.DataCoreException as e:
                    LOG.warning("An error occurred on a cleanup after failed "
                                "split of snapshot %(snapshot)s: %(error)s.",
                                {'snapshot': snapshot.Id, 'error': e})

        # Resolve the virtual disk that now backs the split-off copy.
        logical_disk_copy = datacore_utils.get_first(
            lambda disk: disk.Id == snapshot.DestinationLogicalDiskId,
            self._api.get_logical_disks())

        virtual_disk_copy = datacore_utils.get_first(
            lambda disk: disk.Id == logical_disk_copy.VirtualDiskId,
            self._api.get_virtual_disks())

        return virtual_disk_copy
+
+    def _get_client(self, name, create_new=False):
+        client_hosts = self._api.get_clients()
+
+        client = datacore_utils.get_first_or_default(
+            lambda host: host.HostName == name, client_hosts, None)
+
+        if create_new:
+            if not client:
+                client = self._api.register_client(
+                    name, None, 'Other', 'PreferredServer', None)
+            self._api.set_client_capabilities(client.Id, True, True)
+
+        return client
+
+    @staticmethod
+    def _is_pool_healthy(pool, pool_performance, online_servers):
+        if (pool.PoolStatus == 'Running'
+                and hasattr(pool_performance[pool.Id], 'BytesAvailable')
+                and pool.ServerId in online_servers):
+            return True
+        return False
+
+    @staticmethod
+    def _get_size_in_bytes(size_in_gigabytes):
+        return size_in_gigabytes * units.Gi
+
+    @staticmethod
+    def _get_size_in_gigabytes(size_in_bytes):
+        return size_in_bytes / float(units.Gi)
diff --git a/cinder/volume/drivers/datacore/exception.py b/cinder/volume/drivers/datacore/exception.py
new file mode 100644
index 00000000000..1fab1050e38
--- /dev/null
+++ b/cinder/volume/drivers/datacore/exception.py
@@ -0,0 +1,36 @@
+# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Exception definitions."""
+
+from cinder import exception
+from cinder.i18n import _
+
+
class DataCoreException(exception.VolumeBackendAPIException):
    """Base exception for all errors raised by the DataCore volume drivers."""

    # Default message; subclasses override with more specific text.
    message = _('DataCore exception.')
+
+
class DataCoreConnectionException(DataCoreException):
    """Thrown when there are connection problems during a DataCore API call."""

    # Raise as DataCoreConnectionException(reason=...) to fill the template.
    message = _('Failed to connect to DataCore Server Group: %(reason)s.')
+
+
class DataCoreFaultException(DataCoreException):
    """Thrown when there are faults during a DataCore API call."""

    # Raise as DataCoreFaultException(reason=...) to fill the template.
    message = _('DataCore Server Group reported an error: %(reason)s.')
diff --git a/cinder/volume/drivers/datacore/fc.py b/cinder/volume/drivers/datacore/fc.py
new file mode 100644
index 00000000000..7524bf0c89b
--- /dev/null
+++ b/cinder/volume/drivers/datacore/fc.py
@@ -0,0 +1,183 @@
+# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Fibre Channel Driver for DataCore SANsymphony storage array."""
+
+from oslo_log import log as logging
+
+from cinder import exception as cinder_exception
+from cinder.i18n import _
+from cinder import interface
+from cinder import utils as cinder_utils
+from cinder.volume.drivers.datacore import driver
+from cinder.volume.drivers.datacore import exception as datacore_exception
+
+
+LOG = logging.getLogger(__name__)
+
+
@interface.volumedriver
class FibreChannelVolumeDriver(driver.DataCoreVolumeDriver):
    """DataCore SANsymphony Fibre Channel volume driver.

    Version history:

    .. code-block:: none

        1.0.0 - Initial driver

    """

    VERSION = '1.0.0'
    STORAGE_PROTOCOL = 'FC'
    CI_WIKI_NAME = 'DataCore_CI'

    def __init__(self, *args, **kwargs):
        super(FibreChannelVolumeDriver, self).__init__(*args, **kwargs)

    def validate_connector(self, connector):
        """Fail if connector doesn't contain all the data needed by the driver.

        :param connector: Connector information
        :raises InvalidConnectorException: if 'host' or 'wwpns' is missing
        """

        required_data = ['host', 'wwpns']
        for required in required_data:
            if required not in connector:
                LOG.error("The volume driver requires %(data)s "
                          "in the connector.", {'data': required})
                raise cinder_exception.InvalidConnectorException(
                    missing=required)

    def initialize_connection(self, volume, connector):
        """Allow connection to connector and return connection info.

        :param volume: Volume object
        :param connector: Connector information
        :return: Connection information
        :raises VolumeDriverException: if the disk cannot be reached
            through any online front-end port
        """

        LOG.debug("Initialize connection for volume %(volume)s for "
                  "connector %(connector)s.",
                  {'volume': volume['id'], 'connector': connector})

        virtual_disk = self._get_virtual_disk_for(volume, raise_not_found=True)

        if virtual_disk.DiskStatus != 'Online':
            LOG.warning("Attempting to attach virtual disk %(disk)s "
                        "that is in %(state)s state.",
                        {'disk': virtual_disk.Id,
                         'state': virtual_disk.DiskStatus})

        # serve_result is a (client, virtual_logical_units) pair.
        serve_result = self._serve_virtual_disk(connector, virtual_disk.Id)

        # Keep only logical units reachable through target devices on
        # currently online servers.
        online_servers = [server.Id for server in self._get_online_servers()]
        online_ports = self._get_online_ports(online_servers)
        online_devices = self._get_online_devices(online_ports)
        online_units = [unit for unit in serve_result[1]
                        if unit.VirtualTargetDeviceId in online_devices]

        if not online_units:
            msg = (_("Volume %(volume)s can not be attached "
                     "to connector %(connector)s due to backend state.")
                   % {'volume': volume['id'], 'connector': connector})
            LOG.error(msg)
            # Roll back the serve operation; cleanup failures are only
            # logged so the original error is raised below.
            try:
                self._api.unserve_virtual_disks_from_host(serve_result[0].Id,
                                                          [virtual_disk.Id])
            except datacore_exception.DataCoreException as e:
                LOG.warning("An error occurred on a cleanup after failed "
                            "attaching of volume %(volume)s to connector "
                            "%(connector)s: %(error)s.",
                            {'volume': volume['id'],
                             'connector': connector,
                             'error': e})
            raise cinder_exception.VolumeDriverException(message=msg)

        target_device = online_devices[online_units[0].VirtualTargetDeviceId]
        target_port = online_ports[target_device.TargetPortId]

        connection_data = {
            'target_discovered': False,
            'target_lun': online_units[0].Lun.Quad,
            # The backend reports WWNs dash-separated; strip to plain
            # lowercase hex for the connection info.
            'target_wwn': target_port.PortName.replace('-', '').lower(),
            'volume_id': volume['id'],
            'access_mode': 'rw',
        }

        LOG.debug("Connection data: %s", connection_data)

        return {
            'driver_volume_type': 'fibre_channel',
            'data': connection_data,
        }

    def _serve_virtual_disk(self, connector, virtual_disk_id):
        """Serve the virtual disk to the connector's client host.

        :param connector: Connector information
        :param virtual_disk_id: Id of the virtual disk to serve
        :return: Tuple of (client host, virtual logical units)
        :raises VolumeDriverException: if none of the connector's WWPNs
            match a known Fibre Channel initiator port
        """

        server_group = self._get_our_server_group()

        # Serialize serve operations per server group; external=True makes
        # the lock work across processes.
        @cinder_utils.synchronized(
            'datacore-backend-%s' % server_group.Id, external=True)
        def serve_virtual_disk():
            # Normalize connector WWPNs to plain lowercase hex to match the
            # backend's port naming.
            connector_wwpns = list(wwpn.replace('-', '').lower()
                                   for wwpn in connector['wwpns'])

            client = self._get_client(connector['host'], create_new=True)

            available_ports = self._api.get_ports()

            initiators = []
            for port in available_ports:
                port_name = port.PortName.replace('-', '').lower()
                if (port.PortType == 'FibreChannel'
                        and port.PortMode == 'Initiator'
                        and port_name in connector_wwpns):
                    initiators.append(port)
            if not initiators:
                msg = _("Fibre Channel ports not found for "
                        "connector: %s") % connector
                LOG.error(msg)
                raise cinder_exception.VolumeDriverException(message=msg)
            else:
                for initiator in initiators:
                    if initiator.HostId != client.Id:
                        # Port assignment may fail; log at info level and
                        # continue with the remaining initiator ports.
                        try:
                            self._api.assign_port(client.Id, initiator.Id)
                        except datacore_exception.DataCoreException as e:
                            LOG.info("Assigning initiator port %(initiator)s "
                                     "to client %(client)s failed with "
                                     "error: %(error)s",
                                     {'initiator': initiator.Id,
                                      'client': client.Id,
                                      'error': e})

            virtual_logical_units = self._api.serve_virtual_disks_to_host(
                client.Id, [virtual_disk_id])

            return client, virtual_logical_units

        return serve_virtual_disk()

    def _get_online_ports(self, online_servers):
        # Map port id -> port for ports hosted on online servers.
        ports = self._api.get_ports()
        online_ports = {port.Id: port for port in ports
                        if port.HostId in online_servers}

        return online_ports

    def _get_online_devices(self, online_ports):
        # Map device id -> target device for devices behind online ports.
        devices = self._api.get_target_devices()
        online_devices = {device.Id: device for device in devices
                          if device.TargetPortId in online_ports}

        return online_devices
diff --git a/cinder/volume/drivers/datacore/iscsi.py b/cinder/volume/drivers/datacore/iscsi.py
new file mode 100644
index 00000000000..8ae5abf3c94
--- /dev/null
+++ b/cinder/volume/drivers/datacore/iscsi.py
@@ -0,0 +1,440 @@
+# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""iSCSI Driver for DataCore SANsymphony storage array."""
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import excutils
+
+from cinder import exception as cinder_exception
+from cinder.i18n import _
+from cinder import interface
+from cinder import utils as cinder_utils
+from cinder.volume.drivers.datacore import driver
+from cinder.volume.drivers.datacore import exception as datacore_exception
+from cinder.volume.drivers.datacore import passwd
+from cinder.volume.drivers.datacore import utils as datacore_utils
+from cinder.volume import utils as volume_utils
+
+
+LOG = logging.getLogger(__name__)
+
# Options consumed by the iSCSI driver in addition to the base DataCore
# driver options; appended to the driver's configuration object in
# ISCSIVolumeDriver.__init__().
datacore_iscsi_opts = [
    cfg.ListOpt('datacore_iscsi_unallowed_targets',
                default=[],
                help='List of iSCSI targets that cannot be used to attach '
                     'volume. To prevent the DataCore iSCSI volume driver '
                     'from using some front-end targets in volume attachment, '
                     'specify this option and list the iqn and target machine '
                     'for each target as the value, such as '
                     '<iqn:target name>, <iqn:target name>, '
                     '<iqn:target name>.'),
    cfg.BoolOpt('datacore_iscsi_chap_enabled',
                default=False,
                help='Configure CHAP authentication for iSCSI connections.'),
    cfg.StrOpt('datacore_iscsi_chap_storage',
               default=None,
               help='iSCSI CHAP authentication password storage file.'),
]

CONF = cfg.CONF
# Also register the options with the global oslo.config object.
CONF.register_opts(datacore_iscsi_opts)
+
+
@interface.volumedriver
class ISCSIVolumeDriver(driver.DataCoreVolumeDriver):
    """DataCore SANsymphony iSCSI volume driver.

    Version history:

    .. code-block:: none

        1.0.0 - Initial driver

    """

    VERSION = '1.0.0'
    STORAGE_PROTOCOL = 'iSCSI'
    CI_WIKI_NAME = 'DataCore_CI'

    def __init__(self, *args, **kwargs):
        # Register the iSCSI-specific options on top of the base driver's.
        super(ISCSIVolumeDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(datacore_iscsi_opts)
        # Created in do_setup() when datacore_iscsi_chap_storage is set.
        self._password_storage = None

    def do_setup(self, context):
        """Perform validations and establish connection to server.

        :param context: Context information
        :raises cinder_exception.InvalidInput: If CHAP is enabled but no
            password storage file is configured
        """

        super(ISCSIVolumeDriver, self).do_setup(context)

        password_storage_path = getattr(self.configuration,
                                        'datacore_iscsi_chap_storage', None)
        if (self.configuration.datacore_iscsi_chap_enabled
                and not password_storage_path):
            raise cinder_exception.InvalidInput(
                _("datacore_iscsi_chap_storage not set."))
        elif password_storage_path:
            # The storage is created even when CHAP is disabled so that
            # stale credentials can be deleted later (see
            # _setup_iscsi_chap_authentication()).
            self._password_storage = passwd.PasswordFileStorage(
                self.configuration.datacore_iscsi_chap_storage)

    def validate_connector(self, connector):
        """Fail if connector doesn't contain all the data needed by the driver.

        :param connector: Connector information
        :raises cinder_exception.InvalidConnectorException: If 'host' or
            'initiator' is missing from the connector
        """

        required_data = ['host', 'initiator']
        for required in required_data:
            if required not in connector:
                LOG.error("The volume driver requires %(data)s "
                          "in the connector.", {'data': required})
                raise cinder_exception.InvalidConnectorException(
                    missing=required)

    def initialize_connection(self, volume, connector):
        """Allow connection to connector and return connection info.

        :param volume: Volume object
        :param connector: Connector information
        :return: Connection information
        """

        LOG.debug("Initialize connection for volume %(volume)s for "
                  "connector %(connector)s.",
                  {'volume': volume['id'], 'connector': connector})

        virtual_disk = self._get_virtual_disk_for(volume, raise_not_found=True)

        if virtual_disk.DiskStatus != 'Online':
            LOG.warning("Attempting to attach virtual disk %(disk)s "
                        "that is in %(state)s state.",
                        {'disk': virtual_disk.Id,
                         'state': virtual_disk.DiskStatus})

        server_group = self._get_our_server_group()

        # Serialize serving operations per server group across processes
        # (external=True uses a file-based lock).
        @cinder_utils.synchronized(
            'datacore-backend-%s' % server_group.Id, external=True)
        def serve_virtual_disk():
            available_ports = self._api.get_ports()

            iscsi_initiator = self._get_initiator(connector['host'],
                                                  connector['initiator'],
                                                  available_ports)

            iscsi_targets = self._get_targets(virtual_disk, available_ports)

            if not iscsi_targets:
                msg = (_("Suitable targets not found for "
                         "virtual disk %(disk)s for volume %(volume)s.")
                       % {'disk': virtual_disk.Id, 'volume': volume['id']})
                LOG.error(msg)
                raise cinder_exception.VolumeDriverException(message=msg)

            auth_params = self._setup_iscsi_chap_authentication(
                iscsi_targets, iscsi_initiator)

            virtual_logical_units = self._map_virtual_disk(
                virtual_disk, iscsi_targets, iscsi_initiator)

            return iscsi_targets, virtual_logical_units, auth_params

        targets, logical_units, chap_params = serve_virtual_disk()

        # Connection info is built from the first suitable target only.
        target_portal = datacore_utils.build_network_address(
            targets[0].PortConfigInfo.PortalsConfig.iScsiPortalConfigInfo[0]
            .Address.Address,
            targets[0].PortConfigInfo.PortalsConfig.iScsiPortalConfigInfo[0]
            .TcpPort)

        connection_data = {}

        if chap_params:
            connection_data['auth_method'] = 'CHAP'
            connection_data['auth_username'] = chap_params[0]
            connection_data['auth_password'] = chap_params[1]

        connection_data['target_discovered'] = False
        connection_data['target_iqn'] = targets[0].PortName
        connection_data['target_portal'] = target_portal
        connection_data['target_lun'] = logical_units[targets[0]].Lun.Quad
        connection_data['volume_id'] = volume['id']
        connection_data['access_mode'] = 'rw'

        LOG.debug("Connection data: %s", connection_data)

        return {
            'driver_volume_type': 'iscsi',
            'data': connection_data,
        }

    def _map_virtual_disk(self, virtual_disk, targets, initiator):
        """Map the virtual disk to the initiator through each target port.

        Missing target domains, target devices and logical unit mappings
        are created on demand; on failure everything created here is
        rolled back on a best-effort basis.

        :param virtual_disk: Virtual disk to map
        :param targets: iSCSI target ports to serve the disk through
        :param initiator: iSCSI initiator port of the connecting host
        :return: Dictionary of logical units keyed by target port
        """

        logical_disks = self._api.get_logical_disks()

        logical_units = {}
        created_mapping = {}
        created_devices = []
        created_domains = []
        try:
            for target in targets:
                target_domain = self._get_target_domain(target, initiator)
                if not target_domain:
                    target_domain = self._api.create_target_domain(
                        initiator.HostId, target.HostId)
                    created_domains.append(target_domain)

                nexus = self._api.build_scsi_port_nexus_data(
                    initiator.Id, target.Id)

                target_device = self._get_target_device(
                    target_domain, target, initiator)
                if not target_device:
                    target_device = self._api.create_target_device(
                        target_domain.Id, nexus)
                    created_devices.append(target_device)

                logical_disk = self._get_logical_disk_on_host(
                    virtual_disk.Id, target.HostId, logical_disks)

                logical_unit = self._get_logical_unit(
                    logical_disk, target_device)
                if not logical_unit:
                    logical_unit = self._create_logical_unit(
                        logical_disk, nexus, target_device)
                    created_mapping[logical_unit] = target_device
                logical_units[target] = logical_unit
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception("Mapping operation for virtual disk %(disk)s "
                              "failed with error.",
                              {'disk': virtual_disk.Id})
                try:
                    # Undo only what this call created: unmap logical
                    # units first, then delete devices and domains.
                    for logical_unit in created_mapping:
                        nexus = self._api.build_scsi_port_nexus_data(
                            created_mapping[logical_unit].InitiatorPortId,
                            created_mapping[logical_unit].TargetPortId)
                        self._api.unmap_logical_disk(
                            logical_unit.LogicalDiskId, nexus)
                    for target_device in created_devices:
                        self._api.delete_target_device(target_device.Id)
                    for target_domain in created_domains:
                        self._api.delete_target_domain(target_domain.Id)
                except datacore_exception.DataCoreException as e:
                    LOG.warning("An error occurred on a cleanup after "
                                "failed mapping operation: %s.", e)

        return logical_units

    def _get_target_domain(self, target, initiator):
        """Return the existing target domain for the host pair, if any."""
        target_domains = self._api.get_target_domains()
        target_domain = datacore_utils.get_first_or_default(
            lambda domain: (domain.InitiatorHostId == initiator.HostId
                            and domain.TargetHostId == target.HostId),
            target_domains,
            None)
        return target_domain

    def _get_target_device(self, target_domain, target, initiator):
        """Return the existing target device for the port nexus, if any."""
        target_devices = self._api.get_target_devices()
        target_device = datacore_utils.get_first_or_default(
            lambda device: (device.TargetDomainId == target_domain.Id
                            and device.InitiatorPortId == initiator.Id
                            and device.TargetPortId == target.Id),
            target_devices,
            None)
        return target_device

    def _get_logical_unit(self, logical_disk, target_device):
        """Return the existing logical unit mapping, if any."""
        logical_units = self._api.get_logical_units()
        logical_unit = datacore_utils.get_first_or_default(
            lambda unit: (unit.LogicalDiskId == logical_disk.Id
                          and unit.VirtualTargetDeviceId == target_device.Id),
            logical_units,
            None)
        return logical_unit

    def _create_logical_unit(self, logical_disk, nexus, target_device):
        """Map the logical disk over the nexus at the next free LUN."""
        free_lun = self._api.get_next_free_lun(target_device.Id)
        logical_unit = self._api.map_logical_disk(logical_disk.Id,
                                                  nexus,
                                                  free_lun,
                                                  logical_disk.ServerHostId,
                                                  'Client')
        return logical_unit

    def _check_iscsi_chap_configuration(self, iscsi_chap_enabled, targets):
        """Ensure the requested CHAP mode matches targets already in use.

        :param iscsi_chap_enabled: Whether CHAP authentication is requested
        :param targets: iSCSI target ports under consideration
        :raises cinder_exception.VolumeDriverException: If a target that
            is already serving logical units would have to change its
            authentication mode
        """

        logical_units = self._api.get_logical_units()
        target_devices = self._api.get_target_devices()

        for logical_unit in logical_units:
            target_device_id = logical_unit.VirtualTargetDeviceId
            target_device = datacore_utils.get_first(
                lambda device, key=target_device_id: device.Id == key,
                target_devices)
            target_port_id = target_device.TargetPortId
            target = datacore_utils.get_first_or_default(
                lambda target_port, key=target_port_id: target_port.Id == key,
                targets,
                None)
            # Mismatch when CHAP is requested but the serving target's
            # authentication is 'None', or CHAP is disabled while the
            # serving target currently requires authentication.
            if (target and iscsi_chap_enabled ==
                    (target.ServerPortProperties.Authentication == 'None')):
                msg = _("iSCSI CHAP authentication can't be configured for "
                        "target %s. Device exists that served through "
                        "this target.") % target.PortName
                LOG.error(msg)
                raise cinder_exception.VolumeDriverException(message=msg)

    def _setup_iscsi_chap_authentication(self, targets, initiator):
        """Configure (or clear) CHAP authentication on the target ports.

        :param targets: iSCSI target ports to configure
        :param initiator: iSCSI initiator port of the connecting host
        :return: Tuple (username, CHAP secret) when CHAP is enabled,
            otherwise None
        """

        iscsi_chap_enabled = self.configuration.datacore_iscsi_chap_enabled

        self._check_iscsi_chap_configuration(iscsi_chap_enabled, targets)

        server_group = self._get_our_server_group()
        update_access_token = False
        access_token = None
        chap_secret = None
        if iscsi_chap_enabled:
            authentication = 'CHAP'
            # Reuse the stored secret for this initiator if one exists,
            # otherwise generate and persist a new one.
            chap_secret = self._password_storage.get_password(
                server_group.Id, initiator.PortName)
            update_access_token = False
            if not chap_secret:
                chap_secret = volume_utils.generate_password(length=15)
                self._password_storage.set_password(
                    server_group.Id, initiator.PortName, chap_secret)
                update_access_token = True
            access_token = self._api.build_access_token(
                initiator.PortName,
                None,
                None,
                False,
                initiator.PortName,
                chap_secret)
        else:
            authentication = 'None'
            if self._password_storage:
                # CHAP is disabled: drop any stale stored credential.
                self._password_storage.delete_password(server_group.Id,
                                                       initiator.PortName)
        changed_targets = {}
        try:
            for target in targets:
                if iscsi_chap_enabled:
                    target_iscsi_nodes = getattr(target.iSCSINodes, 'Node', [])
                    iscsi_node = datacore_utils.get_first_or_default(
                        lambda node: node.Name == initiator.PortName,
                        target_iscsi_nodes,
                        None)
                    if (not iscsi_node
                            or not iscsi_node.AccessToken.TargetUsername
                            or update_access_token):
                        self._api.set_access_token(target.Id, access_token)
                properties = target.ServerPortProperties
                if properties.Authentication != authentication:
                    # Remember the previous mode so it can be restored
                    # if configuring a later target fails.
                    changed_targets[target] = properties.Authentication
                    properties.Authentication = authentication
                    self._api.set_server_port_properties(
                        target.Id, properties)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception("Configuring of iSCSI CHAP authentication for "
                              "initiator %(initiator)s failed.",
                              {'initiator': initiator.PortName})
                try:
                    for target in changed_targets:
                        properties = target.ServerPortProperties
                        properties.Authentication = changed_targets[target]
                        self._api.set_server_port_properties(
                            target.Id, properties)
                except datacore_exception.DataCoreException as e:
                    LOG.warning("An error occurred on a cleanup after  failed "
                                "configuration of iSCSI CHAP authentication "
                                "on initiator %(initiator)s: %(error)s.",
                                {'initiator': initiator.PortName, 'error': e})
        if iscsi_chap_enabled:
            return initiator.PortName, chap_secret

    def _get_initiator(self, host, iqn, available_ports):
        """Return the initiator port for the host, registering it if needed.

        :param host: Host name of the connector
        :param iqn: iSCSI initiator IQN of the connector
        :param available_ports: Known ports on the storage backend
        :return: iSCSI initiator port object
        """

        client = self._get_client(host, create_new=True)

        iscsi_initiator_ports = self._get_host_iscsi_initiator_ports(
            client, available_ports)

        iscsi_initiator = datacore_utils.get_first_or_default(
            lambda port: port.PortName == iqn,
            iscsi_initiator_ports,
            None)

        if not iscsi_initiator:
            scsi_port_data = self._api.build_scsi_port_data(
                client.Id, iqn, 'Initiator', 'iSCSI')
            iscsi_initiator = self._api.register_port(scsi_port_data)
        return iscsi_initiator

    def _get_targets(self, virtual_disk, available_ports):
        """Return usable front-end target ports for the virtual disk.

        Only targets on the servers hosting the disk are considered;
        targets listed in datacore_iscsi_unallowed_targets are filtered
        out.

        :param virtual_disk: Virtual disk to find targets for
        :param available_ports: Known ports on the storage backend
        :return: List of suitable iSCSI target ports
        """

        unallowed_targets = self.configuration.datacore_iscsi_unallowed_targets
        iscsi_target_ports = self._get_frontend_iscsi_target_ports(
            available_ports)
        server_port_map = {}
        for target_port in iscsi_target_ports:
            if target_port.HostId in server_port_map:
                server_port_map[target_port.HostId].append(target_port)
            else:
                server_port_map[target_port.HostId] = [target_port]
        iscsi_targets = []
        if virtual_disk.FirstHostId in server_port_map:
            iscsi_targets += server_port_map[virtual_disk.FirstHostId]
        if virtual_disk.SecondHostId in server_port_map:
            iscsi_targets += server_port_map[virtual_disk.SecondHostId]
        iscsi_targets = [target for target in iscsi_targets
                         if target.PortName not in unallowed_targets]
        return iscsi_targets

    @staticmethod
    def _get_logical_disk_on_host(virtual_disk_id,
                                  host_id, logical_disks):
        """Return the logical disk of the virtual disk on the given host."""
        logical_disk = datacore_utils.get_first(
            lambda disk: (disk.ServerHostId == host_id
                          and disk.VirtualDiskId == virtual_disk_id),
            logical_disks)
        return logical_disk

    @staticmethod
    def _is_iscsi_frontend_port(port):
        """Check that the port is a ready iSCSI front-end target port."""
        if (port.PortType == 'iSCSI'
                and port.PortMode == 'Target'
                and port.HostId
                and port.PresenceStatus == 'Present'
                and hasattr(port, 'IScsiPortStateInfo')):
            port_roles = port.ServerPortProperties.Role.split()
            port_state = (port.IScsiPortStateInfo.PortalsState
                          .PortalStateInfo[0].State)
            if 'Frontend' in port_roles and port_state == 'Ready':
                return True
        return False

    @staticmethod
    def _get_frontend_iscsi_target_ports(ports):
        """Filter ports down to usable front-end iSCSI target ports."""
        return [target_port for target_port in ports
                if ISCSIVolumeDriver._is_iscsi_frontend_port(target_port)]

    @staticmethod
    def _get_host_iscsi_initiator_ports(host, ports):
        """Filter ports down to iSCSI initiator ports of the given host."""
        return [port for port in ports
                if port.PortType == 'iSCSI'
                and port.PortMode == 'Initiator'
                and port.HostId == host.Id]
diff --git a/cinder/volume/drivers/datacore/passwd.py b/cinder/volume/drivers/datacore/passwd.py
new file mode 100644
index 00000000000..96b4faca281
--- /dev/null
+++ b/cinder/volume/drivers/datacore/passwd.py
@@ -0,0 +1,166 @@
+# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Password storage."""
+
+import contextlib
+import json
+import os
+import stat
+
+from oslo_log import log as logging
+
+from cinder.i18n import _
+from cinder import utils as cinder_utils
+
+
+LOG = logging.getLogger(__name__)
+
+
class FileStorage(object):
    """Represents a file as a dictionary."""

    def __init__(self, file_path):
        self._file_path = file_path
        self._file = None
        self._is_open = False

    def open(self):
        """Open a file for simultaneous reading and writing.

        If the specified file does not exist, it will be created
        with the 0600 access permissions for the current user, if needed
        the appropriate directories will be created with the 0750 access
        permissions for the current user.

        :return: Self, so the call can be wrapped in contextlib.closing()
        """

        file_dir = os.path.dirname(self._file_path)
        if file_dir and not os.path.isdir(file_dir):
            os.makedirs(file_dir)
            # chmod explicitly: the mode given to makedirs is masked by
            # the process umask.
            os.chmod(file_dir, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP)
        if not os.path.isfile(self._file_path):
            open(self._file_path, 'w').close()
            os.chmod(self._file_path, stat.S_IRUSR | stat.S_IWUSR)

        if self._file:
            self.close()
        self._file = open(self._file_path, 'r+')
        return self

    def load(self):
        """Reads the file and returns corresponded dictionary object.

        :return: The dictionary that represents the file content.
        :raise ValueError: If the file content is not a JSON object.
        """

        storage = {}
        if os.stat(self._file_path).st_size != 0:
            # Always parse from the beginning of the file; a previous
            # load() or save() leaves the file position at the end, which
            # would make json.load() fail or read nothing.
            self._file.seek(0)
            storage = json.load(self._file)
            if not isinstance(storage, dict):
                msg = _('File %s has a malformed format.') % self._file_path
                raise ValueError(msg)
        return storage

    def save(self, storage):
        """Writes the specified dictionary to the file.

        :param storage: Dictionary that should be written to the file.
        :raise TypeError: If the argument is not a dictionary.
        """

        if not isinstance(storage, dict):
            msg = _('%s is not a dict.') % repr(storage)
            raise TypeError(msg)

        self._file.seek(0)
        self._file.truncate()
        json.dump(storage, self._file)
        # Flush so that a subsequent load(), which checks the on-disk
        # size via os.stat(), observes the data just written.
        self._file.flush()

    def close(self):
        """Close the file."""

        if self._file:
            self._file.close()
        self._file = None
+
+
class PasswordFileStorage(object):
    """Password storage implementation.

    It stores passwords in a file in a clear text. The password file must be
    secured by setting up file permissions.
    """

    def __init__(self, file_path):
        self._file_path = file_path
        self._file_storage = FileStorage(file_path)

    def set_password(self, resource, username, password):
        """Store the credential for the resource.

        :param resource: Resource name for which credential will be stored
        :param username: User name
        :param password: Password
        """

        @cinder_utils.synchronized(
            'datacore-password_storage-' + self._file_path, external=True)
        def _set_password():
            with contextlib.closing(self._file_storage.open()) as storage:
                passwords = storage.load()
                passwords.setdefault(resource, {})[username] = password
                storage.save(passwords)

        _set_password()

    def get_password(self, resource, username):
        """Returns the stored password for the resource.

        If the password does not exist, it will return None

        :param resource: Resource name for which credential was stored
        :param username: User name
        :return password: Password
        """

        @cinder_utils.synchronized(
            'datacore-password_storage-' + self._file_path, external=True)
        def _get_password():
            with contextlib.closing(self._file_storage.open()) as storage:
                passwords = storage.load()
            entry = passwords.get(resource)
            return entry.get(username) if entry is not None else None

        return _get_password()

    def delete_password(self, resource, username):
        """Delete the stored credential for the resource.

        :param resource: Resource name for which credential was stored
        :param username: User name
        """

        @cinder_utils.synchronized(
            'datacore-password_storage-' + self._file_path, external=True)
        def _delete_password():
            with contextlib.closing(self._file_storage.open()) as storage:
                passwords = storage.load()
                users = passwords.get(resource, {})
                if username in users:
                    del users[username]
                    # Drop the resource entry entirely once its last
                    # credential is gone.
                    if not users:
                        del passwords[resource]
                    storage.save(passwords)

        _delete_password()
diff --git a/cinder/volume/drivers/datacore/utils.py b/cinder/volume/drivers/datacore/utils.py
new file mode 100644
index 00000000000..5b82112f904
--- /dev/null
+++ b/cinder/volume/drivers/datacore/utils.py
@@ -0,0 +1,73 @@
+# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Utilities and helper functions."""
+
+from oslo_utils import netutils
+import six
+
+
def build_network_address(host, port):
    """Combines the specified host name or IP address with the specified port.

    :param host: Host name or IP address in presentation (string) format
    :param port: Port number
    :return: The host name or IP address and port combination;
             IPv6 addresses are enclosed in the square brackets
    """
    # IPv6 literals need brackets so the colon before the port is
    # unambiguous.
    template = '[%s]:%s' if netutils.is_valid_ipv6(host) else '%s:%s'
    return template % (host, port)
+
+
def get_first(predicate, source):
    """Searches for an item that matches the conditions.

    :param predicate: Defines the conditions of the item to search for
    :param source: Iterable collection of items
    :return: The first item that matches the conditions defined by the
             specified predicate, if found; otherwise StopIteration is raised
    """

    # The builtin next() is used directly; six.next() is only an alias
    # for it and adds nothing here.
    return next(item for item in source if predicate(item))
+
+
def get_first_or_default(predicate, source, default):
    """Searches for an item that matches the conditions.

    :param predicate: Defines the conditions of the item to search for
    :param source: Iterable collection of items
    :param default: Value that is returned if the iterator is exhausted
    :return: The first item that matches the conditions defined by the
             specified predicate, if found; otherwise the default value
    """

    # next() with a default absorbs the exhausted-iterator case that the
    # caller would otherwise see as StopIteration.
    return next((item for item in source if predicate(item)), default)
+
+
def get_distinct_by(key, source):
    """Finds distinct items for the key and returns the result in a list.

    :param key: Function computing a key value for each item
    :param source: Iterable collection of items
    :return: The list of distinct by the key value items
    """

    seen = set()
    distinct = []
    for item in source:
        item_key = key(item)
        if item_key not in seen:
            seen.add(item_key)
            distinct.append(item)
    return distinct
diff --git a/driver-requirements.txt b/driver-requirements.txt
index 85c13fce6cd..20a4f4c77fd 100644
--- a/driver-requirements.txt
+++ b/driver-requirements.txt
@@ -43,3 +43,5 @@ capacity # BSD
 infi.dtypes.wwn # PSF
 infi.dtypes.iqn # PSF
 
+# DataCore SANsymphony
+websocket-client>=0.32.0 # LGPLv2+
diff --git a/releasenotes/notes/add-datacore-volume-driver-3775797b0515f538.yaml b/releasenotes/notes/add-datacore-volume-driver-3775797b0515f538.yaml
new file mode 100644
index 00000000000..810d04fdcff
--- /dev/null
+++ b/releasenotes/notes/add-datacore-volume-driver-3775797b0515f538.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - Added iSCSI and Fibre Channel volume drivers for DataCore’s
+    SANsymphony and Hyper-converged Virtual SAN storage.