From 572b84c073ef66d976d4530d9b7f07f518695ab0 Mon Sep 17 00:00:00 2001
From: Peter Wang <peter.wang13@emc.com>
Date: Wed, 4 May 2016 18:13:10 +0800
Subject: [PATCH] VNX: New Cinder driver in Newton

The previous VNX driver implemented all array operations
inside the driver itself, which complicated the driver logic.
This patch leverages a library named storops to interact
with the VNX array instead.
Major changes:

* Consolidate the VNX driver entry: both the FC and
iSCSI drivers now use the same driver class:
volume_driver =
cinder.volume.drivers.emc.vnx.driver.EMCVNXDriver

* A new driver option is introduced to select the
transport protocol (see the example below):
storage_protocol = <fc|iscsi>

* Several bugs in the previous driver are fixed
(see the Closes-bug tags below).
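
An illustrative cinder.conf backend section follows. This is a
sketch: the backend name and the san_* values are placeholders,
and only volume_driver and storage_protocol are introduced by
this patch; san_ip, san_login and san_password are the existing
Cinder SAN credential options.

    [vnx_backend]
    volume_driver = cinder.volume.drivers.emc.vnx.driver.EMCVNXDriver
    storage_protocol = fc
    san_ip = <SP management IP>
    san_login = <username>
    san_password = <password>
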
Co-authored-By: Tang Tina <tina.tang@emc.com>
Co-authored-By: Liang Ryan <ryan.liang@emc.com>

DocImpact
Implements: blueprint new-vnx-driver
Closes-bug: #1569245
Closes-bug: #1524160
Closes-bug: #1589338
Change-Id: I9f31db708b022b16debaa4f6c5a87d95e5ac2a4f
---
 cinder/opts.py                                |    6 +-
 cinder/tests/unit/test_emc_vnx.py             | 6822 -----------------
 .../unit/volume/drivers/emc/vnx/__init__.py   |   27 +
 .../unit/volume/drivers/emc/vnx/fake_enum.py  |  119 +
 .../volume/drivers/emc/vnx/fake_exception.py  |  172 +
 .../volume/drivers/emc/vnx/fake_storops.py    |   76 +
 .../volume/drivers/emc/vnx/mocked_cinder.yaml |  442 ++
 .../volume/drivers/emc/vnx/mocked_vnx.yaml    | 2017 +++++
 .../unit/volume/drivers/emc/vnx/res_mock.py   |  441 ++
 .../volume/drivers/emc/vnx/test_adapter.py    | 1304 ++++
 .../volume/drivers/emc/vnx/test_client.py     |  463 ++
 .../volume/drivers/emc/vnx/test_common.py     |  297 +
 .../volume/drivers/emc/vnx/test_driver.py     |   71 +
 .../volume/drivers/emc/vnx/test_res_mock.py   |   90 +
 .../volume/drivers/emc/vnx/test_res_mock.yaml |   59 +
 .../volume/drivers/emc/vnx/test_taskflows.py  |  181 +
 .../unit/volume/drivers/emc/vnx/test_utils.py |  177 +
 .../unit/volume/drivers/emc/vnx/utils.py      |   93 +
 cinder/volume/driver.py                       |    6 +
 cinder/volume/drivers/emc/emc_cli_iscsi.py    |  296 -
 cinder/volume/drivers/emc/emc_vnx_cli.py      | 5054 ------------
 cinder/volume/drivers/emc/vnx/__init__.py     |    0
 cinder/volume/drivers/emc/vnx/adapter.py      | 1466 ++++
 cinder/volume/drivers/emc/vnx/client.py       |  552 ++
 cinder/volume/drivers/emc/vnx/common.py       |  483 ++
 cinder/volume/drivers/emc/vnx/const.py        |   39 +
 .../emc/{emc_cli_fc.py => vnx/driver.py}      |  192 +-
 cinder/volume/drivers/emc/vnx/taskflows.py    |  579 ++
 cinder/volume/drivers/emc/vnx/utils.py        |  339 +
 .../vnx-new-driver-7e96934c2d3a6edc.yaml      |   16 +
 30 files changed, 9612 insertions(+), 12267 deletions(-)
 delete mode 100644 cinder/tests/unit/test_emc_vnx.py
 create mode 100644 cinder/tests/unit/volume/drivers/emc/vnx/__init__.py
 create mode 100644 cinder/tests/unit/volume/drivers/emc/vnx/fake_enum.py
 create mode 100644 cinder/tests/unit/volume/drivers/emc/vnx/fake_exception.py
 create mode 100644 cinder/tests/unit/volume/drivers/emc/vnx/fake_storops.py
 create mode 100644 cinder/tests/unit/volume/drivers/emc/vnx/mocked_cinder.yaml
 create mode 100644 cinder/tests/unit/volume/drivers/emc/vnx/mocked_vnx.yaml
 create mode 100644 cinder/tests/unit/volume/drivers/emc/vnx/res_mock.py
 create mode 100644 cinder/tests/unit/volume/drivers/emc/vnx/test_adapter.py
 create mode 100644 cinder/tests/unit/volume/drivers/emc/vnx/test_client.py
 create mode 100644 cinder/tests/unit/volume/drivers/emc/vnx/test_common.py
 create mode 100644 cinder/tests/unit/volume/drivers/emc/vnx/test_driver.py
 create mode 100644 cinder/tests/unit/volume/drivers/emc/vnx/test_res_mock.py
 create mode 100644 cinder/tests/unit/volume/drivers/emc/vnx/test_res_mock.yaml
 create mode 100644 cinder/tests/unit/volume/drivers/emc/vnx/test_taskflows.py
 create mode 100644 cinder/tests/unit/volume/drivers/emc/vnx/test_utils.py
 create mode 100644 cinder/tests/unit/volume/drivers/emc/vnx/utils.py
 delete mode 100644 cinder/volume/drivers/emc/emc_cli_iscsi.py
 delete mode 100644 cinder/volume/drivers/emc/emc_vnx_cli.py
 create mode 100644 cinder/volume/drivers/emc/vnx/__init__.py
 create mode 100644 cinder/volume/drivers/emc/vnx/adapter.py
 create mode 100644 cinder/volume/drivers/emc/vnx/client.py
 create mode 100644 cinder/volume/drivers/emc/vnx/common.py
 create mode 100644 cinder/volume/drivers/emc/vnx/const.py
 rename cinder/volume/drivers/emc/{emc_cli_fc.py => vnx/driver.py} (63%)
 create mode 100644 cinder/volume/drivers/emc/vnx/taskflows.py
 create mode 100644 cinder/volume/drivers/emc/vnx/utils.py
 create mode 100644 releasenotes/notes/vnx-new-driver-7e96934c2d3a6edc.yaml

diff --git a/cinder/opts.py b/cinder/opts.py
index 6c175366109..8fbd98a2d85 100644
--- a/cinder/opts.py
+++ b/cinder/opts.py
@@ -78,10 +78,10 @@ from cinder.volume.drivers import drbdmanagedrv as \
     cinder_volume_drivers_drbdmanagedrv
 from cinder.volume.drivers.emc import emc_vmax_common as \
     cinder_volume_drivers_emc_emcvmaxcommon
-from cinder.volume.drivers.emc import emc_vnx_cli as \
-    cinder_volume_drivers_emc_emcvnxcli
 from cinder.volume.drivers.emc import scaleio as \
     cinder_volume_drivers_emc_scaleio
+from cinder.volume.drivers.emc.vnx import common as \
+    cinder_volume_drivers_emc_vnx_common
 from cinder.volume.drivers.emc import xtremio as \
     cinder_volume_drivers_emc_xtremio
 from cinder.volume.drivers import eqlx as cinder_volume_drivers_eqlx
@@ -253,7 +253,6 @@ def list_opts():
                 cinder_context.context_opts,
                 cinder_scheduler_driver.scheduler_driver_opts,
                 cinder_volume_drivers_scality.volume_opts,
-                cinder_volume_drivers_emc_emcvnxcli.loc_opts,
                 cinder_volume_drivers_vmware_vmdk.vmdk_opts,
                 cinder_volume_drivers_lenovo_lenovocommon.common_opts,
                 cinder_volume_drivers_lenovo_lenovocommon.iscsi_opts,
@@ -275,6 +274,7 @@ def list_opts():
                 cinder_api_views_versions.versions_opts,
                 cinder_volume_drivers_nimble.nimble_opts,
                 cinder_volume_drivers_windows_windows.windows_opts,
+                cinder_volume_drivers_emc_vnx_common.EMC_VNX_OPTS,
                 cinder_volume_drivers_san_hp_hpmsacommon.common_opts,
                 cinder_volume_drivers_san_hp_hpmsacommon.iscsi_opts,
                 cinder_image_glance.glance_opts,
diff --git a/cinder/tests/unit/test_emc_vnx.py b/cinder/tests/unit/test_emc_vnx.py
deleted file mode 100644
index fa8653dc1fa..00000000000
--- a/cinder/tests/unit/test_emc_vnx.py
+++ /dev/null
@@ -1,6822 +0,0 @@
-# Copyright (c) 2012 - 2015 EMC Corporation, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-import ddt
-import json
-import os
-import re
-
-import eventlet
-import mock
-from oslo_concurrency import processutils
-import six
-
-from cinder import context
-from cinder import exception
-from cinder.objects import fields
-from cinder import test
-from cinder.tests.unit.consistencygroup import fake_cgsnapshot
-from cinder.tests.unit.consistencygroup import fake_consistencygroup
-from cinder.tests.unit import fake_constants as fake
-from cinder.tests.unit import fake_snapshot
-from cinder.tests.unit import fake_volume
-from cinder.tests.unit import utils
-from cinder.volume import configuration as conf
-from cinder.volume.drivers.emc import emc_cli_fc
-from cinder.volume.drivers.emc import emc_cli_iscsi
-from cinder.volume.drivers.emc import emc_vnx_cli
-from cinder.zonemanager import fc_san_lookup_service as fc_service
-
-from mock import patch
-
-
-SUCCEED = ("", 0)
-FAKE_ERROR_RETURN = ("FAKE ERROR", 255)
-VERSION = emc_vnx_cli.EMCVnxCliBase.VERSION
-build_replication_data = (
-    emc_vnx_cli.EMCVnxCliBase._build_replication_driver_data)
-REPLICATION_KEYS = emc_vnx_cli.EMCVnxCliBase.REPLICATION_KEYS
-
-
-def build_provider_location(lun_id, lun_type, base_lun_name=None, system=None):
-    pl_dict = {'system': 'FNM11111' if system is None else system,
-               'type': six.text_type(lun_type),
-               'id': six.text_type(lun_id),
-               'base_lun_name': six.text_type(base_lun_name),
-               'version': VERSION}
-    return '|'.join([k + '^' + pl_dict[k] for k in pl_dict])
-
-
-def build_migration_dest_name(src_name):
-    return src_name + '_dest'
-
-
-class EMCVNXCLIDriverTestData(object):
-
-    base_lun_name = fake.VOLUME_NAME
-    replication_metadata = {'host': 'host@backendsec#unit_test_pool',
-                            'system': 'fake_serial'}
-    fake_wwn = 'fake_wwn'
-    test_volume = {
-        'status': 'creating',
-        'name': fake.VOLUME_NAME,
-        'size': 1,
-        'volume_name': fake.VOLUME_NAME,
-        'id': fake.VOLUME_ID,
-        'provider_auth': None,
-        'host': "host@backendsec#unit_test_pool",
-        'project_id': fake.PROJECT_ID,
-        'provider_location': build_provider_location(1, 'lun', base_lun_name),
-        'display_name': fake.VOLUME_NAME,
-        'display_description': 'test volume',
-        'volume_type_id': None,
-        'consistencygroup_id': None
-    }
-
-    test_legacy_volume = {
-        'name': fake.VOLUME_NAME,
-        'size': 1,
-        'volume_name': fake.VOLUME_NAME,
-        'id': fake.VOLUME_ID,
-        'provider_auth': None,
-        'host': "host@backendsec#unit_test_pool",
-        'project_id': fake.PROJECT_ID,
-        'provider_location': 'system^FNM11111|type^lun|id^1',
-        'display_name': fake.VOLUME_NAME,
-        'display_description': 'test volume',
-        'volume_type_id': None,
-        'consistencygroup_id': None
-    }
-
-    test_volume_clone_cg = {
-        'name': fake.VOLUME_NAME,
-        'size': 1,
-        'volume_name': fake.VOLUME_NAME,
-        'id': fake.VOLUME_ID,
-        'provider_auth': None,
-        'host': "host@backendsec#unit_test_pool",
-        'project_id': fake.PROJECT_ID,
-        'display_name': fake.VOLUME_NAME,
-        'display_description': 'test volume',
-        'volume_type_id': None,
-        'consistencygroup_id': None,
-        'provider_location': build_provider_location(1, 'lun', base_lun_name),
-    }
-
-    test_volume_cg = {
-        'name': fake.VOLUME_NAME,
-        'size': 1,
-        'volume_name': fake.VOLUME_NAME,
-        'id': fake.VOLUME_ID,
-        'provider_auth': None,
-        'host': "host@backendsec#unit_test_pool",
-        'project_id': fake.PROJECT_ID,
-        'display_name': fake.VOLUME_NAME,
-        'display_description': 'test volume',
-        'volume_type_id': None,
-        'consistencygroup_id': fake.CONSISTENCY_GROUP_ID,
-        'provider_location': build_provider_location(1, 'lun', base_lun_name),
-    }
-
-    test_volume_rw = {
-        'name': fake.VOLUME_NAME,
-        'size': 1,
-        'volume_name': fake.VOLUME_NAME,
-        'id': fake.VOLUME_ID,
-        'provider_auth': None,
-        'host': "host@backendsec#unit_test_pool",
-        'project_id': fake.PROJECT_ID,
-        'display_name': fake.VOLUME_NAME,
-        'display_description': 'test volume',
-        'volume_type_id': None,
-        'consistencygroup_id': None,
-        'provider_location': build_provider_location(1, 'lun', base_lun_name),
-    }
-
-    test_volume2 = {
-        'name': fake.VOLUME2_NAME,
-        'size': 1,
-        'volume_name': fake.VOLUME2_NAME,
-        'id': fake.VOLUME2_ID,
-        'provider_auth': None,
-        'host': "host@backendsec#unit_test_pool",
-        'project_id': fake.PROJECT_ID,
-        'display_name': fake.VOLUME2_NAME,
-        'consistencygroup_id': None,
-        'display_description': 'test volume',
-        'volume_type_id': None,
-        'provider_location':
-            build_provider_location(1, 'lun', fake.VOLUME2_NAME)}
-
-    volume_in_cg = {
-        'name': fake.VOLUME2_NAME,
-        'size': 1,
-        'volume_name': fake.VOLUME2_NAME,
-        'id': fake.VOLUME2_ID,
-        'provider_auth': None,
-        'host': "host@backendsec#unit_test_pool",
-        'project_id': fake.PROJECT_ID,
-        'display_name': 'volume-1_in_cg',
-        'consistencygroup_id': fake.CONSISTENCY_GROUP_ID,
-        'display_description': 'test volume',
-        'provider_location':
-            build_provider_location(1, 'lun', fake.VOLUME2_NAME),
-        'volume_type_id': None}
-
-    volume2_in_cg = {
-        'name': fake.VOLUME3_NAME,
-        'size': 1,
-        'volume_name': fake.VOLUME3_NAME,
-        'id': fake.VOLUME3_ID,
-        'provider_auth': None,
-        'project_id': fake.PROJECT_ID,
-        'display_name': 'volume-3_in_cg',
-        'provider_location':
-            build_provider_location(3, 'lun', fake.VOLUME3_NAME),
-        'consistencygroup_id': fake.CONSISTENCY_GROUP_ID,
-        'display_description': 'test volume',
-        'volume_type_id': None}
-
-    test_volume_with_type = {
-        'name': fake.VOLUME_NAME,
-        'size': 1,
-        'volume_name': fake.VOLUME_NAME,
-        'id': fake.VOLUME_ID,
-        'provider_auth': None,
-        'host': "host@backendsec#unit_test_pool",
-        'project_id': fake.PROJECT_ID,
-        'display_name': 'thin_vol',
-        'consistencygroup_id': None,
-        'display_description': 'vol with type',
-        'volume_type_id': fake.VOLUME_TYPE_ID,
-        'provider_location':
-            build_provider_location(1, 'smp', fake.VOLUME_NAME),
-        'volume_metadata': [{'key': 'snapcopy', 'value': 'True'}]}
-
-    test_failed_volume = {
-        'name': fake.VOLUME4_NAME,
-        'size': 1,
-        'volume_name': fake.VOLUME4_NAME,
-        'id': fake.VOLUME4_ID,
-        'provider_auth': None,
-        'host': "host@backendsec#unit_test_pool",
-        'project_id': fake.PROJECT_ID,
-        'display_name': 'failed_vol',
-        'consistencygroup_id': None,
-        'display_description': 'test failed volume',
-        'volume_type_id': None}
-
-    test_volume1_in_sg = {
-        'name': fake.VOLUME_NAME,
-        'size': 1,
-        'volume_name': fake.VOLUME_NAME,
-        'id': fake.VOLUME_ID,
-        'provider_auth': None,
-        'host': "host@backendsec#unit_test_pool",
-        'project_id': fake.PROJECT_ID,
-        'display_name': 'failed_vol',
-        'display_description': 'Volume 1 in SG',
-        'volume_type_id': None,
-        'provider_location':
-            build_provider_location(4, 'lun', fake.VOLUME_NAME, 'fakesn')}
-
-    test_volume2_in_sg = {
-        'name': fake.VOLUME2_NAME,
-        'size': 1,
-        'volume_name': fake.VOLUME2_NAME,
-        'id': fake.VOLUME2_ID,
-        'provider_auth': None,
-        'host': "host@backendsec#unit_test_pool",
-        'project_id': fake.PROJECT_ID,
-        'display_name': 'failed_vol',
-        'display_description': 'Volume 2 in SG',
-        'volume_type_id': None,
-        'provider_location':
-            build_provider_location(3, 'lun', fake.VOLUME2_NAME, 'fakesn')}
-
-    test_snapshot = {
-        'name': fake.SNAPSHOT_NAME,
-        'size': 1,
-        'id': fake.SNAPSHOT_ID,
-        'volume_name': test_volume['name'],
-        'volume': test_volume,
-        'volume_size': 1,
-        'consistencygroup_id': None,
-        'cgsnapshot_id': None,
-        'project_id': fake.PROJECT_ID}
-
-    test_snapshot2 = {
-        'name': fake.SNAPSHOT2_ID,
-        'size': 1,
-        'id': fake.SNAPSHOT2_ID,
-        'volume_name': test_volume['name'],
-        'volume': test_volume,
-        'volume_size': 1,
-        'project_id': fake.PROJECT_ID}
-
-    test_clone = {
-        'name': fake.VOLUME2_NAME,
-        'size': 1,
-        'id': fake.VOLUME2_ID,
-        'volume_name': fake.VOLUME2_NAME,
-        'provider_auth': None,
-        'host': "host@backendsec#unit_test_pool",
-        'project_id': fake.PROJECT_ID,
-        'display_name': fake.VOLUME2_NAME,
-        'consistencygroup_id': None,
-        'display_description': 'volume created from snapshot',
-        'volume_type_id': '19fdd0dd-03b3-4d7c-b541-f4df46f308c8',
-        'provider_location': None,
-        'volume_metadata': [{'key': 'snapcopy', 'value': 'True'}]}
-
-    test_clone_cg = {
-        'name': fake.VOLUME2_NAME,
-        'size': 1,
-        'id': fake.VOLUME2_ID,
-        'volume_name': fake.VOLUME2_NAME,
-        'provider_auth': None,
-        'host': "host@backendsec#unit_test_pool",
-        'project_id': fake.PROJECT_ID,
-        'display_name': fake.VOLUME2_NAME,
-        'consistencygroup_id': fake.CONSISTENCY_GROUP_ID,
-        'display_description': 'volume created from snapshot',
-        'volume_type_id': None,
-        'provider_location':
-            build_provider_location(2, 'lun', fake.VOLUME2_NAME, 'fakesn')}
-
-    test_volume3 = {
-        'migration_status': None, 'availability_zone': 'nova',
-        'id': fake.VOLUME3_ID,
-        'name': fake.VOLUME3_NAME,
-        'size': 2,
-        'status': 'available',
-        'volume_type_id': fake.VOLUME_TYPE_ID,
-        'deleted': False,
-        'host': "host@backendsec#unit_test_pool",
-        'source_volid': None, 'provider_auth': None,
-        'display_name': 'vol-test03',
-        'attach_status': 'detached',
-        'volume_type': [],
-        'volume_attachment': [],
-        'provider_location':
-            build_provider_location(1, 'lun', fake.VOLUME3_NAME),
-        '_name_id': None, 'metadata': {}}
-
-    test_volume4 = {'migration_status': None, 'availability_zone': 'nova',
-                    'id': fake.VOLUME4_ID,
-                    'name': fake.VOLUME4_NAME,
-                    'size': 2,
-                    'status': 'available',
-                    'volume_type_id': fake.VOLUME_TYPE_ID,
-                    'deleted': False,
-                    'provider_location':
-                    build_provider_location(4, 'lun', fake.VOLUME4_NAME),
-                    'host': 'ubuntu-server12@array_backend_1',
-                    'source_volid': None, 'provider_auth': None,
-                    'display_name': 'vol-test04',
-                    'volume_attachment': [],
-                    'attach_status': 'detached',
-                    'volume_type': [],
-                    '_name_id': None, 'metadata': {}}
-
-    test_volume5 = {'migration_status': None, 'availability_zone': 'nova',
-                    'id': fake.VOLUME5_ID,
-                    'name_id': fake.VOLUME_ID,
-                    'name': fake.VOLUME5_ID,
-                    'size': 1,
-                    'status': 'available',
-                    'volume_type_id': fake.VOLUME_TYPE_ID,
-                    'deleted': False,
-                    'provider_location':
-                    build_provider_location(5, 'lun', fake.VOLUME5_ID),
-                    'host': 'ubuntu-server12@array_backend_1#unit_test_pool',
-                    'source_volid': None, 'provider_auth': None,
-                    'display_name': 'vol-test05',
-                    'volume_attachment': [],
-                    'attach_status': 'detached',
-                    'volume_type': [],
-                    '_name_id': None, 'metadata': {}}
-
-    test_volume_replication = {
-        'migration_status': None,
-        'availability_zone': 'nova',
-        'id': fake.VOLUME5_ID,
-        'name_id': None,
-        'name': fake.VOLUME5_NAME,
-        'size': 1,
-        'status': 'available',
-        'volume_type_id': fake.VOLUME_TYPE_ID,
-        'deleted': False, 'provider_location':
-        build_provider_location(5, 'lun', fake.VOLUME5_NAME),
-        'host': 'ubuntu-server12@array_backend_1#unit_test_pool',
-        'source_volid': None, 'provider_auth': None,
-        'display_name': 'vol-test05',
-        'volume_attachment': [],
-        'attach_status': 'detached',
-        'volume_type': [],
-        'replication_driver_data': '',
-        'replication_status': 'enabled',
-        '_name_id': None, 'metadata': replication_metadata}
-
-    test_replication_failover = {
-        'migration_status': None,
-        'availability_zone': 'nova',
-        'id': fake.VOLUME5_ID,
-        'name_id': None,
-        'name': fake.VOLUME5_ID,
-        'size': 1,
-        'status': 'available',
-        'volume_type_id': fake.VOLUME_TYPE_ID,
-        'deleted': False, 'provider_location':
-        build_provider_location(5, 'lun', fake.VOLUME5_ID),
-        'host': 'ubuntu-server12@array_backend_1#unit_test_pool',
-        'source_volid': None, 'provider_auth': None,
-        'display_name': 'vol-test05',
-        'volume_attachment': [],
-        'attach_status': 'detached',
-        'volume_type': [],
-        'replication_driver_data': '',
-        'replication_status': 'failed-over',
-        '_name_id': None, 'metadata': replication_metadata}
-
-    test_new_type = {'name': 'voltype0', 'qos_specs_id': None,
-                     'deleted': False,
-                     'extra_specs': {'storagetype:provisioning': 'thin'},
-                     'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
-
-    test_replication_type = {'name': 'rep_type',
-                             'extra_specs': {'replication_enbled':
-                                             '<is> True'},
-                             'id': fake.VOLUME_TYPE_ID}
-
-    test_diff = {'encryption': {}, 'qos_specs': {},
-                 'extra_specs':
-                 {'storagetype:provisioning': ('thick', 'thin')}}
-
-    test_host = {'host': 'ubuntu-server12@pool_backend_1#POOL_SAS1',
-                 'capabilities':
-                 {'pool_name': 'POOL_SAS1',
-                  'location_info': 'POOL_SAS1|FNM00124500890',
-                  'volume_backend_name': 'pool_backend_1',
-                  'storage_protocol': 'iSCSI'}}
-
-    connector = {
-        'ip': '10.0.0.2',
-        'initiator': 'iqn.1993-08.org.debian:01:222',
-        'wwpns': ["1234567890123456", "1234567890543216"],
-        'wwnns': ["2234567890123456", "2234567890543216"],
-        'host': 'fakehost'}
-
-    test_new_type2 = {'name': 'voltype0', 'qos_specs_id': None,
-                      'deleted': False,
-                      'extra_specs': {'storagetype:pool': 'POOL_SAS2'},
-                      'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
-
-    test_diff2 = {'encryption': {}, 'qos_specs': {},
-                  'extra_specs':
-                  {'storagetype:pool': ('POOL_SAS1', 'POOL_SAS2')}}
-
-    test_host2 = {'host': 'ubuntu-server12@array_backend_1',
-                  'capabilities':
-                  {'location_info': '|FNM00124500890',
-                   'volume_backend_name': 'array_backend_1',
-                   'storage_protocol': 'iSCSI'}}
-
-    test_cg = {'id': fake.CONSISTENCY_GROUP_ID,
-               'name': 'group_name',
-               'status': fields.ConsistencyGroupStatus.DELETING}
-
-    test_cg_with_type = {'id': fake.CONSISTENCY_GROUP_ID,
-                         'name': 'group_name',
-                         'status': fields.ConsistencyGroupStatus.CREATING,
-                         'volume_type_id':
-                         'abc1-2320-9013-8813-8941-1374-8112-1231,'
-                         '19fdd0dd-03b3-4d7c-b541-f4df46f308c8,'}
-
-    test_cgsnapshot = {
-        'consistencygroup_id': fake.CONSISTENCY_GROUP_ID,
-        'id': fake.CGSNAPSHOT_ID,
-        'status': 'available'}
-
-    test_member_cgsnapshot = {
-        'name': fake.CGSNAPSHOT_NAME,
-        'size': 1,
-        'id': fake.CGSNAPSHOT_ID,
-        'volume': test_volume,
-        'volume_name': fake.VOLUME_NAME,
-        'volume_size': 1,
-        'consistencygroup_id': fake.CONSISTENCY_GROUP_ID,
-        'cgsnapshot_id': fake.CGSNAPSHOT_ID,
-        'project_id': fake.PROJECT_ID
-    }
-
-    test_member_cgsnapshot2 = {
-        'name': 'snapshot-2222',
-        'size': 1,
-        'id': fake.CGSNAPSHOT2_ID,
-        'volume': test_volume2,
-        'volume_name': fake.VOLUME2_NAME,
-        'volume_size': 1,
-        'consistencygroup_id': fake.CONSISTENCY_GROUP_ID,
-        'cgsnapshot_id': fake.CGSNAPSHOT_ID,
-        'project_id': fake.PROJECT_ID
-    }
-
-    test_lun_id = 1
-    test_existing_ref = {'source-id': test_lun_id}
-    test_existing_ref_source_name = {'source-name': fake.VOLUME_NAME}
-    test_pool_name = 'unit_test_pool'
-    device_map = {
-        '1122334455667788': {
-            'initiator_port_wwn_list': ['123456789012345', '123456789054321'],
-            'target_port_wwn_list': ['1122334455667777']}}
-    i_t_map = {'123456789012345': ['1122334455667777'],
-               '123456789054321': ['1122334455667777']}
-
-    POOL_PROPERTY_CMD = ('storagepool', '-list', '-name', 'unit_test_pool',
-                         '-userCap', '-availableCap',
-                         '-state', '-prcntFullThreshold')
-
-    POOL_PROPERTY_W_FASTCACHE_CMD = ('storagepool', '-list', '-name',
-                                     'unit_test_pool', '-availableCap',
-                                     '-userCap', '-state',
-                                     '-subscribedCap',
-                                     '-prcntFullThreshold',
-                                     '-fastcache')
-
-    def POOL_GET_ALL_CMD(self, withfastcache=False):
-        if withfastcache:
-            return ('storagepool', '-list', '-availableCap',
-                    '-userCap', '-state', '-subscribedCap',
-                    '-prcntFullThreshold',
-                    '-fastcache')
-        else:
-            return ('storagepool', '-list', '-availableCap',
-                    '-userCap', '-state', '-subscribedCap',
-                    '-prcntFullThreshold')
-
-    def POOL_GET_ALL_RESULT(self, withfastcache=False):
-        if withfastcache:
-            return ("Pool Name:  unit_test_pool\n"
-                    "Pool ID:  0\n"
-                    "Percent Full Threshold:  70\n"
-                    "User Capacity (Blocks):  6881061888\n"
-                    "User Capacity (GBs):  3281.146\n"
-                    "Available Capacity (Blocks):  6512292864\n"
-                    "Available Capacity (GBs):  3105.303\n"
-                    "Total Subscribed Capacity (GBs):  536.140\n"
-                    "FAST Cache:  Enabled\n"
-                    "State: Ready\n"
-                    "\n"
-                    "Pool Name:  unit_test_pool2\n"
-                    "Pool ID:  1\n"
-                    "Percent Full Threshold:  70\n"
-                    "User Capacity (Blocks):  8598306816\n"
-                    "User Capacity (GBs):  4099.992\n"
-                    "Available Capacity (Blocks):  8356663296\n"
-                    "Available Capacity (GBs):  3984.768\n"
-                    "Total Subscribed Capacity (GBs):  636.240\n"
-                    "FAST Cache:  Disabled\n"
-                    "State: Ready\n", 0)
-        else:
-            return ("Pool Name:  unit_test_pool\n"
-                    "Pool ID:  0\n"
-                    "Percent Full Threshold:  70\n"
-                    "User Capacity (Blocks):  6881061888\n"
-                    "User Capacity (GBs):  3281.146\n"
-                    "Available Capacity (Blocks):  6512292864\n"
-                    "Available Capacity (GBs):  3105.303\n"
-                    "Total Subscribed Capacity (GBs):  536.140\n"
-                    "State: Ready\n"
-                    "\n"
-                    "Pool Name:  unit_test_pool2\n"
-                    "Pool ID:  1\n"
-                    "Percent Full Threshold:  70\n"
-                    "User Capacity (Blocks):  8598306816\n"
-                    "User Capacity (GBs):  4099.992\n"
-                    "Available Capacity (Blocks):  8356663296\n"
-                    "Available Capacity (GBs):  3984.768\n"
-                    "Total Subscribed Capacity (GBs):  636.240\n"
-                    "State: Ready\n", 0)
-
-    def POOL_GET_STATE_RESULT(self, pools):
-        output = []
-        for i, po in enumerate(pools):
-            if i != 0:
-                output.append("\n")
-            output.append("Pool Name:  %s" % po['pool_name'])
-            output.append("Pool ID: %s" % i)
-            output.append("State: %s" % po['state'])
-        return ("\n".join(output), 0)
-
-    def POOL_GET_ALL_STATES_TEST(self, states=['Ready']):
-        output = ""
-        for i, stat in enumerate(states):
-            out = ("Pool Name:  Pool_" + str(i) + "\n"
-                   "Pool ID:  " + str(i) + "\n"
-                   "Percent Full Threshold:  70\n"
-                   "User Capacity (Blocks):  8598306816\n"
-                   "User Capacity (GBs):  4099.992\n"
-                   "Available Capacity (Blocks):  8356663296\n"
-                   "Available Capacity (GBs):  3984.768\n"
-                   "FAST Cache:  Enabled\n"
-                   "State: " + stat + "\n\n")
-            output += out
-        return (output, 0)
-
-    def SNAP_NOT_EXIST(self):
-        return ("Could not retrieve the specified (Snapshot).\n "
-                "The (Snapshot) may not exist", 9)
-
-    NDU_LIST_CMD = ('ndu', '-list')
-    NDU_LIST_RESULT = ("Name of the software package:   -Compression " +
-                       "Name of the software package:   -Deduplication " +
-                       "Name of the software package:   -FAST " +
-                       "Name of the software package:   -FASTCache " +
-                       "Name of the software package:   -ThinProvisioning "
-                       "Name of the software package:   -VNXSnapshots "
-                       "Name of the software package:   -MirrorView/S",
-                       0)
-
-    NDU_LIST_RESULT_WO_LICENSE = (
-        "Name of the software package:   -Unisphere ",
-        0)
-
-    LUN_NOT_IN_MIGRATING = (
-        'The specified source LUN is not currently migrating', 23)
-
-    MIGRATE_PROPERTY_MIGRATING = """\
-        Source LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
-        Source LU ID:  63950
-        Dest LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
-        Dest LU ID:  136
-        Migration Rate:  high
-        Current State:  MIGRATING
-        Percent Complete:  50
-        Time Remaining:  0 second(s)
-        """
-    MIGRATE_PROPERTY_STOPPED = """\
-        Source LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
-        Source LU ID:  63950
-        Dest LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
-        Dest LU ID:  136
-        Migration Rate:  high
-        Current State:  STOPPED - Destination full
-        Percent Complete:  60
-        Time Remaining:  0 second(s)
-        """
-    LIST_LUN_1_SPECS = """
-        LOGICAL UNIT NUMBER 1
-        Name:  os-044e89e9-3aeb-46eb-a1b0-946f0a13545c
-        Pool Name:  unit_test_pool
-        Is Thin LUN:  No
-        Is Compressed:  No
-        Deduplication State:  Off
-        Deduplication Status:  OK(0x0)
-        Tiering Policy:  Auto Tier
-        Initial Tier:  Highest Available
-    """
-
-    def LIST_LUN_1_ALL(self, lun_id=1, wwn=fake_wwn):
-        return ("""
-        LOGICAL UNIT NUMBER %(lun_id)s
-        Name:  os-044e89e9-3aeb-46eb-a1b0-946f0a13545c
-        UID: %(wwn)s
-        Current Owner:  SP A
-        User Capacity (Blocks):  46137344
-        User Capacity (GBs):  1.000
-        Pool Name:  unit_test_pool
-        Current State:  Ready
-        Status:  OK(0x0)
-        Is Faulted:  false
-        Is Transitioning:  false
-        Current Operation:  None
-        Current Operation State:  N/A
-        Current Operation Status:  N/A
-        Current Operation Percent Completed:  0
-        Is Thin LUN:  No
-        Is Compressed:  No
-        Deduplication State:  Off
-        Deduplication Status:  OK(0x0)
-        Tiering Policy:  Auto Tier
-        Initial Tier:  Highest Available
-        Attached Snapshot:  N/A
-    """ % {'lun_id': lun_id, 'wwn': wwn}, 0)
-
-    def SNAP_MP_CREATE_CMD(self, name=fake.VOLUME_NAME,
-                           source=fake.VOLUME_NAME):
-        return ('lun', '-create', '-type', 'snap', '-primaryLunName',
-                source, '-name', name)
-
-    def SNAP_ATTACH_CMD(self, name=fake.VOLUME_NAME,
-                        snapName=fake.SNAPSHOT_NAME):
-        return ('lun', '-attach', '-name', name, '-snapName', snapName)
-
-    def SNAP_DELETE_CMD(self, name):
-        return ('snap', '-destroy', '-id', name, '-o')
-
-    def SNAP_CREATE_CMD(self, name):
-        return ('snap', '-create', '-res', 1, '-name', name,
-                '-allowReadWrite', 'yes',
-                '-allowAutoDelete', 'no')
-
-    def SNAP_MODIFY_CMD(self, name, rw):
-        return ('snap', '-modify', '-id', name, '-allowReadWrite', rw,
-                '-allowAutoDelete', 'yes')
-
-    def SNAP_LIST_CMD(self, res_id=1):
-        cmd = ('snap', '-list', '-res', int(res_id))
-        return cmd
-
-    def LUN_DELETE_CMD(self, name):
-        return ('lun', '-destroy', '-name', name, '-forceDetach', '-o')
-
-    def LUN_EXTEND_CMD(self, name, newsize):
-        return ('lun', '-expand', '-name', name, '-capacity', newsize,
-                '-sq', 'gb', '-o', '-ignoreThresholds')
-
-    def LUN_PROPERTY_POOL_CMD(self, lunname):
-        return ('lun', '-list', '-name', lunname, '-poolName')
-
-    def LUN_PROPERTY_ALL_CMD(self, lunname):
-        return ('lun', '-list', '-name', lunname,
-                '-state', '-status', '-opDetails', '-userCap', '-owner',
-                '-attachedSnapshot', '-uid')
-
-    def LUN_PROPERTY_BY_ID(self, lun_id):
-        return ('lun', '-list', '-l', lun_id,
-                '-state', '-status', '-opDetails', '-userCap', '-owner',
-                '-attachedSnapshot', '-uid')
-
-    @staticmethod
-    def LUN_RENAME_CMD(lun_id, lun_name):
-        return ('lun', '-modify', '-l', int(lun_id),
-                '-newName', lun_name, '-o')
-
-    @staticmethod
-    def LUN_LIST_ALL_CMD(lun_id):
-        return ('lun', '-list', '-l', int(lun_id),
-                '-attachedSnapshot', '-userCap',
-                '-dedupState', '-initialTier',
-                '-isCompressed', '-isThinLUN',
-                '-opDetails', '-owner', '-poolName',
-                '-state', '-status', '-tieringPolicy',
-                '-uid')
-
-    @staticmethod
-    def LUN_LIST_SPECS_CMD(lun_id):
-        return ('lun', '-list', '-l', int(lun_id),
-                '-poolName', '-isThinLUN', '-isCompressed',
-                '-dedupState', '-initialTier', '-tieringPolicy',
-                '-uid')
-
-    @staticmethod
-    def LUN_MODIFY_TIER(lun_id, tier=None, policy=None):
-        if tier is None:
-            tier = 'highestAvailable'
-        if policy is None:
-            policy = 'highestAvailable'
-        return ('lun', '-modify', '-l', lun_id, '-o',
-                '-initialTier', tier,
-                '-tieringPolicy', policy)
-
-    def MIGRATION_CMD(self, src_id=1, dest_id=1, rate='high'):
-        cmd = ("migrate", "-start", "-source", src_id, "-dest", dest_id,
-               "-rate", rate, "-o")
-        return cmd
-
-    def MIGRATION_VERIFY_CMD(self, src_id):
-        return ("migrate", "-list", "-source", src_id)
-
-    def MIGRATION_CANCEL_CMD(self, src_id):
-        return ("migrate", "-cancel", "-source", src_id, '-o')
-
-    def GETPORT_CMD(self):
-        return ("connection", "-getport", "-address", "-vlanid")
-
-    def PINGNODE_CMD(self, sp, portid, vportid, ip):
-        return ("connection", "-pingnode", "-sp", sp, '-portid', portid,
-                "-vportid", vportid, "-address", ip, '-count', '1')
-
-    def GETFCPORT_CMD(self):
-        return ('port', '-list', '-sp')
-
-    def CONNECTHOST_CMD(self, hostname, gname):
-        return ('storagegroup', '-connecthost',
-                '-host', hostname, '-gname', gname, '-o')
-
-    def ENABLE_COMPRESSION_CMD(self, lun_id):
-        return ('compression', '-on',
-                '-l', lun_id, '-ignoreThresholds', '-o')
-
-    def STORAGEGROUP_LIST_CMD(self, gname=None):
-        if gname:
-            return ('storagegroup', '-list',
-                    '-gname', gname, '-host', '-iscsiAttributes')
-        else:
-            return ('storagegroup', '-list')
-
-    def STORAGEGROUP_REMOVEHLU_CMD(self, gname, hlu):
-        return ('storagegroup', '-removehlu',
-                '-hlu', hlu, '-gname', gname, '-o')
-
-    def SNAP_COPY_CMD(self, src_snap, snap_name):
-        return ('snap', '-copy', '-id', src_snap, '-name', snap_name,
-                '-ignoreMigrationCheck', '-ignoreDeduplicationCheck')
-
-    def ALLOW_READWRITE_ON_SNAP_CMD(self, snap_name):
-        return ('snap', '-modify', '-id', snap_name,
-                '-allowReadWrite', 'yes', '-allowAutoDelete', 'yes')
-
-    def MODIFY_TIERING_CMD(self, lun_name, tiering):
-        cmd = ['lun', '-modify', '-name', lun_name, '-o']
-        cmd.extend(self.tiering_values[tiering])
-        return tuple(cmd)
-
-    provisioning_values = {
-        'thin': ['-type', 'Thin'],
-        'thick': ['-type', 'NonThin'],
-        'compressed': ['-type', 'Thin'],
-        'deduplicated': ['-type', 'Thin', '-deduplication', 'on']}
-    tiering_values = {
-        'starthighthenauto': [
-            '-initialTier', 'highestAvailable',
-            '-tieringPolicy', 'autoTier'],
-        'auto': [
-            '-initialTier', 'optimizePool',
-            '-tieringPolicy', 'autoTier'],
-        'highestavailable': [
-            '-initialTier', 'highestAvailable',
-            '-tieringPolicy', 'highestAvailable'],
-        'lowestavailable': [
-            '-initialTier', 'lowestAvailable',
-            '-tieringPolicy', 'lowestAvailable'],
-        'nomovement': [
-            '-initialTier', 'optimizePool',
-            '-tieringPolicy', 'noMovement']}
-
-    def LUN_CREATION_CMD(self, name, size, pool,
-                         provisioning=None, tiering=None,
-                         ignore_thresholds=False, poll=True):
-        initial = ['lun', '-create',
-                   '-capacity', size,
-                   '-sq', 'gb',
-                   '-poolName', pool,
-                   '-name', name]
-        if not poll:
-            initial = ['-np'] + initial
-        if provisioning:
-            initial.extend(self.provisioning_values[provisioning])
-        else:
-            initial.extend(self.provisioning_values['thick'])
-        if tiering:
-            initial.extend(self.tiering_values[tiering])
-        if ignore_thresholds:
-            initial.append('-ignoreThresholds')
-        return tuple(initial)
-
-    def CHECK_FASTCACHE_CMD(self, storage_pool):
-        return ('storagepool', '-list', '-name',
-                storage_pool, '-fastcache')
-
-    def CREATE_CONSISTENCYGROUP_CMD(self, cg_name, members=None):
-        create_cmd = ('snap', '-group', '-create',
-                      '-name', cg_name, '-allowSnapAutoDelete', 'no')
-
-        if not members:
-            return create_cmd
-        else:
-            return create_cmd + ('-res', ','.join(map(six.text_type,
-                                                      members)))
-
-    def DELETE_CONSISTENCYGROUP_CMD(self, cg_name):
-        return ('-np', 'snap', '-group', '-destroy',
-                '-id', cg_name)
-
-    def ADD_LUN_TO_CG_CMD(self, cg_name, lun_id):
-        return ('snap', '-group',
-                '-addmember', '-id', cg_name, '-res', lun_id)
-
-    def CREATE_CG_SNAPSHOT(self, cg_name, snap_name):
-        return ('-np', 'snap', '-create', '-res', cg_name,
-                '-resType', 'CG', '-name', snap_name, '-allowReadWrite',
-                'yes', '-allowAutoDelete', 'no')
-
-    def DELETE_CG_SNAPSHOT(self, snap_name):
-        return ('-np', 'snap', '-destroy', '-id', snap_name, '-o')
-
-    def GET_CG_BY_NAME_CMD(self, cg_name):
-        return ('snap', '-group', '-list', '-id', cg_name)
-
-    def GET_SNAP(self, snap_name):
-        return ('snap', '-list', '-id', snap_name)
-
-    def REMOVE_LUNS_FROM_CG_CMD(self, cg_name, remove_ids):
-        return ('snap', '-group', '-rmmember', '-id', cg_name, '-res',
-                ','.join(remove_ids))
-
-    def REPLACE_LUNS_IN_CG_CMD(self, cg_name, new_ids):
-        return ('snap', '-group', '-replmember', '-id', cg_name, '-res',
-                ','.join(new_ids))
-
-    # Replication related commands
-    def MIRROR_CREATE_CMD(self, mirror_name, lun_id):
-        return ('mirror', '-sync', '-create', '-name', mirror_name,
-                '-lun', lun_id, '-usewriteintentlog', '-o')
-
-    def MIRROR_DESTROY_CMD(self, mirror_name):
-        return ('mirror', '-sync', '-destroy', '-name', mirror_name,
-                '-force', '-o')
-
-    def MIRROR_ADD_IMAGE_CMD(self, mirror_name, sp_ip, lun_id):
-        return ('mirror', '-sync', '-addimage', '-name', mirror_name,
-                '-arrayhost', sp_ip, '-lun', lun_id, '-recoverypolicy',
-                'auto', '-syncrate', 'high')
-
-    def MIRROR_REMOVE_IMAGE_CMD(self, mirror_name, image_uid):
-        return ('mirror', '-sync', '-removeimage', '-name', mirror_name,
-                '-imageuid', image_uid, '-o')
-
-    def MIRROR_FRACTURE_IMAGE_CMD(self, mirror_name, image_uid):
-        return ('mirror', '-sync', '-fractureimage', '-name', mirror_name,
-                '-imageuid', image_uid, '-o')
-
-    def MIRROR_SYNC_IMAGE_CMD(self, mirror_name, image_uid):
-        return ('mirror', '-sync', '-syncimage', '-name', mirror_name,
-                '-imageuid', image_uid, '-o')
-
-    def MIRROR_PROMOTE_IMAGE_CMD(self, mirror_name, image_uid):
-        return ('mirror', '-sync', '-promoteimage', '-name', mirror_name,
-                '-imageuid', image_uid, '-o')
-
-    def MIRROR_LIST_CMD(self, mirror_name):
-        return ('mirror', '-sync', '-list', '-name', mirror_name)
-
-    # Mirror related output
-    def MIRROR_LIST_RESULT(self, mirror_name, mirror_state='Synchronized'):
-        return ("""MirrorView Name:  %(name)s
-MirrorView Description:
-MirrorView UID:  50:06:01:60:B6:E0:1C:F4:0E:00:00:00:00:00:00:00
-Logical Unit Numbers:  37
-Remote Mirror Status:  Mirrored
-MirrorView State:  Active
-MirrorView Faulted:  NO
-MirrorView Transitioning:  NO
-Quiesce Threshold:  60
-Minimum number of images required:  0
-Image Size:  2097152
-Image Count:  2
-Write Intent Log Used:  YES
-Images:
-Image UID:  50:06:01:60:B6:E0:1C:F4
-Is Image Primary:  YES
-Logical Unit UID:  60:06:01:60:13:00:3E:00:14:FA:3C:8B:A5:98:E5:11
-Image Condition:  Primary Image
-Preferred SP:  A
-
-Image UID:  50:06:01:60:88:60:05:FE
-Is Image Primary:  NO
-Logical Unit UID:  60:06:01:60:41:C4:3D:00:B2:D5:33:DB:C7:98:E5:11
-Image State:  %(state)s
-Image Condition:  Normal
-Recovery Policy:  Automatic
-Preferred SP:  A
-Synchronization Rate:  High
-Image Faulted:  NO
-Image Transitioning:  NO
-Synchronizing Progress(%%):  100
-""" % {'name': mirror_name, 'state': mirror_state}, 0)
-
-    def MIRROR_LIST_ERROR_RESULT(self, mirror_name):
-        return ("Getting mirror list failed. Mirror not found", 145)
-
-    def MIRROR_CREATE_ERROR_RESULT(self, mirror_name):
-        return (
-            "Error: mirrorview command failed\n"
-            "Mirror name already in use", 67)
-
-    def MIRROR_DESTROY_ERROR_RESULT(self, mirror_name):
-        return ("Destroying mirror failed. Mirror not found", 145)
-
-    def MIRROR_ADD_IMAGE_ERROR_RESULT(self):
-        return (
-            "Adding sync mirror image failed. Invalid LUN number\n"
-            "LUN does not exist or Specified LU not available "
-            "for mirroring.", 169)
-
-    def MIRROR_PROMOTE_IMAGE_ERROR_RESULT(self):
-        return (
-            "Error: mirrorview command failed\n"
-            "UID of the secondary image to be promoted is not local to "
-            "this array.Mirrorview can't promote a secondary image not "
-            "local to this array. Make sure you are sending the promote "
-            "command to the correct array where the secondary image is "
-            "located. (0x7105824e)", 78)
-
-    # Test Objects
-
-    def CONSISTENCY_GROUP_VOLUMES(self):
-        volumes = []
-        volumes.append(self.test_volume)
-        volumes.append(self.test_volume)
-        return volumes
-
-    def SNAPS_IN_SNAP_GROUP(self):
-        snaps = []
-        snaps.append(self.test_snapshot)
-        snaps.append(self.test_snapshot)
-        return snaps
-
-    def VOLUMES_NOT_IN_CG(self):
-        add_volumes = []
-        add_volumes.append(self.test_volume4)
-        add_volumes.append(self.test_volume5)
-        return add_volumes
-
-    def VOLUMES_IN_CG(self):
-        remove_volumes = []
-        remove_volumes.append(self.volume_in_cg)
-        remove_volumes.append(self.volume2_in_cg)
-        return remove_volumes
-
-    def CG_PROPERTY(self, cg_name):
-        return """
-Name:  %(cg_name)s
-Description:
-Allow auto delete:  No
-Member LUN ID(s):  1, 3
-State:  Ready
-""" % {'cg_name': cg_name}, 0
-
-    def CG_NOT_FOUND(self):
-        return ("Cannot find the consistency group. \n\n", 13)
-
-    def CG_REPL_ERROR(self):
-        return """
-        The specified LUN is already a member
-        of another consistency group. (0x716d8045)
-        """, 71
-
-    def LUN_PREP_ERROR(self):
-        return ("The operation cannot be performed because "
-                "the LUN is 'Preparing'.  Wait for the LUN's "
-                "Current Operation to complete 'Preparing' "
-                "and retry the operation. (0x712d8e0e)", 14)
-
-    POOL_PROPERTY = (
-        "Pool Name:  unit_test_pool\n"
-        "Pool ID:  1\n"
-        "Percent Full Threshold:  70\n"
-        "User Capacity (Blocks):  6881061888\n"
-        "User Capacity (GBs):  3281.146\n"
-        "Available Capacity (Blocks):  6832207872\n"
-        "Available Capacity (GBs):  3257.851\n"
-        "State: Ready\n"
-        "\n", 0)
-
-    POOL_PROPERTY_W_FASTCACHE = (
-        "Pool Name:  unit_test_pool\n"
-        "Pool ID:  1\n"
-        "Percent Full Threshold:  70\n"
-        "User Capacity (Blocks):  6881061888\n"
-        "User Capacity (GBs):  3281.146\n"
-        "Available Capacity (Blocks):  6832207872\n"
-        "Available Capacity (GBs):  3257.851\n"
-        "Total Subscribed Capacity (GBs):  636.240\n"
-        "FAST Cache:  Enabled\n"
-        "State: Ready\n\n", 0)
-
-    ALL_PORTS = ("SP:  A\n" +
-                 "Port ID:  4\n" +
-                 "Port WWN:  iqn.1992-04.com.emc:cx.fnm00124000215.a4\n" +
-                 "iSCSI Alias:  0215.a4\n\n" +
-                 "Virtual Port ID:  0\n" +
-                 "VLAN ID:  Disabled\n" +
-                 "IP Address:  10.244.214.118\n\n" +
-                 "SP:  A\n" +
-                 "Port ID:  5\n" +
-                 "Port WWN:  iqn.1992-04.com.emc:cx.fnm00124000215.a5\n" +
-                 "iSCSI Alias:  0215.a5\n" +
-                 "SP:  A\n" +
-                 "Port ID:  0\n" +
-                 "Port WWN:  iqn.1992-04.com.emc:cx.fnm00124000215.a0\n" +
-                 "iSCSI Alias:  0215.a0\n\n" +
-                 "Virtual Port ID:  0\n" +
-                 "VLAN ID:  Disabled\n" +
-                 "IP Address:  10.244.214.119\n\n" +
-                 "SP:  B\n" +
-                 "Port ID:  2\n" +
-                 "Port WWN:  iqn.1992-04.com.emc:cx.fnm00124000215.b2\n" +
-                 "iSCSI Alias:  0215.b2\n\n" +
-                 "Virtual Port ID:  0\n" +
-                 "VLAN ID:  Disabled\n" +
-                 "IP Address:  10.244.214.120\n\n", 0)
-
-    WHITE_LIST_PORTS = ("""SP:  A
-Port ID:  0
-Port WWN:  iqn.1992-04.com.emc:cx.fnmxxx.a0
-iSCSI Alias:  0235.a7
-
-Virtual Port ID:  0
-VLAN ID:  Disabled
-IP Address:  192.168.3.52
-
-SP:  A
-Port ID:  9
-Port WWN:  iqn.1992-04.com.emc:cx.fnmxxx.a9
-iSCSI Alias:  0235.a9
-
-SP:  A
-Port ID:  4
-Port WWN:  iqn.1992-04.com.emc:cx.fnmxxx.a4
-iSCSI Alias:  0235.a4
-
-SP:  B
-Port ID:  2
-Port WWN:  iqn.1992-04.com.emc:cx.fnmxxx.b2
-iSCSI Alias:  0235.b6
-
-Virtual Port ID:  0
-VLAN ID:  Disabled
-IP Address:  192.168.4.53
-""", 0)
-
-    iscsi_connection_info = {
-        'data': {'target_discovered': True,
-                 'target_iqn':
-                 'iqn.1992-04.com.emc:cx.fnm00124000215.a4',
-                 'target_lun': 2,
-                 'target_portal': '10.244.214.118:3260',
-                 'target_iqns': ['iqn.1992-04.com.emc:cx.fnm00124000215.a4'],
-                 'target_luns': [2],
-                 'target_portals': ['10.244.214.118:3260'],
-                 'volume_id': fake.VOLUME_ID},
-        'driver_volume_type': 'iscsi'}
-
-    iscsi_connection_info_mp = {
-        'data': {'target_discovered': True,
-                 'target_iqns': [
-                     'iqn.1992-04.com.emc:cx.fnm00124000215.a4',
-                     'iqn.1992-04.com.emc:cx.fnm00124000215.a5'],
-                 'target_iqn': 'iqn.1992-04.com.emc:cx.fnm00124000215.a4',
-                 'target_luns': [2, 2],
-                 'target_lun': 2,
-                 'target_portals': [
-                     '10.244.214.118:3260',
-                     '10.244.214.119:3260'],
-                 'target_portal': '10.244.214.118:3260',
-                 'volume_id': fake.VOLUME_ID},
-        'driver_volume_type': 'iscsi'}
-
-    PING_OK = ("Reply from 10.0.0.2:  bytes=32 time=1ms TTL=30\n" +
-               "Reply from 10.0.0.2:  bytes=32 time=1ms TTL=30\n" +
-               "Reply from 10.0.0.2:  bytes=32 time=1ms TTL=30\n" +
-               "Reply from 10.0.0.2:  bytes=32 time=1ms TTL=30\n", 0)
-
-    FC_PORTS = ("Information about each SPPORT:\n" +
-                "\n" +
-                "SP Name:             SP A\n" +
-                "SP Port ID:          0\n" +
-                "SP UID:              50:06:01:60:88:60:01:95:" +
-                "50:06:01:60:08:60:01:95\n" +
-                "Link Status:         Up\n" +
-                "Port Status:         Online\n" +
-                "Switch Present:      YES\n" +
-                "Switch UID:          10:00:00:05:1E:72:EC:A6:" +
-                "20:46:00:05:1E:72:EC:A6\n" +
-                "SP Source ID:        272896\n" +
-                "\n" +
-                "SP Name:             SP B\n" +
-                "SP Port ID:          4\n" +
-                "SP UID:              iqn.1992-04.com.emc:cx." +
-                "fnm00124000215.b4\n" +
-                "Link Status:         Up\n" +
-                "Port Status:         Online\n" +
-                "Switch Present:      Not Applicable\n" +
-                "\n" +
-                "SP Name:             SP A\n" +
-                "SP Port ID:          2\n" +
-                "SP UID:              50:06:01:60:88:60:01:95:" +
-                "50:06:01:62:08:60:01:95\n" +
-                "Link Status:         Down\n" +
-                "Port Status:         Online\n" +
-                "Switch Present:      NO\n" +
-                "\n" +
-                "SP Name:             SP B\n" +
-                "SP Port ID:          2\n" +
-                "SP UID:              50:06:01:60:88:60:08:0F:"
-                "50:06:01:6A:08:60:08:0F\n" +
-                "Link Status:         Up\n" +
-                "Port Status:         Online\n" +
-                "Switch Present:      YES\n" +
-                "Switch UID:          10:00:50:EB:1A:03:3F:59:"
-                "20:11:50:EB:1A:03:3F:59\n" +
-                "SP Source ID:        69888\n", 0)
-
-    FAKEHOST_PORTS = (
-        "Information about each HBA:\n" +
-        "\n" +
-        "HBA UID:                 20:00:00:90:FA:53:46:41:12:34:" +
-        "56:78:90:12:34:56\n" +
-        "Server Name:             fakehost\n" +
-        "Server IP Address:       10.0.0.2" +
-        "HBA Model Description:\n" +
-        "HBA Vendor Description:\n" +
-        "HBA Device Driver Name:\n" +
-        "Information about each port of this HBA:\n\n" +
-        "    SP Name:               SP A\n" +
-        "    SP Port ID:            0\n" +
-        "    HBA Devicename:\n" +
-        "    Trusted:               NO\n" +
-        "    Logged In:             YES\n" +
-        "    Defined:               YES\n" +
-        "    Initiator Type:           3\n" +
-        "    StorageGroup Name:     fakehost\n\n" +
-        "    SP Name:               SP A\n" +
-        "    SP Port ID:            2\n" +
-        "    HBA Devicename:\n" +
-        "    Trusted:               NO\n" +
-        "    Logged In:             YES\n" +
-        "    Defined:               YES\n" +
-        "    Initiator Type:           3\n" +
-        "    StorageGroup Name:     fakehost\n\n" +
-        "    SP Name:               SP B\n" +
-        "    SP Port ID:            2\n" +
-        "    HBA Devicename:\n" +
-        "    Trusted:               NO\n" +
-        "    Logged In:             YES\n" +
-        "    Defined:               YES\n" +
-        "    Initiator Type:           3\n" +
-        "    StorageGroup Name:     fakehost\n\n"
-        "Information about each SPPORT:\n" +
-        "\n" +
-        "SP Name:             SP A\n" +
-        "SP Port ID:          0\n" +
-        "SP UID:              50:06:01:60:88:60:01:95:" +
-        "50:06:01:60:08:60:01:95\n" +
-        "Link Status:         Up\n" +
-        "Port Status:         Online\n" +
-        "Switch Present:      YES\n" +
-        "Switch UID:          10:00:00:05:1E:72:EC:A6:" +
-        "20:46:00:05:1E:72:EC:A6\n" +
-        "SP Source ID:        272896\n" +
-        "\n" +
-        "SP Name:             SP B\n" +
-        "SP Port ID:          4\n" +
-        "SP UID:              iqn.1992-04.com.emc:cx." +
-        "fnm00124000215.b4\n" +
-        "Link Status:         Up\n" +
-        "Port Status:         Online\n" +
-        "Switch Present:      Not Applicable\n" +
-        "\n" +
-        "SP Name:             SP A\n" +
-        "SP Port ID:          2\n" +
-        "SP UID:              50:06:01:60:88:60:01:95:" +
-        "50:06:01:62:08:60:01:95\n" +
-        "Link Status:         Down\n" +
-        "Port Status:         Online\n" +
-        "Switch Present:      NO\n" +
-        "\n" +
-        "SP Name:             SP B\n" +
-        "SP Port ID:          2\n" +
-        "SP UID:              50:06:01:60:88:60:01:95:" +
-        "50:06:01:6A:08:60:08:0F\n" +
-        "Link Status:         Up\n" +
-        "Port Status:         Online\n" +
-        "Switch Present:      YES\n" +
-        "Switch UID:          10:00:00:05:1E:72:EC:A6:" +
-        "20:46:00:05:1E:72:EC:A6\n" +
-        "SP Source ID:        272896\n", 0)
-
-    def LUN_PROPERTY(self, name, is_thin=False, has_snap=False, size=1,
-                     state='Ready', faulted='false', operation='None',
-                     lunid=1, pool_name='unit_test_pool',
-                     wwn='fake_wwn'):
-        return ("""
-               LOGICAL UNIT NUMBER %(lunid)s
-               Name:  %(name)s
-               UID:  %(wwn)s
-               Current Owner:  SP A
-               Default Owner:  SP A
-               Allocation Owner:  SP A
-               Attached Snapshot: %(has_snap)s
-               User Capacity (Blocks):  2101346304
-               User Capacity (GBs):  %(size)d
-               Consumed Capacity (Blocks):  2149576704
-               Consumed Capacity (GBs):  1024.998
-               Pool Name:  %(pool_name)s
-               Current State:  %(state)s
-               Status:  OK(0x0)
-               Is Faulted:  %(faulted)s
-               Is Transitioning:  false
-               Current Operation:  %(operation)s
-               Current Operation State:  N/A
-               Current Operation Status:  N/A
-               Current Operation Percent Completed:  0
-               Is Thin LUN:  %(is_thin)s""" % {
-            'lunid': lunid,
-            'name': name,
-            'has_snap': 'FakeSnap' if has_snap else 'N/A',
-            'size': size,
-            'pool_name': pool_name,
-            'state': state,
-            'faulted': faulted,
-            'operation': operation,
-            'is_thin': 'Yes' if is_thin else 'No',
-            'wwn': wwn}, 0)
-
-    def STORAGE_GROUP_ISCSI_FC_HBA(self, sgname):
-
-        return ("""\
-        Storage Group Name:    %s
-        Storage Group UID:     54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
-        HBA/SP Pairs:
-
-          HBA UID                                          SP Name     SPPort
-          -------                                          -------     ------
-          iqn.1993-08.org.debian:01:222                     SP A         4
-        Host name:             fakehost
-        SPPort:                A-4v0
-        Initiator IP:          fakeip
-        TPGT:                  3
-        ISID:                  fakeid
-
-          22:34:56:78:90:12:34:56:12:34:56:78:90:12:34:56   SP B         2
-        Host name:             fakehost2
-        SPPort:                B-2v0
-        Initiator IP:          N/A
-        TPGT:                  0
-        ISID:                  N/A
-
-          22:34:56:78:90:54:32:16:12:34:56:78:90:54:32:16   SP B         2
-        Host name:             fakehost2
-        SPPort:                B-2v0
-        Initiator IP:          N/A
-        TPGT:                  0
-        ISID:                  N/A
-
-        HLU/ALU Pairs:
-
-          HLU Number     ALU Number
-          ----------     ----------
-            1               1
-        Shareable:             YES""" % sgname, 0)
-
-    def STORAGE_GROUP_NO_MAP(self, sgname):
-        return ("""\
-        Storage Group Name:    %s
-        Storage Group UID:     27:D2:BE:C1:9B:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
-        Shareable:             YES""" % sgname, 0)
-
-    def STORAGE_GROUP_HAS_MAP(self, sgname):
-
-        return ("""\
-        Storage Group Name:    %s
-        Storage Group UID:     54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
-        HBA/SP Pairs:
-
-          HBA UID                                          SP Name     SPPort
-          -------                                          -------     ------
-          iqn.1993-08.org.debian:01:222                     SP A         4
-        Host name:             fakehost
-        SPPort:                A-4v0
-        Initiator IP:          fakeip
-        TPGT:                  3
-        ISID:                  fakeid
-
-        HLU/ALU Pairs:
-
-          HLU Number     ALU Number
-          ----------     ----------
-            1               1
-        Shareable:             YES""" % sgname, 0)
-
-    def STORAGE_GROUP_HAS_MAP_ISCSI(self, sgname):
-
-        return ("""\
-        Storage Group Name:    %s
-        Storage Group UID:     54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
-        HBA/SP Pairs:
-
-          HBA UID                                          SP Name     SPPort
-          -------                                          -------     ------
-          iqn.1993-08.org.debian:01:222                     SP A         2
-        Host name:             fakehost
-        SPPort:                A-2v0
-        Initiator IP:          fakeip
-        TPGT:                  3
-        ISID:                  fakeid
-
-          iqn.1993-08.org.debian:01:222                     SP A         0
-        Host name:             fakehost
-        SPPort:                A-0v0
-        Initiator IP:          fakeip
-        TPGT:                  3
-        ISID:                  fakeid
-
-          iqn.1993-08.org.debian:01:222                     SP B         2
-        Host name:             fakehost
-        SPPort:                B-2v0
-        Initiator IP:          fakeip
-        TPGT:                  3
-        ISID:                  fakeid
-
-        HLU/ALU Pairs:
-
-          HLU Number     ALU Number
-          ----------     ----------
-            1               1
-        Shareable:             YES""" % sgname, 0)
-
-    def STORAGE_GROUP_HAS_MAP_MP(self, sgname):
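-        # Storage group with the same initiator registered on two SP A
-        # ports, as used by the multipath tests.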
-
-        return ("""\
-        Storage Group Name:    %s
-        Storage Group UID:     54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
-        HBA/SP Pairs:
-
-          HBA UID                                          SP Name     SPPort
-          -------                                          -------     ------
-          iqn.1993-08.org.debian:01:222                     SP A         4
-        Host name:             fakehost
-        SPPort:                A-4v0
-        Initiator IP:          fakeip
-        TPGT:                  3
-        ISID:                  fakeid
-
-          iqn.1993-08.org.debian:01:222                     SP A         5
-        Host name:             fakehost
-        SPPort:                A-5v0
-        Initiator IP:          fakeip
-        TPGT:                  3
-        ISID:                  fakeid
-
-        HLU/ALU Pairs:
-
-          HLU Number     ALU Number
-          ----------     ----------
-            1               1
-        Shareable:             YES""" % sgname, 0)
-
-    def STORAGE_GROUP_HAS_MAP_2(self, sgname):
-
-        return ("""\
-        Storage Group Name:    %s
-        Storage Group UID:     54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
-        HBA/SP Pairs:
-
-          HBA UID                                          SP Name     SPPort
-          -------                                          -------     ------
-          iqn.1993-08.org.debian:01:222                     SP A         4
-        Host name:             fakehost
-        SPPort:                A-4v0
-        Initiator IP:          fakeip
-        TPGT:                  3
-        ISID:                  fakeid
-
-        HLU/ALU Pairs:
-
-          HLU Number     ALU Number
-          ----------     ----------
-            1               1
-            2               3
-        Shareable:             YES""" % sgname, 0)
-
-    def POOL_FEATURE_INFO_POOL_LUNS_CMD(self):
-        cmd = ('storagepool', '-feature', '-info',
-               '-maxPoolLUNs', '-numPoolLUNs')
-        return cmd
-
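-    # Fake output of the "storagepool -feature -info" query; returns
-    # (text, exit_code) like the other simulators.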
-    def POOL_FEATURE_INFO_POOL_LUNS(self, max_luns, total):
-        return (('Max. Pool LUNs:  %s\n' % max_luns) +
-                ('Total Number of Pool LUNs:  %s\n' % total), 0)
-
-    def STORAGE_GROUPS_HAS_MAP(self, sgname1, sgname2):
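-        # Listing that includes an extra, unrelated storage group to
-        # exercise parsing of multi-group output.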
-
-        return ("""
-
-        Storage Group Name:    irrelevant
-        Storage Group UID:     9C:86:4F:30:07:76:E4:11:AC:83:C8:C0:8E:9C:D6:1F
-        HBA/SP Pairs:
-
-          HBA UID                                          SP Name     SPPort
-          -------                                          -------     ------
-          iqn.1993-08.org.debian:01:5741c6307e60            SP A         6
-        Host name:             fakehost
-        SPPort:                A-6v0
-        Initiator IP:          fakeip
-        TPGT:                  3
-        ISID:                  fakeid
-
-        Storage Group Name:    %(sgname1)s
-        Storage Group UID:     54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
-        HBA/SP Pairs:
-
-          HBA UID                                          SP Name     SPPort
-          -------                                          -------     ------
-          iqn.1993-08.org.debian:01:222                     SP A         4
-        Host name:             fakehost
-        SPPort:                A-4v0
-        Initiator IP:          fakeip
-        TPGT:                  3
-        ISID:                  fakeid
-
-        HLU/ALU Pairs:
-
-          HLU Number     ALU Number
-          ----------     ----------
-            31              3
-            41              4
-        Shareable:             YES
-
-        Storage Group Name:    %(sgname2)s
-        Storage Group UID:     9C:86:4F:30:07:76:E4:11:AC:83:C8:C0:8E:9C:D6:1F
-        HBA/SP Pairs:
-
-          HBA UID                                          SP Name     SPPort
-          -------                                          -------     ------
-          iqn.1993-08.org.debian:01:5741c6307e60            SP A         6
-        Host name:             fakehost
-        SPPort:                A-6v0
-        Initiator IP:          fakeip
-        TPGT:                  3
-        ISID:                  fakeid
-
-        HLU/ALU Pairs:
-
-          HLU Number     ALU Number
-          ----------     ----------
-            32              3
-            42              4
-        Shareable:             YES""" % {'sgname1': sgname1,
-                                         'sgname2': sgname2}, 0)
-
-    def LUN_DELETE_IN_SG_ERROR(self, up_to_date=True):
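-        # Two flavors of the error: an up-to-date CLI returns error
-        # code 156, an older one reports the mapping with exit code 0.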
-        if up_to_date:
-            return ("Cannot unbind LUN "
-                    "because it's contained in a Storage Group",
-                    156)
-        else:
-            return ("SP B: Request failed.  "
-                    "Host LUN/LUN mapping still exists.",
-                    0)
-
-    def set_path_cmd(self, gname, hba, sp, spport, vport=None, ip=None):
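-        # Build the "storagegroup -setpath" command; -spvport is only
-        # included when a virtual port is given.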
-        if vport is None:
-            return ('storagegroup', '-setpath', '-gname', gname,
-                    '-hbauid', hba,
-                    '-sp', sp, '-spport', spport,
-                    '-ip', ip, '-host', gname, '-o')
-        return ('storagegroup', '-setpath', '-gname', gname,
-                '-hbauid', hba,
-                '-sp', sp, '-spport', spport, '-spvport', vport,
-                '-ip', ip, '-host', gname, '-o')
-
-    @staticmethod
-    def convert_snapshot(snapshot, expected_attrs=['volume']):
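-        # Convert plain dicts into fake snapshot/volume objects; the
-        # attached volume is only built when requested via expected_attrs.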
-        if expected_attrs:
-            snapshot = snapshot.copy()
-            snapshot['volume'] = fake_volume.fake_volume_obj(
-                None, **snapshot['volume'])
-        snap = fake_snapshot.fake_snapshot_obj(
-            None, expected_attrs=expected_attrs, **snapshot)
-        return snap
-
-    @staticmethod
-    def convert_volume(volume):
-        vol = fake_volume.fake_volume_obj(
-            None, **volume)
-        return vol
-
-
-class DriverTestCaseBase(test.TestCase):
-    def setUp(self):
-        super(DriverTestCaseBase, self).setUp()
-
-        self.stubs.Set(emc_vnx_cli.CommandLineHelper, 'command_execute',
-                       self.fake_command_execute_for_driver_setup)
-        self.stubs.Set(emc_vnx_cli.CommandLineHelper, 'get_array_serial',
-                       mock.Mock(return_value={'array_serial':
-                                               'fake_serial'}))
-        self.stubs.Set(os.path, 'exists', mock.Mock(return_value=1))
-
-        self.stubs.Set(emc_vnx_cli, 'INTERVAL_5_SEC', 0.01)
-        self.stubs.Set(emc_vnx_cli, 'INTERVAL_30_SEC', 0.01)
-
-        self.configuration = conf.Configuration(None)
-        self.configuration.append_config_values = mock.Mock(return_value=0)
-        self.configuration.naviseccli_path = '/opt/Navisphere/bin/naviseccli'
-        self.configuration.san_ip = '10.0.0.1'
-        self.configuration.storage_vnx_pool_name = 'unit_test_pool'
-        self.configuration.san_login = 'sysadmin'
-        self.configuration.san_password = 'sysadmin'
-        self.configuration.initiator_auto_registration = True
-        self.configuration.check_max_pool_luns_threshold = False
-        self.stubs.Set(self.configuration, 'safe_get',
-                       self.fake_safe_get({'storage_vnx_pool_names':
-                                           'unit_test_pool',
-                                           'volume_backend_name':
-                                           'namedbackend'}))
-        self.stubs.Set(eventlet, 'sleep', mock.Mock())
-        self.testData = EMCVNXCLIDriverTestData()
-
-        self.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \
-            '-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 '
-        self.configuration.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}'
-        self.configuration.ignore_pool_full_threshold = False
-
-    def driverSetup(self, commands=tuple(), results=tuple()):
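-        # Build the driver and replace its CLI execution with a mock
-        # that replays the given (commands, results) pairs.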
-        self.driver = self.generate_driver(self.configuration)
-        fake_command_execute = self.get_command_execute_simulator(
-            commands, results)
-        fake_cli = mock.Mock(side_effect=fake_command_execute)
-        self.driver.cli._client.command_execute = fake_cli
-        return fake_cli
-
-    def generate_driver(self, conf):
-        raise NotImplementedError
-
-    def get_command_execute_simulator(self, commands=tuple(),
-                                      results=tuple()):
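-        # Produce a side-effect function that matches call args against
-        # the canned commands; a list result is consumed one item per call.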
-        assert len(commands) == len(results)
-
-        def fake_command_execute(*args, **kwargv):
-            for i in range(len(commands)):
-                if args == commands[i]:
-                    if isinstance(results[i], list):
-                        if len(results[i]) > 0:
-                            ret = results[i][0]
-                            del results[i][0]
-                            return ret
-                    else:
-                        return results[i]
-            return self.standard_fake_command_execute(*args, **kwargv)
-        return fake_command_execute
-
-    def standard_fake_command_execute(self, *args, **kwargv):
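-        # Default canned responses for the common LUN and pool property
-        # queries; any unmatched command simply succeeds.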
-        standard_commands = [
-            self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-            self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME2_NAME),
-            self.testData.LUN_PROPERTY_ALL_CMD(
-                build_migration_dest_name(fake.VOLUME2_NAME)),
-            self.testData.LUN_PROPERTY_ALL_CMD('vol-vol1'),
-            self.testData.LUN_PROPERTY_ALL_CMD(fake.SNAPSHOT_NAME),
-            self.testData.POOL_PROPERTY_CMD]
-
-        standard_results = [
-            self.testData.LUN_PROPERTY(fake.VOLUME_NAME),
-            self.testData.LUN_PROPERTY(fake.VOLUME2_NAME),
-            self.testData.LUN_PROPERTY(
-                build_migration_dest_name(fake.VOLUME2_NAME)),
-            self.testData.LUN_PROPERTY('vol-vol1'),
-            self.testData.LUN_PROPERTY(fake.SNAPSHOT_NAME),
-            self.testData.POOL_PROPERTY]
-
-        standard_default = SUCCEED
-        for i in range(len(standard_commands)):
-            if args == standard_commands[i]:
-                return standard_results[i]
-
-        return standard_default
-
-    def fake_command_execute_for_driver_setup(self, *command, **kwargv):
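-        # Canned responses so port and pool discovery succeed during
-        # driver setup.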
-        if (command == ('connection', '-getport', '-address', '-vlanid') or
-                command == ('connection', '-getport', '-vlanid')):
-            return self.testData.ALL_PORTS
-        elif command == ('storagepool', '-list', '-state'):
-            return self.testData.POOL_GET_STATE_RESULT([
-                {'pool_name': self.testData.test_pool_name, 'state': "Ready"},
-                {'pool_name': "unit_test_pool2", 'state': "Ready"}])
-        elif command == self.testData.GETFCPORT_CMD():
-            return self.testData.FC_PORTS
-        else:
-            return SUCCEED
-
-    def fake_safe_get(self, values):
-        def _safe_get(key):
-            return values.get(key)
-        return _safe_get
-
-
-@ddt.ddt
-class EMCVNXCLIDriverISCSITestCase(DriverTestCaseBase):
-    def setUp(self):
-        super(EMCVNXCLIDriverISCSITestCase, self).setUp()
-
-    def generate_driver(self, conf):
-        return emc_cli_iscsi.EMCCLIISCSIDriver(configuration=conf)
-
-    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
-                new=utils.ZeroIntervalLoopingCall)
-    def test_create_destroy_volume_without_extra_spec(self):
-        fake_cli = self.driverSetup()
-        self.driver.create_volume(self.testData.test_volume)
-        self.driver.delete_volume(self.testData.test_volume)
-        expect_cmd = [
-            mock.call(*self.testData.LUN_CREATION_CMD(
-                fake.VOLUME_NAME, 1,
-                'unit_test_pool',
-                'thick', None, poll=False)),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                      poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                      poll=False),
-            mock.call(*self.testData.LUN_DELETE_CMD(fake.VOLUME_NAME))]
-
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
-                new=utils.ZeroIntervalLoopingCall)
-    def test_create_volume_ignore_thresholds(self):
-        self.configuration.ignore_pool_full_threshold = True
-        fake_cli = self.driverSetup()
-        self.driver.create_volume(self.testData.test_volume)
-        expect_cmd = [
-            mock.call(*self.testData.LUN_CREATION_CMD(
-                fake.VOLUME_NAME, 1,
-                'unit_test_pool',
-                'thick', None,
-                ignore_thresholds=True, poll=False)),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                      poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                      poll=False)]
-
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
-                new=utils.ZeroIntervalLoopingCall)
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'storagetype:provisioning': 'compressed'}))
-    def test_create_volume_compressed(self):
-        commands = [self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                    self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                    self.testData.NDU_LIST_CMD]
-        results = [self.testData.LUN_PROPERTY(fake.VOLUME_NAME, True),
-                   self.testData.LUN_PROPERTY(fake.VOLUME_NAME, True),
-                   self.testData.NDU_LIST_RESULT]
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.cli.enablers = ['-Compression',
-                                    '-Deduplication',
-                                    '-ThinProvisioning',
-                                    '-FAST']
-        # case
-        self.driver.create_volume(self.testData.test_volume_with_type)
-        # verification
-        expect_cmd = [
-            mock.call(*self.testData.LUN_CREATION_CMD(
-                fake.VOLUME_NAME, 1,
-                'unit_test_pool',
-                'compressed', None, poll=False)),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                fake.VOLUME_NAME), poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                fake.VOLUME_NAME), poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                fake.VOLUME_NAME), poll=True),
-            mock.call(*self.testData.ENABLE_COMPRESSION_CMD(
-                1))]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch(
-        'oslo_service.loopingcall.FixedIntervalLoopingCall',
-        new=utils.ZeroIntervalLoopingCall)
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'provisioning:type': 'thin',
-                                'storagetype:provisioning': 'thick'}))
-    def test_create_volume_thin(self):
-        commands = [self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                    self.testData.NDU_LIST_CMD]
-        results = [self.testData.LUN_PROPERTY(fake.VOLUME_NAME, True),
-                   self.testData.NDU_LIST_RESULT]
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.cli.enablers = ['-Compression',
-                                    '-Deduplication',
-                                    '-ThinProvisioning',
-                                    '-FAST']
-        # case
-        self.driver.create_volume(self.testData.test_volume_with_type)
-        # verification
-        expect_cmd = [
-            mock.call(*self.testData.LUN_CREATION_CMD(
-                fake.VOLUME_NAME, 1,
-                'unit_test_pool',
-                'thin', None, poll=False)),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                fake.VOLUME_NAME), poll=False)]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch(
-        'oslo_service.loopingcall.FixedIntervalLoopingCall',
-        new=utils.ZeroIntervalLoopingCall)
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'provisioning:type': 'thick'}))
-    def test_create_volume_thick(self):
-        commands = [self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                    self.testData.NDU_LIST_CMD]
-        results = [self.testData.LUN_PROPERTY(fake.VOLUME_NAME, False),
-                   self.testData.NDU_LIST_RESULT]
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.cli.enablers = ['-Compression',
-                                    '-Deduplication',
-                                    '-ThinProvisioning',
-                                    '-FAST']
-        # case
-        self.driver.create_volume(self.testData.test_volume_with_type)
-        # verification
-        expect_cmd = [
-            mock.call(*self.testData.LUN_CREATION_CMD(
-                fake.VOLUME_NAME, 1,
-                'unit_test_pool',
-                'thick', None, poll=False)),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                fake.VOLUME_NAME), poll=False)]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
-                new=utils.ZeroIntervalLoopingCall)
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'storagetype:provisioning': 'compressed',
-                                'storagetype:tiering': 'HighestAvailable'}))
-    def test_create_volume_compressed_tiering_highestavailable(self):
-        commands = [self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                    self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                    self.testData.NDU_LIST_CMD]
-        results = [self.testData.LUN_PROPERTY(fake.VOLUME_NAME, True),
-                   self.testData.LUN_PROPERTY(fake.VOLUME_NAME, True),
-                   self.testData.NDU_LIST_RESULT]
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.cli.enablers = ['-Compression',
-                                    '-Deduplication',
-                                    '-ThinProvisioning',
-                                    '-FAST']
-        # case
-        self.driver.create_volume(self.testData.test_volume_with_type)
-
-        # verification
-        expect_cmd = [
-            mock.call(*self.testData.LUN_CREATION_CMD(
-                fake.VOLUME_NAME, 1,
-                'unit_test_pool',
-                'compressed', 'highestavailable', poll=False)),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                fake.VOLUME_NAME), poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                fake.VOLUME_NAME), poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                fake.VOLUME_NAME), poll=True),
-            mock.call(*self.testData.ENABLE_COMPRESSION_CMD(
-                1))]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
-                new=utils.ZeroIntervalLoopingCall)
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'storagetype:provisioning': 'deduplicated'}))
-    def test_create_volume_deduplicated(self):
-        commands = [self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                    self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                    self.testData.NDU_LIST_CMD]
-        results = [self.testData.LUN_PROPERTY(fake.VOLUME_NAME, True),
-                   self.testData.LUN_PROPERTY(fake.VOLUME_NAME, True),
-                   self.testData.NDU_LIST_RESULT]
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.cli.enablers = ['-Compression',
-                                    '-Deduplication',
-                                    '-ThinProvisioning',
-                                    '-FAST']
-        # case
-        self.driver.create_volume(self.testData.test_volume_with_type)
-
-        # verification
-        expect_cmd = [
-            mock.call(*self.testData.LUN_CREATION_CMD(
-                fake.VOLUME_NAME, 1,
-                'unit_test_pool',
-                'deduplicated', None, poll=False))]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
-                new=utils.ZeroIntervalLoopingCall)
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'storagetype:tiering': 'Auto'}))
-    def test_create_volume_tiering_auto(self):
-        commands = [self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                    self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                    self.testData.NDU_LIST_CMD]
-        results = [self.testData.LUN_PROPERTY(fake.VOLUME_NAME, True),
-                   self.testData.LUN_PROPERTY(fake.VOLUME_NAME, True),
-                   self.testData.NDU_LIST_RESULT]
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.cli.enablers = ['-Compression',
-                                    '-Deduplication',
-                                    '-ThinProvisioning',
-                                    '-FAST']
-        # case
-        self.driver.create_volume(self.testData.test_volume_with_type)
-
-        # verification
-        expect_cmd = [
-            mock.call(*self.testData.LUN_CREATION_CMD(
-                fake.VOLUME_NAME, 1,
-                'unit_test_pool',
-                None, 'auto', poll=False))]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'storagetype:tiering': 'Auto',
-                                'storagetype:provisioning': 'Deduplicated'}))
-    def test_create_volume_deduplicated_tiering_auto(self):
-        commands = [self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                    self.testData.NDU_LIST_CMD]
-        results = [self.testData.LUN_PROPERTY(fake.VOLUME_NAME, True),
-                   self.testData.NDU_LIST_RESULT]
-        self.driverSetup(commands, results)
-        ex = self.assertRaises(
-            exception.VolumeBackendAPIException,
-            self.driver.create_volume,
-            self.testData.test_volume_with_type)
-        self.assertTrue(
-            re.match(r".*deduplicated and auto tiering can't be both enabled",
-                     ex.msg))
-
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'storagetype:provisioning': 'Compressed'}))
-    def test_create_volume_compressed_no_enabler(self):
-        commands = [self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                    self.testData.NDU_LIST_CMD]
-        results = [self.testData.LUN_PROPERTY(fake.VOLUME_NAME, True),
-                   ('No package', 0)]
-        self.driverSetup(commands, results)
-        ex = self.assertRaises(
-            exception.VolumeBackendAPIException,
-            self.driver.create_volume,
-            self.testData.test_volume_with_type)
-        self.assertTrue(
-            re.match(r".*Compression Enabler is not installed",
-                     ex.msg))
-
-    def test_get_volume_stats(self):
-        commands = [self.testData.NDU_LIST_CMD,
-                    self.testData.POOL_GET_ALL_CMD(True)]
-        results = [self.testData.NDU_LIST_RESULT,
-                   self.testData.POOL_GET_ALL_RESULT(True)]
-        self.driverSetup(commands, results)
-        stats = self.driver.get_volume_stats(True)
-
-        self.assertEqual(VERSION, stats['driver_version'],
-                         "driver_version is incorrect")
-        self.assertEqual('iSCSI', stats['storage_protocol'],
-                         "storage_protocol is incorrect")
-        self.assertEqual("EMC", stats['vendor_name'],
-                         "vendor name is incorrect")
-        self.assertEqual("namedbackend", stats['volume_backend_name'],
-                         "volume backend name is incorrect")
-
-        pool_stats = stats['pools'][0]
-
-        expected_pool_stats = {
-            'free_capacity_gb': 3105.303,
-            'reserved_percentage': 32,
-            'location_info': 'unit_test_pool|fake_serial',
-            'total_capacity_gb': 3281.146,
-            'provisioned_capacity_gb': 536.14,
-            'compression_support': 'True',
-            'deduplication_support': 'True',
-            'thin_provisioning_support': True,
-            'thick_provisioning_support': True,
-            'max_over_subscription_ratio': 20.0,
-            'consistencygroup_support': 'True',
-            'replication_enabled': False,
-            'replication_targets': [],
-            'pool_name': 'unit_test_pool',
-            'fast_cache_enabled': True,
-            'fast_support': 'True'}
-
-        self.assertEqual(expected_pool_stats, pool_stats)
-
-    def test_get_volume_stats_ignore_threshold(self):
-        commands = [self.testData.NDU_LIST_CMD,
-                    self.testData.POOL_GET_ALL_CMD(True)]
-        results = [self.testData.NDU_LIST_RESULT,
-                   self.testData.POOL_GET_ALL_RESULT(True)]
-        self.driverSetup(commands, results)
-        self.driver.cli.ignore_pool_full_threshold = True
-        stats = self.driver.get_volume_stats(True)
-
-        pool_stats = stats['pools'][0]
-        self.assertEqual(2, pool_stats['reserved_percentage'])
-
-    def test_get_volume_stats_reserved_percentage_from_conf(self):
-        commands = [self.testData.NDU_LIST_CMD,
-                    self.testData.POOL_GET_ALL_CMD(True)]
-        results = [self.testData.NDU_LIST_RESULT,
-                   self.testData.POOL_GET_ALL_RESULT(True)]
-        self.configuration.reserved_percentage = 22
-        self.driverSetup(commands, results)
-        self.driver.cli.ignore_pool_full_threshold = True
-        stats = self.driver.get_volume_stats(True)
-
-        pool_stats = stats['pools'][0]
-        self.assertEqual(22, pool_stats['reserved_percentage'])
-
-    def test_get_volume_stats_too_many_luns(self):
-        commands = [self.testData.NDU_LIST_CMD,
-                    self.testData.POOL_GET_ALL_CMD(True),
-                    self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD()]
-        results = [self.testData.NDU_LIST_RESULT,
-                   self.testData.POOL_GET_ALL_RESULT(True),
-                   self.testData.POOL_FEATURE_INFO_POOL_LUNS(1000, 1000)]
-        fake_cli = self.driverSetup(commands, results)
-
-        self.driver.cli.check_max_pool_luns_threshold = True
-        stats = self.driver.get_volume_stats(True)
-        pool_stats = stats['pools'][0]
-        self.assertEqual(0, pool_stats['free_capacity_gb'],
-                         "free_capacity_gb is incorrect")
-        expect_cmd = [
-            mock.call(*self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD(),
-                      poll=False)]
-        fake_cli.assert_has_calls(expect_cmd)
-
-        self.driver.cli.check_max_pool_luns_threshold = False
-        stats = self.driver.get_volume_stats(True)
-        pool_stats = stats['pools'][0]
-        self.assertIsNotNone(stats['driver_version'],
-                             "driver_version is not returned")
-        self.assertEqual(3105.303, pool_stats['free_capacity_gb'],
-                         "free_capacity_gb is incorrect")
-
-    @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
-                "CommandLineHelper.create_lun_by_cmd",
-                mock.Mock(return_value={'lun_id': 1,
-                                        'wwn': 'fake_wwn'}))
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
-        mock.Mock(
-            side_effect=[1, 1]))
-    def test_volume_migration_timeout(self):
-        commands = [self.testData.MIGRATION_CMD(),
-                    self.testData.MIGRATION_VERIFY_CMD(1)]
-        FAKE_ERROR_MSG = """\
-A network error occurred while trying to connect: '10.244.213.142'.
-Message : Error occurred because connection refused. \
-Unable to establish a secure connection to the Management Server.
-"""
-        FAKE_MIGRATE_PROPERTY = """\
-Source LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
-Source LU ID:  63950
-Dest LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
-Dest LU ID:  136
-Migration Rate:  high
-Current State:  MIGRATED
-Percent Complete:  100
-Time Remaining:  0 second(s)
-"""
-        results = [(FAKE_ERROR_MSG, 255),
-                   [(FAKE_MIGRATE_PROPERTY, 0),
-                   (FAKE_MIGRATE_PROPERTY, 0),
-                   ('The specified source LUN is not currently migrating',
-                    23)]]
-        fake_cli = self.driverSetup(commands, results)
-        fakehost = {'capabilities': {'location_info':
-                                     'unit_test_pool2|fake_serial',
-                                     'storage_protocol': 'iSCSI'}}
-        ret = self.driver.migrate_volume(None, self.testData.test_volume,
-                                         fakehost)[0]
-        self.assertTrue(ret)
-        # verification
-        expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(1, 1),
-                                retry_disable=True,
-                                poll=True),
-                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
-                                poll=True),
-                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
-                                poll=False),
-                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
-                                poll=False),
-                      mock.call(*self.testData.LUN_PROPERTY_BY_ID(1),
-                                poll=True)]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
-                "CommandLineHelper.create_lun_by_cmd",
-                mock.Mock(
-                    return_value={'lun_id': 1, 'state': 'Ready',
-                                  'operation': 'None',
-                                  'wwn': 'fake_wwn'}))
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
-        mock.Mock(
-            side_effect=[1, 1]))
-    def test_volume_migration(self):
-
-        commands = [self.testData.MIGRATION_CMD(),
-                    self.testData.MIGRATION_VERIFY_CMD(1)]
-        FAKE_MIGRATE_PROPERTY = """\
-Source LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
-Source LU ID:  63950
-Dest LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
-Dest LU ID:  136
-Migration Rate:  high
-Current State:  MIGRATED
-Percent Complete:  100
-Time Remaining:  0 second(s)
-"""
-        results = [SUCCEED,
-                   [(FAKE_MIGRATE_PROPERTY, 0),
-                    ('The specified source LUN is not '
-                     'currently migrating', 23)]]
-        fake_cli = self.driverSetup(commands, results)
-        fake_host = {'capabilities': {'location_info':
-                                      'unit_test_pool2|fake_serial',
-                                      'storage_protocol': 'iSCSI'}}
-        ret = self.driver.migrate_volume(None, self.testData.test_volume,
-                                         fake_host)[0]
-        self.assertTrue(ret)
-        # verification
-        expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(),
-                                retry_disable=True,
-                                poll=True),
-                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
-                                poll=True),
-                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
-                                poll=False)]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
-                "CommandLineHelper.create_lun_by_cmd",
-                mock.Mock(return_value={'lun_id': 1,
-                                        'wwn': 'fake_wwn'}))
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
-        mock.Mock(
-            side_effect=[1, 1]))
-    def test_volume_migration_with_rate(self):
-
-        test_volume_asap = self.testData.test_volume.copy()
-        test_volume_asap.update({'metadata': {'migrate_rate': 'asap'}})
-        commands = [self.testData.MIGRATION_CMD(rate="asap"),
-                    self.testData.MIGRATION_VERIFY_CMD(1)]
-        FAKE_MIGRATE_PROPERTY = """\
-Source LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
-Source LU ID:  63950
-Dest LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
-Dest LU ID:  136
-Migration Rate:  ASAP
-Current State:  MIGRATED
-Percent Complete:  100
-Time Remaining:  0 second(s)
-"""
-        results = [SUCCEED,
-                   [(FAKE_MIGRATE_PROPERTY, 0),
-                    ('The specified source LUN is not '
-                     'currently migrating', 23)]]
-        fake_cli = self.driverSetup(commands, results)
-        fake_host = {'capabilities': {'location_info':
-                                      'unit_test_pool2|fake_serial',
-                                      'storage_protocol': 'iSCSI'}}
-        ret = self.driver.migrate_volume(None, test_volume_asap,
-                                         fake_host)[0]
-        self.assertTrue(ret)
-        # verification
-        expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(rate='asap'),
-                                retry_disable=True,
-                                poll=True),
-                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
-                                poll=True),
-                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
-                                poll=False)]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
-                "CommandLineHelper.create_lun_by_cmd",
-                mock.Mock(
-                    return_value={'lun_id': 5, 'state': 'Ready',
-                                  'operation': 'None',
-                                  'wwn': 'fake_wwn'}))
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'storagetype:tiering': 'Auto'}))
-    def test_volume_migration_02(self):
-
-        commands = [self.testData.MIGRATION_CMD(5, 5),
-                    self.testData.MIGRATION_VERIFY_CMD(5)]
-        FAKE_MIGRATE_PROPERTY = """\
-Source LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
-Source LU ID:  63950
-Dest LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
-Dest LU ID:  136
-Migration Rate:  high
-Current State:  MIGRATED
-Percent Complete:  100
-Time Remaining:  0 second(s)
-"""
-        results = [SUCCEED,
-                   [(FAKE_MIGRATE_PROPERTY, 0),
-                    ('The specified source LUN is not currently migrating',
-                     23)]]
-        fake_cli = self.driverSetup(commands, results)
-        fakehost = {'capabilities': {'location_info':
-                                     'unit_test_pool2|fake_serial',
-                                     'storage_protocol': 'iSCSI'}}
-        ret = self.driver.migrate_volume(None, self.testData.test_volume5,
-                                         fakehost)[0]
-        self.assertTrue(ret)
-        # verification
-        expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(5, 5),
-                                retry_disable=True,
-                                poll=True),
-                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(5),
-                                poll=True),
-                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(5),
-                                poll=False)]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
-                "CommandLineHelper.create_lun_by_cmd",
-                mock.Mock(
-                    return_value={'lun_id': 1, 'state': 'Ready',
-                                  'operation': 'None',
-                                  'wwn': 'fake_wwn'}))
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
-        mock.Mock(
-            side_effect=[1, 1]))
-    def test_volume_migration_failed(self):
-        commands = [self.testData.MIGRATION_CMD(),
-                    self.testData.MIGRATION_VERIFY_CMD(1),
-                    self.testData.LUN_PROPERTY_BY_ID(1)]
-        results = [FAKE_ERROR_RETURN,
-                   ('The specified source LUN is not currently migrating',
-                    23),
-                   self.testData.LIST_LUN_1_ALL(1, 'fake_wwn')]
-        fake_cli = self.driverSetup(commands, results)
-        fakehost = {'capabilities': {'location_info':
-                                     'unit_test_pool2|fake_serial',
-                                     'storage_protocol': 'iSCSI'}}
-        self.assertRaisesRegex(exception.EMCVnxCLICmdError,
-                               r'LUN is not currently migrating',
-                               self.driver.migrate_volume,
-                               None, self.testData.test_volume,
-                               fakehost)
-        # verification
-        expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(),
-                                retry_disable=True,
-                                poll=True)]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
-                "CommandLineHelper.create_lun_by_cmd",
-                mock.Mock(
-                    return_value={'lun_id': 1, 'state': 'Ready',
-                                  'operation': 'None',
-                                  'wwn': 'fake_wwn'}))
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
-        mock.Mock(
-            side_effect=[1, 1]))
-    def test_volume_migration_sp_down_success(self):
-        FAKE_ERROR_MSG = """\
-Error occurred during HTTP request/response from the target: '10.244.213.142'.
-Message : HTTP/1.1 503 Service Unavailable"""
-        commands = [self.testData.MIGRATION_CMD(),
-                    self.testData.MIGRATION_VERIFY_CMD(1),
-                    self.testData.LUN_PROPERTY_BY_ID(1)]
-        results = [(FAKE_ERROR_MSG, 255),
-                   ('The specified source LUN is not '
-                    'currently migrating', 23),
-                   self.testData.LIST_LUN_1_ALL(wwn='new_wwn')]
-        fake_cli = self.driverSetup(commands, results)
-        fakehost = {'capabilities': {'location_info':
-                                     'unit_test_pool2|fake_serial',
-                                     'storage_protocol': 'iSCSI'}}
-        ret = self.driver.migrate_volume(None, self.testData.test_volume,
-                                         fakehost)[0]
-        self.assertTrue(ret)
-        # verification
-        expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(),
-                                retry_disable=True,
-                                poll=True)]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
-                "CommandLineHelper.create_lun_by_cmd",
-                mock.Mock(
-                    return_value={'lun_id': 1, 'state': 'Ready',
-                                  'operation': 'None',
-                                  'wwn': 'fake_wwn'}))
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
-        mock.Mock(
-            side_effect=[1, 1]))
-    def test_volume_migration_stopped(self):
-
-        commands = [self.testData.MIGRATION_CMD(),
-                    self.testData.MIGRATION_VERIFY_CMD(1),
-                    self.testData.MIGRATION_CANCEL_CMD(1)]
-
-        results = [SUCCEED, [(self.testData.MIGRATE_PROPERTY_MIGRATING, 0),
-                             (self.testData.MIGRATE_PROPERTY_STOPPED, 0),
-                             ('The specified source LUN is not '
-                              'currently migrating', 23)],
-                   SUCCEED]
-        fake_cli = self.driverSetup(commands, results)
-        fake_host = {'capabilities': {'location_info':
-                                      'unit_test_pool2|fake_serial',
-                                      'storage_protocol': 'iSCSI'}}
-
-        self.assertRaisesRegex(exception.VolumeBackendAPIException,
-                               "Migration of LUN 1 has been stopped or"
-                               " faulted.",
-                               self.driver.migrate_volume,
-                               None, self.testData.test_volume, fake_host)
-
-        expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(),
-                                retry_disable=True,
-                                poll=True),
-                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
-                                poll=True),
-                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
-                                poll=False),
-                      mock.call(*self.testData.MIGRATION_CANCEL_CMD(1)),
-                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
-                                poll=False)]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
-                "CommandLineHelper.create_lun_by_cmd",
-                mock.Mock(
-                    return_value={'lun_id': 1, 'state': 'Ready',
-                                  'operation': 'None',
-                                  'wwn': 'fake_wwn'}))
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
-        mock.Mock(
-            side_effect=[1, 1]))
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'storagetype:tiering': 'Auto'}))
-    def test_volume_migration_smp(self):
-
-        commands = [self.testData.MIGRATION_CMD(),
-                    self.testData.MIGRATION_VERIFY_CMD(1)]
-        FAKE_MIGRATE_PROPERTY = """\
-Source LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
-Source LU ID:  63950
-Dest LU Name:  volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
-Dest LU ID:  136
-Migration Rate:  high
-Current State:  MIGRATED
-Percent Complete:  100
-Time Remaining:  0 second(s)
-"""
-        results = [SUCCEED,
-                   [(FAKE_MIGRATE_PROPERTY, 0),
-                    ('The specified source LUN is not '
-                     'currently migrating', 23)]]
-        fake_cli = self.driverSetup(commands, results)
-        fake_host = {'capabilities': {'location_info':
-                                      'unit_test_pool2|fake_serial',
-                                      'storage_protocol': 'iSCSI'}}
-
-        vol = EMCVNXCLIDriverTestData.convert_volume(
-            self.testData.test_volume)
-        vol['provider_location'] = 'system^FNM11111|type^smp|id^1'
-        vol['volume_metadata'] = [{'key': 'snapcopy', 'value': 'True'}]
-        tmp_snap = "snap-as-vol-%s" % vol['id']
-        ret = self.driver.migrate_volume(None,
-                                         vol,
-                                         fake_host)
-        self.assertTrue(ret[0])
-        self.assertIn('type^lun', ret[1]['provider_location'])
-        # verification
-        expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(),
-                                retry_disable=True,
-                                poll=True),
-                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
-                                poll=True),
-                      mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
-                                poll=False),
-                      mock.call(*self.testData.LUN_PROPERTY_BY_ID(1),
-                                poll=True),
-                      mock.call(*self.testData.SNAP_DELETE_CMD(tmp_snap),
-                                poll=True)]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    def test_create_destroy_volume_snapshot(self):
-        fake_cli = self.driverSetup()
-
-        # case
-        self.driver.create_snapshot(self.testData.test_snapshot)
-        self.driver.delete_snapshot(self.testData.test_snapshot)
-
-        # verification
-        expect_cmd = [mock.call(*self.testData.SNAP_CREATE_CMD(
-                                fake.SNAPSHOT_NAME),
-                                poll=False),
-                      mock.call(*self.testData.SNAP_DELETE_CMD(
-                                fake.SNAPSHOT_NAME),
-                                poll=True)]
-
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
-                new=utils.ZeroIntervalLoopingCall)
-    def test_snapshot_preparing_volume(self):
-        commands = [self.testData.SNAP_CREATE_CMD(fake.SNAPSHOT_NAME),
-                    self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME)]
-        results = [[self.testData.LUN_PREP_ERROR(), SUCCEED],
-                   [self.testData.LUN_PROPERTY(fake.VOLUME_NAME, size=1,
-                                               operation='Preparing'),
-                    self.testData.LUN_PROPERTY(fake.VOLUME_NAME, size=1,
-                                               operation='Optimizing'),
-                    self.testData.LUN_PROPERTY(fake.VOLUME_NAME, size=1,
-                                               operation='None')]]
-
-        fake_cli = self.driverSetup(commands, results)
-
-        self.driver.create_snapshot(self.testData.test_snapshot)
-        expected = [
-            mock.call(*self.testData.SNAP_CREATE_CMD(fake.SNAPSHOT_NAME),
-                      poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                      poll=True),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                      poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                      poll=False),
-            mock.call(*self.testData.SNAP_CREATE_CMD(fake.SNAPSHOT_NAME),
-                      poll=False)]
-        fake_cli.assert_has_calls(expected)
-
-    @mock.patch(
-        "oslo_concurrency.processutils.execute",
-        mock.Mock(
-            return_value=(
-                "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
-    @mock.patch('random.randint',
-                mock.Mock(return_value=0))
-    def test_initialize_connection(self):
-        # Test for auto registration
-        self.configuration.initiator_auto_registration = True
-        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                    self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
-        results = [[("No group", 83),
-                    self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
-                   self.testData.PING_OK]
-
-        fake_cli = self.driverSetup(commands, results)
-
-        connection_info = self.driver.initialize_connection(
-            self.testData.test_volume,
-            self.testData.connector)
-
-        self.assertEqual(self.testData.iscsi_connection_info,
-                         connection_info)
-
-        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                              poll=False),
-                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
-                    mock.call(*self.testData.set_path_cmd(
-                              'fakehost', 'iqn.1993-08.org.debian:01:222', 'A',
-                              4, 0, '10.0.0.2')),
-                    mock.call(*self.testData.set_path_cmd(
-                              'fakehost', 'iqn.1993-08.org.debian:01:222',
-                              'A', 0, 0, '10.0.0.2')),
-                    mock.call(*self.testData.set_path_cmd(
-                              'fakehost', 'iqn.1993-08.org.debian:01:222',
-                              'B', 2, 0, '10.0.0.2')),
-                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                              poll=True),
-                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
-                              '-gname', 'fakehost', '-o',
-                              poll=False),
-                    mock.call(*self.testData.PINGNODE_CMD('A', 4, 0,
-                                                          '10.0.0.2'))]
-        fake_cli.assert_has_calls(expected)
-
-        # Test for manual registration
-        self.configuration.initiator_auto_registration = False
-
-        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                    self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'),
-                    self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
-        results = [
-            [("No group", 83),
-             self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
-            ('', 0),
-            self.testData.PING_OK
-        ]
-        fake_cli = self.driverSetup(commands, results)
-        test_volume_rw = self.testData.test_volume_rw
-        connection_info = self.driver.initialize_connection(
-            test_volume_rw,
-            self.testData.connector)
-
-        self.assertEqual(self.testData.iscsi_connection_info,
-                         connection_info)
-
-        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                              poll=False),
-                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
-                    mock.call('storagegroup', '-connecthost',
-                              '-host', 'fakehost', '-gname', 'fakehost', '-o'),
-                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                              poll=True),
-                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
-                              '-gname', 'fakehost', '-o', poll=False),
-                    mock.call(*self.testData.PINGNODE_CMD('A', 4, 0,
-                                                          '10.0.0.2'))]
-        fake_cli.assert_has_calls(expected)
-
-        # Test No Ping
-        self.configuration.iscsi_initiators = None
-
-        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                    self.testData.CONNECTHOST_CMD('fakehost', 'fakehost')]
-        results = [
-            [("No group", 83),
-             self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
-            ('', 0)]
-        fake_cli = self.driverSetup(commands, results)
-        test_volume_rw = self.testData.test_volume_rw.copy()
-        test_volume_rw['provider_location'] = 'system^fakesn|type^lun|id^1'
-        connection_info = self.driver.initialize_connection(
-            test_volume_rw,
-            self.testData.connector)
-
-        self.assertEqual(self.testData.iscsi_connection_info,
-                         connection_info)
-
-        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                              poll=False),
-                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
-                    mock.call('storagegroup', '-connecthost',
-                              '-host', 'fakehost', '-gname', 'fakehost', '-o'),
-                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                              poll=True),
-                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
-                              '-gname', 'fakehost', '-o', poll=False)]
-        fake_cli.assert_has_calls(expected)
-
-    @mock.patch('random.randint',
-                mock.Mock(return_value=0))
-    @mock.patch('cinder.volume.drivers.emc.emc_vnx_cli.'
-                'CommandLineHelper.ping_node',
-                mock.Mock(return_value=True))
-    @mock.patch('random.shuffle', mock.Mock(return_value=0))
-    def test_initialize_connection_multipath(self):
-        self.configuration.initiator_auto_registration = False
-
-        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost')]
-        results = [self.testData.STORAGE_GROUP_HAS_MAP_MP('fakehost')]
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.cli.iscsi_targets = {
-            'A': [
-                {'Port WWN': 'iqn.1992-04.com.emc:cx.fnm00124000215.a4',
-                 'SP': 'A',
-                 'Port ID': 4,
-                 'Virtual Port ID': 0,
-                 'IP Address': '10.244.214.118'},
-                {'Port WWN': 'iqn.1992-04.com.emc:cx.fnm00124000215.a5',
-                 'SP': 'A',
-                 'Port ID': 5,
-                 'Virtual Port ID': 0,
-                 'IP Address': '10.244.214.119'}],
-            'B': []}
-        test_volume_rw = self.testData.test_volume_rw.copy()
-        test_volume_rw['provider_location'] = 'system^fakesn|type^lun|id^1'
-        connector_m = dict(self.testData.connector)
-        connector_m['multipath'] = True
-        connection_info = self.driver.initialize_connection(
-            test_volume_rw,
-            connector_m)
-
-        self.assertEqual(self.testData.iscsi_connection_info_mp,
-                         connection_info)
-
-        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                              poll=False),
-                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
-                              '-gname', 'fakehost', '-o', poll=False)]
-        fake_cli.assert_has_calls(expected)
-
-    @mock.patch(
-        "oslo_concurrency.processutils.execute",
-        mock.Mock(
-            return_value=(
-                "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
-        mock.Mock(
-            return_value=3))
-    @mock.patch('random.randint',
-                mock.Mock(return_value=0))
-    def test_initialize_connection_exist(self):
-        """Test if initialize connection exists.
-
-        A LUN is added to the SG right before the attach,
-        it may not exists in the first SG query
-        """
-        # Test for auto registration
-        self.configuration.initiator_auto_registration = True
-        self.configuration.max_luns_per_storage_group = 2
-        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                    ('storagegroup', '-addhlu', '-hlu', 2, '-alu', 3,
-                     '-gname', 'fakehost', '-o'),
-                    self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
-        results = [[self.testData.STORAGE_GROUP_HAS_MAP('fakehost'),
-                    self.testData.STORAGE_GROUP_HAS_MAP_2('fakehost')],
-                   ("fakeerror", 23),
-                   self.testData.PING_OK]
-
-        fake_cli = self.driverSetup(commands, results)
-
-        iscsi_data = self.driver.initialize_connection(
-            self.testData.test_volume,
-            self.testData.connector
-        )
-        self.assertEqual(2, iscsi_data['data']['target_lun'],
-                         "iSCSI initialize connection returned wrong HLU")
-        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                              poll=False),
-                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 3,
-                              '-gname', 'fakehost', '-o',
-                              poll=False),
-                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                              poll=True),
-                    mock.call(*self.testData.PINGNODE_CMD('A', 4, 0,
-                                                          '10.0.0.2'))]
-        fake_cli.assert_has_calls(expected)
-
-    @mock.patch('random.randint',
-                mock.Mock(return_value=0))
-    def test_initialize_connection_iscsi_white_list(self):
-        self.configuration.io_port_list = 'a-0-0,B-2-0'
-        test_volume = self.testData.test_volume.copy()
-        test_volume['provider_location'] = 'system^fakesn|type^lun|id^1'
-        # Test for auto registration
-        self.configuration.initiator_auto_registration = True
-        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost')]
-        results = [[("No group", 83),
-                    self.testData.STORAGE_GROUP_HAS_MAP_ISCSI('fakehost')]]
-        fake_cli = self.driverSetup(commands, results)
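-        # Only the white-listed ports (a-0-0 and B-2-0) should be used
-        # for registration, matching the two set_path calls verified
-        # below.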
-        self.driver.cli.iscsi_targets = {'A': [{'SP': 'A', 'Port ID': 0,
-                                                'Virtual Port ID': 0,
-                                                'Port WWN': 'fake_iqn',
-                                                'IP Address': '192.168.1.1'}],
-                                         'B': [{'SP': 'B', 'Port ID': 2,
-                                                'Virtual Port ID': 0,
-                                                'Port WWN': 'fake_iqn1',
-                                                'IP Address': '192.168.1.2'}]}
-        self.driver.initialize_connection(
-            test_volume,
-            self.testData.connector)
-        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                              poll=False),
-                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
-                    mock.call(*self.testData.set_path_cmd(
-                              'fakehost', 'iqn.1993-08.org.debian:01:222',
-                              'A', 0, 0, '10.0.0.2')),
-                    mock.call(*self.testData.set_path_cmd(
-                              'fakehost', 'iqn.1993-08.org.debian:01:222',
-                              'B', 2, 0, '10.0.0.2')),
-                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                              poll=True),
-                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
-                              '-gname', 'fakehost', '-o',
-                              poll=False)]
-        fake_cli.assert_has_calls(expected)
-
-    @mock.patch('cinder.volume.drivers.emc.emc_vnx_cli.'
-                'EMCVnxCliBase._build_pool_stats',
-                mock.Mock(return_value=None))
-    @mock.patch('cinder.volume.drivers.emc.emc_vnx_cli.'
-                'CommandLineHelper.get_pool',
-                mock.Mock(return_value={'total_capacity_gb': 0.0,
-                                        'free_capacity_gb': 0.0}))
-    def test_update_iscsi_io_ports(self):
-        self.configuration.io_port_list = 'a-0-0,B-2-0'
-        # Test for auto registration
-        self.configuration.initiator_auto_registration = True
-        commands = [self.testData.GETPORT_CMD()]
-        results = [self.testData.WHITE_LIST_PORTS]
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.cli.update_volume_stats()
-        expected = [mock.call(*self.testData.GETPORT_CMD(), poll=False)]
-        fake_cli.assert_has_calls(expected)
-        io_ports = self.driver.cli.iscsi_targets
-        self.assertEqual((0, 'iqn.1992-04.com.emc:cx.fnmxxx.a0'),
-                         (io_ports['A'][0]['Port ID'],
-                          io_ports['A'][0]['Port WWN']))
-        self.assertEqual((2, 'iqn.1992-04.com.emc:cx.fnmxxx.b2'),
-                         (io_ports['B'][0]['Port ID'],
-                          io_ports['B'][0]['Port WWN']))
-
-    @mock.patch(
-        "oslo_concurrency.processutils.execute",
-        mock.Mock(
-            return_value=(
-                "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
-        mock.Mock(
-            return_value=4))
-    @mock.patch('random.randint',
-                mock.Mock(return_value=0))
-    def test_initialize_connection_no_hlu_left_1(self):
-        """Test initialize connection with no hlu per first SG query.
-
-        There is no hlu per the first SG query
-        But there are hlu left after the full poll
-        """
-        # Test for auto registration
-        self.configuration.initiator_auto_registration = True
-        self.configuration.max_luns_per_storage_group = 2
-        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                    ('storagegroup', '-addhlu', '-hlu', 2, '-alu', 4,
-                     '-gname', 'fakehost', '-o'),
-                    self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
-        results = [[self.testData.STORAGE_GROUP_HAS_MAP_2('fakehost'),
-                    self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
-                   ("", 0),
-                   self.testData.PING_OK]
-
-        fake_cli = self.driverSetup(commands, results)
-
-        iscsi_data = self.driver.initialize_connection(
-            self.testData.test_volume,
-            self.testData.connector)
-        self.assertEqual(2, iscsi_data['data']['target_lun'],
-                         "iSCSI initialize connection returned wrong HLU")
-        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                              poll=False),
-                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                              poll=True),
-                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 4,
-                              '-gname', 'fakehost', '-o',
-                              poll=False),
-                    mock.call(*self.testData.PINGNODE_CMD('A', 4, 0,
-                                                          u'10.0.0.2'))]
-        fake_cli.assert_has_calls(expected)
-
-    @mock.patch(
-        "oslo_concurrency.processutils.execute",
-        mock.Mock(
-            return_value=(
-                "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
-        mock.Mock(
-            return_value=4))
-    @mock.patch('random.randint',
-                mock.Mock(return_value=0))
-    def test_initialize_connection_no_hlu_left_2(self):
-        """Test initialize connection with no hlu left."""
-        # Test for auto registration
-        self.configuration.initiator_auto_registration = True
-        self.configuration.max_luns_per_storage_group = 2
-        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost')]
-        results = [
-            [self.testData.STORAGE_GROUP_HAS_MAP_2('fakehost'),
-             self.testData.STORAGE_GROUP_HAS_MAP_2('fakehost')]
-        ]
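-        # Both the cached and the polled SG query report a full storage
-        # group, so no HLU can be assigned and the driver raises.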
-
-        fake_cli = self.driverSetup(commands, results)
-
-        self.assertRaises(exception.VolumeBackendAPIException,
-                          self.driver.initialize_connection,
-                          self.testData.test_volume,
-                          self.testData.connector)
-        expected = [
-            mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                      poll=False),
-            mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                      poll=True),
-        ]
-        fake_cli.assert_has_calls(expected)
-
-    @mock.patch('os.path.exists', return_value=True)
-    def test_terminate_connection(self, _mock_exists):
-
-        self.driver = emc_cli_iscsi.EMCCLIISCSIDriver(
-            configuration=self.configuration)
-        cli_helper = self.driver.cli._client
-        data = {'storage_group_name': "fakehost",
-                'storage_group_uid': "2F:D4:00:00:00:00:00:"
-                "00:00:00:FF:E5:3A:03:FD:6D",
-                'lunmap': {1: 16, 2: 88, 3: 47}}
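-        # lunmap maps ALU -> HLU; LUN 1 maps to HLU 16, which is what
-        # remove_hlu_from_storagegroup should be called with below.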
-        cli_helper.get_storage_group = mock.Mock(
-            return_value=data)
-        lun_info = {'lun_name': "unit_test_lun",
-                    'lun_id': 1,
-                    'pool': "unit_test_pool",
-                    'attached_snapshot': "N/A",
-                    'owner': "A",
-                    'total_capacity_gb': 1.0,
-                    'state': "Ready"}
-        cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
-        cli_helper.remove_hlu_from_storagegroup = mock.Mock()
-        self.driver.terminate_connection(self.testData.test_volume,
-                                         self.testData.connector)
-        cli_helper.remove_hlu_from_storagegroup.assert_called_once_with(
-            16, self.testData.connector["host"])
-
-    def test_create_volume_cli_failed(self):
-        commands = [self.testData.LUN_CREATION_CMD(
-            fake.VOLUME4_NAME, 1, 'unit_test_pool', None, None, poll=False)]
-        results = [FAKE_ERROR_RETURN]
-        fake_cli = self.driverSetup(commands, results)
-
-        self.assertRaises(exception.EMCVnxCLICmdError,
-                          self.driver.create_volume,
-                          self.testData.test_failed_volume)
-        expect_cmd = [mock.call(*self.testData.LUN_CREATION_CMD(
-            fake.VOLUME4_NAME, 1, 'unit_test_pool', None, None, poll=False))]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
-                new=utils.ZeroIntervalLoopingCall)
-    def test_create_faulted_volume(self):
-        volume_name = 'faulted_volume'
-        cmd_create = self.testData.LUN_CREATION_CMD(
-            volume_name, 1, 'unit_test_pool', None, None, poll=False)
-        cmd_list_preparing = self.testData.LUN_PROPERTY_ALL_CMD(volume_name)
-        commands = [cmd_create, cmd_list_preparing]
-        results = [SUCCEED,
-                   [self.testData.LUN_PROPERTY(name=volume_name,
-                                               state='Faulted',
-                                               faulted='true',
-                                               operation='Preparing'),
-                    self.testData.LUN_PROPERTY(name=volume_name,
-                                               state='Faulted',
-                                               faulted='true',
-                                               operation='None')]]
-        fake_cli = self.driverSetup(commands, results)
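-        # The mocked LUN reports operation 'Preparing' first; the driver
-        # re-polls until the operation becomes 'None' before treating
-        # the create as successful.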
-        faulted_volume = self.testData.test_volume.copy()
-        faulted_volume.update({'name': volume_name})
-        self.driver.create_volume(faulted_volume)
-        expect_cmd = [
-            mock.call(*self.testData.LUN_CREATION_CMD(
-                volume_name, 1, 'unit_test_pool', None, None, poll=False)),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(volume_name),
-                      poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(volume_name),
-                      poll=False)]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
-                new=utils.ZeroIntervalLoopingCall)
-    def test_create_offline_volume(self):
-        volume_name = 'offline_volume'
-        cmd_create = self.testData.LUN_CREATION_CMD(
-            volume_name, 1, 'unit_test_pool', None, None, poll=False)
-        cmd_list = self.testData.LUN_PROPERTY_ALL_CMD(volume_name)
-        commands = [cmd_create, cmd_list]
-        results = [SUCCEED,
-                   self.testData.LUN_PROPERTY(name=volume_name,
-                                              state='Offline',
-                                              faulted='true')]
-        self.driverSetup(commands, results)
-        offline_volume = self.testData.test_volume.copy()
-        offline_volume.update({'name': volume_name})
-        self.assertRaisesRegex(exception.VolumeBackendAPIException,
-                               "Volume %s was created in VNX, but in"
-                               " Offline state." % volume_name,
-                               self.driver.create_volume,
-                               offline_volume)
-
-    def test_create_volume_snapshot_failed(self):
-        test_snapshot = EMCVNXCLIDriverTestData.convert_snapshot(
-            self.testData.test_snapshot2)
-
-        commands = [self.testData.SNAP_CREATE_CMD(test_snapshot.name)]
-        results = [FAKE_ERROR_RETURN]
-        fake_cli = self.driverSetup(commands, results)
-        # the snapshot creation failure should surface as EMCVnxCLICmdError
-        self.assertRaises(exception.EMCVnxCLICmdError,
-                          self.driver.create_snapshot,
-                          test_snapshot)
-        # verify the CLI call that was attempted
-        expect_cmd = [
-            mock.call(
-                *self.testData.SNAP_CREATE_CMD(test_snapshot.name),
-                poll=False)]
-
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @ddt.data('high', 'asap', 'low', 'medium')
-    def test_create_volume_from_snapshot(self, migrate_rate):
-        test_snapshot = EMCVNXCLIDriverTestData.convert_snapshot(
-            self.testData.test_snapshot)
-        test_volume = EMCVNXCLIDriverTestData.convert_volume(
-            self.testData.test_volume2)
-        test_volume.metadata = {'migrate_rate': migrate_rate}
-        cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD(
-            build_migration_dest_name(test_volume.name))
-        cmd_dest_np = self.testData.LUN_PROPERTY_ALL_CMD(
-            build_migration_dest_name(test_volume.name))
-        output_dest = self.testData.LUN_PROPERTY(
-            build_migration_dest_name(test_volume.name))
-        cmd_migrate = self.testData.MIGRATION_CMD(1, 1, rate=migrate_rate)
-        output_migrate = ("", 0)
-        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
-        output_migrate_verify = (r'The specified source LUN '
-                                 'is not currently migrating', 23)
-        commands = [cmd_dest, cmd_dest_np, cmd_migrate,
-                    cmd_migrate_verify]
-        results = [output_dest, output_dest, output_migrate,
-                   output_migrate_verify]
-        fake_cli1 = self.driverSetup(commands, results)
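-        # Expected flow: create an SMP from the snapshot, attach it,
-        # create a destination LUN, then migrate onto it; the rc-23
-        # 'not currently migrating' reply is treated as completion.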
-        self.driver.create_volume_from_snapshot(test_volume,
-                                                test_snapshot)
-        expect_cmd1 = [
-            mock.call(*self.testData.SNAP_MP_CREATE_CMD(
-                      name=test_volume.name, source=test_snapshot.volume_name),
-                      poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(test_volume.name),
-                      poll=True),
-            mock.call(
-                *self.testData.SNAP_ATTACH_CMD(
-                    name=test_volume.name, snapName=test_snapshot.name)),
-            mock.call(*self.testData.LUN_CREATION_CMD(
-                build_migration_dest_name(test_volume.name),
-                1, 'unit_test_pool', None, None)),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                      build_migration_dest_name(test_volume.name)),
-                      poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                      build_migration_dest_name(test_volume.name)),
-                      poll=False),
-            mock.call(*self.testData.MIGRATION_CMD(1, 1, rate=migrate_rate),
-                      retry_disable=True,
-                      poll=True),
-            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
-                      poll=True)]
-        fake_cli1.assert_has_calls(expect_cmd1)
-
-        self.configuration.ignore_pool_full_threshold = True
-        fake_cli2 = self.driverSetup(commands, results)
-        self.driver.create_volume_from_snapshot(test_volume,
-                                                test_snapshot)
-        expect_cmd2 = [
-            mock.call(*self.testData.LUN_CREATION_CMD(
-                build_migration_dest_name(test_volume.name), 1,
-                'unit_test_pool', None, None,
-                ignore_thresholds=True))]
-        fake_cli2.assert_has_calls(expect_cmd2)
-
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'provisioning:type': 'thick'}))
-    def test_create_volume_from_snapshot_smp(self):
-        fake_cli = self.driverSetup()
-        test_snap = EMCVNXCLIDriverTestData.convert_snapshot(
-            self.testData.test_snapshot)
-        new_volume = self.testData.test_volume_with_type.copy()
-        new_volume['name_id'] = new_volume['id']
-        vol = self.driver.create_volume_from_snapshot(
-            new_volume, test_snap)
-        self.assertIn('type^smp', vol['provider_location'])
-        expect_cmd = [
-            mock.call(
-                *self.testData.SNAP_COPY_CMD(
-                    src_snap=test_snap.name,
-                    snap_name='snap-as-vol-%s' % test_snap.volume.id)),
-            mock.call(
-                *self.testData.SNAP_MODIFY_CMD(
-                    name='snap-as-vol-%s' % test_snap.volume.id,
-                    rw='yes')),
-            mock.call(
-                *self.testData.SNAP_MP_CREATE_CMD(
-                    name=new_volume['name'], source=test_snap.volume_name),
-                poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(new_volume['name']),
-                      poll=True),
-            mock.call(
-                *self.testData.SNAP_ATTACH_CMD(
-                    name=new_volume['name'],
-                    snapName='snap-as-vol-%s' % test_snap.volume.id))]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
-                new=utils.ZeroIntervalLoopingCall)
-    def test_create_volume_from_snapshot_sync_failed(self):
-
-        cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD(
-            build_migration_dest_name('vol2'))
-        cmd_dest_np = self.testData.LUN_PROPERTY_ALL_CMD(
-            build_migration_dest_name('vol2'))
-        output_dest = self.testData.LUN_PROPERTY(
-            build_migration_dest_name('vol2'))
-        cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
-        cmd_detach_lun = ('lun', '-detach', '-name', fake.VOLUME2_NAME, '-o')
-        output_migrate = ("", 0)
-        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
-        output_migrate_verify = (r'The specified source LUN '
-                                 'is not currently migrating', 23)
-        cmd_migrate_cancel = self.testData.MIGRATION_CANCEL_CMD(1)
-        output_migrate_cancel = ("", 0)
-
-        commands = [cmd_dest, cmd_dest_np, cmd_migrate,
-                    cmd_migrate_verify, cmd_migrate_cancel]
-        results = [output_dest, output_dest, output_migrate,
-                   [FAKE_ERROR_RETURN, output_migrate_verify],
-                   output_migrate_cancel]
-
-        fake_cli = self.driverSetup(commands, results)
-        new_volume = EMCVNXCLIDriverTestData.convert_volume(
-            self.testData.test_volume2)
-        src_snapshot = EMCVNXCLIDriverTestData.convert_snapshot(
-            self.testData.test_snapshot)
-        self.assertRaises(exception.VolumeBackendAPIException,
-                          self.driver.create_volume_from_snapshot,
-                          new_volume, src_snapshot)
-
-        expect_cmd = [
-            mock.call(
-                *self.testData.SNAP_MP_CREATE_CMD(
-                    name=fake.VOLUME2_NAME, source=fake.VOLUME_NAME),
-                poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME2_NAME),
-                      poll=True),
-            mock.call(
-                *self.testData.SNAP_ATTACH_CMD(
-                    name=fake.VOLUME2_NAME, snapName=src_snapshot.name)),
-            mock.call(*self.testData.LUN_CREATION_CMD(
-                build_migration_dest_name(fake.VOLUME2_NAME), 1,
-                'unit_test_pool', None, None)),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                build_migration_dest_name(fake.VOLUME2_NAME)), poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                      build_migration_dest_name(fake.VOLUME2_NAME)),
-                      poll=False),
-            mock.call(*self.testData.MIGRATION_CMD(1, 1),
-                      retry_disable=True,
-                      poll=True),
-            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
-                      poll=True),
-            mock.call(*self.testData.LUN_DELETE_CMD(
-                      build_migration_dest_name(fake.VOLUME2_NAME))),
-            mock.call(*cmd_detach_lun),
-            mock.call(*self.testData.LUN_DELETE_CMD(fake.VOLUME2_NAME))]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    def test_create_vol_from_snap_failed_in_migrate_lun(self):
-        cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD(
-            build_migration_dest_name('vol2'))
-        output_dest = self.testData.LUN_PROPERTY(
-            build_migration_dest_name('vol2'))
-        output_not_migrating = self.testData.LUN_NOT_IN_MIGRATING
-        cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
-        cmd_verify = self.testData.MIGRATION_VERIFY_CMD(1)
-        cmd_detach_lun = ('lun', '-detach', '-name', fake.VOLUME2_NAME, '-o')
-        commands = [cmd_dest, cmd_migrate, cmd_verify,
-                    self.testData.LUN_PROPERTY_BY_ID(1)]
-        results = [output_dest, FAKE_ERROR_RETURN, output_not_migrating,
-                   self.testData.LIST_LUN_1_ALL(1)]
-        fake_cli = self.driverSetup(commands, results)
-        src_vol = self.testData.test_volume2
-        test_snapshot = EMCVNXCLIDriverTestData.convert_snapshot(
-            self.testData.test_snapshot)
-        self.assertRaises(exception.VolumeBackendAPIException,
-                          self.driver.create_volume_from_snapshot,
-                          src_vol,
-                          test_snapshot)
-        expect_cmd = [
-            mock.call(*self.testData.SNAP_MP_CREATE_CMD(
-                      name=fake.VOLUME2_NAME, source=fake.VOLUME_NAME),
-                      poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME2_NAME),
-                      poll=True),
-            mock.call(*self.testData.SNAP_ATTACH_CMD(
-                      name=fake.VOLUME2_NAME, snapName=test_snapshot.name)),
-            mock.call(*self.testData.LUN_CREATION_CMD(
-                      build_migration_dest_name(fake.VOLUME2_NAME), 1,
-                      'unit_test_pool', None, None)),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                      build_migration_dest_name(fake.VOLUME2_NAME)),
-                      poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                      build_migration_dest_name(fake.VOLUME2_NAME)),
-                      poll=False),
-            mock.call(*self.testData.MIGRATION_CMD(1, 1),
-                      poll=True,
-                      retry_disable=True),
-            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1), poll=True),
-            mock.call(*self.testData.LUN_PROPERTY_BY_ID(1), poll=True),
-            mock.call(*self.testData.LUN_DELETE_CMD(
-                      build_migration_dest_name(fake.VOLUME2_NAME))),
-            mock.call(*cmd_detach_lun),
-            mock.call(*self.testData.LUN_DELETE_CMD(fake.VOLUME2_NAME))]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @ddt.data('high', 'asap', 'low', 'medium')
-    def test_create_cloned_volume(self, migrate_rate):
-        cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD(
-            build_migration_dest_name(fake.VOLUME2_NAME))
-        cmd_dest_p = self.testData.LUN_PROPERTY_ALL_CMD(
-            build_migration_dest_name(fake.VOLUME2_NAME))
-        output_dest = self.testData.LUN_PROPERTY(
-            build_migration_dest_name(fake.VOLUME2_NAME))
-        cmd_clone = self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME2_NAME)
-        output_clone = self.testData.LUN_PROPERTY(fake.VOLUME2_NAME)
-        cmd_migrate = self.testData.MIGRATION_CMD(1, 1, rate=migrate_rate)
-        output_migrate = ("", 0)
-        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
-        output_migrate_verify = (r'The specified source LUN '
-                                 'is not currently migrating', 23)
-        commands = [cmd_dest, cmd_dest_p, cmd_clone, cmd_migrate,
-                    cmd_migrate_verify]
-        results = [output_dest, output_dest, output_clone, output_migrate,
-                   output_migrate_verify]
-        fake_cli = self.driverSetup(commands, results)
-
-        volume = self.testData.test_volume.copy()
-        volume['id'] = fake.VOLUME2_ID
-        volume = EMCVNXCLIDriverTestData.convert_volume(volume)
-        # Make sure this size, not the source volume's, is used
-        volume.size = 10
-        volume.metadata = {'migrate_rate': migrate_rate}
-        self.driver.create_cloned_volume(volume, self.testData.test_volume)
-        tmp_snap = 'tmp-snap-' + volume.id
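-        # Cloning goes through a temporary snapshot of the source and an
-        # SMP attached to it, followed by a migration to a real LUN; the
-        # temporary snapshot is deleted at the end (last call below).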
-        expect_cmd = [
-            mock.call(
-                *self.testData.SNAP_CREATE_CMD(tmp_snap), poll=False),
-            mock.call(*self.testData.SNAP_MP_CREATE_CMD(
-                name=fake.VOLUME2_NAME,
-                source=fake.VOLUME_NAME), poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME2_NAME),
-                      poll=True),
-            mock.call(
-                *self.testData.SNAP_ATTACH_CMD(
-                    name=fake.VOLUME2_NAME, snapName=tmp_snap)),
-            mock.call(*self.testData.LUN_CREATION_CMD(
-                build_migration_dest_name(fake.VOLUME2_NAME), 10,
-                'unit_test_pool', None, None)),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                build_migration_dest_name(fake.VOLUME2_NAME)), poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                build_migration_dest_name(fake.VOLUME2_NAME)), poll=False),
-            mock.call(*self.testData.MIGRATION_CMD(1, 1, rate=migrate_rate),
-                      poll=True,
-                      retry_disable=True),
-            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
-                      poll=True),
-            mock.call(*self.testData.LUN_PROPERTY_BY_ID(1),
-                      poll=True),
-            mock.call(*self.testData.SNAP_DELETE_CMD(tmp_snap),
-                      poll=True)]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'provisioning:type': 'thick'}))
-    def test_create_cloned_volume_smp(self):
-        fake_cli = self.driverSetup()
-        test_clone = self.testData.test_clone.copy()
-        test_clone['name_id'] = test_clone['id']
-        vol = self.driver.create_cloned_volume(
-            test_clone,
-            self.testData.test_volume_with_type)
-        self.assertIn('type^smp', vol['provider_location'])
-        expect_cmd = [
-            mock.call(
-                *self.testData.SNAP_CREATE_CMD(
-                    name='snap-as-vol-%s' % fake.VOLUME2_ID),
-                poll=False),
-            mock.call(
-                *self.testData.SNAP_MP_CREATE_CMD(
-                    name=fake.VOLUME2_NAME, source=fake.VOLUME_NAME),
-                poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME2_NAME),
-                      poll=True),
-            mock.call(
-                *self.testData.SNAP_ATTACH_CMD(
-                    name=fake.VOLUME2_NAME,
-                    snapName='snap-as-vol-%s' % fake.VOLUME2_ID))]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    def test_delete_volume_failed(self):
-        commands = [self.testData.LUN_DELETE_CMD(fake.VOLUME4_NAME)]
-        results = [FAKE_ERROR_RETURN]
-        fake_cli = self.driverSetup(commands, results)
-
-        self.assertRaises(exception.EMCVnxCLICmdError,
-                          self.driver.delete_volume,
-                          self.testData.test_failed_volume)
-        expected = [mock.call(*self.testData.LUN_DELETE_CMD(
-            fake.VOLUME4_NAME))]
-        fake_cli.assert_has_calls(expected)
-
-    def test_delete_volume_in_sg_failed(self):
-        commands = [self.testData.LUN_DELETE_CMD(fake.VOLUME_NAME),
-                    self.testData.LUN_DELETE_CMD(fake.VOLUME2_NAME)]
-        results = [self.testData.LUN_DELETE_IN_SG_ERROR(),
-                   self.testData.LUN_DELETE_IN_SG_ERROR(False)]
-        self.driverSetup(commands, results)
-        self.assertRaises(exception.EMCVnxCLICmdError,
-                          self.driver.delete_volume,
-                          self.testData.test_volume1_in_sg)
-        self.assertRaises(exception.EMCVnxCLICmdError,
-                          self.driver.delete_volume,
-                          self.testData.test_volume2_in_sg)
-
-    def test_delete_volume_in_sg_force(self):
-        commands = [self.testData.LUN_DELETE_CMD(fake.VOLUME_NAME),
-                    self.testData.STORAGEGROUP_LIST_CMD(),
-                    self.testData.STORAGEGROUP_REMOVEHLU_CMD('fakehost1',
-                                                             '41'),
-                    self.testData.STORAGEGROUP_REMOVEHLU_CMD('fakehost1',
-                                                             '42'),
-                    self.testData.LUN_DELETE_CMD(fake.VOLUME2_NAME),
-                    self.testData.STORAGEGROUP_REMOVEHLU_CMD('fakehost2',
-                                                             '31'),
-                    self.testData.STORAGEGROUP_REMOVEHLU_CMD('fakehost2',
-                                                             '32')]
-        results = [[self.testData.LUN_DELETE_IN_SG_ERROR(),
-                    SUCCEED],
-                   self.testData.STORAGE_GROUPS_HAS_MAP('fakehost1',
-                                                        'fakehost2'),
-                   SUCCEED,
-                   SUCCEED,
-                   [self.testData.LUN_DELETE_IN_SG_ERROR(False),
-                    SUCCEED],
-                   SUCCEED,
-                   SUCCEED]
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.cli.force_delete_lun_in_sg = True
-        self.driver.delete_volume(self.testData.test_volume1_in_sg)
-        self.driver.delete_volume(self.testData.test_volume2_in_sg)
-        expected = [mock.call(*self.testData.LUN_DELETE_CMD(fake.VOLUME_NAME)),
-                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD(),
-                              poll=True),
-                    mock.call(*self.testData.STORAGEGROUP_REMOVEHLU_CMD(
-                        'fakehost1', '41'), poll=False),
-                    mock.call(*self.testData.STORAGEGROUP_REMOVEHLU_CMD(
-                        'fakehost2', '42'), poll=False),
-                    mock.call(*self.testData.LUN_DELETE_CMD(fake.VOLUME_NAME)),
-                    mock.call(*self.testData.LUN_DELETE_CMD(
-                        fake.VOLUME2_NAME)),
-                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD(),
-                              poll=True),
-                    mock.call(*self.testData.STORAGEGROUP_REMOVEHLU_CMD(
-                        'fakehost1', '31'), poll=False),
-                    mock.call(*self.testData.STORAGEGROUP_REMOVEHLU_CMD(
-                        'fakehost2', '32'), poll=False),
-                    mock.call(*self.testData.LUN_DELETE_CMD(
-                        fake.VOLUME2_NAME))]
-        fake_cli.assert_has_calls(expected)
-
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'storagetype:provisioning': 'compressed'}))
-    def test_delete_volume_smp(self):
-        fake_cli = self.driverSetup()
-        vol = self.testData.test_volume_with_type.copy()
-        vol['metadata'] = [{'key': 'snapcopy', 'value': 'True'}]
-        vol['provider_location'] = 'system^FNM11111|type^smp|id^1'
-        vol['name_id'] = vol['id']
-        tmp_snap = 'snap-as-vol-%s' % vol['id']
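-        # A snapcopy SMP volume is backed by its 'snap-as-vol-<id>'
-        # snapshot, which must be removed along with the LUN.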
-        self.driver.delete_volume(vol)
-        expected = [mock.call(*self.testData.LUN_DELETE_CMD(vol['name'])),
-                    mock.call(*self.testData.SNAP_DELETE_CMD(tmp_snap),
-                              poll=True)]
-        fake_cli.assert_has_calls(expected)
-
-    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
-                new=utils.ZeroIntervalLoopingCall)
-    def test_extend_volume(self):
-        commands = [self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME)]
-        results = [self.testData.LUN_PROPERTY(fake.VOLUME_NAME, size=2)]
-        fake_cli = self.driverSetup(commands, results)
-
-        self.driver.extend_volume(self.testData.test_volume, 2)
-        expected = [
-            mock.call(*self.testData.LUN_EXTEND_CMD(fake.VOLUME_NAME, 2),
-                      poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                      poll=False)]
-        fake_cli.assert_has_calls(expected)
-
-    def test_extend_volume_has_snapshot(self):
-        commands = [self.testData.LUN_EXTEND_CMD(fake.VOLUME4_NAME, 2)]
-        results = [FAKE_ERROR_RETURN]
-        fake_cli = self.driverSetup(commands, results)
-
-        self.assertRaises(exception.EMCVnxCLICmdError,
-                          self.driver.extend_volume,
-                          self.testData.test_failed_volume,
-                          2)
-        expected = [
-            mock.call(*self.testData.LUN_EXTEND_CMD(fake.VOLUME4_NAME, 2),
-                      poll=False)]
-        fake_cli.assert_has_calls(expected)
-
-    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
-                new=utils.ZeroIntervalLoopingCall)
-    def test_extend_volume_failed(self):
-        commands = [self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME4_NAME)]
-        results = [self.testData.LUN_PROPERTY(fake.VOLUME4_NAME, size=2)]
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.cli._client.timeout = 0
-
-        self.assertRaises(exception.VolumeBackendAPIException,
-                          self.driver.extend_volume,
-                          self.testData.test_failed_volume,
-                          3)
-        expected = [
-            mock.call(
-                *self.testData.LUN_EXTEND_CMD(fake.VOLUME4_NAME, 3),
-                poll=False),
-            mock.call(
-                *self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME4_NAME),
-                poll=False)]
-        fake_cli.assert_has_calls(expected)
-
-    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
-                new=utils.ZeroIntervalLoopingCall)
-    def test_extend_preparing_volume(self):
-        commands = [self.testData.LUN_EXTEND_CMD(fake.VOLUME_NAME, 2),
-                    self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME)]
-        results = [[self.testData.LUN_PREP_ERROR(), SUCCEED],
-                   [self.testData.LUN_PROPERTY(fake.VOLUME_NAME, size=1,
-                                               operation='Preparing'),
-                    self.testData.LUN_PROPERTY(fake.VOLUME_NAME, size=1,
-                                               operation='Optimizing'),
-                    self.testData.LUN_PROPERTY(fake.VOLUME_NAME, size=1,
-                                               operation='None'),
-                    self.testData.LUN_PROPERTY(fake.VOLUME_NAME, size=2)]]
-        fake_cli = self.driverSetup(commands, results)
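-        # The first extend attempt hits a LUN in 'Preparing'; the driver
-        # polls until the operation settles to 'None', then reissues the
-        # extend (hence LUN_EXTEND_CMD appears twice below).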
-
-        self.driver.extend_volume(self.testData.test_volume, 2)
-        expected = [
-            mock.call(*self.testData.LUN_EXTEND_CMD(
-                fake.VOLUME_NAME, 2), poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                fake.VOLUME_NAME), poll=True),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                fake.VOLUME_NAME), poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                fake.VOLUME_NAME), poll=False),
-            mock.call(*self.testData.LUN_EXTEND_CMD(
-                fake.VOLUME_NAME, 2), poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                fake.VOLUME_NAME), poll=False)]
-        fake_cli.assert_has_calls(expected)
-
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={}))
-    def test_manage_existing(self):
-        data = self.testData
-        test_volume = data.test_volume_with_type
-        lun_rename_cmd = data.LUN_RENAME_CMD(
-            '1', test_volume['name'])
-        lun_list_cmd = data.LUN_LIST_ALL_CMD('1')
-
-        commands = (lun_rename_cmd, lun_list_cmd)
-        results = (SUCCEED, data.LIST_LUN_1_ALL())
-
-        self.configuration.storage_vnx_pool_name = (
-            self.testData.test_pool_name)
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.manage_existing(
-            self.testData.test_volume_with_type,
-            self.testData.test_existing_ref)
-        expected = [mock.call(*lun_rename_cmd, poll=False)]
-        fake_cli.assert_has_calls(expected)
-
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={}))
-    def test_manage_existing_source_name(self):
-        data = self.testData
-        test_volume = data.test_volume_with_type
-        lun_rename_cmd = data.LUN_RENAME_CMD(
-            '1', test_volume['name'])
-        lun_list_cmd = data.LUN_LIST_ALL_CMD('1')
-
-        commands = (lun_rename_cmd, lun_list_cmd)
-        results = (SUCCEED, data.LIST_LUN_1_ALL())
-
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.manage_existing(
-            data.test_volume_with_type,
-            data.test_existing_ref_source_name)
-        expected = [mock.call(*lun_rename_cmd, poll=False)]
-        fake_cli.assert_has_calls(expected)
-
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={
-            'storagetype:provisioning': 'compressed',
-            'compression_support': 'True'}))
-    @mock.patch("time.time", mock.Mock(return_value=123))
-    def test_manage_existing_success_retype_with_migration(self):
-        data = self.testData
-        test_volume = EMCVNXCLIDriverTestData.convert_volume(
-            data.test_volume_with_type)
-        test_volume.metadata = {}
-        test_volume.provider_location = build_provider_location(
-            1, 'lun', test_volume.name)
-
-        lun_rename_cmd = data.LUN_RENAME_CMD(
-            '1', test_volume['name'])
-        lun_list_cmd = data.LUN_LIST_ALL_CMD('1')
-        snap_existing_cmd = data.SNAP_LIST_CMD('1')
-        new_lun_name = test_volume['name'] + '-123'
-        lun_create_cmd = data.LUN_CREATION_CMD(
-            new_lun_name,
-            1,
-            'unit_test_pool',
-            'compressed')
-        lun3_status_cmd = data.LUN_PROPERTY_ALL_CMD(new_lun_name)
-        compression_cmd = data.ENABLE_COMPRESSION_CMD(3)
-        lun1_status_cmd = data.LUN_PROPERTY_ALL_CMD(test_volume['name'])
-        migration_cmd = data.MIGRATION_CMD(1, 3)
-        migration_verify_cmd = data.MIGRATION_VERIFY_CMD(1)
-
-        commands = (lun_list_cmd,
-                    snap_existing_cmd,
-                    lun_create_cmd,
-                    lun3_status_cmd,
-                    compression_cmd,
-                    lun1_status_cmd,
-                    migration_cmd,
-                    migration_verify_cmd,
-                    lun_rename_cmd)
-
-        cmd_success = ('', 0)
-        migrate_verify = ('The specified source LUN '
-                          'is not currently migrating', 23)
-        lun3_status = data.LUN_PROPERTY(new_lun_name, lunid=3)
-        lun1_status = data.LUN_PROPERTY(test_volume['name'], lunid=1)
-        results = (data.LIST_LUN_1_ALL(),
-                   ('no snap', 1023),
-                   cmd_success,
-                   lun3_status,
-                   cmd_success,
-                   lun1_status,
-                   cmd_success,
-                   migrate_verify,
-                   cmd_success)
-
-        fake_cli = self.driverSetup(commands, results)
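-        # Managing with a 'compressed' volume type triggers a migration:
-        # create a new compressed LUN, enable compression, migrate the
-        # source onto it, then rename it to the managed volume's name.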
-        self.driver.manage_existing(
-            test_volume,
-            {'source-id': 1})
-
-        expected = [mock.call(*lun_list_cmd, poll=False),
-                    mock.call(*snap_existing_cmd, poll=False),
-                    mock.call(*lun_create_cmd),
-                    mock.call(*lun3_status_cmd, poll=False),
-                    mock.call(*lun3_status_cmd, poll=False),
-                    mock.call(*lun3_status_cmd, poll=True),
-                    mock.call(*compression_cmd),
-                    mock.call(*migration_cmd, poll=True, retry_disable=True),
-                    mock.call(*migration_verify_cmd, poll=True),
-                    mock.call(*self.testData.LUN_PROPERTY_BY_ID(
-                        3), poll=True),
-                    mock.call(*lun_rename_cmd, poll=False)]
-        fake_cli.assert_has_calls(expected)
-
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={
-            'storagetype:provisioning': 'thick',
-            'storagetype:tiering': 'nomovement'}))
-    @mock.patch("time.time", mock.Mock(return_value=1))
-    def test_manage_existing_success_retype_change_tier(self):
-        data = self.testData
-        test_volume = data.test_volume_with_type
-        lun_rename_cmd = data.LUN_RENAME_CMD(
-            '1', test_volume['name'])
-        lun_list_cmd = data.LUN_LIST_ALL_CMD('1')
-        lun_tier_cmd = data.LUN_MODIFY_TIER(data.test_lun_id,
-                                            'optimizePool',
-                                            'noMovement')
-
-        commands = (lun_rename_cmd,
-                    lun_list_cmd,
-                    lun_tier_cmd)
-
-        cmd_success = ('', 0)
-
-        results = (cmd_success,
-                   data.LIST_LUN_1_ALL(),
-                   cmd_success)
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.manage_existing(
-            data.test_volume_with_type,
-            {'source-id': 1})
-
-        expected = [mock.call(*lun_list_cmd, poll=False),
-                    mock.call(*lun_tier_cmd),
-                    mock.call(*lun_rename_cmd, poll=False)]
-        fake_cli.assert_has_calls(expected)
-
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={}))
-    def test_manage_existing_lun_in_another_pool(self):
-        data = self.testData
-        get_lun_cmd = ('lun', '-list', '-l', data.test_lun_id,
-                       '-state', '-userCap', '-owner',
-                       '-attachedSnapshot', '-poolName')
-        lun_list_cmd = data.LUN_LIST_SPECS_CMD(data.test_lun_id)
-        invalid_pool_name = "fake_pool"
-        commands = (get_lun_cmd, lun_list_cmd)
-        lun_properties = data.LUN_PROPERTY('lun_name',
-                                           pool_name=invalid_pool_name)
-        results = (lun_properties, (data.LIST_LUN_1_SPECS, 0))
-
-        self.configuration.storage_vnx_pool_name = invalid_pool_name
-        fake_cli = self.driverSetup(commands, results)
-        # The LUN reports a different pool, so the reference is rejected.
-        ex = self.assertRaises(
-            exception.ManageExistingInvalidReference,
-            self.driver.manage_existing_get_size,
-            self.testData.test_volume_with_type,
-            self.testData.test_existing_ref)
-        self.assertTrue(
-            re.match(r'.*not managed by the host',
-                     ex.msg))
-        expected = [mock.call(*get_lun_cmd, poll=True)]
-        fake_cli.assert_has_calls(expected)
-
-    def test_manage_existing_get_size(self):
-        get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
-                       '-state', '-userCap', '-owner',
-                       '-attachedSnapshot', '-poolName')
-        test_size = 2
-        commands = [get_lun_cmd]
-        results = [self.testData.LUN_PROPERTY('lun_name', size=test_size)]
-
-        self.configuration.storage_vnx_pool_name = (
-            self.testData.test_pool_name)
-        fake_cli = self.driverSetup(commands, results)
-
-        get_size = self.driver.manage_existing_get_size(
-            self.testData.test_volume_with_type,
-            self.testData.test_existing_ref)
-        expected = [mock.call(*get_lun_cmd, poll=True)]
-        self.assertEqual(test_size, get_size)
-        fake_cli.assert_has_calls(expected)
-        # Test the function with an invalid reference.
-        invalid_ref = {'fake': 'fake_ref'}
-        self.assertRaises(exception.ManageExistingInvalidReference,
-                          self.driver.manage_existing_get_size,
-                          self.testData.test_volume_with_type,
-                          invalid_ref)
-
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
-        mock.Mock(return_value=1))
-    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
-                new=utils.ZeroIntervalLoopingCall)
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
-        "get_lun_by_name",
-        mock.Mock(return_value={'lun_id': 1, 'state': 'Ready',
-                                'operation': 'None',
-                                'wwn': 'fake_wwn'}))
-    @mock.patch(
-        "time.time",
-        mock.Mock(return_value=123456))
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'storagetype:provisioning': 'compressed'}))
-    def test_retype_compressed_to_deduplicated(self):
-        """Unit test for retype compressed to deduplicated."""
-        diff_data = {'encryption': {}, 'qos_specs': {},
-                     'extra_specs':
-                     {'storagetype:provsioning': ('compressed',
-                                                  'deduplicated')}}
-
-        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
-                         'deleted': False,
-                         'extra_specs': {'storagetype:provisioning':
-                                         'deduplicated'},
-                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
-
-        host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
-                          'capabilities':
-                          {'location_info': 'unit_test_pool|FNM00124500890',
-                           'volume_backend_name': 'pool_backend_1',
-                           'storage_protocol': 'iSCSI'}}
-
-        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
-        output_migrate_verify = (r'The specified source LUN '
-                                 'is not currently migrating', 23)
-        commands = [self.testData.NDU_LIST_CMD,
-                    self.testData.SNAP_LIST_CMD(),
-                    cmd_migrate_verify,
-                    self.testData.LUN_PROPERTY_BY_ID(1)]
-        results = [self.testData.NDU_LIST_RESULT,
-                   ('No snap', 1023),
-                   output_migrate_verify,
-                   self.testData.LIST_LUN_1_ALL(1, 'new_wwn')]
-        fake_cli1 = self.driverSetup(commands, results)
-        self.driver.cli.enablers = ['-Compression',
-                                    '-Deduplication',
-                                    '-ThinProvisioning',
-                                    '-FAST']
-        emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
-            return_value={'array_serial': "FNM00124500890"})
-        src_vol = self.testData.test_volume3
-        self.driver.retype(None, src_vol,
-                           new_type_data,
-                           diff_data,
-                           host_test_data)
-        expect_cmd1 = [
-            mock.call(*self.testData.SNAP_LIST_CMD(), poll=False),
-            mock.call(*self.testData.LUN_CREATION_CMD(
-                fake.VOLUME3_NAME + '-123456',
-                2, 'unit_test_pool', 'deduplicated', None)),
-            mock.call(*self.testData.MIGRATION_CMD(1, 1),
-                      retry_disable=True,
-                      poll=True),
-            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
-                      poll=True),
-            mock.call(*self.testData.LUN_PROPERTY_BY_ID(1),
-                      poll=True)]
-        fake_cli1.assert_has_calls(expect_cmd1)
-
-        self.configuration.ignore_pool_full_threshold = True
-        fake_cli2 = self.driverSetup(commands, results)
-        self.driver.cli.enablers = ['-Compression',
-                                    '-Deduplication',
-                                    '-ThinProvisioning',
-                                    '-FAST']
-        emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
-            return_value={'array_serial': "FNM00124500890"})
-
-        self.driver.retype(None, self.testData.test_volume3,
-                           new_type_data,
-                           diff_data,
-                           host_test_data)
-        expect_cmd2 = [
-            mock.call(*self.testData.LUN_CREATION_CMD(
-                fake.VOLUME3_NAME + '-123456',
-                2, 'unit_test_pool', 'deduplicated', None,
-                ignore_thresholds=True))]
-        fake_cli2.assert_has_calls(expect_cmd2)
-
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
-        mock.Mock(return_value=1))
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
-        "get_lun_by_name",
-        mock.Mock(return_value={'lun_id': 1}))
-    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
-                new=utils.ZeroIntervalLoopingCall)
-    @mock.patch(
-        "time.time",
-        mock.Mock(return_value=123456))
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(side_effect=[{'provisioning:type': 'thin'},
-                               {'provisioning:type': 'thick'}]))
-    def test_retype_turn_on_compression_and_autotiering(self):
-        """Unit test for retype a volume to compressed and auto tiering."""
-        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
-                         'deleted': False,
-                         'extra_specs': {'storagetype:provisioning':
-                                         'compressed',
-                                         'storagetype:tiering': 'auto'},
-                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
-
-        host_test_data = {'host': 'host@backendsec#unit_test_pool',
-                          'capabilities':
-                          {'location_info': 'unit_test_pool|FNM00124500890',
-                           'volume_backend_name': 'pool_backend_1',
-                           'storage_protocol': 'iSCSI'}}
-        commands = [self.testData.NDU_LIST_CMD,
-                    self.testData.SNAP_LIST_CMD()]
-        results = [self.testData.NDU_LIST_RESULT,
-                   ('No snap', 1023)]
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.cli.enablers = ['-Compression',
-                                    '-Deduplication',
-                                    '-ThinProvisioning',
-                                    '-FAST']
-        emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
-            return_value={'array_serial': "FNM00124500890"})
-        # Retype a thin volume to a compressed volume
-        self.driver.retype(None, self.testData.test_volume3,
-                           new_type_data, None, host_test_data)
-        expect_cmd = [
-            mock.call(*self.testData.SNAP_LIST_CMD(), poll=False),
-            mock.call(*self.testData.ENABLE_COMPRESSION_CMD(1)),
-            mock.call(*self.testData.MODIFY_TIERING_CMD(fake.VOLUME3_NAME,
-                                                        'auto'))
-        ]
-        fake_cli.assert_has_calls(expect_cmd)
-
-        # Retype a thick volume to a compressed volume
-        self.driver.retype(None, self.testData.test_volume3,
-                           new_type_data, None, host_test_data)
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
-        mock.Mock(return_value=1))
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
-        "get_lun_by_name",
-        mock.Mock(return_value={'lun_id': 1}))
-    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
-                new=utils.ZeroIntervalLoopingCall)
-    @mock.patch(
-        "time.time",
-        mock.Mock(return_value=123456))
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'provisioning:type': 'thin'}))
-    def test_retype_turn_on_compression_volume_has_snap(self):
-        """Unit test for retype a volume which has snap to compressed."""
-        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
-                         'deleted': False,
-                         'extra_specs': {'storagetype:provisioning':
-                                         'compressed'},
-                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
-
-        host_test_data = {'host': 'host@backendsec#unit_test_pool',
-                          'capabilities':
-                          {'location_info': 'unit_test_pool|FNM00124500890',
-                           'volume_backend_name': 'pool_backend_1',
-                           'storage_protocol': 'iSCSI'}}
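-        # The fake CLI reports an existing snapshot ('Has snap', exit 0),
-        # which blocks turning on compression for the LUN.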
-        commands = [self.testData.NDU_LIST_CMD,
-                    self.testData.SNAP_LIST_CMD()]
-        results = [self.testData.NDU_LIST_RESULT,
-                   ('Has snap', 0)]
-        self.driverSetup(commands, results)
-        self.driver.cli.enablers = ['-Compression',
-                                    '-Deduplication',
-                                    '-ThinProvisioning',
-                                    '-FAST']
-        emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
-            return_value={'array_serial': "FNM00124500890"})
-        # Retype a thin volume that has a snapshot to a compressed volume
-        retyped = self.driver.retype(None, self.testData.test_volume3,
-                                     new_type_data, None, host_test_data)
-        self.assertFalse(retyped,
-                         "Retype should fail because "
-                         "the volume has a snapshot")
-
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
-        mock.Mock(return_value=1))
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
-        "get_lun_by_name",
-        mock.Mock(return_value={'lun_id': 1, 'state': 'Ready',
-                                'operation': 'None',
-                                'wwn': 'fake_wwn'}))
-    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
-                new=utils.ZeroIntervalLoopingCall)
-    @mock.patch(
-        "time.time",
-        mock.Mock(return_value=123456))
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'storagetype:provisioning': 'deduplicated',
-                                'storagetype:pool': 'unit_test_pool'}))
-    def test_retype_pool_changed_dedup_to_compressed_auto(self):
-        """Test retype from dedup to compressed and auto tiering.
-
-        Unit test for retype dedup to compressed and auto tiering
-        and pool changed
-        """
-        diff_data = {'encryption': {}, 'qos_specs': {},
-                     'extra_specs':
-                     {'storagetype:provisioning': ('deduplicated',
-                                                   'compressed'),
-                      'storagetype:tiering': (None, 'auto'),
-                      'storagetype:pool': ('unit_test_pool',
-                                           'unit_test_pool2')}}
-
-        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
-                         'deleted': False,
-                         'extra_specs': {'storagetype:provisioning':
-                                             'compressed',
-                                         'storagetype:tiering': 'auto',
-                                         'storagetype:pool':
-                                             'unit_test_pool2'},
-                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
-
-        host_test_data = {'host':
-                          'ubuntu-server12@pool_backend_1#unit_test_pool2',
-                          'capabilities':
-                          {'location_info': 'unit_test_pool2|FNM00124500890',
-                           'volume_backend_name': 'pool_backend_1',
-                           'storage_protocol': 'iSCSI'}}
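-        # The pool changes (unit_test_pool -> unit_test_pool2), so the
-        # driver must create a LUN with the new specs and migrate to it.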
-
-        commands = [self.testData.NDU_LIST_CMD,
-                    self.testData.SNAP_LIST_CMD(),
-                    self.testData.MIGRATION_VERIFY_CMD(1),
-                    self.testData.LUN_PROPERTY_BY_ID(1)]
-        results = [self.testData.NDU_LIST_RESULT,
-                   ('No snap', 1023),
-                   ('The specified source LUN is not currently migrating', 23),
-                   self.testData.LIST_LUN_1_ALL(1, 'new_wwn')]
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.cli.enablers = ['-Compression',
-                                    '-Deduplication',
-                                    '-ThinProvisioning',
-                                    '-FAST']
-        emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
-            return_value={'array_serial': "FNM00124500890"})
-        src_vol = self.testData.test_volume3
-        self.driver.retype(None, src_vol,
-                           new_type_data,
-                           diff_data,
-                           host_test_data)
-        expect_cmd = [
-            mock.call(*self.testData.SNAP_LIST_CMD(), poll=False),
-            mock.call(*self.testData.LUN_CREATION_CMD(
-                fake.VOLUME3_NAME + '-123456', 2, 'unit_test_pool2',
-                'compressed', 'auto')),
-            mock.call(*self.testData.ENABLE_COMPRESSION_CMD(1)),
-            mock.call(*self.testData.MIGRATION_CMD(),
-                      retry_disable=True,
-                      poll=True),
-            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
-                      poll=True),
-            mock.call(*self.testData.LUN_PROPERTY_BY_ID(1),
-                      poll=True)]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
-        mock.Mock(return_value=1))
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
-        "get_lun_by_name",
-        mock.Mock(return_value={'lun_id': 1}))
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'storagetype:provisioning': 'compressed',
-                                'storagetype:pool': 'unit_test_pool',
-                                'storagetype:tiering': 'auto'}))
-    def test_retype_compressed_auto_to_compressed_nomovement(self):
-        """Unit test for retype only tiering changed."""
-        diff_data = {'encryption': {}, 'qos_specs': {},
-                     'extra_specs':
-                     {'storagetype:tiering': ('auto', 'nomovement')}}
-
-        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
-                         'deleted': False,
-                         'extra_specs': {'storagetype:provisioning':
-                                             'compressed',
-                                         'storagetype:tiering': 'nomovement',
-                                         'storagetype:pool':
-                                             'unit_test_pool'},
-                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
-
-        host_test_data = {
-            'host': 'host@backendsec#unit_test_pool',
-            'capabilities': {
-                'location_info': 'unit_test_pool|FNM00124500890',
-                'volume_backend_name': 'pool_backend_1',
-                'storage_protocol': 'iSCSI'}}
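-        # Same pool and provisioning; only the tiering policy changes, so
-        # a single 'lun -modify' is expected and no migration.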
-
-        commands = [self.testData.NDU_LIST_CMD,
-                    self.testData.SNAP_LIST_CMD()]
-        results = [self.testData.NDU_LIST_RESULT,
-                   ('No snap', 1023)]
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.cli.enablers = ['-Compression',
-                                    '-Deduplication',
-                                    '-ThinProvisioning',
-                                    '-FAST']
-        emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
-            return_value={'array_serial': "FNM00124500890"})
-
-        self.driver.retype(None, self.testData.test_volume3,
-                           new_type_data,
-                           diff_data,
-                           host_test_data)
-        expect_cmd = [
-            mock.call(
-                'lun', '-modify', '-name', fake.VOLUME3_NAME,
-                '-o', '-initialTier',
-                'optimizePool', '-tieringPolicy', 'noMovement')]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
-        mock.Mock(return_value=1))
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
-        "get_lun_by_name",
-        mock.Mock(return_value={'lun_id': 1}))
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'storagetype:provisioning': 'thin',
-                                'storagetype:pool': 'unit_test_pool'}))
-    def test_retype_compressed_to_thin_cross_array(self):
-        """Unit test for retype cross array."""
-        diff_data = {'encryption': {}, 'qos_specs': {},
-                     'extra_specs':
-                     {'storagetype:provisioning': ('compressed', 'thin')}}
-
-        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
-                         'deleted': False,
-                         'extra_specs': {'storagetype:provisioning': 'thin',
-                                         'storagetype:pool':
-                                             'unit_test_pool'},
-                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
-
-        host_test_data = {
-            'host': 'ubuntu-server12@pool_backend_2#unit_test_pool',
-            'capabilities':
-                {'location_info': 'unit_test_pool|FNM00124500891',
-                 'volume_backend_name': 'pool_backend_2',
-                 'storage_protocol': 'iSCSI'}}
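-        # The target serial FNM00124500891 differs from the mocked array
-        # serial FNM00124500890, so the retype must be rejected.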
-
-        commands = [self.testData.NDU_LIST_CMD,
-                    self.testData.SNAP_LIST_CMD()]
-        results = [self.testData.NDU_LIST_RESULT,
-                   ('No snap', 1023)]
-        self.driverSetup(commands, results)
-        self.driver.cli.enablers = ['-Compression',
-                                    '-Deduplication',
-                                    '-ThinProvisioning',
-                                    '-FAST']
-        emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
-            return_value={'array_serial': "FNM00124500890"})
-
-        retyped = self.driver.retype(None, self.testData.test_volume3,
-                                     new_type_data, diff_data,
-                                     host_test_data)
-        self.assertFalse(retyped,
-                         "Retype should failed due to"
-                         " different protocol or array")
-
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
-        mock.Mock(return_value=1))
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
-        "get_lun_by_name",
-        mock.Mock(return_value={'lun_id': 1, 'state': 'Ready',
-                                'operation': 'None',
-                                'wwn': 'fake_wwn'}))
-    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
-                new=utils.ZeroIntervalLoopingCall)
-    @mock.patch(
-        "time.time",
-        mock.Mock(return_value=123456))
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'storagetype:provisioning': 'thin',
-                                'storagetype:tiering': 'auto',
-                                'storagetype:pool': 'unit_test_pool'}))
-    def test_retype_thin_auto_to_dedup_diff_protocol(self):
-        """Unit test for retype different protocol."""
-        diff_data = {'encryption': {}, 'qos_specs': {},
-                     'extra_specs':
-                     {'storagetype:provisioning': ('thin', 'deduplicated'),
-                      'storagetype:tiering': ('auto', None)}}
-
-        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
-                         'deleted': False,
-                         'extra_specs': {'storagetype:provisioning':
-                                             'deduplicated',
-                                         'storagetype:pool':
-                                             'unit_test_pool'},
-                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
-
-        host_test_data = {
-            'host': 'ubuntu-server12@pool_backend_2#unit_test_pool',
-            'capabilities':
-                {'location_info': 'unit_test_pool|FNM00124500890',
-                 'volume_backend_name': 'pool_backend_2',
-                 'storage_protocol': 'FC'}}
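-        # The target host reports FC while this backend is iSCSI, but the
-        # array serial matches, so retype proceeds via in-array migration.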
-
-        commands = [self.testData.NDU_LIST_CMD,
-                    self.testData.SNAP_LIST_CMD(),
-                    self.testData.MIGRATION_VERIFY_CMD(1),
-                    self.testData.LUN_PROPERTY_BY_ID(1)]
-        results = [self.testData.NDU_LIST_RESULT,
-                   ('No snap', 1023),
-                   ('The specified source LUN is not currently migrating', 23),
-                   self.testData.LIST_LUN_1_ALL(1, 'new_wwn')]
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.cli.enablers = ['-Compression',
-                                    '-Deduplication',
-                                    '-ThinProvisioning',
-                                    '-FAST']
-        emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
-            return_value={'array_serial': "FNM00124500890"})
-        src_vol = self.testData.test_volume3
-        self.driver.retype(None, src_vol,
-                           new_type_data,
-                           diff_data,
-                           host_test_data)
-        expect_cmd = [
-            mock.call(*self.testData.SNAP_LIST_CMD(), poll=False),
-            mock.call(*self.testData.LUN_CREATION_CMD(
-                fake.VOLUME3_NAME + '-123456',
-                2, 'unit_test_pool', 'deduplicated', None)),
-            mock.call(*self.testData.MIGRATION_CMD(),
-                      retry_disable=True,
-                      poll=True),
-            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
-                      poll=True),
-            mock.call(*self.testData.LUN_PROPERTY_BY_ID(1),
-                      poll=True)]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
-        mock.Mock(return_value=1))
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
-        "get_lun_by_name",
-        mock.Mock(return_value={'lun_id': 1, 'state': 'Ready',
-                                'operation': 'None'}))
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'storagetype:provisioning': 'thin',
-                                'storagetype:tiering': 'auto',
-                                'storagetype:pool': 'unit_test_pool'}))
-    def test_retype_thin_auto_has_snap_to_thick_highestavailable(self):
-        """Unit test for retype volume has snap when need migration."""
-        diff_data = {'encryption': {}, 'qos_specs': {},
-                     'extra_specs':
-                     {'storagetype:provisioning': ('thin', None),
-                      'storagetype:tiering': ('auto', 'highestAvailable')}}
-
-        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
-                         'deleted': False,
-                         'extra_specs': {'storagetype:tiering':
-                                             'highestAvailable',
-                                         'storagetype:pool':
-                                             'unit_test_pool'},
-                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
-
-        host_test_data = {
-            'host': 'ubuntu-server12@pool_backend_1#unit_test_pool',
-            'capabilities':
-                {'location_info': 'unit_test_pool|FNM00124500890',
-                 'volume_backend_name': 'pool_backend_1',
-                 'storage_protocol': 'iSCSI'}}
-
-        commands = [self.testData.NDU_LIST_CMD,
-                    self.testData.SNAP_LIST_CMD()]
-        results = [self.testData.NDU_LIST_RESULT,
-                   ('Has snap', 0)]
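-        # The provisioning change requires a migration, but the reported
-        # snapshot ('Has snap') blocks it, so the retype must fail.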
-        self.driverSetup(commands, results)
-        self.driver.cli.enablers = ['-Compression',
-                                    '-Deduplication',
-                                    '-ThinProvisioning',
-                                    '-FAST']
-        emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
-            return_value={'array_serial': "FNM00124500890"})
-
-        retyped = self.driver.retype(None, self.testData.test_volume3,
-                                     new_type_data,
-                                     diff_data,
-                                     host_test_data)
-        self.assertFalse(retyped,
-                         "Retype should failed due to"
-                         " different protocol or array")
-
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
-        mock.Mock(return_value=1))
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
-        "get_lun_by_name",
-        mock.Mock(return_value={'lun_id': 1, 'state': 'Ready',
-                                'operation': 'None'}))
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'storagetype:provisioning': 'thin',
-                                'storagetype:tiering': 'auto',
-                                'storagetype:pool': 'unit_test_pool'}))
-    def test_retype_thin_auto_to_thin_auto(self):
-        """Unit test for retype volume which has no change."""
-        diff_data = {'encryption': {}, 'qos_specs': {},
-                     'extra_specs': {}}
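-        # Empty extra_specs diff: the retype is expected to be a no-op.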
-
-        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
-                         'deleted': False,
-                         'extra_specs': {'storagetype:tiering':
-                                             'auto',
-                                         'storagetype:provisioning':
-                                             'thin'},
-                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
-
-        host_test_data = {
-            'host': 'ubuntu-server12@pool_backend_1#unit_test_pool',
-            'capabilities':
-                {'location_info': 'unit_test_pool|FNM00124500890',
-                 'volume_backend_name': 'pool_backend_1',
-                 'storage_protocol': 'iSCSI'}}
-
-        commands = [self.testData.NDU_LIST_CMD]
-        results = [self.testData.NDU_LIST_RESULT]
-        self.driverSetup(commands, results)
-        self.driver.cli.enablers = ['-Compression',
-                                    '-Deduplication',
-                                    '-ThinProvisioning',
-                                    '-FAST']
-        emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
-            return_value={'array_serial': "FNM00124500890"})
-
-        self.driver.retype(None, self.testData.test_volume3,
-                           new_type_data,
-                           diff_data,
-                           host_test_data)
-
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper."
-        "migrate_lun_with_verification",
-        mock.Mock(return_value=True))
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper."
-        "create_lun_with_advance_feature",
-        mock.Mock(return_value={'lun_id': '1',
-                                'wwn': 'fake_wwn'}))
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'storagetype:provisioning': 'thin',
-                                'copytype:snap': 'true'}))
-    def test_retype_copytype_snap_true_to_false(self):
-        diff_data = {'encryption': {}, 'qos_specs': {},
-                     'extra_specs':
-                     {'copytype:snap': ('true',
-                                        'false')}}
-
-        new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
-                         'deleted': False,
-                         'extra_specs': {'storagetype:provisioning': 'thin',
-                                         'copytype:snap': 'false'},
-                         'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
-
-        host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
-                          'capabilities':
-                          {'location_info': 'unit_test_pool|FNM00124500890',
-                           'volume_backend_name': 'pool_backend_1',
-                           'storage_protocol': 'iSCSI'}}
-
-        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
-        output_migrate_verify = (r'The specified source LUN '
-                                 'is not currently migrating', 23)
-        commands = [self.testData.NDU_LIST_CMD,
-                    self.testData.SNAP_LIST_CMD(),
-                    cmd_migrate_verify]
-        results = [self.testData.NDU_LIST_RESULT,
-                   ('No snap', 1023),
-                   output_migrate_verify]
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.cli.enablers = ['-Compression',
-                                    '-Deduplication',
-                                    '-ThinProvisioning',
-                                    '-FAST']
-        emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
-            return_value={'array_serial': "FNM00124500890"})
-
-        vol = EMCVNXCLIDriverTestData.convert_volume(
-            self.testData.test_volume3)
-        vol['provider_location'] = 'system^FNM11111|type^smp|id^1'
-        vol['volume_metadata'] = [{'key': 'snapcopy', 'value': 'True'}]
-        tmp_snap = 'snap-as-vol-%s' % vol['id']
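-        # A snap-copy volume (type^smp) is converted to a regular LUN via
-        # migration; the backing temp snapshot is deleted afterwards.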
-        ret = self.driver.retype(None, vol,
-                                 new_type_data,
-                                 diff_data,
-                                 host_test_data)
-        self.assertEqual(tuple, type(ret))
-        self.assertTrue(ret[0])
-        self.assertIn('type^lun', ret[1]['provider_location'])
-        expect_cmd = [
-            mock.call(*self.testData.SNAP_LIST_CMD(), poll=False),
-            mock.call(*self.testData.SNAP_DELETE_CMD(tmp_snap),
-                      poll=True)]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'fast_cache_enabled': 'True'}))
-    def test_create_volume_with_fastcache(self):
-        """Test creating volume with fastcache enabled."""
-        commands = [self.testData.NDU_LIST_CMD,
-                    self.testData.POOL_PROPERTY_W_FASTCACHE_CMD,
-                    self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                    ]
-        results = [self.testData.NDU_LIST_RESULT,
-                   self.testData.POOL_PROPERTY_W_FASTCACHE,
-                   self.testData.LUN_PROPERTY(fake.VOLUME_NAME, True),
-                   ]
-        fake_cli = self.driverSetup(commands, results)
-
-        lun_info = {'lun_name': fake.VOLUME_NAME,
-                    'lun_id': 1,
-                    'pool': "unit_test_pool",
-                    'attached_snapshot': "N/A",
-                    'owner': "A",
-                    'total_capacity_gb': 1.0,
-                    'state': "Ready",
-                    'status': 'OK(0x0)',
-                    'operation': 'None'
-                    }
-
-        cli_helper = self.driver.cli._client
-        cli_helper.command_execute = fake_cli
-        cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
-        cli_helper.get_enablers_on_array = mock.Mock(return_value="-FASTCache")
-        cli_helper.get_pool_list = mock.Mock(return_value=[{
-            'lun_nums': 1000,
-            'total_capacity_gb': 10,
-            'free_capacity_gb': 5,
-            'provisioned_capacity_gb': 8,
-            'pool_name': "unit_test_pool",
-            'fast_cache_enabled': 'True',
-            'state': 'Ready',
-            'pool_full_threshold': 70.0}])
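-        # The mocked pool reports fast_cache_enabled, which must surface
-        # in the pool stats after update_volume_stats().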
-
-        self.driver.update_volume_stats()
-        self.driver.create_volume(self.testData.test_volume_with_type)
-        pool_stats = self.driver.cli.stats['pools'][0]
-        self.assertEqual('True', pool_stats['fast_cache_enabled'])
-        expect_cmd = [
-            mock.call('connection', '-getport', '-address', '-vlanid',
-                      poll=False),
-            mock.call('-np', 'lun', '-create', '-capacity',
-                      1, '-sq', 'gb', '-poolName',
-                      self.testData.test_pool_name,
-                      '-name', fake.VOLUME_NAME, '-type', 'NonThin')]
-
-        fake_cli.assert_has_calls(expect_cmd)
-
-    def test_get_lun_id_provider_location_exists(self):
-        """Test function get_lun_id."""
-        self.driverSetup()
-        volume_01 = {
-            'name': 'vol_01',
-            'size': 1,
-            'volume_name': 'vol_01',
-            'id': fake.VOLUME_ID,
-            'name_id': fake.VOLUME_ID,
-            'provider_location': 'system^FNM11111|type^lun|id^4',
-            'project_id': fake.PROJECT_ID,
-            'display_name': 'vol_01',
-            'display_description': 'test volume',
-            'volume_type_id': None}
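-        # The LUN id is parsed directly from provider_location ('id^4').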
-        self.assertEqual(4, self.driver.cli.get_lun_id(volume_01))
-
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
-        "get_lun_by_name",
-        mock.Mock(return_value={'lun_id': 2}))
-    def test_get_lun_id_provider_location_has_no_lun_id(self):
-        """Test function get_lun_id."""
-        self.driverSetup()
-        volume_02 = {
-            'name': fake.VOLUME2_NAME,
-            'size': 1,
-            'volume_name': fake.VOLUME2_NAME,
-            'id': fake.VOLUME2_ID,
-            'provider_location': 'system^FNM11111|type^lun|',
-            'project_id': fake.PROJECT_ID,
-            'display_name': 'vol_02',
-            'display_description': 'test volume',
-            'volume_type_id': None}
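-        # provider_location carries no LUN id, so the driver falls back
-        # to a lookup by name (mocked above to return lun_id 2).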
-        self.assertEqual(2, self.driver.cli.get_lun_id(volume_02))
-
-    def test_create_consistency_group(self):
-        cg_name = self.testData.test_cg['id']
-        commands = [self.testData.CREATE_CONSISTENCYGROUP_CMD(cg_name),
-                    self.testData.GET_CG_BY_NAME_CMD(cg_name)]
-        results = [SUCCEED, self.testData.CG_PROPERTY(cg_name)]
-        fake_cli = self.driverSetup(commands, results)
-
-        model_update = self.driver.create_consistencygroup(
-            None, self.testData.test_cg)
-        self.assertDictMatch({'status': (
-            fields.ConsistencyGroupStatus.AVAILABLE)}, model_update)
-        expect_cmd = [
-            mock.call(
-                *self.testData.CREATE_CONSISTENCYGROUP_CMD(
-                    cg_name), poll=False),
-            mock.call(
-                *self.testData.GET_CG_BY_NAME_CMD(cg_name))]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    def test_create_consistency_group_retry(self):
-        cg_name = self.testData.test_cg['id']
-        commands = [self.testData.CREATE_CONSISTENCYGROUP_CMD(cg_name),
-                    self.testData.GET_CG_BY_NAME_CMD(cg_name)]
-        results = [SUCCEED,
-                   [self.testData.CG_NOT_FOUND(),
-                    self.testData.CG_PROPERTY(cg_name)]]
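-        # The first CG query returns 'not found', so the driver retries
-        # the query, hence the second GET_CG_BY_NAME call expected below.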
-        fake_cli = self.driverSetup(commands, results)
-        model_update = self.driver.create_consistencygroup(
-            None, self.testData.test_cg)
-        self.assertDictMatch({'status': (
-            fields.ConsistencyGroupStatus.AVAILABLE)}, model_update)
-        expect_cmd = [
-            mock.call(
-                *self.testData.CREATE_CONSISTENCYGROUP_CMD(
-                    cg_name), poll=False),
-            mock.call(
-                *self.testData.GET_CG_BY_NAME_CMD(cg_name)),
-            mock.call(
-                *self.testData.GET_CG_BY_NAME_CMD(cg_name))]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch(
-        "cinder.volume.volume_types.get_volume_type_extra_specs",
-        mock.Mock(side_effect=[{'storagetype:provisioning': 'thin'},
-                               {'storagetype:provisioning': 'compressed'}]))
-    def test_create_consistency_group_failed_with_compression(self):
-        self.driverSetup([], [])
-        self.assertRaisesRegex(exception.VolumeBackendAPIException,
-                               "Failed to create consistency group " +
-                               fake.CONSISTENCY_GROUP_ID +
-                               " because VNX consistency group cannot "
-                               "accept compressed LUNs as members.",
-                               self.driver.create_consistencygroup,
-                               None,
-                               self.testData.test_cg_with_type)
-
-    def test_delete_consistency_group(self):
-        cg_name = self.testData.test_cg['id']
-        commands = [self.testData.DELETE_CONSISTENCYGROUP_CMD(cg_name),
-                    self.testData.LUN_DELETE_CMD(fake.VOLUME_NAME)]
-        results = [SUCCEED, SUCCEED]
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.delete_consistencygroup(
-            None, self.testData.test_cg,
-            self.testData.CONSISTENCY_GROUP_VOLUMES())
-        expect_cmd = [
-            mock.call(
-                *self.testData.DELETE_CONSISTENCYGROUP_CMD(
-                    cg_name)),
-            mock.call(*self.testData.LUN_DELETE_CMD(fake.VOLUME_NAME)),
-            mock.call(*self.testData.LUN_DELETE_CMD(fake.VOLUME_NAME))]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    def test_create_cgsnapshot(self):
-        cgsnapshot = self.testData.test_cgsnapshot['id']
-        cg_name = self.testData.test_cgsnapshot['consistencygroup_id']
-        commands = [self.testData.CREATE_CG_SNAPSHOT(cg_name, cgsnapshot),
-                    self.testData.GET_SNAP(cgsnapshot)]
-        results = [SUCCEED,
-                   SUCCEED]
-        fake_cli = self.driverSetup(commands, results)
-        snapshot_obj = fake_snapshot.fake_snapshot_obj(
-            self.testData.SNAPS_IN_SNAP_GROUP())
-        snapshot_obj.consistencygroup_id = cg_name
-        cgsnap = fake_cgsnapshot.fake_cgsnapshot_obj(
-            None, **self.testData.test_cgsnapshot)
-        self.driver.create_cgsnapshot(None, cgsnap,
-                                      [snapshot_obj])
-        expect_cmd = [
-            mock.call(
-                *self.testData.CREATE_CG_SNAPSHOT(
-                    cg_name, cgsnapshot)),
-            mock.call(
-                *self.testData.GET_SNAP(cgsnapshot))]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    def test_create_cgsnapshot_retry(self):
-        cgsnapshot = self.testData.test_cgsnapshot['id']
-        cg_name = self.testData.test_cgsnapshot['consistencygroup_id']
-        commands = [self.testData.CREATE_CG_SNAPSHOT(cg_name, cgsnapshot),
-                    self.testData.GET_SNAP(cgsnapshot)]
-        results = [SUCCEED,
-                   [self.testData.SNAP_NOT_EXIST(), SUCCEED]]
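-        # The snapshot is not visible on the first GET_SNAP, so the
-        # driver polls again before reporting success.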
-        fake_cli = self.driverSetup(commands, results)
-        snapshot_obj = fake_snapshot.fake_snapshot_obj(
-            self.testData.SNAPS_IN_SNAP_GROUP())
-        snapshot_obj.consistencygroup_id = cg_name
-        cgsnap = fake_cgsnapshot.fake_cgsnapshot_obj(
-            None, **self.testData.test_cgsnapshot)
-        self.driver.create_cgsnapshot(None, cgsnap,
-                                      [snapshot_obj])
-        expect_cmd = [
-            mock.call(
-                *self.testData.CREATE_CG_SNAPSHOT(
-                    cg_name, cgsnapshot)),
-            mock.call(
-                *self.testData.GET_SNAP(cgsnapshot)),
-            mock.call(
-                *self.testData.GET_SNAP(cgsnapshot))]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    def test_delete_cgsnapshot(self):
-        snap_name = self.testData.test_cgsnapshot['id']
-        commands = [self.testData.DELETE_CG_SNAPSHOT(snap_name)]
-        results = [SUCCEED]
-        fake_cli = self.driverSetup(commands, results)
-        snapshot_obj = fake_snapshot.fake_snapshot_obj(
-            self.testData.SNAPS_IN_SNAP_GROUP())
-        cg_name = self.testData.test_cgsnapshot['consistencygroup_id']
-        snapshot_obj.consistencygroup_id = cg_name
-        cg_snap = fake_cgsnapshot.fake_cgsnapshot_obj(
-            None, **self.testData.test_cgsnapshot)
-        self.driver.delete_cgsnapshot(None,
-                                      cg_snap,
-                                      [snapshot_obj])
-        expect_cmd = [
-            mock.call(
-                *self.testData.DELETE_CG_SNAPSHOT(
-                    snap_name))]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
-                new=utils.ZeroIntervalLoopingCall)
-    def test_add_volume_to_cg(self):
-        commands = [self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                    self.testData.ADD_LUN_TO_CG_CMD('cg_id', 1),
-                    ]
-        results = [self.testData.LUN_PROPERTY(fake.VOLUME_NAME, True),
-                   SUCCEED]
-        fake_cli = self.driverSetup(commands, results)
-
-        self.driver.create_volume(self.testData.test_volume_cg)
-
-        expect_cmd = [
-            mock.call(*self.testData.LUN_CREATION_CMD(
-                fake.VOLUME_NAME, 1,
-                'unit_test_pool',
-                None, None, poll=False)),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                      poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                      poll=False),
-            mock.call(*self.testData.ADD_LUN_TO_CG_CMD(
-                fake.CONSISTENCY_GROUP_ID, 1), poll=False)]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    def test_create_cloned_volume_from_consistency_group(self):
-        cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD(
-            build_migration_dest_name(fake.VOLUME_NAME))
-        cmd_dest_p = self.testData.LUN_PROPERTY_ALL_CMD(
-            build_migration_dest_name(fake.VOLUME_NAME))
-        output_dest = self.testData.LUN_PROPERTY(
-            build_migration_dest_name(fake.VOLUME_NAME))
-        cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
-        output_migrate = ("", 0)
-        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
-        output_migrate_verify = (r'The specified source LUN '
-                                 'is not currently migrating', 23)
-        cg_name = self.testData.test_cgsnapshot['consistencygroup_id']
-
-        commands = [cmd_dest, cmd_dest_p, cmd_migrate,
-                    cmd_migrate_verify]
-        results = [output_dest, output_dest, output_migrate,
-                   output_migrate_verify]
-        fake_cli = self.driverSetup(commands, results)
-        test_volume_clone_cg = EMCVNXCLIDriverTestData.convert_volume(
-            self.testData.test_volume_clone_cg)
-        self.driver.create_cloned_volume(test_volume_clone_cg,
-                                         self.testData.test_clone_cg)
-        tmp_cgsnapshot = 'tmp-snap-' + self.testData.test_volume['id']
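-        # Cloning a CG member: take a temporary group snapshot, attach it
-        # to an SMP, migrate it to a real LUN, then drop the temp snapshot.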
-        expect_cmd = [
-            mock.call(
-                *self.testData.CREATE_CG_SNAPSHOT(cg_name, tmp_cgsnapshot)),
-            mock.call(
-                *self.testData.GET_SNAP(tmp_cgsnapshot)),
-            mock.call(*self.testData.SNAP_MP_CREATE_CMD(
-                name=fake.VOLUME_NAME, source=fake.VOLUME2_NAME),
-                poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                      poll=True),
-            mock.call(
-                *self.testData.SNAP_ATTACH_CMD(
-                    name=fake.VOLUME_NAME, snapName=tmp_cgsnapshot)),
-            mock.call(*self.testData.LUN_CREATION_CMD(
-                build_migration_dest_name(fake.VOLUME_NAME), 1,
-                'unit_test_pool', None, None)),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                build_migration_dest_name(fake.VOLUME_NAME)), poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                build_migration_dest_name(fake.VOLUME_NAME)), poll=False),
-            mock.call(*self.testData.MIGRATION_CMD(1, 1),
-                      retry_disable=True,
-                      poll=True),
-            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
-                      poll=True),
-            mock.call(*self.testData.LUN_PROPERTY_BY_ID(1), poll=True),
-            mock.call(*self.testData.DELETE_CG_SNAPSHOT(tmp_cgsnapshot))]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    def test_create_volume_from_cgsnapshot(self):
-        cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD(
-            build_migration_dest_name('vol2'))
-        cmd_dest_np = self.testData.LUN_PROPERTY_ALL_CMD(
-            build_migration_dest_name('vol2'))
-        output_dest = self.testData.LUN_PROPERTY(
-            build_migration_dest_name('vol2'))
-        cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
-        output_migrate = ("", 0)
-        cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
-        output_migrate_verify = (r'The specified source LUN '
-                                 'is not currently migrating', 23)
-        commands = [cmd_dest, cmd_dest_np, cmd_migrate,
-                    cmd_migrate_verify]
-        results = [output_dest, output_dest, output_migrate,
-                   output_migrate_verify]
-        fake_cli = self.driverSetup(commands, results)
-        test_snapshot = EMCVNXCLIDriverTestData.convert_snapshot(
-            self.testData.test_member_cgsnapshot)
-        self.driver.create_volume_from_snapshot(
-            self.testData.volume_in_cg, test_snapshot)
-        expect_cmd = [
-            mock.call(
-                *self.testData.SNAP_MP_CREATE_CMD(
-                    name=fake.VOLUME2_NAME, source=fake.VOLUME_NAME),
-                poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME2_NAME),
-                      poll=True),
-            mock.call(
-                *self.testData.SNAP_ATTACH_CMD(
-                    name=fake.VOLUME2_NAME, snapName=fake.CGSNAPSHOT_ID)),
-            mock.call(*self.testData.LUN_CREATION_CMD(
-                build_migration_dest_name(fake.VOLUME2_NAME), 1,
-                'unit_test_pool', None, None)),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                build_migration_dest_name(fake.VOLUME2_NAME)), poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                build_migration_dest_name(fake.VOLUME2_NAME)), poll=False),
-            mock.call(*self.testData.MIGRATION_CMD(1, 1),
-                      retry_disable=True,
-                      poll=True),
-            mock.call(*self.testData.MIGRATION_VERIFY_CMD(1),
-                      poll=True)]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    def test_update_consistencygroup(self):
-        cg_name = self.testData.test_cg['id']
-        commands = [self.testData.GET_CG_BY_NAME_CMD(cg_name)]
-        results = [self.testData.CG_PROPERTY(cg_name)]
-        fake_cli = self.driverSetup(commands, results)
-        (model_update, add_vols, remove_vols) = (
-            self.driver.update_consistencygroup(None, self.testData.test_cg,
-                                                self.testData.
-                                                VOLUMES_NOT_IN_CG(),
-                                                self.testData.VOLUMES_IN_CG()))
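-        # Additions and removals collapse into a single replace-members
-        # command carrying the final LUN list.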
-        expect_cmd = [
-            mock.call(*self.testData.REPLACE_LUNS_IN_CG_CMD(
-                cg_name, ['4', '5']), poll=False)]
-        fake_cli.assert_has_calls(expect_cmd)
-        self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE,
-                         model_update['status'])
-
-    def test_update_consistencygroup_remove_all(self):
-        cg_name = self.testData.test_cg['id']
-        commands = [self.testData.GET_CG_BY_NAME_CMD(cg_name)]
-        results = [self.testData.CG_PROPERTY(cg_name)]
-        fake_cli = self.driverSetup(commands, results)
-
-        (model_update, add_vols, remove_vols) = (
-            self.driver.update_consistencygroup(None, self.testData.test_cg,
-                                                None,
-                                                self.testData.VOLUMES_IN_CG()))
-        expect_cmd = [
-            mock.call(*self.testData.REMOVE_LUNS_FROM_CG_CMD(
-                cg_name, ['1', '3']), poll=False)]
-        fake_cli.assert_has_calls(expect_cmd)
-        self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE,
-                         model_update['status'])
-
-    def test_update_consistencygroup_remove_not_in_cg(self):
-        cg_name = self.testData.test_cg['id']
-        commands = [self.testData.GET_CG_BY_NAME_CMD(cg_name)]
-        results = [self.testData.CG_PROPERTY(cg_name)]
-        fake_cli = self.driverSetup(commands, results)
-
-        (model_update, add_vols, remove_vols) = (
-            self.driver.update_consistencygroup(None, self.testData.test_cg,
-                                                None,
-                                                self.testData.
-                                                VOLUMES_NOT_IN_CG()))
-        expect_cmd = [
-            mock.call(*self.testData.REPLACE_LUNS_IN_CG_CMD(
-                cg_name, ['1', '3']), poll=False)]
-        fake_cli.assert_has_calls(expect_cmd)
-        self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE,
-                         model_update['status'])
-
-    def test_update_consistencygroup_error(self):
-        cg_name = self.testData.test_cg['id']
-        commands = [self.testData.GET_CG_BY_NAME_CMD(cg_name),
-                    self.testData.REPLACE_LUNS_IN_CG_CMD(
-                    cg_name, ['1', '3'])]
-        results = [self.testData.CG_PROPERTY(cg_name),
-                   self.testData.CG_REPL_ERROR()]
-        fake_cli = self.driverSetup(commands, results)
-        self.assertRaises(exception.EMCVnxCLICmdError,
-                          self.driver.update_consistencygroup,
-                          None,
-                          self.testData.test_cg,
-                          [],
-                          self.testData.VOLUMES_NOT_IN_CG())
-        expect_cmd = [
-            mock.call(*self.testData.REPLACE_LUNS_IN_CG_CMD(
-                cg_name, ['1', '3']), poll=False)]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    def test_create_consistencygroup_from_cgsnapshot(self):
-        output_migrate_verify = ('The specified source LUN '
-                                 'is not currently migrating.', 23)
-        new_cg = fake_consistencygroup.fake_consistencyobject_obj(
-            None, **self.testData.test_cg)
-        new_cg.id = fake.CONSISTENCY_GROUP_ID
-        vol1_in_new_cg = self.testData.test_volume_cg.copy()
-        vol1_in_new_cg.update(
-            {'name': fake.VOLUME_NAME,
-             'id': fake.VOLUME_ID,
-             'consistencygroup_id': fake.CONSISTENCY_GROUP_ID,
-             'provider_location': None})
-        vol2_in_new_cg = self.testData.test_volume_cg.copy()
-        vol2_in_new_cg.update(
-            {'name': fake.VOLUME2_NAME,
-             'id': fake.VOLUME2_ID,
-             'consistencygroup_id': fake.CONSISTENCY_GROUP_ID,
-             'provider_location': None})
-        src_cgsnap = fake_cgsnapshot.fake_cgsnapshot_obj(
-            None, **self.testData.test_cgsnapshot)
-
-        snap1_in_src_cgsnap = EMCVNXCLIDriverTestData.convert_snapshot(
-            self.testData.test_member_cgsnapshot)
-        snap2_in_src_cgsnap = EMCVNXCLIDriverTestData.convert_snapshot(
-            self.testData.test_member_cgsnapshot2)
-        copied_snap_name = 'temp_snapshot_for_%s' % new_cg['id']
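-        # Flow: copy the source cg snapshot, make the copy read/write,
-        # build each member from it via SMP + migration, then create the
-        # CG from the new LUNs and delete the copied snapshot.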
-        td = self.testData
-        commands = [td.SNAP_COPY_CMD(src_cgsnap['id'], copied_snap_name),
-                    td.ALLOW_READWRITE_ON_SNAP_CMD(copied_snap_name),
-                    td.SNAP_MP_CREATE_CMD(vol1_in_new_cg['name'],
-                                          self.testData.test_volume['name']),
-                    td.SNAP_ATTACH_CMD(vol1_in_new_cg['name'],
-                                       copied_snap_name),
-                    td.LUN_CREATION_CMD(vol1_in_new_cg['name'] + '_dest',
-                                        vol1_in_new_cg['size'],
-                                        'unit_test_pool', 'thin', None),
-                    td.LUN_PROPERTY_ALL_CMD(vol1_in_new_cg['name'] + '_dest'),
-                    td.LUN_PROPERTY_ALL_CMD(vol1_in_new_cg['name']),
-                    td.MIGRATION_CMD(6231, 1),
-
-                    td.SNAP_MP_CREATE_CMD(vol2_in_new_cg['name'],
-                                          self.testData.test_volume2['name']),
-                    td.SNAP_ATTACH_CMD(vol2_in_new_cg['name'],
-                                       copied_snap_name),
-                    td.LUN_CREATION_CMD(vol2_in_new_cg['name'] + '_dest',
-                                        vol2_in_new_cg['size'],
-                                        'unit_test_pool', 'thin', None),
-                    td.LUN_PROPERTY_ALL_CMD(vol2_in_new_cg['name'] + '_dest'),
-                    td.LUN_PROPERTY_ALL_CMD(vol2_in_new_cg['name']),
-                    td.MIGRATION_CMD(6232, 2),
-
-                    td.MIGRATION_VERIFY_CMD(6231),
-                    td.MIGRATION_VERIFY_CMD(6232),
-                    td.CREATE_CONSISTENCYGROUP_CMD(new_cg['id'], [6231, 6232]),
-                    td.DELETE_CG_SNAPSHOT(copied_snap_name)
-                    ]
-        results = [SUCCEED, SUCCEED, SUCCEED, SUCCEED, SUCCEED,
-                   td.LUN_PROPERTY(vol1_in_new_cg['name'] + '_dest',
-                                   lunid=1),
-                   td.LUN_PROPERTY(vol1_in_new_cg['name'], lunid=6231),
-                   SUCCEED, SUCCEED, SUCCEED, SUCCEED,
-                   td.LUN_PROPERTY(vol2_in_new_cg['name'] + '_dest',
-                                   lunid=2),
-                   td.LUN_PROPERTY(vol2_in_new_cg['name'], lunid=6232),
-                   SUCCEED, output_migrate_verify, output_migrate_verify,
-                   SUCCEED, SUCCEED]
-
-        fake_cli = self.driverSetup(commands, results)
-        cg_model_update, volumes_model_update = (
-            self.driver.create_consistencygroup_from_src(
-                None, new_cg, [vol1_in_new_cg, vol2_in_new_cg],
-                cgsnapshot=src_cgsnap, snapshots=[snap1_in_src_cgsnap,
-                                                  snap2_in_src_cgsnap],
-                source_cg=None, source_vols=None))
-        self.assertEqual(2, len(volumes_model_update))
-        self.assertIn('id^%s' % 6231,
-                      volumes_model_update[0]['provider_location'])
-        self.assertIn('id^%s' % 6232,
-                      volumes_model_update[1]['provider_location'])
-
-        expect_cmd = [
-            mock.call(*td.SNAP_COPY_CMD(src_cgsnap['id'], copied_snap_name)),
-            mock.call(*td.ALLOW_READWRITE_ON_SNAP_CMD(copied_snap_name)),
-            mock.call(*td.SNAP_MP_CREATE_CMD(vol1_in_new_cg['name'],
-                      self.testData.test_volume['name']),
-                      poll=False),
-            mock.call(*td.LUN_PROPERTY_ALL_CMD(vol1_in_new_cg['name']),
-                      poll=True),
-            mock.call(*td.SNAP_ATTACH_CMD(vol1_in_new_cg['name'],
-                      copied_snap_name)),
-            mock.call(*td.LUN_CREATION_CMD(vol1_in_new_cg['name'] + '_dest',
-                      vol1_in_new_cg['size'],
-                      'unit_test_pool', 'thick', None)),
-            mock.call(*td.LUN_PROPERTY_ALL_CMD(
-                      vol1_in_new_cg['name'] + '_dest'), poll=False),
-            mock.call(*td.LUN_PROPERTY_ALL_CMD(
-                      vol1_in_new_cg['name'] + '_dest'), poll=False),
-            mock.call(*td.MIGRATION_CMD(6231, 1),
-                      poll=True, retry_disable=True),
-            mock.call(*td.SNAP_MP_CREATE_CMD(vol2_in_new_cg['name'],
-                      self.testData.test_volume2['name']),
-                      poll=False),
-            mock.call(*td.LUN_PROPERTY_ALL_CMD(vol2_in_new_cg['name']),
-                      poll=True),
-            mock.call(*td.SNAP_ATTACH_CMD(vol2_in_new_cg['name'],
-                      copied_snap_name)),
-            mock.call(*td.LUN_CREATION_CMD(vol2_in_new_cg['name'] + '_dest',
-                      vol2_in_new_cg['size'],
-                      'unit_test_pool', 'thick', None)),
-            mock.call(*td.LUN_PROPERTY_ALL_CMD(
-                      vol2_in_new_cg['name'] + '_dest'), poll=False),
-            mock.call(*td.LUN_PROPERTY_ALL_CMD(
-                      vol2_in_new_cg['name'] + '_dest'), poll=False),
-            mock.call(*td.MIGRATION_CMD(6232, 2),
-                      poll=True, retry_disable=True),
-            mock.call(*td.MIGRATION_VERIFY_CMD(6231), poll=True),
-            mock.call(*td.LUN_PROPERTY_BY_ID(1), poll=True),
-            mock.call(*td.MIGRATION_VERIFY_CMD(6232), poll=True),
-            mock.call(*td.LUN_PROPERTY_BY_ID(2), poll=True),
-            mock.call(*td.CREATE_CONSISTENCYGROUP_CMD(
-                      new_cg['id'], [6231, 6232]), poll=True),
-            mock.call(*td.GET_CG_BY_NAME_CMD(new_cg.id)),
-            mock.call(*td.DELETE_CG_SNAPSHOT(copied_snap_name))]
-        self.assertEqual(expect_cmd, fake_cli.call_args_list)
-
-    def test_create_cg_from_src_failed_without_source(self):
-        new_cg = fake_consistencygroup.fake_consistencyobject_obj(
-            None, **self.testData.test_cg)
-        vol1_in_new_cg = self.testData.test_volume_cg
-        self.driverSetup()
-        self.assertRaises(
-            exception.InvalidInput,
-            self.driver.create_consistencygroup_from_src,
-            new_cg, [vol1_in_new_cg],
-            None, None, None, None)
-
-    def test_create_cg_from_src_failed_with_multiple_sources(self):
-        new_cg = fake_consistencygroup.fake_consistencyobject_obj(
-            None, **self.testData.test_cg)
-        vol1_in_new_cg = self.testData.test_volume_cg
-        src_cgsnap = fake_cgsnapshot.fake_cgsnapshot_obj(
-            None, **self.testData.test_cgsnapshot)
-        snap1_in_src_cgsnap = fake_snapshot.fake_snapshot_obj(
-            None, **self.testData.test_member_cgsnapshot)
-        src_cg = fake_consistencygroup.fake_consistencyobject_obj(
-            None, **self.testData.test_cg)
-        src_cg.id = fake.CONSISTENCY_GROUP_ID
-        vol1_in_src_cg = {'id': fake.VOLUME_ID,
-                          'consistencygroup_id': src_cg.id}
-        self.driverSetup()
-        self.assertRaises(
-            exception.InvalidInput,
-            self.driver.create_consistencygroup_from_src,
-            new_cg, [vol1_in_new_cg],
-            src_cgsnap, [snap1_in_src_cgsnap], src_cg, [vol1_in_src_cg])
-
-    def test_create_cg_from_src_failed_with_invalid_source(self):
-        new_cg = fake_consistencygroup.fake_consistencyobject_obj(
-            None, **self.testData.test_cg)
-        src_cgsnap = fake_cgsnapshot.fake_cgsnapshot_obj(
-            None, **self.testData.test_cgsnapshot)
-        vol1_in_new_cg = self.testData.test_volume_cg
-
-        src_cg = fake_consistencygroup.fake_consistencyobject_obj(
-            None, **self.testData.test_cg)
-        src_cg.id = fake.CONSISTENCY_GROUP_ID
-        self.driverSetup()
-        self.assertRaises(
-            exception.InvalidInput,
-            self.driver.create_consistencygroup_from_src,
-            new_cg, [vol1_in_new_cg],
-            src_cgsnap, None, src_cg, None)
-
-    def test_create_cg_from_cgsnapshot_migrate_verify_failed(self):
-        new_cg = fake_consistencygroup.fake_consistencyobject_obj(
-            None, **self.testData.test_cg)
-        new_cg.id = fake.CONSISTENCY_GROUP_ID
-        vol1_in_new_cg = self.testData.test_volume_cg.copy()
-        vol1_in_new_cg.update(
-            {'name': 'volume-1_in_cg',
-             'id': fake.VOLUME_ID,
-             'consistencygroup_id': fake.CONSISTENCY_GROUP_ID,
-             'provider_location': None})
-        vol2_in_new_cg = self.testData.test_volume_cg.copy()
-        vol2_in_new_cg.update(
-            {'name': 'volume-2_in_cg',
-             'id': fake.VOLUME2_ID,
-             'consistencygroup_id': fake.CONSISTENCY_GROUP_ID,
-             'provider_location': None})
-        src_cgsnap = fake_cgsnapshot.fake_cgsnapshot_obj(
-            None, **self.testData.test_cgsnapshot)
-        snap1_in_src_cgsnap = EMCVNXCLIDriverTestData.convert_snapshot(
-            self.testData.test_member_cgsnapshot)
-        snap2_in_src_cgsnap = EMCVNXCLIDriverTestData.convert_snapshot(
-            self.testData.test_member_cgsnapshot2)
-        copied_snap_name = 'temp_snapshot_for_%s' % new_cg['id']
-        output_migrate_verify = (r'The specified source LUN '
-                                 'is not currently migrating', 23)
-        td = self.testData
-        commands = [td.LUN_PROPERTY_ALL_CMD(vol1_in_new_cg['name'] + '_dest'),
-                    td.LUN_PROPERTY_ALL_CMD(vol1_in_new_cg['name']),
-                    td.LUN_PROPERTY_ALL_CMD(vol2_in_new_cg['name'] + '_dest'),
-                    td.LUN_PROPERTY_ALL_CMD(vol2_in_new_cg['name']),
-                    td.MIGRATION_CMD(6231, 1),
-                    td.MIGRATION_VERIFY_CMD(6231),
-                    td.LUN_PROPERTY_BY_ID(1)]
-        results = [td.LUN_PROPERTY(vol1_in_new_cg['name'] + '_dest',
-                                   lunid=1),
-                   td.LUN_PROPERTY(vol1_in_new_cg['name'], lunid=6231),
-                   td.LUN_PROPERTY(vol2_in_new_cg['name'] + '_dest',
-                                   lunid=2),
-                   td.LUN_PROPERTY(vol2_in_new_cg['name'], lunid=6232),
-                   FAKE_ERROR_RETURN,
-                   output_migrate_verify,
-                   td.LIST_LUN_1_ALL(2, td.fake_wwn)]
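-        # MIGRATION_CMD fails (FAKE_ERROR_RETURN), so the driver must
-        # roll back: delete the dest LUNs, detach and delete the SMPs,
-        # and remove the temporary snapshot.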
-
-        fake_cli = self.driverSetup(commands, results)
-        self.assertRaises(exception.EMCVnxCLICmdError,
-                          self.driver.create_consistencygroup_from_src,
-                          None, new_cg, [vol1_in_new_cg, vol2_in_new_cg],
-                          cgsnapshot=src_cgsnap,
-                          snapshots=[snap1_in_src_cgsnap,
-                                     snap2_in_src_cgsnap],
-                          source_cg=None, source_vols=None)
-
-        expect_cmd = [
-            mock.call(*self.testData.LUN_DELETE_CMD(
-                      vol2_in_new_cg['name'] + '_dest')),
-            mock.call('lun', '-detach', '-name', vol2_in_new_cg['name'], '-o'),
-            mock.call(*self.testData.LUN_DELETE_CMD(vol2_in_new_cg['name'])),
-            mock.call(*self.testData.LUN_DELETE_CMD(
-                      vol1_in_new_cg['name'] + '_dest')),
-            mock.call('lun', '-detach', '-name', vol1_in_new_cg['name'], '-o'),
-            mock.call(*self.testData.LUN_DELETE_CMD(vol1_in_new_cg['name'])),
-            mock.call(*td.SNAP_DELETE_CMD(copied_snap_name), poll=True)]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    def test_create_consistencygroup_from_cg(self):
-        output_migrate_verify = ('The specified source LUN '
-                                 'is not currently migrating.', 23)
-        new_cg = fake_consistencygroup.fake_consistencyobject_obj(
-            None, **self.testData.test_cg)
-        new_cg.id = fake.CONSISTENCY_GROUP2_ID
-        vol1_in_new_cg = self.testData.test_volume_cg.copy()
-        vol1_in_new_cg.update(
-            {'name': 'volume-1_in_cg',
-             'id': fake.VOLUME3_ID,
-             'consistencygroup_id': fake.CONSISTENCY_GROUP2_ID,
-             'provider_location': None})
-        vol2_in_new_cg = self.testData.test_volume_cg.copy()
-        vol2_in_new_cg.update(
-            {'name': 'volume-2_in_cg',
-             'id': fake.VOLUME4_ID,
-             'consistencygroup_id': fake.CONSISTENCY_GROUP2_ID,
-             'provider_location': None})
-        src_cg = fake_consistencygroup.fake_consistencyobject_obj(
-            None, **self.testData.test_cg)
-        src_cg.id = fake.CONSISTENCY_GROUP_ID
-        vol1_in_src_cg = self.testData.test_volume_cg.copy()
-        vol1_in_src_cg.update(
-            {'name': 'volume-1_in_src_cg',
-             'id': fake.VOLUME_ID,
-             'consistencygroup_id': fake.CONSISTENCY_GROUP_ID,
-             'provider_location': build_provider_location(
-                 1, 'lun', 'volume-1_in_src_cg')})
-        vol2_in_src_cg = self.testData.test_volume_cg.copy()
-        vol2_in_src_cg.update(
-            {'name': 'volume-2_in_src_cg',
-             'id': fake.VOLUME2_ID,
-             'consistencygroup_id': fake.CONSISTENCY_GROUP_ID,
-             'provider_location': build_provider_location(
-                 2, 'lun', 'volume-2_in_src_cg')})
-        temp_snap_name = 'temp_snapshot_for_%s' % new_cg['id']
-        td = self.testData
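-        # Expected flow: snapshot the source CG, create an SMP per new
-        # volume, attach the temp snapshot, migrate each SMP to a real
-        # LUN, group the migrated LUNs into the new CG, then drop the
-        # temporary snapshot.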
-        commands = [td.CREATE_CG_SNAPSHOT(src_cg['id'], temp_snap_name),
-                    td.SNAP_MP_CREATE_CMD(vol1_in_new_cg['name'],
-                                          vol1_in_src_cg['name']),
-                    td.SNAP_ATTACH_CMD(vol1_in_new_cg['name'],
-                                       temp_snap_name),
-                    td.LUN_CREATION_CMD(vol1_in_new_cg['name'] + '_dest',
-                                        vol1_in_new_cg['size'],
-                                        'unit_test_pool', 'thin', None),
-                    td.LUN_PROPERTY_ALL_CMD(vol1_in_new_cg['name'] + '_dest'),
-                    td.LUN_PROPERTY_ALL_CMD(vol1_in_new_cg['name']),
-                    td.MIGRATION_CMD(6231, 1),
-
-                    td.SNAP_MP_CREATE_CMD(vol2_in_new_cg['name'],
-                                          vol2_in_src_cg['name']),
-                    td.SNAP_ATTACH_CMD(vol2_in_new_cg['name'],
-                                       temp_snap_name),
-                    td.LUN_CREATION_CMD(vol2_in_new_cg['name'] + '_dest',
-                                        vol2_in_new_cg['size'],
-                                        'unit_test_pool', 'thin', None),
-                    td.LUN_PROPERTY_ALL_CMD(vol2_in_new_cg['name'] + '_dest'),
-                    td.LUN_PROPERTY_ALL_CMD(vol2_in_new_cg['name']),
-                    td.MIGRATION_CMD(6232, 2),
-
-                    td.MIGRATION_VERIFY_CMD(6231),
-                    td.MIGRATION_VERIFY_CMD(6232),
-                    td.CREATE_CONSISTENCYGROUP_CMD(new_cg['id'], [6231, 6232]),
-                    td.DELETE_CG_SNAPSHOT(temp_snap_name)
-                    ]
-        results = [SUCCEED, SUCCEED, SUCCEED, SUCCEED,
-                   td.LUN_PROPERTY(vol1_in_new_cg['name'] + '_dest',
-                                   lunid=1),
-                   td.LUN_PROPERTY(vol1_in_new_cg['name'], lunid=6231),
-                   SUCCEED, SUCCEED, SUCCEED, SUCCEED,
-                   td.LUN_PROPERTY(vol2_in_new_cg['name'] + '_dest',
-                                   lunid=2),
-                   td.LUN_PROPERTY(vol2_in_new_cg['name'], lunid=6232),
-                   SUCCEED, output_migrate_verify, output_migrate_verify,
-                   SUCCEED, SUCCEED]
-
-        fake_cli = self.driverSetup(commands, results)
-        cg_model_update, volumes_model_update = (
-            self.driver.create_consistencygroup_from_src(
-                None, new_cg, [vol1_in_new_cg, vol2_in_new_cg],
-                cgsnapshot=None, snapshots=None,
-                source_cg=src_cg, source_vols=[vol1_in_src_cg,
-                                               vol2_in_src_cg]))
-        self.assertEqual(2, len(volumes_model_update))
-        self.assertIn('id^%s' % 6231,
-                      volumes_model_update[0]['provider_location'])
-        self.assertIn('id^%s' % 6232,
-                      volumes_model_update[1]['provider_location'])
-
-        delete_temp_snap_cmd = [
-            mock.call(*td.DELETE_CG_SNAPSHOT(temp_snap_name))]
-        fake_cli.assert_has_calls(delete_temp_snap_cmd)
-
-    @mock.patch.object(emc_vnx_cli, 'LOG')
-    @mock.patch.object(emc_vnx_cli.CommandLineHelper,
-                       'delete_cgsnapshot')
-    def test_delete_temp_cgsnapshot_failed_will_not_raise_exception(
-            self, mock_delete_cgsnapshot, mock_logger):
-        temp_snap_name = 'fake_temp'
-        self.driverSetup()
-        mock_delete_cgsnapshot.side_effect = exception.EMCVnxCLICmdError(
-            cmd='fake_cmd', rc=200, out='fake_output')
-        self.driver.cli._delete_temp_cgsnap(temp_snap_name)
-        mock_delete_cgsnapshot.assert_called_once_with(temp_snap_name)
-        self.assertTrue(mock_logger.warning.called)
-
-    @mock.patch.object(emc_vnx_cli.CreateSMPTask, 'execute',
-                       mock.Mock(side_effect=exception.EMCVnxCLICmdError(
-                           cmd='fake_cmd', rc=20, out='fake_output')))
-    @mock.patch.object(emc_vnx_cli.CreateSMPTask, 'revert',
-                       mock.Mock())
-    def test_create_consistencygroup_from_cg_roll_back(self):
-        new_cg = fake_consistencygroup.fake_consistencyobject_obj(
-            None, **self.testData.test_cg)
-        new_cg.id = fake.CONSISTENCY_GROUP2_ID
-        vol1_in_new_cg = self.testData.test_volume_cg.copy()
-        vol1_in_new_cg.update(
-            {'name': 'volume-1_in_cg',
-             'id': fake.VOLUME3_ID,
-             'consistencygroup_id': fake.CONSISTENCY_GROUP2_ID,
-             'provider_location': None})
-        src_cg = fake_consistencygroup.fake_consistencyobject_obj(
-            None, **self.testData.test_cg)
-        src_cg.id = fake.CONSISTENCY_GROUP_ID
-        vol1_in_src_cg = self.testData.test_volume_cg.copy()
-        vol1_in_src_cg.update(
-            {'name': 'volume-1_in_src_cg',
-             'id': fake.VOLUME_ID,
-             'consistencygroup_id': fake.CONSISTENCY_GROUP_ID,
-             'provider_location': build_provider_location(
-                 1, 'lun', 'volume-1_in_src_cg')})
-        temp_snap_name = 'temp_snapshot_for_%s' % new_cg['id']
-        td = self.testData
-        commands = [td.CREATE_CG_SNAPSHOT(src_cg['id'], temp_snap_name),
-                    td.DELETE_CG_SNAPSHOT(temp_snap_name)]
-        results = [SUCCEED, SUCCEED]
-
-        fake_cli = self.driverSetup(commands, results)
-
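-        # CreateSMPTask.execute is mocked to raise, so creation must fail
-        # and the taskflow rollback is expected to delete the temporary
-        # CG snapshot.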
-        self.assertRaises(
-            exception.EMCVnxCLICmdError,
-            self.driver.create_consistencygroup_from_src,
-            None, new_cg, [vol1_in_new_cg],
-            cgsnapshot=None, snapshots=None,
-            source_cg=src_cg, source_vols=[vol1_in_src_cg])
-
-        rollback_cmd = [
-            mock.call(*td.DELETE_CG_SNAPSHOT(temp_snap_name))]
-        fake_cli.assert_has_calls(rollback_cmd)
-
-    def test_deregister_initiator(self):
-        fake_cli = self.driverSetup()
-        self.driver.cli.destroy_empty_sg = True
-        self.driver.cli.itor_auto_dereg = True
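-        # With destroy_empty_sg and itor_auto_dereg enabled, removing the
-        # last LUN should tear down the storage group and deregister the
-        # host's initiator.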
-        cli_helper = self.driver.cli._client
-        data = {'storage_group_name': "fakehost",
-                'storage_group_uid': "2F:D4:00:00:00:00:00:"
-                "00:00:00:FF:E5:3A:03:FD:6D",
-                'lunmap': {1: 16}}
-        cli_helper.get_storage_group = mock.Mock(
-            return_value=data)
-        lun_info = {'lun_name': "unit_test_lun",
-                    'lun_id': 1,
-                    'pool': "unit_test_pool",
-                    'attached_snapshot': "N/A",
-                    'owner': "A",
-                    'total_capacity_gb': 1.0,
-                    'state': "Ready"}
-        cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
-        cli_helper.remove_hlu_from_storagegroup = mock.Mock()
-        cli_helper.disconnect_host_from_storage_group = mock.Mock()
-        cli_helper.delete_storage_group = mock.Mock()
-        self.driver.terminate_connection(self.testData.test_volume,
-                                         self.testData.connector)
-        expect_cmd = [
-            mock.call('port', '-removeHBA', '-hbauid',
-                      self.testData.connector['initiator'],
-                      '-o')]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    def test_unmanage(self):
-        self.driverSetup()
-        try:
-            self.driver.unmanage(self.testData.test_volume)
-        except NotImplementedError:
-            self.fail('Interface unmanage needs to be implemented')
-
-    @mock.patch("random.shuffle", mock.Mock())
-    def test_find_available_iscsi_targets_without_pingnode(self):
-        self.configuration.iscsi_initiators = None
-        self.driverSetup()
-        port_a1 = {'Port WWN': 'fake_iqn_a1',
-                   'SP': 'A',
-                   'Port ID': 1,
-                   'Virtual Port ID': 0,
-                   'IP Address': 'fake_ip_a1'}
-        port_a2 = {'Port WWN': 'fake_iqn_a2',
-                   'SP': 'A',
-                   'Port ID': 2,
-                   'Virtual Port ID': 0,
-                   'IP Address': 'fake_ip_a2'}
-        port_b1 = {'Port WWN': 'fake_iqn_b1',
-                   'SP': 'B',
-                   'Port ID': 1,
-                   'Virtual Port ID': 0,
-                   'IP Address': 'fake_ip_b1'}
-        all_targets = {'A': [port_a1, port_a2],
-                       'B': [port_b1]}
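-        # Only ports A-2 and B-1 are in the registered set below, so just
-        # those two targets should come back.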
-        targets = self.driver.cli._client.find_available_iscsi_targets(
-            'fakehost',
-            {('A', 2, 0), ('B', 1, 0)},
-            all_targets)
-        self.assertIn(port_a2, targets)
-        self.assertIn(port_b1, targets)
-
-    @mock.patch.object(emc_vnx_cli.CommandLineHelper,
-                       'ping_node')
-    def test_find_available_iscsi_targets_with_pingnode(self, ping_node):
-        self.configuration.iscsi_initiators = (
-            '{"fakehost": ["10.0.0.2"]}')
-        self.driverSetup()
-        port_a1 = {'Port WWN': 'fake_iqn_a1',
-                   'SP': 'A',
-                   'Port ID': 1,
-                   'Virtual Port ID': 0,
-                   'IP Address': 'fake_ip_a1'}
-        port_a2 = {'Port WWN': 'fake_iqn_a2',
-                   'SP': 'A',
-                   'Port ID': 2,
-                   'Virtual Port ID': 0,
-                   'IP Address': 'fake_ip_a2'}
-        port_b1 = {'Port WWN': 'fake_iqn_b1',
-                   'SP': 'B',
-                   'Port ID': 1,
-                   'Virtual Port ID': 0,
-                   'IP Address': 'fake_ip_b1'}
-        all_targets = {'A': [port_a1, port_a2],
-                       'B': [port_b1]}
-        ping_node.side_effect = [False, False, True]
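-        # Only the last ping succeeds, yet all three ports are still
-        # expected in the result: unreachable portals are kept rather
-        # than filtered out.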
-        targets = self.driver.cli._client.find_available_iscsi_targets(
-            'fakehost',
-            {('A', 2, 0), ('A', 1, 0), ('B', 1, 0)},
-            all_targets)
-        self.assertIn(port_a1, targets)
-        self.assertIn(port_a2, targets)
-        self.assertIn(port_b1, targets)
-
-    @mock.patch('cinder.volume.drivers.emc.emc_vnx_cli.'
-                'EMCVnxCliBase.get_lun_owner',
-                mock.Mock(return_value='A'))
-    @mock.patch('cinder.volume.drivers.emc.emc_vnx_cli.'
-                'CommandLineHelper.get_registered_spport_set',
-                mock.Mock())
-    @mock.patch.object(emc_vnx_cli.CommandLineHelper,
-                       'find_available_iscsi_targets')
-    def test_vnx_get_iscsi_properties(self, find_available_iscsi_targets):
-        self.driverSetup()
-        port_a1 = {'Port WWN': 'fake_iqn_a1',
-                   'SP': 'A',
-                   'Port ID': 1,
-                   'Virtual Port ID': 0,
-                   'IP Address': 'fake_ip_a1'}
-        port_b1 = {'Port WWN': 'fake_iqn_b1',
-                   'SP': 'B',
-                   'Port ID': 1,
-                   'Virtual Port ID': 0,
-                   'IP Address': 'fake_ip_b1'}
-        find_available_iscsi_targets.return_value = [port_a1, port_b1]
-        connect_info = self.driver.cli.vnx_get_iscsi_properties(
-            self.testData.test_volume, self.testData.connector, 1, '')
-        expected_info = {
-            'target_discovered': True,
-            'target_iqns': [
-                'fake_iqn_a1',
-                'fake_iqn_b1'],
-            'target_iqn': 'fake_iqn_a1',
-            'target_luns': [1, 1],
-            'target_lun': 1,
-            'target_portals': [
-                'fake_ip_a1:3260',
-                'fake_ip_b1:3260'],
-            'target_portal': 'fake_ip_a1:3260',
-            'volume_id': fake.VOLUME_ID}
-        self.assertEqual(expected_info, connect_info)
-
-    def test_update_migrated_volume(self):
-        self.driverSetup()
-        expected_update = {'provider_location':
-                           self.testData.test_volume2['provider_location'],
-                           'metadata': {'snapcopy': 'False'}}
-        model_update = self.driver.update_migrated_volume(
-            None, self.testData.test_volume,
-            self.testData.test_volume2, 'available')
-        self.assertDictMatch(expected_update, model_update)
-
-
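-# The array-based test case leaves storage_vnx_pool_names unset, so the
-# driver is expected to discover and report every pool on the array.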
-class EMCVNXCLIDArrayBasedDriverTestCase(DriverTestCaseBase):
-    def setUp(self):
-        super(EMCVNXCLIDArrayBasedDriverTestCase, self).setUp()
-        self.configuration.safe_get = self.fake_safe_get(
-            {'storage_vnx_pool_names': None,
-             'volume_backend_name': 'namedbackend'})
-
-    def generate_driver(self, conf):
-        driver = emc_cli_iscsi.EMCCLIISCSIDriver(configuration=conf)
-        return driver
-
-    def test_get_volume_stats(self):
-        commands = [self.testData.NDU_LIST_CMD,
-                    self.testData.POOL_GET_ALL_CMD(True)]
-        results = [self.testData.NDU_LIST_RESULT,
-                   self.testData.POOL_GET_ALL_RESULT(True)]
-        self.driverSetup(commands, results)
-        stats = self.driver.get_volume_stats(True)
-
-        self.assertEqual(VERSION, stats['driver_version'],
-                         "driver_version is incorrect")
-        self.assertEqual('iSCSI', stats['storage_protocol'],
-                         "storage_protocol is incorrect")
-        self.assertEqual("EMC", stats['vendor_name'],
-                         "vendor_name is incorrect")
-        self.assertEqual("namedbackend", stats['volume_backend_name'],
-                         "volume_backend_name is incorrect")
-
-        self.assertEqual(2, len(stats['pools']))
-        pool_stats1 = stats['pools'][0]
-        expected_pool_stats1 = {
-            'free_capacity_gb': 3105.303,
-            'reserved_percentage': 32,
-            'location_info': 'unit_test_pool|fake_serial',
-            'total_capacity_gb': 3281.146,
-            'provisioned_capacity_gb': 536.140,
-            'compression_support': 'True',
-            'deduplication_support': 'True',
-            'thin_provisioning_support': True,
-            'thick_provisioning_support': True,
-            'consistencygroup_support': 'True',
-            'replication_enabled': False,
-            'replication_targets': [],
-            'pool_name': 'unit_test_pool',
-            'max_over_subscription_ratio': 20.0,
-            'fast_cache_enabled': True,
-            'fast_support': 'True'}
-        self.assertEqual(expected_pool_stats1, pool_stats1)
-
-        pool_stats2 = stats['pools'][1]
-        expected_pool_stats2 = {
-            'free_capacity_gb': 3984.768,
-            'reserved_percentage': 32,
-            'location_info': 'unit_test_pool2|fake_serial',
-            'total_capacity_gb': 4099.992,
-            'provisioned_capacity_gb': 636.240,
-            'compression_support': 'True',
-            'deduplication_support': 'True',
-            'thin_provisioning_support': True,
-            'thick_provisioning_support': True,
-            'consistencygroup_support': 'True',
-            'replication_enabled': False,
-            'replication_targets': [],
-            'pool_name': 'unit_test_pool2',
-            'max_over_subscription_ratio': 20.0,
-            'fast_cache_enabled': False,
-            'fast_support': 'True'}
-        self.assertEqual(expected_pool_stats2, pool_stats2)
-
-    def test_get_volume_stats_wo_fastcache(self):
-        commands = (self.testData.NDU_LIST_CMD,
-                    self.testData.POOL_GET_ALL_CMD(False))
-        results = (self.testData.NDU_LIST_RESULT_WO_LICENSE,
-                   self.testData.POOL_GET_ALL_RESULT(False))
-        self.driverSetup(commands, results)
-
-        stats = self.driver.get_volume_stats(True)
-
-        self.assertEqual(2, len(stats['pools']))
-        pool_stats1 = stats['pools'][0]
-        expected_pool_stats1 = {
-            'free_capacity_gb': 3105.303,
-            'reserved_percentage': 32,
-            'location_info': 'unit_test_pool|fake_serial',
-            'total_capacity_gb': 3281.146,
-            'provisioned_capacity_gb': 536.140,
-            'compression_support': 'False',
-            'deduplication_support': 'False',
-            'thin_provisioning_support': False,
-            'thick_provisioning_support': True,
-            'consistencygroup_support': 'False',
-            'pool_name': 'unit_test_pool',
-            'replication_enabled': False,
-            'replication_targets': [],
-            'max_over_subscription_ratio': 20.0,
-            'fast_cache_enabled': 'False',
-            'fast_support': 'False'}
-        self.assertEqual(expected_pool_stats1, pool_stats1)
-
-        pool_stats2 = stats['pools'][1]
-        expected_pool_stats2 = {
-            'free_capacity_gb': 3984.768,
-            'reserved_percentage': 32,
-            'location_info': 'unit_test_pool2|fake_serial',
-            'total_capacity_gb': 4099.992,
-            'provisioned_capacity_gb': 636.240,
-            'compression_support': 'False',
-            'deduplication_support': 'False',
-            'thin_provisioning_support': False,
-            'thick_provisioning_support': True,
-            'consistencygroup_support': 'False',
-            'replication_enabled': False,
-            'replication_targets': [],
-            'pool_name': 'unit_test_pool2',
-            'max_over_subscription_ratio': 20.0,
-            'fast_cache_enabled': 'False',
-            'fast_support': 'False'}
-        self.assertEqual(expected_pool_stats2, pool_stats2)
-
-    def test_get_volume_stats_storagepool_states(self):
-        commands = (self.testData.POOL_GET_ALL_CMD(False),)
-        results = (self.testData.POOL_GET_ALL_STATES_TEST
-                   (['Initializing', 'Ready', 'Faulted',
-                     'Offline', 'Deleting']),)
-        self.driverSetup(commands, results)
-
-        stats = self.driver.get_volume_stats(True)
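-        # Pools in Initializing, Offline or Deleting states should report
-        # zero free capacity; Ready and Faulted pools keep their real
-        # capacity.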
-        self.assertEqual(0, stats['pools'][0]['free_capacity_gb'],
-                         "free_capacity_gb is incorrect")
-        self.assertNotEqual(0, stats['pools'][1]['free_capacity_gb'],
-                            "free_capacity_gb is incorrect")
-        self.assertNotEqual(0, stats['pools'][2]['free_capacity_gb'],
-                            "free_capacity_gb is incorrect")
-        self.assertEqual(0, stats['pools'][3]['free_capacity_gb'],
-                         "free_capacity_gb is incorrect")
-        self.assertEqual(0, stats['pools'][4]['free_capacity_gb'],
-                         "free_capacity_gb is incorrect")
-
-    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
-                new=utils.ZeroIntervalLoopingCall)
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'storagetype:provisioning': 'deduplicated'}))
-    def test_create_volume_deduplicated(self):
-        commands = [self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME)]
-        results = [self.testData.LUN_PROPERTY(fake.VOLUME_NAME, True)]
-
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.cli.enablers = ['-Compression',
-                                    '-Deduplication',
-                                    '-ThinProvisioning',
-                                    '-FAST']
-        # Exercise: create the volume with deduplicated provisioning.
-        self.driver.create_volume(self.testData.test_volume_with_type)
-
-        # Verify the expected CLI calls.
-        expect_cmd = [
-            mock.call(*self.testData.LUN_CREATION_CMD(
-                fake.VOLUME_NAME, 1,
-                'unit_test_pool',
-                'deduplicated', None, poll=False)),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                      poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                      poll=False)]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    def test_get_pool(self):
-        testVolume = self.testData.test_volume_with_type
-        commands = [self.testData.LUN_PROPERTY_POOL_CMD(testVolume['name'])]
-        results = [self.testData.LUN_PROPERTY(testVolume['name'], False)]
-        fake_cli = self.driverSetup(commands, results)
-        pool = self.driver.get_pool(testVolume)
-        self.assertEqual('unit_test_pool', pool)
-        fake_cli.assert_has_calls(
-            [mock.call(*self.testData.LUN_PROPERTY_POOL_CMD(
-                testVolume['name']), poll=False)])
-
-    def test_get_target_pool_for_cloned_volume(self):
-        testSrcVolume = self.testData.test_volume
-        testNewVolume = self.testData.test_volume2
-        fake_cli = self.driverSetup()
-        pool = self.driver.cli.get_target_storagepool(testNewVolume,
-                                                      testSrcVolume)
-        self.assertEqual('unit_test_pool', pool)
-        self.assertFalse(fake_cli.called)
-
-    def test_get_target_pool_for_clone_legacy_volume(self):
-        testSrcVolume = self.testData.test_legacy_volume
-        testNewVolume = self.testData.test_volume2
-        commands = [self.testData.LUN_PROPERTY_POOL_CMD(testSrcVolume['name'])]
-        results = [self.testData.LUN_PROPERTY(testSrcVolume['name'], False)]
-        fake_cli = self.driverSetup(commands, results)
-        pool = self.driver.cli.get_target_storagepool(testNewVolume,
-                                                      testSrcVolume)
-        self.assertEqual('unit_test_pool', pool)
-        fake_cli.assert_has_calls(
-            [mock.call(*self.testData.LUN_PROPERTY_POOL_CMD(
-                testSrcVolume['name']), poll=False)])
-
-    def test_manage_existing_get_size(self):
-        get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
-                       '-state', '-userCap', '-owner',
-                       '-attachedSnapshot', '-poolName')
-        test_size = 2
-        commands = [get_lun_cmd]
-        results = [self.testData.LUN_PROPERTY('lun_name', size=test_size)]
-        fake_cli = self.driverSetup(commands, results)
-        test_volume = self.testData.test_volume2.copy()
-        test_volume['host'] = "host@backendsec#unit_test_pool"
-        get_size = self.driver.manage_existing_get_size(
-            test_volume,
-            self.testData.test_existing_ref)
-        expected = [mock.call(*get_lun_cmd, poll=True)]
-        self.assertEqual(test_size, get_size)
-        fake_cli.assert_has_calls(expected)
-
-    def test_manage_existing_get_size_incorrect_pool(self):
-        """Test manage_existing function of driver with an invalid pool."""
-
-        get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
-                       '-state', '-userCap', '-owner',
-                       '-attachedSnapshot', '-poolName')
-        commands = [get_lun_cmd]
-        results = [self.testData.LUN_PROPERTY('lun_name')]
-        fake_cli = self.driverSetup(commands, results)
-        test_volume = self.testData.test_volume2.copy()
-        test_volume['host'] = "host@backendsec#fake_pool"
-        ex = self.assertRaises(
-            exception.ManageExistingInvalidReference,
-            self.driver.manage_existing_get_size,
-            test_volume,
-            self.testData.test_existing_ref)
-        self.assertTrue(
-            re.match(r'.*not managed by the host',
-                     ex.msg))
-        expected = [mock.call(*get_lun_cmd, poll=True)]
-        fake_cli.assert_has_calls(expected)
-
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={}))
-    def test_manage_existing(self):
-        data = self.testData
-        test_volume = data.test_volume_with_type
-        lun_rename_cmd = data.LUN_RENAME_CMD(
-            '1', test_volume['name'])
-        lun_list_cmd = data.LUN_LIST_ALL_CMD('1')
-
-        commands = lun_rename_cmd, lun_list_cmd
-        results = SUCCEED, (data.LIST_LUN_1_SPECS, 0)
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.manage_existing(
-            self.testData.test_volume_with_type,
-            self.testData.test_existing_ref)
-        expected = [mock.call(*lun_rename_cmd, poll=False)]
-        fake_cli.assert_has_calls(expected)
-
-    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
-                new=utils.ZeroIntervalLoopingCall)
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'storagetype:provisioning': 'Compressed',
-                                'storagetype:pool': 'unit_test_pool'}))
-    def test_create_compression_volume(self):
-        commands = [self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                    self.testData.LUN_PROPERTY_ALL_CMD(fake.VOLUME_NAME),
-                    self.testData.NDU_LIST_CMD]
-        results = [self.testData.LUN_PROPERTY(fake.VOLUME_NAME, True),
-                   self.testData.LUN_PROPERTY(fake.VOLUME_NAME, True),
-                   self.testData.NDU_LIST_RESULT]
-
-        fake_cli = self.driverSetup(commands, results)
-
-        self.driver.cli.stats['compression_support'] = 'True'
-        self.driver.cli.enablers = ['-Compression',
-                                    '-Deduplication',
-                                    '-ThinProvisioning',
-                                    '-FAST']
-        # Exercise: create the volume with compression enabled.
-        self.driver.create_volume(self.testData.test_volume_with_type)
-        # Verify the expected CLI calls.
-        expect_cmd = [
-            mock.call(*self.testData.LUN_CREATION_CMD(
-                fake.VOLUME_NAME, 1,
-                'unit_test_pool',
-                'compressed', None, poll=False)),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                fake.VOLUME_NAME), poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                fake.VOLUME_NAME), poll=False),
-            mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
-                fake.VOLUME_NAME), poll=True),
-            mock.call(*self.testData.ENABLE_COMPRESSION_CMD(
-                1))]
-        fake_cli.assert_has_calls(expect_cmd)
-
-    def test_get_registered_spport_set(self):
-        self.driverSetup()
-        spport_set = self.driver.cli._client.get_registered_spport_set(
-            'iqn.1993-08.org.debian:01:222', 'fakehost',
-            self.testData.STORAGE_GROUP_HAS_MAP_ISCSI('fakehost')[0])
-        self.assertEqual({('A', 2, 0), ('A', 0, 0), ('B', 2, 0)}, spport_set)
-
-    def test_validate_iscsi_port(self):
-        self.driverSetup()
-        port_list = (
-            "SP:  A\n"
-            "Port ID:  6\n"
-            "Port WWN:  iqn.fake.a6\n"
-            "iSCSI Alias:  1111.a6\n"
-            "\n"
-            "Virtual Port ID:  0\n"
-            "VLAN ID:  Disabled\n"
-            "\n"
-            "SP:  B\n"
-            "Port ID:  7\n"
-            "Port WWN:  iqn.fake.b7\n"
-            "iSCSI Alias:  0235.b7"
-            "\n"
-            "Virtual Port ID:  0\n"
-            "VLAN ID:  Disabled\n"
-            "\n"
-            "Virtual Port ID:  1\n"
-            "VLAN ID:  200\n"
-            "\n\n")
-        self.assertFalse(self.driver.cli._validate_iscsi_port(
-            'A', 5, 0, port_list))
-        self.assertTrue(self.driver.cli._validate_iscsi_port(
-            'A', 6, 0, port_list))
-        self.assertFalse(self.driver.cli._validate_iscsi_port(
-            'A', 6, 2, port_list))
-        self.assertTrue(self.driver.cli._validate_iscsi_port(
-            'B', 7, 1, port_list))
-        self.assertTrue(self.driver.cli._validate_iscsi_port(
-            'B', 7, 0, port_list))
-        self.assertFalse(self.driver.cli._validate_iscsi_port(
-            'B', 7, 2, port_list))
-
-
-class EMCVNXCLIDriverFCTestCase(DriverTestCaseBase):
-    def generate_driver(self, conf):
-        return emc_cli_fc.EMCCLIFCDriver(configuration=conf)
-
-    @mock.patch(
-        "oslo_concurrency.processutils.execute",
-        mock.Mock(
-            return_value=(
-                "fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
-    @mock.patch('random.randint',
-                mock.Mock(return_value=0))
-    def test_initialize_connection_fc_auto_reg(self):
-        # Test for auto registration
-        test_volume = self.testData.test_volume.copy()
-        test_volume['provider_location'] = 'system^fakesn|type^lun|id^1'
-        self.configuration.initiator_auto_registration = True
-        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                    self.testData.GETFCPORT_CMD(),
-                    ('port', '-list', '-gname', 'fakehost')]
-        results = [[("No group", 83),
-                    self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
-                   self.testData.FC_PORTS,
-                   self.testData.FAKEHOST_PORTS]
-
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.initialize_connection(
-            test_volume,
-            self.testData.connector)
-
-        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                              poll=False),
-                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
-                    mock.call('port', '-list', '-sp'),
-                    mock.call(*self.testData.set_path_cmd(
-                        'fakehost', '22:34:56:78:90:12:34:56:12:34:56:78:90'
-                        ':12:34:56', 'A', '0', None, '10.0.0.2')),
-                    mock.call(*self.testData.set_path_cmd(
-                        'fakehost', '22:34:56:78:90:12:34:56:12:34:56:78:90'
-                        ':12:34:56', 'B', '2', None, '10.0.0.2')),
-                    mock.call(*self.testData.set_path_cmd(
-                        'fakehost', '22:34:56:78:90:54:32:16:12:34:56:78:90'
-                        ':54:32:16', 'A', '0', None, '10.0.0.2')),
-                    mock.call(*self.testData.set_path_cmd(
-                        'fakehost', '22:34:56:78:90:54:32:16:12:34:56:78:90'
-                        ':54:32:16', 'B', '2', None, '10.0.0.2')),
-                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                              poll=True),
-                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
-                              '-gname', 'fakehost', '-o',
-                              poll=False),
-                    mock.call('port', '-list', '-gname', 'fakehost')
-                    ]
-        fake_cli.assert_has_calls(expected)
-
-        # Test for manual registration
-        self.configuration.initiator_auto_registration = False
-
-        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                    self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'),
-                    self.testData.GETFCPORT_CMD(),
-                    ('port', '-list', '-gname', 'fakehost')]
-        results = [[("No group", 83),
-                    self.testData.STORAGE_GROUP_NO_MAP('fakehost')],
-                   ('', 0),
-                   self.testData.FC_PORTS,
-                   self.testData.FAKEHOST_PORTS]
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.initialize_connection(
-            test_volume,
-            self.testData.connector)
-
-        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                              poll=False),
-                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
-                    mock.call('storagegroup', '-connecthost',
-                              '-host', 'fakehost', '-gname', 'fakehost', '-o'),
-                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                              poll=True),
-                    mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
-                              '-gname', 'fakehost', '-o', poll=False),
-                    mock.call('port', '-list', '-gname', 'fakehost')
-                    ]
-        fake_cli.assert_has_calls(expected)
-
-    @mock.patch(
-        "cinder.zonemanager.fc_san_lookup_service.FCSanLookupService." +
-        "get_device_mapping_from_network",
-        mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map))
-    @mock.patch('random.randint',
-                mock.Mock(return_value=0))
-    def test_initialize_connection_fc_auto_zoning(self):
-        # Test for auto zoning
-        self.configuration.zoning_mode = 'fabric'
-        self.configuration.initiator_auto_registration = False
-        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                    self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'),
-                    self.testData.GETFCPORT_CMD()]
-        results = [[("No group", 83),
-                    self.testData.STORAGE_GROUP_NO_MAP('fakehost'),
-                    self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
-                   ('', 0),
-                   self.testData.FC_PORTS]
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.cli.zonemanager_lookup_service = (
-            fc_service.FCSanLookupService(configuration=self.configuration))
-
-        conn_info = self.driver.initialize_connection(
-            self.testData.test_volume,
-            self.testData.connector)
-
-        self.assertEqual(EMCVNXCLIDriverTestData.i_t_map,
-                         conn_info['data']['initiator_target_map'])
-        self.assertEqual(['1122334455667777'],
-                         conn_info['data']['target_wwn'])
-        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                              poll=False),
-                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
-                    mock.call('storagegroup', '-connecthost',
-                              '-host', 'fakehost', '-gname', 'fakehost', '-o'),
-                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                              poll=True),
-                    mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
-                              '-gname', 'fakehost', '-o',
-                              poll=False),
-                    mock.call('storagegroup', '-list', '-gname', 'fakehost',
-                              poll=True),
-                    mock.call('port', '-list', '-sp')]
-        fake_cli.assert_has_calls(expected)
-
-    @mock.patch('random.randint',
-                mock.Mock(return_value=0))
-    def test_initialize_connection_fc_white_list(self):
-        self.configuration.io_port_list = 'a-0,B-2'
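-        # Only the white-listed ports (A-0 and B-2) should be used when
-        # auto-registering the initiators.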
-        test_volume = self.testData.test_volume.copy()
-        test_volume['provider_location'] = 'system^fakesn|type^lun|id^1'
-        self.configuration.initiator_auto_registration = True
-        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                    self.testData.GETFCPORT_CMD(),
-                    ('port', '-list', '-gname', 'fakehost')]
-        results = [[("No group", 83),
-                    self.testData.STORAGE_GROUP_HAS_MAP_ISCSI('fakehost')],
-                   self.testData.FC_PORTS,
-                   self.testData.FAKEHOST_PORTS]
-
-        fake_cli = self.driverSetup(commands, results)
-        data = self.driver.initialize_connection(
-            test_volume,
-            self.testData.connector)
-
-        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                              poll=False),
-                    mock.call('storagegroup', '-create', '-gname', 'fakehost'),
-                    mock.call(*self.testData.set_path_cmd(
-                        'fakehost', '22:34:56:78:90:12:34:56:12:34:56:78:'
-                        '90:12:34:56', 'A', 0, None, '10.0.0.2')),
-                    mock.call(*self.testData.set_path_cmd(
-                        'fakehost', '22:34:56:78:90:12:34:56:12:34:56:78:'
-                        '90:12:34:56', 'B', 2, None, '10.0.0.2')),
-                    mock.call(*self.testData.set_path_cmd(
-                        'fakehost', '22:34:56:78:90:54:32:16:12:34:56:78'
-                        ':90:54:32:16', 'A', 0, None, '10.0.0.2')),
-                    mock.call(*self.testData.set_path_cmd(
-                        'fakehost', '22:34:56:78:90:54:32:16:12:34:56:78'
-                        ':90:54:32:16', 'B', 2, None, '10.0.0.2')),
-                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                              poll=True),
-                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
-                              '-gname', 'fakehost', '-o',
-                              poll=False),
-                    mock.call('port', '-list', '-gname', 'fakehost')]
-        fake_cli.assert_has_calls(expected)
-        self.assertEqual(set(['5006016A0860080F', '5006016008600195']),
-                         set(data['data']['target_wwn']))
-
-    @mock.patch('random.randint',
-                mock.Mock(return_value=0))
-    def test_initialize_connection_fc_port_registered_wl(self):
-        self.configuration.io_port_list = 'a-0,B-2'
-        test_volume = self.testData.test_volume.copy()
-        test_volume['provider_location'] = 'system^fakesn|type^lun|id^1'
-        self.configuration.initiator_auto_registration = True
-        commands = [self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                    self.testData.GETFCPORT_CMD(),
-                    ('port', '-list', '-gname', 'fakehost')]
-        results = [self.testData.STORAGE_GROUP_ISCSI_FC_HBA('fakehost'),
-                   self.testData.FC_PORTS,
-                   self.testData.FAKEHOST_PORTS]
-
-        fake_cli = self.driverSetup(commands, results)
-        data = self.driver.initialize_connection(
-            test_volume,
-            self.testData.connector)
-
-        expected = [mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                              poll=False),
-                    mock.call(*self.testData.set_path_cmd(
-                        'fakehost', '22:34:56:78:90:12:34:56:12:34:56:78:90'
-                        ':12:34:56', 'A', 0, None, '10.0.0.2')),
-                    mock.call(*self.testData.set_path_cmd(
-                        'fakehost', '22:34:56:78:90:54:32:16:12:34:56:78:'
-                        '90:54:32:16', 'A', 0, None, '10.0.0.2')),
-                    mock.call(*self.testData.STORAGEGROUP_LIST_CMD('fakehost'),
-                              poll=True),
-                    mock.call('storagegroup', '-addhlu', '-hlu', 2, '-alu', 1,
-                              '-gname', 'fakehost', '-o',
-                              poll=False),
-                    mock.call('port', '-list', '-gname', 'fakehost')]
-        fake_cli.assert_has_calls(expected)
-        self.assertEqual(set(['5006016A0860080F', '5006016008600195']),
-                         set(data['data']['target_wwn']))
-
-    @mock.patch(
-        "cinder.zonemanager.fc_san_lookup_service.FCSanLookupService." +
-        "get_device_mapping_from_network",
-        mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map))
-    def test_terminate_connection_remove_zone_false(self):
-        self.driver = emc_cli_fc.EMCCLIFCDriver(
-            configuration=self.configuration)
-        cli_helper = self.driver.cli._client
-        data = {'storage_group_name': "fakehost",
-                'storage_group_uid': "2F:D4:00:00:00:00:00:"
-                "00:00:00:FF:E5:3A:03:FD:6D",
-                'lunmap': {1: 16, 2: 88, 3: 47}}
-        cli_helper.get_storage_group = mock.Mock(
-            return_value=data)
-        cli_helper.remove_hlu_from_storagegroup = mock.Mock()
-        self.driver.cli.zonemanager_lookup_service = (
-            fc_service.FCSanLookupService(configuration=self.configuration))
-        connection_info = self.driver.terminate_connection(
-            self.testData.test_volume,
-            self.testData.connector)
-        self.assertFalse(connection_info['data'],
-                         'connection_info data should be empty.')
-
-        cli_helper.remove_hlu_from_storagegroup.assert_called_once_with(
-            16, self.testData.connector["host"])
-
-    @mock.patch(
-        "cinder.zonemanager.fc_san_lookup_service.FCSanLookupService." +
-        "get_device_mapping_from_network",
-        mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map))
-    def test_terminate_connection_remove_zone_true(self):
-        self.driver = emc_cli_fc.EMCCLIFCDriver(
-            configuration=self.configuration)
-        cli_helper = self.driver.cli._client
-        data = {'storage_group_name': "fakehost",
-                'storage_group_uid': "2F:D4:00:00:00:00:00:"
-                "00:00:00:FF:E5:3A:03:FD:6D",
-                'lunmap': {}}
-        cli_helper.get_storage_group = mock.Mock(
-            return_value=data)
-        cli_helper.remove_hlu_from_storagegroup = mock.Mock()
-        self.driver.cli.zonemanager_lookup_service = (
-            fc_service.FCSanLookupService(configuration=self.configuration))
-        connection_info = self.driver.terminate_connection(
-            self.testData.test_volume,
-            self.testData.connector)
-        self.assertIn('initiator_target_map', connection_info['data'],
-                      'initiator_target_map should be populated.')
-        self.assertEqual(EMCVNXCLIDriverTestData.i_t_map,
-                         connection_info['data']['initiator_target_map'])
-
-    def test_get_volume_stats(self):
-        commands = [self.testData.NDU_LIST_CMD,
-                    self.testData.POOL_GET_ALL_CMD(True)]
-        results = [self.testData.NDU_LIST_RESULT,
-                   self.testData.POOL_GET_ALL_RESULT(True)]
-        self.driverSetup(commands, results)
-        stats = self.driver.get_volume_stats(True)
-
-        self.assertEqual(VERSION, stats['driver_version'],
-                         "driver_version is incorrect")
-        self.assertEqual('FC', stats['storage_protocol'],
-                         "storage_protocol is incorrect")
-        self.assertEqual("EMC", stats['vendor_name'],
-                         "vendor name is incorrect")
-        self.assertEqual("namedbackend", stats['volume_backend_name'],
-                         "volume backend name is incorrect")
-
-        pool_stats = stats['pools'][0]
-
-        expected_pool_stats = {
-            'free_capacity_gb': 3105.303,
-            'reserved_percentage': 32,
-            'location_info': 'unit_test_pool|fake_serial',
-            'total_capacity_gb': 3281.146,
-            'provisioned_capacity_gb': 536.14,
-            'compression_support': 'True',
-            'deduplication_support': 'True',
-            'thin_provisioning_support': True,
-            'thick_provisioning_support': True,
-            'max_over_subscription_ratio': 20.0,
-            'consistencygroup_support': 'True',
-            'replication_enabled': False,
-            'replication_targets': [],
-            'pool_name': 'unit_test_pool',
-            'fast_cache_enabled': True,
-            'fast_support': 'True'}
-
-        self.assertEqual(expected_pool_stats, pool_stats)
-
-    def test_get_volume_stats_too_many_luns(self):
-        commands = (self.testData.NDU_LIST_CMD,
-                    self.testData.POOL_GET_ALL_CMD(True),
-                    self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD())
-        results = (self.testData.NDU_LIST_RESULT,
-                   self.testData.POOL_GET_ALL_RESULT(True),
-                   self.testData.POOL_FEATURE_INFO_POOL_LUNS(1000, 1000))
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.cli.check_max_pool_luns_threshold = True
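-        # The pool is already at its LUN limit (1000 of 1000), so with the
-        # threshold check on, free capacity should be reported as zero.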
-        stats = self.driver.get_volume_stats(True)
-        pool_stats = stats['pools'][0]
-        self.assertEqual(0, pool_stats['free_capacity_gb'],
-                         "free_capacity_gb is incorrect")
-        expect_cmd = [
-            mock.call(*self.testData.POOL_FEATURE_INFO_POOL_LUNS_CMD(),
-                      poll=False)]
-        fake_cli.assert_has_calls(expect_cmd)
-
-        self.driver.cli.check_max_pool_luns_threshold = False
-        stats = self.driver.get_volume_stats(True)
-        pool_stats = stats['pools'][0]
-        self.assertIsNotNone(stats['driver_version'],
-                             "driver_version is incorrect")
-        self.assertEqual(3105.303, pool_stats['free_capacity_gb'],
-                         "free_capacity_gb is incorrect")
-
-    def test_deregister_initiator(self):
-        fake_cli = self.driverSetup()
-        self.driver.cli.destroy_empty_sg = True
-        self.driver.cli.itor_auto_dereg = True
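-        # Same as the iSCSI case: an empty storage group should be
-        # destroyed and both FC initiators deregistered.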
-        cli_helper = self.driver.cli._client
-        data = {'storage_group_name': "fakehost",
-                'storage_group_uid': "2F:D4:00:00:00:00:00:"
-                "00:00:00:FF:E5:3A:03:FD:6D",
-                'lunmap': {1: 16}}
-        cli_helper.get_storage_group = mock.Mock(
-            return_value=data)
-        lun_info = {'lun_name': "unit_test_lun",
-                    'lun_id': 1,
-                    'pool': "unit_test_pool",
-                    'attached_snapshot': "N/A",
-                    'owner': "A",
-                    'total_capacity_gb': 1.0,
-                    'state': "Ready"}
-        cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
-        cli_helper.remove_hlu_from_storagegroup = mock.Mock()
-        cli_helper.disconnect_host_from_storage_group = mock.Mock()
-        cli_helper.delete_storage_group = mock.Mock()
-        self.driver.terminate_connection(self.testData.test_volume,
-                                         self.testData.connector)
-        fc_itor_1 = '22:34:56:78:90:12:34:56:12:34:56:78:90:12:34:56'
-        fc_itor_2 = '22:34:56:78:90:54:32:16:12:34:56:78:90:54:32:16'
-        expect_cmd = [
-            mock.call('port', '-removeHBA', '-hbauid', fc_itor_1, '-o'),
-            mock.call('port', '-removeHBA', '-hbauid', fc_itor_2, '-o')]
-        fake_cli.assert_has_calls(expect_cmd)
-
-
-class EMCVNXCLIToggleSPTestData(object):
-    def FAKE_COMMAND_PREFIX(self, sp_address):
-        return ('/opt/Navisphere/bin/naviseccli', '-address', sp_address,
-                '-user', 'sysadmin', '-password', 'sysadmin',
-                '-scope', 'global')
-
-
-@mock.patch('time.sleep')
-class EMCVNXCLIToggleSPTestCase(test.TestCase):
-    def setUp(self):
-        super(EMCVNXCLIToggleSPTestCase, self).setUp()
-        self.stubs.Set(os.path, 'exists', mock.Mock(return_value=1))
-        self.configuration = mock.Mock(conf.Configuration)
-        self.configuration.naviseccli_path = '/opt/Navisphere/bin/naviseccli'
-        self.configuration.san_ip = '10.10.10.10'
-        self.configuration.san_secondary_ip = "10.10.10.11"
-        self.configuration.storage_vnx_pool_name = 'unit_test_pool'
-        self.configuration.san_login = 'sysadmin'
-        self.configuration.san_password = 'sysadmin'
-        self.configuration.default_timeout = 1
-        self.configuration.max_luns_per_storage_group = 10
-        self.configuration.destroy_empty_storage_group = 10
-        self.configuration.storage_vnx_authentication_type = "global"
-        self.configuration.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}'
-        self.configuration.zoning_mode = None
-        self.configuration.storage_vnx_security_file_dir = ""
-        self.configuration.config_group = 'toggle-backend'
-        self.cli_client = emc_vnx_cli.CommandLineHelper(
-            configuration=self.configuration)
-        self.test_data = EMCVNXCLIToggleSPTestData()
-
-    def test_no_sp_toggle(self, time_mock):
-        self.cli_client.active_storage_ip = '10.10.10.10'
-        FAKE_SUCCESS_RETURN = ('success', 0)
-        FAKE_COMMAND = ('list', 'pool')
-        SIDE_EFFECTS = [FAKE_SUCCESS_RETURN]
-
-        with mock.patch('cinder.utils.execute') as mock_utils:
-            mock_utils.side_effect = SIDE_EFFECTS
-            self.cli_client.command_execute(*FAKE_COMMAND)
-            self.assertEqual("10.10.10.10", self.cli_client.active_storage_ip)
-            expected = [
-                mock.call(*(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
-                          + FAKE_COMMAND), check_exit_code=True)]
-            mock_utils.assert_has_calls(expected)
-        time_mock.assert_not_called()
-
-    def test_toggle_sp_with_server_unavailable(self, time_mock):
-        self.cli_client.active_storage_ip = '10.10.10.10'
-        FAKE_ERROR_MSG = """\
-Error occurred during HTTP request/response from the target: '10.244.213.142'.
-Message : HTTP/1.1 503 Service Unavailable"""
-        FAKE_SUCCESS_RETURN = ('success', 0)
-        FAKE_COMMAND = ('list', 'pool')
-        SIDE_EFFECTS = [processutils.ProcessExecutionError(
-            exit_code=255, stdout=FAKE_ERROR_MSG),
-            FAKE_SUCCESS_RETURN]
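-        # The first call fails with a 503 from the primary SP; the retry
-        # against the secondary SP succeeds.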
-
-        with mock.patch('cinder.utils.execute') as mock_utils:
-            mock_utils.side_effect = SIDE_EFFECTS
-            self.cli_client.command_execute(*FAKE_COMMAND)
-            self.assertEqual("10.10.10.11", self.cli_client.active_storage_ip)
-            expected = [
-                mock.call(
-                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
-                        + FAKE_COMMAND),
-                    check_exit_code=True),
-                mock.call(
-                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.11')
-                        + FAKE_COMMAND),
-                    check_exit_code=True)]
-            mock_utils.assert_has_calls(expected)
-        time_mock.assert_has_calls([mock.call(30)])
-
-    def test_toggle_sp_with_server_unavailable_max_retry(self, time_mock):
-        self.cli_client.active_storage_ip = '10.10.10.10'
-        FAKE_ERROR_MSG = ("Error occurred during HTTP request/response "
-                          "from the target: '10.244.213.142'.\n"
-                          "Message : HTTP/1.1 503 Service Unavailable")
-        FAKE_COMMAND = ('list', 'pool')
-        SIDE_EFFECTS = [processutils.ProcessExecutionError(
-            exit_code=255, stdout=FAKE_ERROR_MSG)] * 5
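-        # Five straight failures exhaust the retries while the client
-        # keeps toggling between SPs, sleeping 30 seconds between
-        # attempts.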
-
-        with mock.patch('cinder.utils.execute') as mock_utils:
-            mock_utils.side_effect = SIDE_EFFECTS
-            self.assertRaisesRegex(exception.EMCSPUnavailableException,
-                                   '.*Error occurred during HTTP request',
-                                   self.cli_client.command_execute,
-                                   *FAKE_COMMAND)
-            self.assertEqual("10.10.10.11", self.cli_client.active_storage_ip)
-            expected = [
-                mock.call(
-                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
-                        + FAKE_COMMAND),
-                    check_exit_code=True),
-                mock.call(
-                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.11')
-                        + FAKE_COMMAND),
-                    check_exit_code=True)]
-            mock_utils.assert_has_calls(expected)
-        time_mock.assert_has_calls([mock.call(30)] * 4)
-
-    def test_toggle_sp_with_end_of_data(self, time_mock):
-        self.cli_client.active_storage_ip = '10.10.10.10'
-        FAKE_ERROR_MSG = ("Error occurred during HTTP request/response "
-                          "from the target: '10.244.213.142'.\n"
-                          "Message : HTTP/1.1 503 Service Unavailable")
-        FAKE_SUCCESS_RETURN = ('success', 0)
-        FAKE_COMMAND = ('list', 'pool')
-        SIDE_EFFECTS = [processutils.ProcessExecutionError(
-            exit_code=255, stdout=FAKE_ERROR_MSG),
-            FAKE_SUCCESS_RETURN]
-
-        with mock.patch('cinder.utils.execute') as mock_utils:
-            mock_utils.side_effect = SIDE_EFFECTS
-            self.cli_client.command_execute(*FAKE_COMMAND)
-            self.assertEqual("10.10.10.11", self.cli_client.active_storage_ip)
-            expected = [
-                mock.call(
-                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
-                        + FAKE_COMMAND),
-                    check_exit_code=True),
-                mock.call(
-                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.11')
-                        + FAKE_COMMAND),
-                    check_exit_code=True)]
-            mock_utils.assert_has_calls(expected)
-        time_mock.assert_has_calls([mock.call(30)])
-
-    def test_toggle_sp_with_connection_refused(self, time_mock):
-        self.cli_client.active_storage_ip = '10.10.10.10'
-        FAKE_ERROR_MSG = """\
-A network error occurred while trying to connect: '10.244.213.142'.
-Message : Error occurred because connection refused. \
-Unable to establish a secure connection to the Management Server.
-"""
-        FAKE_SUCCESS_RETURN = ('success', 0)
-        FAKE_COMMAND = ('list', 'pool')
-        SIDE_EFFECTS = [processutils.ProcessExecutionError(
-            exit_code=255, stdout=FAKE_ERROR_MSG),
-            FAKE_SUCCESS_RETURN]
-
-        with mock.patch('cinder.utils.execute') as mock_utils:
-            mock_utils.side_effect = SIDE_EFFECTS
-            self.cli_client.command_execute(*FAKE_COMMAND)
-            self.assertEqual("10.10.10.11", self.cli_client.active_storage_ip)
-            expected = [
-                mock.call(
-                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
-                        + FAKE_COMMAND),
-                    check_exit_code=True),
-                mock.call(
-                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.11')
-                        + FAKE_COMMAND),
-                    check_exit_code=True)]
-            mock_utils.assert_has_calls(expected)
-        time_mock.assert_has_calls([mock.call(30)])
-
-    def test_toggle_sp_with_connection_error(self, time_mock):
-        self.cli_client.active_storage_ip = '10.10.10.10'
-        FAKE_ERROR_MSG = """\
-A network error occurred while trying to connect: '192.168.1.56'.
-Message : Error occurred because of time out"""
-        FAKE_SUCCESS_RETURN = ('success', 0)
-        FAKE_COMMAND = ('list', 'pool')
-        SIDE_EFFECTS = [processutils.ProcessExecutionError(
-            exit_code=255, stdout=FAKE_ERROR_MSG),
-            FAKE_SUCCESS_RETURN]
-
-        with mock.patch('cinder.utils.execute') as mock_utils:
-            mock_utils.side_effect = SIDE_EFFECTS
-            self.cli_client.command_execute(*FAKE_COMMAND)
-            self.assertEqual("10.10.10.11", self.cli_client.active_storage_ip)
-            expected = [
-                mock.call(
-                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
-                        + FAKE_COMMAND),
-                    check_exit_code=True),
-                mock.call(
-                    *(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.11')
-                        + FAKE_COMMAND),
-                    check_exit_code=True)]
-            mock_utils.assert_has_calls(expected)
-        time_mock.assert_has_calls([mock.call(30)])
-
-
-class EMCVNXCLIBackupTestCase(DriverTestCaseBase):
-    """Provides cli-level and client-level mock test."""
-
-    def driverSetup(self):
-        self.context = context.get_admin_context()
-        self.driver = self.generate_driver(self.configuration)
-        self.driver.cli._client = mock.Mock()
-        self.snapshot = fake_snapshot.fake_snapshot_obj(
-            self.context, **self.testData.test_snapshot)
-        volume = fake_volume.fake_volume_obj(self.context)
-        self.snapshot.volume = volume
-        return self.driver.cli._client
-
-    def generate_driver(self, conf):
-        driver = emc_cli_iscsi.EMCCLIISCSIDriver(configuration=conf)
-        return driver
-
-    @patch.object(emc_vnx_cli.EMCVnxCliBase, 'terminate_connection')
-    def test_terminate_connection_snapshot(self, terminate_connection):
-        fake_client = self.driverSetup()
-        connector = self.testData.connector
-        smp_name = 'tmp-smp-' + self.snapshot['id']
-        volume = {'name': smp_name}
-        self.driver.terminate_connection_snapshot(
-            self.snapshot, connector)
-        terminate_connection.assert_called_once_with(
-            volume, connector)
-        fake_client.detach_mount_point.assert_called_once_with(
-            smp_name)
-
-    @patch.object(emc_vnx_cli.EMCVnxCliBase, 'initialize_connection')
-    def test_initialize_connection_snapshot(self, initialize_connection):
-        fake_client = self.driverSetup()
-        connector = self.testData.connector
-        smp_name = 'tmp-smp-' + self.snapshot['id']
-        self.driver.initialize_connection_snapshot(
-            self.snapshot, connector)
-        fake_client.attach_mount_point.assert_called_once_with(
-            smp_name, self.snapshot['name'])
-        volume = {'name': smp_name, 'id': self.snapshot['id']}
-        initialize_connection.assert_called_once_with(
-            volume, connector)
-
-    def test_create_export_snapshot(self):
-        fake_client = self.driverSetup()
-        connector = self.testData.connector
-        smp_name = 'tmp-smp-' + self.snapshot['id']
-        self.driver.create_export_snapshot(
-            None, self.snapshot, connector)
-        fake_client.create_mount_point.assert_called_once_with(
-            self.snapshot['volume_name'], smp_name)
-
-    @patch.object(emc_vnx_cli.EMCVnxCliBase, 'delete_volume')
-    def test_remove_export_snapshot(self, delete_volume):
-        self.driverSetup()
-        smp_name = 'tmp-smp-' + self.snapshot['id']
-        self.driver.remove_export_snapshot(None, self.snapshot)
-        volume = {'volume_type_id': None, 'name': smp_name,
-                  'provider_location': None}
-        delete_volume.assert_called_once_with(volume, True)
-
-
-class EMCVNXCLIMultiPoolsTestCase(DriverTestCaseBase):
-
-    def generate_driver(self, conf):
-        driver = emc_cli_iscsi.EMCCLIISCSIDriver(configuration=conf)
-        return driver
-
-    def fake_command_execute_for_driver_setup(self, *command, **kwargv):
-        if command == ('connection', '-getport', '-address', '-vlanid'):
-            return self.testData.ALL_PORTS
-        elif command == ('storagepool', '-list', '-state'):
-            return self.testData.POOL_GET_STATE_RESULT([
-                {'pool_name': self.testData.test_pool_name, 'state': "Ready"},
-                {'pool_name': "unit_test_pool2", 'state': "Ready"},
-                {'pool_name': "unit_test_pool3", 'state': "Ready"},
-                {'pool_name': "unit_text_pool4", 'state': "Ready"}])
-        else:
-            return SUCCEED
-
-    def test_storage_pool_names_option(self):
-        self.configuration.safe_get = self.fake_safe_get(
-            {'storage_vnx_pool_names': "unit_test_pool, unit_test_pool3",
-             'volume_backend_name': 'namedbackend'})
-
-        driver = self.generate_driver(self.configuration)
-        self.assertEqual(set(["unit_test_pool", "unit_test_pool3"]),
-                         driver.cli.storage_pools)
-
-        self.configuration.safe_get = self.fake_safe_get(
-            {'storage_vnx_pool_names': "unit_test_pool2,",
-             'volume_backend_name': 'namedbackend'})
-        driver = self.generate_driver(self.configuration)
-        self.assertEqual(set(["unit_test_pool2"]),
-                         driver.cli.storage_pools)
-
-        self.configuration.safe_get = self.fake_safe_get(
-            {'storage_vnx_pool_names': "unit_test_pool3",
-             'volume_backend_name': 'namedbackend'})
-        driver = self.generate_driver(self.configuration)
-        self.assertEqual(set(["unit_test_pool3"]),
-                         driver.cli.storage_pools)
-
-    def test_configured_pool_does_not_exist(self):
-        self.configuration.safe_get = self.fake_safe_get(
-            {'storage_vnx_pool_names': "unit_test_pool2, unit_test_pool_none2",
-             'volume_backend_name': 'namedbackend'})
-        driver = self.generate_driver(self.configuration)
-        self.assertEqual(set(["unit_test_pool2"]),
-                         driver.cli.storage_pools)
-
-        self.configuration.safe_get = self.fake_safe_get(
-            {'storage_vnx_pool_names': "unit_test_pool_none1",
-             "unit_test_pool_none2"
-             'volume_backend_name': 'namedbackend'})
-        self.assertRaises(exception.VolumeBackendAPIException,
-                          self.generate_driver,
-                          self.configuration)
-
-    def test_no_storage_pool_is_configured(self):
-        self.configuration.safe_get = self.fake_safe_get(
-            {'storage_vnx_pool_names': None,
-             'volume_backend_name': 'namedbackend'})
-        driver = self.generate_driver(self.configuration)
-        self.assertEqual(set(),
-                         driver.cli.storage_pools)
-
-
-@patch.object(emc_vnx_cli.EMCVnxCliBase,
-              'enablers',
-              mock.PropertyMock(return_value=['-MirrorView/S']))
-class EMCVNXCLIDriverReplicationV2TestCase(DriverTestCaseBase):
-    def setUp(self):
-        super(EMCVNXCLIDriverReplicationV2TestCase, self).setUp()
-        self.backend_id = 'fake_serial'
-        self.configuration.replication_device = [{
-            'backend_id': self.backend_id,
-            'san_ip': '192.168.1.2', 'san_login': 'admin',
-            'san_password': 'admin', 'san_secondary_ip': '192.168.2.2',
-            'storage_vnx_authentication_type': 'global',
-            'storage_vnx_security_file_dir': None}]
-
-    def generate_driver(self, conf, active_backend_id=None):
-        return emc_cli_iscsi.EMCCLIISCSIDriver(
-            configuration=conf,
-            active_backend_id=active_backend_id)
-
-    def _build_mirror_name(self, volume_id):
-        return 'mirror_' + volume_id
-
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'replication_enabled': '<is> True'}))
-    def test_create_volume_with_replication(self):
-        rep_volume = EMCVNXCLIDriverTestData.convert_volume(
-            self.testData.test_volume_replication)
-        mirror_name = self._build_mirror_name(rep_volume.id)
-        commands = [self.testData.MIRROR_CREATE_CMD(mirror_name, 5),
-                    self.testData.MIRROR_ADD_IMAGE_CMD(
-                        mirror_name, '192.168.1.2', 5)]
-        results = [SUCCEED, SUCCEED]
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.cli.enablers.append('-MirrorView/S')
-        with mock.patch.object(
-                emc_vnx_cli.CommandLineHelper,
-                'create_lun_with_advance_feature',
-                mock.Mock(return_value={'lun_id': 5})):
-            model_update = self.driver.create_volume(rep_volume)
-            self.assertEqual('enabled', model_update['replication_status'])
-            self.assertEqual(build_replication_data(self.configuration),
-                             model_update['replication_driver_data'])
-            self.assertDictMatch({'system': self.backend_id,
-                                  'snapcopy': 'False'},
-                                 model_update['metadata'])
-        fake_cli.assert_has_calls(
-            [mock.call(*self.testData.MIRROR_CREATE_CMD(mirror_name, 5),
-                       poll=True),
-             mock.call(*self.testData.MIRROR_ADD_IMAGE_CMD(
-                 mirror_name, '192.168.1.2', 5), poll=True)])
-
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'replication_enabled': '<is> True'}))
-    def test_create_replication_mirror_exists(self):
-        rep_volume = EMCVNXCLIDriverTestData.convert_volume(
-            self.testData.test_volume_replication)
-        mirror_name = self._build_mirror_name(rep_volume.id)
-        commands = [self.testData.MIRROR_CREATE_CMD(mirror_name, 5),
-                    self.testData.MIRROR_ADD_IMAGE_CMD(
-                        mirror_name, '192.168.1.2', 5)]
-        results = [self.testData.MIRROR_CREATE_ERROR_RESULT(mirror_name),
-                   SUCCEED]
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.cli.enablers.append('-MirrorView/S')
-        with mock.patch.object(
-                emc_vnx_cli.CommandLineHelper,
-                'create_lun_with_advance_feature',
-                mock.Mock(return_value={'lun_id': 5})):
-            model_update = self.driver.create_volume(rep_volume)
-            self.assertEqual('enabled', model_update['replication_status'])
-            self.assertEqual(build_replication_data(self.configuration),
-                             model_update['replication_driver_data'])
-        fake_cli.assert_has_calls(
-            [mock.call(*self.testData.MIRROR_CREATE_CMD(mirror_name, 5),
-                       poll=True),
-             mock.call(*self.testData.MIRROR_ADD_IMAGE_CMD(
-                 mirror_name, '192.168.1.2', 5), poll=True)])
-
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'replication_enabled': '<is> True'}))
-    def test_create_replication_add_image_error(self):
-        rep_volume = EMCVNXCLIDriverTestData.convert_volume(
-            self.testData.test_volume_replication)
-        mirror_name = self._build_mirror_name(rep_volume.id)
-        commands = [self.testData.MIRROR_CREATE_CMD(mirror_name, 5),
-                    self.testData.MIRROR_ADD_IMAGE_CMD(
-                        mirror_name, '192.168.1.2', 5),
-                    self.testData.LUN_DELETE_CMD(rep_volume.name),
-                    self.testData.MIRROR_DESTROY_CMD(mirror_name)]
-        results = [SUCCEED,
-                   ("Add Image Error", 25),
-                   SUCCEED, SUCCEED]
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.cli._mirror._secondary_client.command_execute = fake_cli
-        with mock.patch.object(
-                emc_vnx_cli.CommandLineHelper,
-                'create_lun_with_advance_feature',
-                mock.Mock(return_value={'lun_id': 5})):
-            self.assertRaisesRegex(exception.EMCVnxCLICmdError,
-                                   'Add Image Error',
-                                   self.driver.create_volume,
-                                   rep_volume)
-
-        fake_cli.assert_has_calls(
-            [mock.call(*self.testData.MIRROR_CREATE_CMD(mirror_name, 5),
-                       poll=True),
-             mock.call(*self.testData.MIRROR_ADD_IMAGE_CMD(
-                 mirror_name, '192.168.1.2', 5), poll=True),
-             mock.call(*self.testData.LUN_DELETE_CMD(rep_volume.name)),
-             mock.call(*self.testData.MIRROR_DESTROY_CMD(mirror_name),
-                       poll=True)])
-
-    @mock.patch(
-        "cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
-        "get_lun_by_name",
-        mock.Mock(return_value={'lun_id': 1}))
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'replication_enabled': '<is> True'}))
-    def test_failover_replication_from_primary(self):
-        rep_volume = EMCVNXCLIDriverTestData.convert_volume(
-            self.testData.test_volume_replication)
-        mirror_name = self._build_mirror_name(rep_volume.id)
-        image_uid = '50:06:01:60:88:60:05:FE'
-        commands = [self.testData.MIRROR_LIST_CMD(mirror_name),
-                    self.testData.MIRROR_PROMOTE_IMAGE_CMD(
-                        mirror_name, image_uid)]
-        results = [self.testData.MIRROR_LIST_RESULT(mirror_name),
-                   SUCCEED]
-        fake_cli = self.driverSetup(commands, results)
-        rep_volume.replication_driver_data = build_replication_data(
-            self.configuration)
-        rep_volume.metadata = self.testData.replication_metadata
-        self.driver.cli._mirror._secondary_client.command_execute = fake_cli
-        back_id, model_update = self.driver.failover_host(
-            None, [rep_volume],
-            self.backend_id)
-        fake_cli.assert_has_calls([
-            mock.call(*self.testData.MIRROR_LIST_CMD(mirror_name),
-                      poll=True),
-            mock.call(*self.testData.MIRROR_PROMOTE_IMAGE_CMD(mirror_name,
-                      image_uid), poll=False)])
-        self.assertEqual(
-            build_provider_location(
-                '1', 'lun', rep_volume.name,
-                self.backend_id),
-            model_update[0]['updates']['provider_location'])
-
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'replication_enabled': '<is> True'}))
-    def test_failover_replication_from_secondary(self):
-        rep_volume = EMCVNXCLIDriverTestData.convert_volume(
-            self.testData.test_volume_replication)
-        mirror_name = self._build_mirror_name(rep_volume.id)
-        image_uid = '50:06:01:60:88:60:05:FE'
-        commands = [self.testData.MIRROR_LIST_CMD(mirror_name),
-                    self.testData.MIRROR_PROMOTE_IMAGE_CMD(
-                        mirror_name, image_uid)]
-        results = [self.testData.MIRROR_LIST_RESULT(mirror_name),
-                   SUCCEED]
-        fake_cli = self.driverSetup(commands, results)
-        rep_volume.replication_driver_data = build_replication_data(
-            self.configuration)
-        rep_volume.metadata = self.testData.replication_metadata
-        driver_data = json.loads(rep_volume.replication_driver_data)
-        driver_data['is_primary'] = False
-        rep_volume.replication_driver_data = json.dumps(driver_data)
-        self.driver.cli._mirror._secondary_client.command_execute = fake_cli
-        with mock.patch(
-                'cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper') \
-                as fake_remote:
-            fake_remote.return_value = self.driver.cli._client
-            backend_id, data = self.driver.failover_host(
-                None, [rep_volume], 'default')
-        updates = data[0]['updates']
-        rep_status = updates['replication_status']
-        self.assertEqual('enabled', rep_status)
-        fake_cli.assert_has_calls([
-            mock.call(*self.testData.MIRROR_LIST_CMD(mirror_name),
-                      poll=True),
-            mock.call(*self.testData.MIRROR_PROMOTE_IMAGE_CMD(mirror_name,
-                      image_uid), poll=False)])
-
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'replication_enabled': '<is> True'}))
-    def test_failover_replication_invalid_backend_id(self):
-        rep_volume = EMCVNXCLIDriverTestData.convert_volume(
-            self.testData.test_volume_replication)
-        self._build_mirror_name(rep_volume.id)
-        fake_cli = self.driverSetup([], [])
-        rep_volume.replication_driver_data = build_replication_data(
-            self.configuration)
-        rep_volume.metadata = self.testData.replication_metadata
-        driver_data = json.loads(rep_volume.replication_driver_data)
-        driver_data['is_primary'] = False
-        rep_volume.replication_driver_data = json.dumps(driver_data)
-        self.driver.cli._mirror._secondary_client.command_execute = fake_cli
-        with mock.patch(
-                'cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper') \
-                as fake_remote:
-            fake_remote.return_value = self.driver.cli._client
-            invalid = 'invalid_backend_id'
-            self.assertRaisesRegex(exception.VolumeBackendAPIException,
-                                   "Invalid secondary_id specified",
-                                   self.driver.failover_host,
-                                   None,
-                                   [rep_volume],
-                                   invalid)
-
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'replication_enabled': '<is> True'}))
-    def test_failover_already_promoted(self):
-        rep_volume = EMCVNXCLIDriverTestData.convert_volume(
-            self.testData.test_volume_replication)
-        mirror_name = self._build_mirror_name(rep_volume.id)
-        image_uid = '50:06:01:60:88:60:05:FE'
-        commands = [self.testData.MIRROR_LIST_CMD(mirror_name),
-                    self.testData.MIRROR_PROMOTE_IMAGE_CMD(
-                        mirror_name, image_uid)]
-        results = [self.testData.MIRROR_LIST_RESULT(mirror_name),
-                   self.testData.MIRROR_PROMOTE_IMAGE_ERROR_RESULT()]
-        fake_cli = self.driverSetup(commands, results)
-        rep_volume.replication_driver_data = build_replication_data(
-            self.configuration)
-        rep_volume.metadata = self.testData.replication_metadata
-        self.driver.cli._mirror._secondary_client.command_execute = fake_cli
-        new_backend_id, model_updates = self.driver.failover_host(
-            None, [rep_volume], self.backend_id)
-        self.assertEqual(rep_volume.id, model_updates[0]['volume_id'])
-        self.assertEqual('error',
-                         model_updates[0]['updates']['replication_status'])
-
-        fake_cli.assert_has_calls([
-            mock.call(*self.testData.MIRROR_LIST_CMD(mirror_name),
-                      poll=True),
-            mock.call(*self.testData.MIRROR_PROMOTE_IMAGE_CMD(mirror_name,
-                      image_uid), poll=False)])
-
-    @mock.patch(
-        "cinder.volume.volume_types."
-        "get_volume_type_extra_specs",
-        mock.Mock(return_value={'replication_enabled': '<is> True'}))
-    def test_delete_volume_with_rep(self):
-        rep_volume = EMCVNXCLIDriverTestData.convert_volume(
-            self.testData.test_volume_replication)
-        mirror_name = self._build_mirror_name(rep_volume.id)
-        image_uid = '50:06:01:60:88:60:05:FE'
-        commands = [self.testData.MIRROR_LIST_CMD(mirror_name),
-                    self.testData.MIRROR_FRACTURE_IMAGE_CMD(mirror_name,
-                                                            image_uid),
-                    self.testData.MIRROR_REMOVE_IMAGE_CMD(mirror_name,
-                                                          image_uid),
-                    self.testData.MIRROR_DESTROY_CMD(mirror_name)]
-        results = [self.testData.MIRROR_LIST_RESULT(mirror_name),
-                   SUCCEED, SUCCEED, SUCCEED]
-        fake_cli = self.driverSetup(commands, results)
-        self.driver.cli._mirror._secondary_client.command_execute = fake_cli
-        vol = EMCVNXCLIDriverTestData.convert_volume(
-            self.testData.test_volume_replication)
-        vol.replication_driver_data = build_replication_data(
-            self.configuration)
-        with mock.patch.object(
-                emc_vnx_cli.CommandLineHelper,
-                'delete_lun',
-                mock.Mock(return_value=None)):
-            self.driver.delete_volume(vol)
-        expected = [mock.call(*self.testData.MIRROR_LIST_CMD(mirror_name),
-                              poll=False),
-                    mock.call(*self.testData.MIRROR_FRACTURE_IMAGE_CMD(
-                        mirror_name, image_uid), poll=False),
-                    mock.call(*self.testData.MIRROR_REMOVE_IMAGE_CMD(
-                        mirror_name, image_uid), poll=False),
-                    mock.call(*self.testData.MIRROR_DESTROY_CMD(mirror_name),
-                              poll=False)]
-        fake_cli.assert_has_calls(expected)
-
-    def test_build_client_with_invalid_id(self):
-        self.driverSetup([], [])
-        self.assertRaisesRegex(
-            exception.VolumeBackendAPIException,
-            'replication_device with backend_id .* is missing.',
-            self.driver.cli._build_client,
-            'invalid_backend_id')
-
-    def test_build_client_with_id(self):
-        self.driverSetup([], [])
-        cli_client = self.driver.cli._build_client(
-            active_backend_id='fake_serial')
-        self.assertEqual('192.168.1.2', cli_client.active_storage_ip)
-        self.assertEqual('192.168.1.2', cli_client.primary_storage_ip)
-
-    def test_extract_provider_location_type(self):
-        self.assertEqual(
-            'lun',
-            emc_vnx_cli.EMCVnxCliBase.extract_provider_location(
-                'system^FNM11111|type^lun|id^1|version^05.03.00', 'type'))
-
-    def test_extract_provider_location_type_none(self):
-        self.assertIsNone(
-            emc_vnx_cli.EMCVnxCliBase.extract_provider_location(
-                None, 'type'))
-
-    def test_extract_provider_location_type_empty_str(self):
-        self.assertIsNone(
-            emc_vnx_cli.EMCVnxCliBase.extract_provider_location(
-                '', 'type'))
-
-    def test_extract_provider_location_type_not_available(self):
-        self.assertIsNone(
-            emc_vnx_cli.EMCVnxCliBase.extract_provider_location(
-                'system^FNM11111|id^1', 'type'))
-
-    def test_extract_provider_location_type_error_format(self):
-        self.assertIsNone(
-            emc_vnx_cli.EMCVnxCliBase.extract_provider_location(
-                'abc^|def|^gh|^^|^|', 'type'))
-
-
-VNXError = emc_vnx_cli.VNXError
-
-
-class VNXErrorTest(test.TestCase):
-
-    def test_has_error(self):
-        output = "The specified snapshot name is already in use. (0x716d8005)"
-        self.assertTrue(VNXError.has_error(output))
-
-    def test_has_error_with_specific_error(self):
-        output = "The specified snapshot name is already in use. (0x716d8005)"
-        has_error = VNXError.has_error(output, VNXError.SNAP_NAME_EXISTED)
-        self.assertTrue(has_error)
-
-        has_error = VNXError.has_error(output, VNXError.LUN_ALREADY_EXPANDED)
-        self.assertFalse(has_error)
-
-    def test_has_error_not_found(self):
-        output = "Cannot find the consistency group."
-        has_error = VNXError.has_error(output)
-        self.assertTrue(has_error)
-
-        has_error = VNXError.has_error(output, VNXError.GENERAL_NOT_FOUND)
-        self.assertTrue(has_error)
-
-    def test_has_error_not_exist(self):
-        output = "The specified snapshot does not exist."
-        has_error = VNXError.has_error(output, VNXError.GENERAL_NOT_FOUND)
-        self.assertTrue(has_error)
-
-        output = "The (pool lun) may not exist."
-        has_error = VNXError.has_error(output, VNXError.GENERAL_NOT_FOUND)
-        self.assertTrue(has_error)
-
-    def test_has_error_multi_line(self):
-        output = """Could not retrieve the specified (pool lun).
-                    The (pool lun) may not exist."""
-        has_error = VNXError.has_error(output, VNXError.GENERAL_NOT_FOUND)
-        self.assertTrue(has_error)
-
-    def test_has_error_regular_string_false(self):
-        output = "Cannot unbind LUN because it's contained in a Storage Group."
-        has_error = VNXError.has_error(output, VNXError.GENERAL_NOT_FOUND)
-        self.assertFalse(has_error)
-
-    def test_has_error_multi_errors(self):
-        output = "Cannot unbind LUN because it's contained in a Storage Group."
-        has_error = VNXError.has_error(output,
-                                       VNXError.LUN_IN_SG,
-                                       VNXError.GENERAL_NOT_FOUND)
-        self.assertTrue(has_error)
-
-        output = "Cannot unbind LUN because it's contained in a Storage Group."
-        has_error = VNXError.has_error(output,
-                                       VNXError.LUN_ALREADY_EXPANDED,
-                                       VNXError.LUN_NOT_MIGRATING)
-        self.assertFalse(has_error)
-
-
-VNXProvisionEnum = emc_vnx_cli.VNXProvisionEnum
-
-
-class VNXProvisionEnumTest(test.TestCase):
-    def test_get_opt(self):
-        opt = VNXProvisionEnum.get_opt(VNXProvisionEnum.DEDUPED)
-        self.assertEqual('-type Thin -deduplication on',
-                         ' '.join(opt))
-
-    def test_get_opt_not_available(self):
-        self.assertRaises(ValueError, VNXProvisionEnum.get_opt, 'na')
-
-
-VNXTieringEnum = emc_vnx_cli.VNXTieringEnum
-
-
-class VNXTieringEnumTest(test.TestCase):
-    def test_get_opt(self):
-        opt = VNXTieringEnum.get_opt(VNXTieringEnum.HIGH_AUTO)
-        self.assertEqual(
-            '-initialTier highestAvailable -tieringPolicy autoTier',
-            ' '.join(opt))
-
-    def test_get_opt_not_available(self):
-        self.assertRaises(ValueError, VNXTieringEnum.get_opt, 'na')
-
-
-VNXLun = emc_vnx_cli.VNXLun
-
-
-class VNXLunTest(test.TestCase):
-    def test_lun_id_setter_str_input(self):
-        lun = VNXLun()
-        lun.lun_id = '5'
-        self.assertEqual(5, lun.lun_id)
-
-    def test_lun_id_setter_dict_input(self):
-        lun = VNXLun()
-        lun.lun_id = {'lun_id': 12}
-        self.assertEqual(12, lun.lun_id)
-
-    def test_lun_id_setter_str_error(self):
-        lun = VNXLun()
-        self.assertRaises(ValueError, setattr, lun, 'lun_id', '12a')
-
-    def test_lun_provision_default(self):
-        lun = VNXLun()
-        lun.provision = {}
-        self.assertEqual(VNXProvisionEnum.THICK, lun.provision)
-
-    def test_lun_provision_thin(self):
-        lun = VNXLun()
-        lun.provision = {'is_thin_lun': True,
-                         'is_compressed': False,
-                         'dedup_state': False}
-        self.assertEqual(VNXProvisionEnum.THIN, lun.provision)
-
-    def test_lun_provision_compressed(self):
-        lun = VNXLun()
-        lun.provision = {'is_thin_lun': True,
-                         'is_compressed': True,
-                         'dedup_state': False}
-        self.assertEqual(VNXProvisionEnum.COMPRESSED, lun.provision)
-
-    def test_lun_provision_dedup(self):
-        lun = VNXLun()
-        lun.provision = {'is_thin_lun': True,
-                         'is_compressed': False,
-                         'dedup_state': True}
-        self.assertEqual(VNXProvisionEnum.DEDUPED, lun.provision)
-
-    def test_lun_provision_str_not_valid(self):
-        lun = VNXLun()
-        self.assertRaises(ValueError, setattr, lun, 'provision', 'invalid')
-
-    def test_lun_provision_plain_str(self):
-        lun = VNXLun()
-        lun.provision = VNXProvisionEnum.DEDUPED
-        self.assertEqual(VNXProvisionEnum.DEDUPED, lun.provision)
-
-    def test_lun_tier_default(self):
-        lun = VNXLun()
-        self.assertEqual(VNXTieringEnum.HIGH_AUTO, lun.tier)
-
-    def test_lun_tier_invalid_str(self):
-        lun = VNXLun()
-        self.assertRaises(ValueError, setattr, lun, 'tier', 'invalid')
-
-    def test_lun_tier_plain_str(self):
-        lun = VNXLun()
-        lun.tier = VNXTieringEnum.NO_MOVE
-        self.assertEqual(VNXTieringEnum.NO_MOVE, lun.tier)
-
-    def test_lun_tier_highest_available(self):
-        lun = VNXLun()
-        lun.tier = {'tiering_policy': 'Auto Tier',
-                    'initial_tier': 'Highest Available'}
-        self.assertEqual(VNXTieringEnum.HIGH_AUTO, lun.tier)
-
-    def test_lun_tier_auto(self):
-        lun = VNXLun()
-        lun.tier = {'tiering_policy': 'Auto Tier',
-                    'initial_tier': 'Optimize Pool'}
-        self.assertEqual(VNXTieringEnum.AUTO, lun.tier)
-
-    def test_lun_tier_high(self):
-        lun = VNXLun()
-        lun.tier = {'tiering_policy': 'Highest Available',
-                    'initial_tier': 'Highest Available'}
-        self.assertEqual(VNXTieringEnum.HIGH, lun.tier)
-
-    def test_lun_tier_low(self):
-        lun = VNXLun()
-        lun.tier = {'tiering_policy': 'Lowest Available',
-                    'initial_tier': 'Lowest Available'}
-        self.assertEqual(VNXTieringEnum.LOW, lun.tier)
-
-    def test_lun_tier_no_move_high_tier(self):
-        lun = VNXLun()
-        lun.tier = {'tiering_policy': 'No Movement',
-                    'initial_tier': 'Highest Available'}
-        self.assertEqual(VNXTieringEnum.NO_MOVE, lun.tier)
-
-    def test_lun_tier_no_move_optimize_pool(self):
-        lun = VNXLun()
-        lun.tier = {'tiering_policy': 'No Movement',
-                    'initial_tier': 'Optimize Pool'}
-        self.assertEqual(VNXTieringEnum.NO_MOVE, lun.tier)
-
-    def test_update(self):
-        lun = VNXLun()
-        lun.lun_id = 19
-        lun.update({
-            'lun_name': 'test_lun',
-            'lun_id': 19,
-            'total_capacity_gb': 1.0,
-            'is_thin_lun': True,
-            'is_compressed': False,
-            'dedup_state': True,
-            'tiering_policy': 'No Movement',
-            'initial_tier': 'Optimize Pool'})
-        self.assertEqual(1.0, lun.capacity)
-        self.assertEqual(VNXProvisionEnum.DEDUPED, lun.provision)
-        self.assertEqual(VNXTieringEnum.NO_MOVE, lun.tier)
-
-
-Dict = emc_vnx_cli.Dict
-
-
-class DictTest(test.TestCase):
-    def test_get_attr(self):
-        result = Dict()
-        result['a'] = 'A'
-        self.assertEqual('A', result.a)
-        self.assertEqual('A', result['a'])
-
-    def test_get_attr_not_exists(self):
-        result = Dict()
-        self.assertRaises(AttributeError, getattr, result, 'a')
-
-
-VNXCliParser = emc_vnx_cli.VNXCliParser
-PropertyDescriptor = emc_vnx_cli.PropertyDescriptor
-
-
-class DemoParser(VNXCliParser):
-    A = PropertyDescriptor('-a', 'Prop A (name)', 'prop_a')
-    B = PropertyDescriptor('-b', 'Prop B:')
-    C = PropertyDescriptor('-c', 'Prop C')
-    ID = PropertyDescriptor(None, 'ID:')
-
-
-class VNXCliParserTest(test.TestCase):
-    def test_get_property_options(self):
-        options = DemoParser.get_property_options()
-        self.assertEqual('-a -b -c', ' '.join(options))
-
-    def test_parse(self):
-        output = """
-                ID: test
-                Prop A (Name): ab (c)
-                Prop B: d ef
-                """
-        parsed = DemoParser.parse(
-            output,
-            [DemoParser.A, DemoParser.ID, DemoParser.C])
-
-        self.assertEqual('ab (c)', parsed.prop_a)
-        self.assertIsNone(parsed.prop_c)
-        self.assertEqual('test', parsed.id)
-        self.assertRaises(AttributeError, getattr, parsed, 'prop_b')
-
-
-VNXLunProperties = emc_vnx_cli.VNXLunProperties
-
-
-class VNXLunPropertiesTest(test.TestCase):
-
-    def test_parse(self):
-        output = """
-                LOGICAL UNIT NUMBER 19
-                Name:  test_lun
-                User Capacity (Blocks):  2097152
-                User Capacity (GBs):  1.000
-                Pool Name:  Pool4File
-                Is Thin LUN:  Yes
-                Is Compressed:  No
-                Deduplication State:  Off
-                Deduplication Status:  OK(0x0)
-                Tiering Policy:  No Movement
-                Initial Tier:  Optimize Pool
-                """
-        parser = VNXLunProperties()
-        parsed = parser.parse(output)
-        self.assertEqual('test_lun', parsed.lun_name)
-        self.assertEqual(19, parsed.lun_id)
-        self.assertEqual(1.0, parsed.total_capacity_gb)
-        self.assertTrue(parsed.is_thin_lun)
-        self.assertFalse(parsed.is_compressed)
-        self.assertFalse(parsed.dedup_state)
-        self.assertEqual('No Movement', parsed.tiering_policy)
-        self.assertEqual('Optimize Pool', parsed.initial_tier)
-        self.assertIsNone(parsed['state'])
-
-
-VNXPoolProperties = emc_vnx_cli.VNXPoolProperties
-
-
-class VNXPoolPropertiesTest(test.TestCase):
-    def test_parse(self):
-        output = """
-                Pool Name:  Pool4File
-                Pool ID:  1
-                Raid Type:  Mixed
-                Percent Full Threshold:  70
-                Description:
-                Disk Type:  Mixed
-                State:  Ready
-                Status:  OK(0x0)
-                Current Operation:  None
-                Current Operation State:  N/A
-                Current Operation Status:  N/A
-                Current Operation Percent Completed:  0
-                Raw Capacity (Blocks):  6398264602
-                Raw Capacity (GBs):  3050.930
-                User Capacity (Blocks):  4885926912
-                User Capacity (GBs):  2329.792
-                Consumed Capacity (Blocks):  1795516416
-                Consumed Capacity (GBs):  856.169
-                Available Capacity (Blocks):  3090410496
-                Available Capacity (GBs):  1473.623
-                Percent Full:  36.749
-                Total Subscribed Capacity (Blocks):  5666015232
-                Total Subscribed Capacity (GBs):  2701.767
-                Percent Subscribed:  115.966
-                Oversubscribed by (Blocks):  780088320
-                Oversubscribed by (GBs):  371.975
-                """
-        parser = VNXPoolProperties()
-        pool = parser.parse(output)
-        self.assertEqual('Ready', pool.state)
-        self.assertEqual(1, pool.pool_id)
-        self.assertEqual(2329.792, pool.total_capacity_gb)
-        self.assertEqual(1473.623, pool.free_capacity_gb)
-        self.assertIsNone(pool.fast_cache_enabled)
-        self.assertEqual('Pool4File', pool.pool_name)
-        self.assertEqual(2701.767, pool.provisioned_capacity_gb)
-        self.assertEqual(70, pool.pool_full_threshold)
diff --git a/cinder/tests/unit/volume/drivers/emc/vnx/__init__.py b/cinder/tests/unit/volume/drivers/emc/vnx/__init__.py
new file mode 100644
index 00000000000..1edccdd63ba
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/emc/vnx/__init__.py
@@ -0,0 +1,27 @@
+# Copyright (c) 2016 EMC Corporation, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sys
+
+import mock
+
+from cinder.tests.unit.volume.drivers.emc.vnx import fake_exception
+from cinder.tests.unit.volume.drivers.emc.vnx import fake_storops
+
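+# Stub out the storops dependency before any driver module is imported:
+# registering these fakes in sys.modules makes every subsequent
+# `import storops` (and `import storops.vnx`) resolve to the mocks, so
+# the unit tests can run without the real storops library installed.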
+fake_vnx = mock.Mock()
+fake_storops.exception = fake_exception
+fake_storops.vnx = fake_vnx
+sys.modules['storops'] = fake_storops
+sys.modules['storops.vnx'] = fake_vnx
diff --git a/cinder/tests/unit/volume/drivers/emc/vnx/fake_enum.py b/cinder/tests/unit/volume/drivers/emc/vnx/fake_enum.py
new file mode 100644
index 00000000000..40a755bc467
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/emc/vnx/fake_enum.py
@@ -0,0 +1,119 @@
+# Copyright (c) 2016 EMC Corporation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import enum
+import six
+
+
+class Enum(enum.Enum):
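+    """A minimal stand-in for the storops enum base class.
+
+    Only the behavior the unit tests exercise is reproduced here:
+    parsing values from strings and integers, option-map lookup and
+    instance verification.
+    """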
+    @classmethod
+    def verify(cls, value, allow_none=True):
+        if value is None and not allow_none:
+            raise ValueError(
+                'None is not allowed here for %s.' % cls.__name__)
+        elif value is not None and not isinstance(value, cls):
+            raise ValueError('%(value)s is not an instance of %(name)s.' % {
+                'value': value, 'name': cls.__name__})
+
+    @classmethod
+    def get_all(cls):
+        return list(cls)
+
+    @classmethod
+    def get_opt(cls, value):
+        option_map = cls.get_option_map()
+        if option_map is None:
+            raise NotImplementedError(
+                'Option map is not defined for %s.' % cls.__name__)
+
+        ret = option_map.get(value, None)
+        if ret is None:
+            raise ValueError('%(value)s is not a valid option for %(name)s.'
+                             % {'value': value, 'name': cls.__name__})
+        return ret
+
+    @classmethod
+    def parse(cls, value):
+        if isinstance(value, six.string_types):
+            ret = cls.from_str(value)
+        elif isinstance(value, six.integer_types):
+            ret = cls.from_int(value)
+        elif isinstance(value, cls):
+            ret = value
+        elif value is None:
+            ret = None
+        else:
+            raise ValueError(
+                'Unsupported value type: %s.' % type(value))
+        return ret
+
+    def is_equal(self, value):
+        if isinstance(value, six.string_types):
+            ret = self.value.lower() == value.lower()
+        else:
+            ret = self.value == value
+        return ret
+
+    @classmethod
+    def from_int(cls, value):
+        ret = None
+        int_index = cls.get_int_index()
+        if int_index is not None:
+            try:
+                ret = int_index[value]
+            except IndexError:
+                pass
+        else:
+            try:
+                ret = next(i for i in cls.get_all() if i.is_equal(value))
+            except StopIteration:
+                pass
+        if ret is None:
+            cls._raise_invalid_value(value)
+        return ret
+
+    @classmethod
+    def from_str(cls, value):
+        ret = None
+        if value is not None:
+            for item in cls.get_all():
+                if item.is_equal(value):
+                    ret = item
+                    break
+            else:
+                cls._raise_invalid_value(value)
+        return ret
+
+    @classmethod
+    def _raise_invalid_value(cls, value):
+        msg = ('%(value)s is not a valid value for %(name)s.'
+               ) % {'value': value, 'name': cls.__name__}
+        raise ValueError(msg)
+
+    @classmethod
+    def get_option_map(cls):
+        return None
+
+    @classmethod
+    def get_int_index(cls):
+        return None
+
+    @classmethod
+    def values(cls):
+        return [m.value for m in cls.__members__.values()]
+
+    @classmethod
+    def enum_name(cls):
+        return cls.__name__
diff --git a/cinder/tests/unit/volume/drivers/emc/vnx/fake_exception.py b/cinder/tests/unit/volume/drivers/emc/vnx/fake_exception.py
new file mode 100644
index 00000000000..dbddd8dc7bf
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/emc/vnx/fake_exception.py
@@ -0,0 +1,172 @@
+# Copyright (c) 2016 EMC Corporation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
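+# Fake stand-ins for the storops exception hierarchy. The `message` and
+# `error_code` class attributes mirror those on the real storops
+# exceptions, so the tests can raise and match errors the same way the
+# driver sees them against a live array.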
+class StoropsException(Exception):
+    message = 'Storops Error.'
+
+
+class VNXException(StoropsException):
+    message = "VNX Error."
+
+
+class VNXStorageGroupError(VNXException):
+    pass
+
+
+class VNXAttachAluError(VNXException):
+    pass
+
+
+class VNXAluAlreadyAttachedError(VNXAttachAluError):
+    message = (
+        'LUN already exists in the specified storage group',
+        'Requested LUN has already been added to this Storage Group')
+
+
+class VNXDetachAluError(VNXStorageGroupError):
+    pass
+
+
+class VNXDetachAluNotFoundError(VNXDetachAluError):
+    message = 'No such Host LUN in this Storage Group'
+
+
+class VNXCreateStorageGroupError(VNXStorageGroupError):
+    pass
+
+
+class VNXStorageGroupNameInUseError(VNXCreateStorageGroupError):
+    message = 'Storage Group name already in use'
+
+
+class VNXNoHluAvailableError(VNXStorageGroupError):
+    pass
+
+
+class VNXMigrationError(VNXException):
+    pass
+
+
+class VNXTargetNotReadyError(VNXMigrationError):
+    message = 'The destination LUN is not available for migration'
+
+
+class VNXSnapError(VNXException):
+    pass
+
+
+class VNXDeleteAttachedSnapError(VNXSnapError):
+    error_code = 0x716d8003
+
+
+class VNXCreateSnapError(VNXException):
+    message = 'Cannot create the snapshot.'
+
+
+class VNXAttachSnapError(VNXSnapError):
+    message = 'Cannot attach the snapshot.'
+
+
+class VNXDetachSnapError(VNXSnapError):
+    message = 'Cannot detach the snapshot.'
+
+
+class VNXSnapAlreadyMountedError(VNXSnapError):
+    error_code = 0x716d8055
+
+
+class VNXSnapNameInUseError(VNXSnapError):
+    error_code = 0x716d8005
+
+
+class VNXSnapNotExistsError(VNXSnapError):
+    message = 'The specified snapshot does not exist.'
+
+
+class VNXLunError(VNXException):
+    pass
+
+
+class VNXCreateLunError(VNXLunError):
+    pass
+
+
+class VNXLunNameInUseError(VNXCreateLunError):
+    error_code = 0x712d8d04
+
+
+class VNXLunExtendError(VNXLunError):
+    pass
+
+
+class VNXLunExpandSizeError(VNXLunExtendError):
+    error_code = 0x712d8e04
+
+
+class VNXLunPreparingError(VNXLunError):
+    error_code = 0x712d8e0e
+
+
+class VNXLunNotFoundError(VNXLunError):
+    message = 'Could not retrieve the specified (pool lun).'
+
+
+class VNXDeleteLunError(VNXLunError):
+    pass
+
+
+class VNXCompressionError(VNXLunError):
+    pass
+
+
+class VNXCompressionAlreadyEnabledError(VNXCompressionError):
+    message = 'Compression on the specified LUN is already turned on.'
+
+
+class VNXConsistencyGroupError(VNXException):
+    pass
+
+
+class VNXCreateConsistencyGroupError(VNXConsistencyGroupError):
+    pass
+
+
+class VNXConsistencyGroupNameInUseError(VNXCreateConsistencyGroupError):
+    error_code = 0x716d8021
+
+
+class VNXConsistencyGroupNotFoundError(VNXConsistencyGroupError):
+    message = 'Cannot find the consistency group'
+
+
+class VNXPingNodeError(VNXException):
+    pass
+
+
+class VNXMirrorException(VNXException):
+    pass
+
+
+class VNXMirrorNameInUseError(VNXMirrorException):
+    message = 'Mirror name already in use'
+
+
+class VNXMirrorPromotePrimaryError(VNXMirrorException):
+    message = 'Cannot remove or promote a primary image.'
+
+
+class VNXMirrorNotFoundError(VNXMirrorException):
+    message = 'Mirror not found'
diff --git a/cinder/tests/unit/volume/drivers/emc/vnx/fake_storops.py b/cinder/tests/unit/volume/drivers/emc/vnx/fake_storops.py
new file mode 100644
index 00000000000..6f66972481a
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/emc/vnx/fake_storops.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2016 EMC Corporation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from cinder.tests.unit.volume.drivers.emc.vnx import fake_enum
+
+
+class VNXSystem(object):
+    pass
+
+
+class VNXEnum(fake_enum.Enum):
+    pass
+
+
+class VNXSPEnum(VNXEnum):
+    SP_A = 'SP A'
+    SP_B = 'SP B'
+    CONTROL_STATION = 'Celerra'
+
+
+class VNXProvisionEnum(VNXEnum):
+    # value of spec "provisioning:type"
+    THIN = 'thin'
+    THICK = 'thick'
+    COMPRESSED = 'compressed'
+    DEDUPED = 'deduplicated'
+
+
+class VNXMigrationRate(VNXEnum):
+    LOW = 'low'
+    MEDIUM = 'medium'
+    HIGH = 'high'
+    ASAP = 'asap'
+
+
+class VNXTieringEnum(VNXEnum):
+    NONE = 'none'
+    HIGH_AUTO = 'starthighthenauto'
+    AUTO = 'auto'
+    HIGH = 'highestavailable'
+    LOW = 'lowestavailable'
+    NO_MOVE = 'nomovement'
+
+
+class VNXMirrorViewRecoveryPolicy(VNXEnum):
+    MANUAL = 'manual'
+    AUTO = 'automatic'
+
+
+class VNXMirrorViewSyncRate(VNXEnum):
+    HIGH = 'high'
+    MEDIUM = 'medium'
+    LOW = 'low'
+
+
+class VNXMirrorImageState(VNXEnum):
+    SYNCHRONIZED = 'Synchronized'
+    OUT_OF_SYNC = 'Out-of-Sync'
+    SYNCHRONIZING = 'Synchronizing'
+    CONSISTENT = 'Consistent'
+    SCRAMBLED = 'Scrambled'
+    INCOMPLETE = 'Incomplete'
+    LOCAL_ONLY = 'Local Only'
+    EMPTY = 'Empty'
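+
+
+# A quick sanity check of how these fakes mimic the real storops enums:
+#
+#   >>> VNXProvisionEnum.parse('thin') is VNXProvisionEnum.THIN
+#   True
+#   >>> VNXSPEnum.SP_A.is_equal('sp a')
+#   True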
diff --git a/cinder/tests/unit/volume/drivers/emc/vnx/mocked_cinder.yaml b/cinder/tests/unit/volume/drivers/emc/vnx/mocked_cinder.yaml
new file mode 100644
index 00000000000..149b5ca24ce
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/emc/vnx/mocked_cinder.yaml
@@ -0,0 +1,442 @@
+###########################################################
+# Common
+###########################################################
+
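+# Conventions used throughout this fixture: `_type` names the kind of
+# Cinder object to fake, `_properties` sets its attributes, and `_uuid`
+# is resolved by the fixture loader (res_mock) to a UUID for the given
+# label. YAML anchors and merge keys (`&`, `*`, `<<:`) share the common
+# bases defined in this section across the per-test entries below.
+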
+volume: &volume_base
+  _type: 'volume'
+  _properties: &volume_base_properties
+    status: 'creating'
+    size: 1
+    id:
+      _uuid: volume_id
+    provider_auth: 'None'
+    host: 'host@backendsec#unit_test_pool'
+    project_id:
+      _uuid: project_id
+    provider_location: &provider_location
+      _build_provider_location:  &provider_location_dict
+        id: 1
+        type: 'lun'
+        system: 'fake_serial'
+        base_lun_name: 'test'
+        version: '07.00.00'
+    display_name: 'volume-1'
+    display_description: 'test volume'
+    volume_type_id:
+    consistencygroup_id:
+    volume_attachment:
+      _properties: {}
+    volume_metadata:
+      _properties: {}
+
+host: &host_base
+  _properties:
+     host: 'host@backendsec#unit_test_pool'
+
+consistency_group: &cg_base
+  _type: 'cg'
+  _properties: &cg_base_properties
+    id:
+      _uuid: consistency_group_id
+    status: 'creating'
+    name: 'cg_name'
+    host: 'host@backend#unit_test_pool'
+
+consistency_group_with_type: &cg_base_with_type
+  _type: 'cg'
+  _properties:
+    <<: *cg_base_properties
+    volume_type_id: 'type1'
+
+snapshot: &snapshot_base
+  _type: 'snapshot'
+  _properties: &snapshot_base_properties
+    id:
+      _uuid: snapshot_id
+    status: available
+    name: 'snapshot_name'
+    volume:
+      _type: 'volume'
+      _properties:
+        <<: *volume_base_properties
+        name: 'attached_volume_name'
+    volume_name: 'attached_volume_name'
+
+cg_snapshot: &cg_snapshot_base
+  _type: 'cg_snapshot'
+  _properties: &cg_snapshot_base_properties
+    id:
+      _uuid: cgsnapshot_id
+    status: 'creating'
+
+
+###########################################################
+# TestCommonAdapter, TestISCSIAdapter, TestFCAdapter
+###########################################################
+test_mock_driver_input_inner:
+  volume: *volume_base
+
+test_create_volume: &test_create_volume
+  volume: *volume_base
+
+test_create_volume_error: *test_create_volume
+
+test_create_thick_volume: *test_create_volume
+
+test_migrate_volume:
+  volume: *volume_base
+
+test_migrate_volume_host_assisted:
+  volume: *volume_base
+
+test_delete_volume_not_force: &test_delete_volume_not_force
+  volume: *volume_base
+
+test_delete_volume_force: *test_delete_volume_not_force
+
+
+test_retype_need_migration_when_host_changed:
+  volume: *volume_base
+  host:
+    _properties:
+       host: 'host@backendsec#another_pool'
+
+test_retype_need_migration_for_smp_volume:
+  volume:
+    _type: 'volume'
+    _properties:
+      <<: *volume_base_properties
+      provider_location:
+        _build_provider_location:
+          <<: *provider_location_dict
+          type: 'smp'
+  host: *host_base
+
+test_retype_need_migration_when_provision_changed:
+  volume: *volume_base
+  host: *host_base
+
+test_retype_not_need_migration_when_provision_changed:
+  volume: *volume_base
+  host: *host_base
+
+test_retype_not_need_migration:
+  volume: *volume_base
+  host: *host_base
+
+test_retype_need_migration:
+  volume:
+    _type: 'volume'
+    _properties:
+      <<: *volume_base_properties
+      volume_type_id:
+        _uuid: volume_type_id
+  host: *host_base
+
+test_retype_lun_has_snap:
+  volume: *volume_base
+  host: *host_base
+
+test_retype_turn_on_compression_change_tier:
+  volume: *volume_base
+  host: *host_base
+
+test_retype_change_tier:
+  volume: *volume_base
+  host: *host_base
+
+test_create_consistencygroup:
+  cg: *cg_base
+
+test_delete_consistencygroup:
+  cg: *cg_base
+
+test_delete_consistencygroup_with_volume:
+  cg: *cg_base
+  vol1: *volume_base
+  vol2: *volume_base
+
+test_delete_consistencygroup_error:
+  cg: *cg_base
+  vol1: *volume_base
+  vol2: *volume_base
+
+test_delete_consistencygroup_volume_error:
+  cg: *cg_base
+  vol1: *volume_base
+  vol2: *volume_base
+
+test_extend_volume:
+  volume: *volume_base
+
+test_create_snapshot_adapter:
+  snapshot: *snapshot_base
+
+test_delete_snapshot_adapter:
+  snapshot: *snapshot_base
+
+test_create_cgsnapshot: &cg_snap_and_snaps
+  cg_snap: *cg_snapshot_base
+  snap1: *snapshot_base
+  snap2: *snapshot_base
+
+test_delete_cgsnapshot: *cg_snap_and_snaps
+
+test_manage_existing_lun_no_exist:
+  volume: *volume_base
+
+test_manage_existing_invalid_pool:
+  volume: *volume_base
+
+test_manage_existing_get_size:
+  volume: *volume_base
+
+test_manage_existing_type_mismatch:
+  volume:
+    _type: 'volume'
+    _properties:
+      <<: *volume_base_properties
+      volume_type_id:
+        _uuid: volume_type_id
+
+test_manage_existing:
+  volume:
+    _type: 'volume'
+    _properties:
+      <<: *volume_base_properties
+      volume_type_id:
+        _uuid: volume_type_id
+
+test_manage_existing_smp:
+  volume: *volume_base
+
+test_create_cloned_volume:
+  volume: *volume_base
+  src_vref:
+    _type: volume
+    _properties:
+      <<: *volume_base_properties
+      id:
+        _uuid: volume2_id
+      size: 2
+
+test_create_cloned_volume_snapcopy:
+  volume:
+    _type: volume
+    _properties:
+      <<: *volume_base_properties
+  src_vref:
+    _type: volume
+    _properties:
+      <<: *volume_base_properties
+      id:
+        _uuid: volume2_id
+      size: 2
+
+test_create_volume_from_snapshot:
+  volume: *volume_base
+  snapshot: *snapshot_base
+
+test_create_volume_from_snapshot_snapcopy:
+  volume: *volume_base
+  snapshot: *snapshot_base
+
+test_get_base_lun_name:
+  volume: *volume_base
+
+test_create_cg_from_cgsnapshot:
+  vol1:
+    _type: 'volume'
+    _properties:
+      <<: *volume_base_properties
+      id:
+        _uuid: volume_id
+  vol2:
+    _type: 'volume'
+    _properties:
+      <<: *volume_base_properties
+      id:
+        _uuid: volume2_id
+  cg: *cg_base
+  cg_snap: *cg_snapshot_base
+  snap1:
+    _type: 'snapshot'
+    _properties:
+      <<: *snapshot_base_properties
+      id:
+        _uuid: snapshot_id
+  snap2:
+    _type: 'snapshot'
+    _properties:
+      <<: *snapshot_base_properties
+      id:
+        _uuid: snapshot2_id
+
+test_create_cloned_cg:
+  vol1:
+    _type: 'volume'
+    _properties:
+      <<: *volume_base_properties
+      id:
+        _uuid: consistency_group_id
+  cg: *cg_base
+  src_cg:
+    _type: 'cg'
+    _properties:
+      <<: *cg_base_properties
+      id:
+        _uuid: consistency_group2_id
+      name: 'src_cg_name'
+
+  src_vol1:
+    _type: 'volume'
+    _properties:
+      <<: *volume_base_properties
+      id:
+        _uuid: consistency_group2_id
+
+test_assure_host_access:
+  volume: *volume_base
+
+test_assure_host_access_without_auto_register_new_sg:
+  volume: *volume_base
+
+test_assure_host_access_without_auto_register:
+  volume: *volume_base
+
+test_auto_register_initiator:
+  volume: *volume_base
+
+test_auto_register_initiator_no_white_list:
+  volume: *volume_base
+
+test_remove_host_access:
+  volume: *volume_base
+
+test_remove_host_access_sg_absent:
+  volume: *volume_base
+
+test_remove_host_access_volume_not_in_sg:
+  volume: *volume_base
+
+test_update_consistencygroup:
+  cg: *cg_base
+  volume_add:
+    <<: *volume_base
+    _properties:
+      <<: *volume_base_properties
+      provider_location:
+        _build_provider_location:
+          <<: *provider_location_dict
+          id: 1
+  volume_remove:
+    <<: *volume_base
+    _properties:
+      <<: *volume_base_properties
+      provider_location:
+        _build_provider_location:
+          <<: *provider_location_dict
+          id: 2
+
+test_create_export_snapshot:
+  snapshot: *snapshot_base
+
+test_remove_export_snapshot:
+  snapshot: *snapshot_base
+
+test_initialize_connection_snapshot:
+  snapshot: *snapshot_base
+
+test_terminate_connection_snapshot:
+  snapshot: *snapshot_base
+
+test_setup_lun_replication:
+  vol1:
+    _type: 'volume'
+    _properties:
+      <<: *volume_base_properties
+      id:
+        _uuid: volume_id
+      volume_type_id:
+        _uuid: volume_type_id
+
+test_cleanup_replication:
+  vol1:
+    _type: 'volume'
+    _properties:
+      <<: *volume_base_properties
+      id:
+        _uuid: volume2_id
+      volume_type_id:
+        _uuid: volume_type_id
+
+test_failover_host:
+  vol1:
+    _type: 'volume'
+    _properties:
+      <<: *volume_base_properties
+      id:
+        _uuid: volume3_id
+      volume_type_id:
+        _uuid: volume_type_id
+
+test_failover_host_invalid_backend_id:
+  vol1:
+    _type: 'volume'
+    _properties:
+      <<: *volume_base_properties
+      id:
+        _uuid: volume4_id
+      volume_type_id:
+        _uuid: volume_type_id
+
+test_failover_host_failback:
+  vol1:
+    _type: 'volume'
+    _properties:
+      <<: *volume_base_properties
+      id:
+        _uuid: volume5_id
+      volume_type_id:
+        _uuid: volume_type_id
+
+test_get_pool_name:
+  volume: *volume_base
+
+test_update_migrated_volume:
+  volume: *volume_base
+  new_volume: *volume_base
+
+test_update_migrated_volume_smp:
+  volume: *volume_base
+  new_volume:
+    <<: *volume_base
+    _properties:
+      <<: *volume_base_properties
+      provider_location:
+        _build_provider_location:
+          <<: *provider_location_dict
+          type: smp
+
+
+###########################################################
+# TestUtils
+###########################################################
+
+test_validate_cg_type:
+  cg: *cg_base_with_type
+
+
+###########################################################
+# TestClient
+###########################################################
+
+test_get_lun_id:
+  volume: *volume_base
+
+test_get_lun_id_without_provider_location:
+  volume:
+    <<: *volume_base
+    _properties:
+      <<: *volume_base_properties
+      provider_location:
diff --git a/cinder/tests/unit/volume/drivers/emc/vnx/mocked_vnx.yaml b/cinder/tests/unit/volume/drivers/emc/vnx/mocked_vnx.yaml
new file mode 100644
index 00000000000..64b5fa96c56
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/emc/vnx/mocked_vnx.yaml
@@ -0,0 +1,2017 @@
+###########################################################
+# Example:
+# vnx:
+#   _properties: # properties
+#     serial: serial_1
+#     name: lun_1
+#     state:
+#       _side_effect: [Ready, Offline] # side effect for property
+#
+#   _methods: # methods
+#     get_pool: *pool_1 # return value of method
+#     get_lun:
+#       _raise:
+#         GetLunError: Unknown Error # method raises an exception
+#     get_cg:
+#       _side_effect: [cg_1, cg_2] # side effect for method
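+#     with_no_poll: _context # mocked as a context manager
+#       (assumption: `_context` marks methods used in `with` statements)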
+#
+###########################################################
+
+###########################################################
+# Common
+###########################################################
+lun_base: &lun_base
+  _properties: &lun_base_prop
+    name: lun_name
+    lun_id: lun_id
+    wwn: 'fake_wwn'
+    poll: False
+    operation: None
+    state: Ready
+    existed: true
+
+cg_base:
+  _properties: &cg_base_prop
+    fake_prop: fake_prop_value
+
+cg_snap_base:
+  _properties: &cg_snap_base_prop
+    id: 'cg_snap_id'
+
+pool_base: &pool_base
+  _properties: &pool_base_prop
+    name: pool_name
+    pool_id: 0
+    state: Ready
+    user_capacity_gbs: 1311
+    total_subscribed_capacity_gbs: 131
+    available_capacity_gbs: 132
+    percent_full_threshold: 70
+    fast_cache: True
+
+pool_feature_base:
+  _properties: &pool_feature_base_prop
+    max_pool_luns: 3000
+    total_pool_luns: 151
+
+vnx_base: &vnx_base
+  _properties: &vnx_base_prop
+    serial: fake_serial
+
+snapshot_base: &snapshot_base
+  _properties: &snapshot_base_prop
+    status:
+    existed: true
+    name: snapshot_name
+    state:
+
+sg: &sg_base
+  _properties: &sg_base_prop
+    existed: true
+    name: sg_name
+
+spa: &spa
+  _enum:
+    VNXSPEnum: SP A
+
+spb: &spb
+  _enum:
+    VNXSPEnum: SP B
+
+iscsi_port_base: &iscsi_port_base
+  _type: 'VNXPort'
+  _properties: &iscsi_port_base_prop
+    sp: *spa
+    port_id: 0
+    vport_id: 0
+
+all_iscsi_ports: &all_iscsi_ports
+  - &iscsi_port_a-0-0
+    <<: *iscsi_port_base
+    _properties:
+      <<: *iscsi_port_base_prop
+      port_id: 0
+      vport_id: 0
+  - &iscsi_port_a-0-1
+    <<: *iscsi_port_base
+    _properties:
+      <<: *iscsi_port_base_prop
+      port_id: 0
+      vport_id: 1
+  - &iscsi_port_a-1-0
+    <<: *iscsi_port_base
+    _properties:
+      <<: *iscsi_port_base_prop
+      port_id: 1
+      vport_id: 0
+  - &iscsi_port_b-0-1
+    <<: *iscsi_port_base
+    _properties:
+      <<: *iscsi_port_base_prop
+      sp: *spb
+      port_id: 0
+      vport_id: 1
+
+fc_port_base: &fc_port_base
+  _type: 'VNXPort'
+  _properties: &fc_port_base_prop
+    sp: *spa
+    port_id: 1
+    vport_id: None
+    wwn: 'wwn'
+    link_status: 'Up'
+    port_status: 'Online'
+
+all_fc_ports: &all_fc_ports
+  - &fc_port_a-1
+    <<: *fc_port_base
+    _properties:
+      <<: *fc_port_base_prop
+      port_id: 1
+      wwn: '50:06:01:60:B6:E0:1C:F4:50:06:01:66:36:E0:1C:A1'
+  - &fc_port_a-2
+    <<: *fc_port_base
+    _properties:
+      <<: *fc_port_base_prop
+      port_id: 2
+      wwn: '50:06:01:60:B6:E0:1C:F4:50:06:01:66:36:E0:1C:A2'
+      port_status: 'Offline'
+  - &fc_port_b-2
+    <<: *fc_port_base
+    _properties:
+      <<: *fc_port_base_prop
+      sp: *spb
+      port_id: 2
+      wwn: '50:06:01:60:B6:E0:1C:F4:50:06:01:66:36:E0:1C:B2'
+
+mirror_base: &mirror_base
+  _properties: &mirror_base_prop
+    image_state:
+      _type: VNXMirrorImageState
+      value: 'SYNCHRONIZED'
+
+###########################################################
+# TestClient
+###########################################################
+test_create_lun: &test_create_lun
+  lun: &lun_test_create_lun
+    _properties:
+      <<: *lun_base_prop
+      name: lun1
+    _methods:
+      update:
+      with_no_poll: _context
+
+  pool: &pool_test_create_lun
+    _properties:
+      <<: *pool_base_prop
+      name: pool1
+    _methods:
+      create_lun: *lun_test_create_lun
+
+  vnx:
+    _properties:
+      <<: *vnx_base_prop
+    _methods:
+      get_pool: *pool_test_create_lun
+
+test_create_lun_error: &test_create_lun_error
+  pool: &pool_test_create_lun_error
+    _properties:
+      <<: *pool_base_prop
+    _methods:
+      create_lun:
+        _raise:
+          VNXCreateLunError: Unknown Error
+  vnx:
+    _properties:
+      <<: *vnx_base_prop
+    _methods:
+      get_pool: *pool_test_create_lun_error
+
+test_is_lun_io_ready_false:
+  lun:
+    _properties:
+      <<: *lun_base_prop
+      state: Initializing
+    _methods:
+      update:
+      with_no_poll: _context
+
+test_is_lun_io_ready_true:
+  lun:
+    _properties:
+      <<: *lun_base_prop
+      state: Ready
+      operation: None
+    _methods:
+      update:
+      with_no_poll: _context
+
+test_is_lun_io_ready_exception:
+  lun:
+    _properties:
+      <<: *lun_base_prop
+      state: Deleting
+    _methods:
+      update:
+      with_no_poll: _context
+
+test_create_lun_in_cg:
+  cg: &cg_test_create_lun_in_cg
+    _properties:
+      <<: *cg_base_prop
+    _methods:
+      add_member:
+
+  vnx:
+    _properties:
+      <<: *vnx_base_prop
+    _methods:
+      get_lun: *lun_test_create_lun
+      get_pool: *pool_test_create_lun
+      get_cg: *cg_test_create_lun_in_cg
+
+test_create_lun_compression:
+  lun: &lun_test_create_lun_compression
+    _properties:
+      <<: *lun_base_prop
+      name: lun2
+    _methods:
+      update:
+      with_no_poll: _context
+
+  pool: &pool_test_create_lun_compression
+    _properties:
+      <<: *pool_base_prop
+    _methods:
+      create_lun: *lun_test_create_lun_compression
+
+  vnx:
+    _properties:
+      <<: *vnx_base_prop
+    _methods:
+      get_pool: *pool_test_create_lun_compression
+
+test_create_lun_already_existed:
+  lun: &lun_test_create_lun_already_existed
+    _properties:
+      <<: *lun_base_prop
+      name: lun3
+    _methods:
+      update:
+      with_no_poll: _context
+
+  pool: &pool_test_create_lun_already_existed
+    _properties:
+      <<: *pool_base_prop
+    _methods:
+      create_lun:
+        _raise:
+          VNXLunNameInUseError: Lun already exists(0x712d8d04)
+
+  vnx:
+    _properties:
+      <<: *vnx_base_prop
+    _methods:
+      get_lun: *lun_test_create_lun_already_existed
+      get_pool: *pool_test_create_lun_already_existed
+
+test_migrate_lun:
+  lun: &lun_migrate
+    _properties:
+      <<: *lun_base_prop
+    _methods:
+      migrate:
+
+  vnx:
+    _methods:
+      get_lun: *lun_migrate
+
+test_migrate_lun_with_retry:
+  lun: &lun_migrate_retry
+    _properties:
+      <<: *lun_base_prop
+    _methods:
+      migrate:
+        _raise:
+          VNXTargetNotReadyError: 'The destination LUN is not available for migration'
+
+  vnx:
+    _methods:
+      get_lun: *lun_migrate_retry
+
+test_session_finished_faulted:
+  session: &session_faulted
+    _properties:
+      existed: true
+      current_state: 'FAULTED'
+  vnx:
+    _methods:
+      get_lun: *lun_base
+      get_migration_session: *session_faulted
+
+test_session_finished_migrating:
+  session: &session_migrating
+    _properties:
+      existed: true
+      current_state: 'MIGRATING'
+  vnx:
+    _methods:
+      get_lun: *lun_base
+      get_migration_session: *session_migrating
+
+test_session_finished_not_existed:
+  session: &session_not_existed
+    _properties:
+      existed: false
+  vnx:
+    _methods:
+      get_lun: *lun_base
+      get_migration_session: *session_not_existed
+
+test_migrate_lun_error:
+  lun1: &lun_migrate_error
+    _properties:
+      <<: *lun_base_prop
+    _methods:
+      migrate:
+        _raise:
+          VNXMigrationError: 'Unknown Error'
+  vnx:
+    _methods:
+      get_lun: *lun_migrate_error
+
+test_verify_migration:
+  lun1: &src_lun
+    _properties:
+      <<: *lun_base_prop
+  lun2: &dst_lun
+    _properties:
+      poll: false
+      wwn: 'fake_wwn'
+  session: &session_verify
+    _properties:
+      existed: false
+  vnx:
+    _methods:
+      get_lun:
+        _side_effect: [*src_lun, *dst_lun]
+      get_migration_session: *session_verify
+
+test_verify_migration_false:
+  vnx:
+    _methods:
+      get_lun:
+        _side_effect: [*src_lun, *dst_lun]
+      get_migration_session: *session_verify
+
+test_cleanup_migration:
+  session: &session_cancel
+    _properties:
+      existed: true
+  lun: &lun_cancel_migrate
+    _methods:
+      cancel_migrate:
+  vnx:
+    _methods:
+      get_migration_session: *session_cancel
+      get_lun: *lun_cancel_migrate
+
+test_get_lun_by_name:
+  lun: &lun_test_get_lun_by_name
+    _properties:
+      <<: *lun_base_prop
+      lun_id: 888
+      name: lun_name_test_get_lun_by_name
+
+  vnx:
+    _properties:
+      <<: *vnx_base_prop
+    _methods:
+      get_lun: *lun_test_get_lun_by_name
+
+test_delete_lun: &test_delete_lun
+  lun: &lun_test_delete_lun
+    _properties:
+      <<: *lun_base_prop
+      name: lun_test_delete_lun
+      is_snap_mount_point: False
+    _methods:
+      delete:
+
+  vnx:
+    _properties:
+      <<: *vnx_base_prop
+    _methods:
+      get_lun: *lun_test_delete_lun
+
+test_delete_smp: &test_delete_smp
+  snapshot: &snapshot_test_delete_smp
+    _properties:
+      name: snapshot_test_delete_smp
+    _methods:
+      delete:
+
+  lun: &lun_test_delete_smp
+    _properties:
+      <<: *lun_base_prop
+      name: lun_test_delete_smp
+      attached_snapshot: *snapshot_test_delete_smp
+      is_snap_mount_point: True
+    _methods:
+      delete:
+
+  vnx:
+    _properties:
+      <<: *vnx_base_prop
+    _methods:
+      get_lun: *lun_test_delete_smp
+
+test_delete_lun_not_exist:
+  lun: &lun_test_delete_lun_not_exist
+    _properties:
+      <<: *lun_base_prop
+      name: lun_test_delete_lun_not_exist
+      is_snap_mount_point: False
+    _methods:
+      delete:
+        _raise:
+          VNXLunNotFoundError:
+            Lun to delete doesn't exist.
+
+  vnx:
+    _properties:
+      <<: *vnx_base_prop
+    _methods:
+      get_lun: *lun_test_delete_lun_not_exist
+
+test_delete_lun_exception:
+  lun: &lun_test_delete_lun_exception
+    _properties:
+      <<: *lun_base_prop
+      name: lun_test_delete_lun_exception
+      is_snap_mount_point: False
+    _methods:
+      delete:
+        _raise:
+          VNXDeleteLunError:
+            General lun delete error.
+
+  vnx:
+    _properties:
+      <<: *vnx_base_prop
+    _methods:
+      get_lun: *lun_test_delete_lun_exception
+
+test_create_cg: &test_create_cg
+  cg: &cg_for_create
+    _properties:
+      existed: True
+    _methods:
+      update:
+      with_no_poll: _context
+
+  vnx:
+    _methods:
+      create_cg: *cg_for_create
+
+test_create_cg_already_existed:
+  vnx:
+    _methods:
+      create_cg:
+        _raise:
+          VNXConsistencyGroupNameInUseError: Already in use
+      get_cg: *cg_for_create
+
+test_delete_cg:
+  cg: &cg_for_deletion
+    _methods:
+      delete:
+  vnx:
+    _methods:
+      get_cg: *cg_for_deletion
+
+test_delete_cg_not_existed:
+  cg: &cg_delete_no_existed
+    _methods:
+      delete:
+        _raise:
+          VNXConsistencyGroupNotFoundError: CG not found
+  vnx:
+    _methods:
+      get_cg: *cg_delete_no_existed
+
+test_expand_lun: &test_expand_lun
+  lun: &lun_test_expand_lun
+    _properties:
+      <<: *lun_base_prop
+      name: lun_test_expand_lun
+      total_capacity_gb: 10
+    _methods:
+      expand:
+      update:
+      with_no_poll: _context
+  vnx:
+    _properties: *vnx_base_prop
+    _methods:
+      get_lun: *lun_test_expand_lun
+
+test_expand_lun_not_poll: *test_expand_lun
+
+test_expand_lun_already_expanded:
+  lun: &lun_test_expand_lun_already_expanded
+    _properties:
+      <<: *lun_base_prop
+      total_capacity_gb: 10
+    _methods:
+      update:
+      with_no_poll: _context
+      expand:
+        _raise:
+          VNXLunExpandSizeError:
+            LUN already expanded.
+  vnx:
+    _properties: *vnx_base_prop
+    _methods:
+      get_lun: *lun_test_expand_lun_already_expanded
+
+test_expand_lun_not_ops_ready:
+  lun: &lun_test_expand_lun_not_ops_ready
+    _properties:
+      <<: *lun_base_prop
+      total_capacity_gb: 10
+      operation: 'None'
+    _methods:
+      update:
+      with_no_poll: _context
+      expand:
+        _raise:
+          VNXLunPreparingError:
+            LUN operation not ready.
+  vnx:
+    _properties: *vnx_base_prop
+    _methods:
+      get_lun: *lun_test_expand_lun_not_ops_ready
+
+test_create_snapshot: &test_create_snapshot
+  lun: &lun_test_create_snapshot
+    <<: *lun_base
+    _methods:
+      create_snap:
+  vnx:
+    <<: *vnx_base
+    _methods:
+      get_lun: *lun_test_create_snapshot
+
+test_create_snapshot_snap_name_exist_error:
+  lun: &lun_test_create_snapshot_snap_name_exist_error
+    <<: *lun_base
+    _methods:
+      create_snap:
+        _raise:
+          VNXSnapNameInUseError: Snapshot name is in use.
+  vnx:
+    <<: *vnx_base
+    _methods:
+      get_lun: *lun_test_create_snapshot_snap_name_exist_error
+
+test_delete_snapshot: &test_delete_snapshot
+  snapshot: &snapshot_test_delete_snapshot
+    <<: *snapshot_base
+    _methods:
+      delete:
+
+  vnx:
+    <<: *vnx_base
+    _methods:
+      get_snap: *snapshot_test_delete_snapshot
+
+test_delete_snapshot_delete_attached_error:
+  snapshot: &snapshot_test_delete_snapshot_delete_attached_error
+    <<: *snapshot_base
+    _methods:
+      delete:
+        _raise:
+          VNXDeleteAttachedSnapError:
+            Snapshot is attached to a LUN.
+
+  vnx:
+    <<: *vnx_base
+    _methods:
+      get_snap: *snapshot_test_delete_snapshot_delete_attached_error
+
+test_copy_snapshot:
+  snap: &snap_copy
+    _methods:
+      copy:
+  vnx:
+    _methods:
+      get_snap: *snap_copy
+
+test_create_mount_point:
+  lun: &lun_mount_point
+    _methods:
+      create_mount_point:
+  vnx:
+    _methods:
+      get_lun: *lun_mount_point
+
+test_attach_mount_point:
+  lun: &lun_attach_snap
+    _methods:
+      attach_snap:
+  vnx:
+    _methods:
+      get_lun: *lun_attach_snap
+
+test_detach_mount_point:
+  lun: &lun_detach
+    _methods:
+      detach_snap:
+  vnx:
+    _methods:
+      get_lun: *lun_detach
+
+test_modify_snapshot:
+  snap: &snap_modify
+    _methods:
+      modify:
+
+  vnx:
+    _methods:
+      get_snap: *snap_modify
+
+test_create_cg_snapshot: &test_create_cg_snapshot
+  cg_snap: &cg_snap_exist
+    _properties:
+      existed: True
+    _methods:
+      update:
+      with_no_poll: _context
+  cg: &cg_test_create_cg_snapshot
+    _methods:
+      create_snap: *cg_snap_exist
+  vnx:
+    _methods:
+      get_cg: *cg_test_create_cg_snapshot
+
+test_create_cg_snapshot_already_existed:
+  cg: &cg_create_cg_snapshot_in_use_error
+    _methods:
+      with_no_poll: _context
+      create_snap:
+        _raise:
+          VNXSnapNameInUseError: 'Already in use'
+  vnx:
+    _methods:
+      get_cg: *cg_create_cg_snapshot_in_use_error
+      get_snap: *cg_snap_exist
+
+test_delete_cg_snapshot: *test_delete_snapshot
+
+test_create_sg:
+  sg: &sg_test_create_sg
+    <<: *sg_base
+  vnx:
+    <<: *vnx_base
+    _methods:
+      create_sg: *sg_test_create_sg
+
+test_create_sg_name_in_use:
+  vnx:
+    <<: *vnx_base
+    _methods:
+      create_sg:
+        _raise:
+          VNXStorageGroupNameInUseError: Storage group name is in use.
+      get_sg:
+
+test_get_storage_group:
+  sg: &sg_test_get_storage_group
+    <<: *sg_base
+  vnx:
+    <<: *vnx_base
+    _methods:
+      get_sg: *sg_test_get_storage_group
+
+test_register_initiator:
+  sg: &sg_test_register_initiator
+    <<: *sg_base
+    _methods:
+      connect_hba:
+      update:
+      with_poll: _context
+  vnx: *vnx_base
+
+test_register_initiator_exception:
+  sg: &sg_test_register_initiator_exception
+    <<: *sg_base
+    _methods:
+      connect_hba:
+        _raise:
+          VNXStorageGroupError: set_path error.
+      update:
+      with_poll: _context
+  vnx: *vnx_base
+
+test_ping_node:
+  iscsi_port: &iscsi_port_test_ping_node
+    <<: *iscsi_port_base
+    _methods:
+      ping_node:
+  vnx:
+    <<: *vnx_base
+    _methods:
+      get_iscsi_port: *iscsi_port_test_ping_node
+
+test_ping_node_fail:
+  iscsi_port: &iscsi_port_test_ping_node_fail
+    <<: *iscsi_port_base
+    _methods:
+      ping_node:
+        _raise:
+          VNXPingNodeError: Failed to ping node.
+  vnx:
+    <<: *vnx_base
+    _methods:
+      get_iscsi_port: *iscsi_port_test_ping_node_fail
+
+test_add_lun_to_sg:
+  sg: &sg_test_add_lun_to_sg
+    <<: *sg_base
+    _methods:
+      attach_alu: 1
+  vnx: *vnx_base
+
+test_add_lun_to_sg_alu_already_attached:
+  sg: &sg_test_add_lun_to_sg_alu_already_attached
+    <<: *sg_base
+    _methods:
+      attach_alu:
+        _raise:
+          VNXAluAlreadyAttachedError: ALU is already attached.
+      get_hlu: 1
+  vnx: *vnx_base
+
+test_add_lun_to_sg_alu_in_use:
+  lun:
+    _properties:
+      <<: *lun_base_prop
+      lun_id: 1
+  sg: &sg_test_add_lun_to_sg_alu_in_use
+    <<: *sg_base
+    _methods:
+      attach_alu:
+        _raise:
+          VNXNoHluAvailableError: No HLU available.
+      get_hlu: 1
+  vnx: *vnx_base
+
+test_update_consistencygroup_no_lun_in_cg:
+  cg:
+    _properties:
+      <<: *cg_base_prop
+      lun_list: []
+    _methods:
+      replace_member:
+
+  lun_1:
+    _properties:
+      <<: *lun_base_prop
+      lun_id: 1
+
+  lun_2:
+    _properties:
+      <<: *lun_base_prop
+      lun_id: 2
+
+  vnx: *vnx_base
+
+test_update_consistencygroup_lun_in_cg:
+  lun_1: &lun_1_test_update_consistencygroup_lun_in_cg
+    _properties:
+      <<: *lun_base_prop
+      lun_id: 1
+
+  lun_2:
+    _properties:
+      <<: *lun_base_prop
+      lun_id: 2
+
+  cg:
+    _properties:
+      <<: *cg_base_prop
+      lun_list:
+        - *lun_1_test_update_consistencygroup_lun_in_cg
+    _methods:
+      replace_member:
+
+  vnx: *vnx_base
+
+test_update_consistencygroup_remove_all:
+  lun_1: &lun_1_test_update_consistencygroup_remove_all
+    _properties:
+      <<: *lun_base_prop
+      lun_id: 1
+
+  cg:
+    _properties:
+      <<: *cg_base_prop
+      lun_list:
+        - *lun_1_test_update_consistencygroup_remove_all
+    _methods:
+      delete_member:
+
+  vnx: *vnx_base
+
+test_create_export_snapshot:
+
+test_remove_export_snapshot:
+
+test_initialize_connection_snapshot:
+  lun: &lun_test_initialize_connection_snapshot
+    _properties:
+      <<: *lun_base_prop
+      lun_id: 100
+
+  vnx:
+    <<: *vnx_base
+    _methods:
+      get_lun: *lun_test_initialize_connection_snapshot
+
+test_terminate_connection_snapshot:
+  lun: &lun_test_terminate_connection_snapshot
+    _properties:
+      <<: *lun_base_prop
+      lun_id: 100
+
+  vnx:
+    <<: *vnx_base
+    _methods:
+      get_lun: *lun_test_terminate_connection_snapshot
+
+test_get_available_ip:
+  vnx:
+    _properties:
+      alive_sp_ip: '192.168.1.5'
+
+test_create_mirror:
+  vnx:
+    _methods:
+      get_lun: *lun_base
+      create_mirror_view: *mirror_base
+
+test_create_mirror_already_created:
+  vnx:
+    _methods:
+      get_lun: *lun_base
+      create_mirror_view:
+        _raise:
+          VNXMirrorNameInUseError: 'name in use'
+      get_mirror_view: *mirror_base
+
+test_delete_mirror:
+  mirror: &mirror_test_delete_mirror
+    _methods:
+      delete:
+  vnx:
+    _methods:
+      get_mirror_view: *mirror_test_delete_mirror
+
+
+test_delete_mirror_already_deleted:
+  mirror: &mirror_delete_error
+    _methods:
+      delete:
+        _raise:
+          VNXMirrorNotFoundError: 'not found'
+  vnx:
+    _methods:
+      get_mirror_view: *mirror_delete_error
+
+test_add_image:
+  mirror: &mirror_test_add_image
+    _methods:
+      add_image:
+      update:
+      with_no_poll: _context
+      with_poll: _context
+    _properties:
+      secondary_image:
+        _properties:
+          state:
+            _enum:
+              VNXMirrorImageState: 'Synchronized'
+  vnx:
+    _methods:
+      get_mirror_view: *mirror_test_add_image
+
+test_remove_image:
+  mirror: &mirror_remove_image
+    _methods:
+      remove_image:
+
+  vnx:
+    _methods:
+      get_mirror_view: *mirror_remove_image
+
+test_fracture_image:
+  mirror: &mirror_fracture_image
+    _methods:
+      fracture_image:
+  vnx:
+    _methods:
+      get_mirror_view: *mirror_fracture_image
+
+test_sync_image:
+  mirror: &mirror_sync_image
+    _properties:
+      <<: *mirror_base_prop
+      secondary_image:
+        _properties:
+          state:
+            _enum:
+              VNXMirrorImageState: 'SYNCHRONIZED'
+    _methods:
+      sync_image:
+      with_no_poll: _context
+      update:
+
+  vnx:
+    _methods:
+      get_mirror_view: *mirror_sync_image
+
+test_promote_image:
+  mirror: &mirror_promote_image
+    _methods:
+      promote_image:
+
+  vnx:
+    _methods:
+      get_mirror_view: *mirror_promote_image
+
+test_get_lun_id:
+
+test_get_lun_id_without_provider_location:
+  lun: &test_get_lun_id_without_provider_location
+    <<: *lun_base
+    _properties:
+      <<: *lun_base_prop
+      lun_id: 1
+
+  vnx:
+    _methods:
+      get_lun: *test_get_lun_id_without_provider_location
+
+###########################################################
+# TestCommonAdapter
+###########################################################
+test_create_volume: *test_create_lun
+
+test_create_volume_error: *test_create_lun_error
+
+test_create_thick_volume: *test_create_lun
+
+test_migrate_volume:
+  lun: &src_lun_1
+    _properties:
+      <<: *lun_base_prop
+      lun_id: 4
+      wwn: 'src_wwn'
+      poll: false
+    _methods:
+      migrate:
+      update:
+      with_no_poll: _context
+
+
+  lun2: &lun_migrate_1
+    _properties:
+      lun_id: 5
+      wwn: 'dst_wwn'
+    _methods:
+      cancel_migrate:
+
+  lun3: &lun_not_existed
+    _properties:
+      wwn:
+
+  session: &session
+    _properties:
+      existed: false
+  pool: &migrate_pool
+    _methods:
+      create_lun: *src_lun_1
+  vnx:
+    _methods:
+      get_lun:
+        _side_effect: [*lun_migrate_1, *src_lun_1, *src_lun_1, *lun_not_existed]
+      get_migration_session: *session
+      get_pool: *migrate_pool
+
+test_migrate_volume_host_assisted:
+  vnx:
+    _methods:
+
+test_create_cloned_volume:
+  snap: &snap_for_clone
+    _methods:
+      delete:
+  smp: &smp_migrate
+    _properties:
+      <<: *lun_base_prop
+      lun_id: 4
+      wwn: 'src_wwn'
+      poll: false
+    _methods:
+      migrate:
+      update:
+      with_no_poll: _context
+  lun2: &lun_migrate_2
+    _properties:
+      lun_id: 5
+      wwn: 'dst_wwn'
+    _methods:
+      cancel_migrate:
+      create_snap:
+      create_mount_point:
+      attach_snap:
+  lun3: &lun_not_existed_2
+    _properties:
+      wwn:
+  session: &session_2
+    _properties:
+      existed: false
+  pool: &migrate_pool_2
+    _methods:
+      create_lun: *smp_migrate
+  vnx:
+    _properties:
+      serial: fake_serial
+    _methods:
+      get_lun:
+        _side_effect: [*lun_migrate_2,
+        *lun_migrate_2, *smp_migrate, *lun_migrate_2,
+        *lun_not_existed_2, *smp_migrate, *smp_migrate,
+        *lun_not_existed_2]
+      get_migration_session: *session_2
+      get_pool: *migrate_pool_2
+      get_snap: *snap_for_clone
+
+test_create_cloned_volume_snapcopy:
+  lun: &lun_for_snapcopy
+    _methods:
+      create_mount_point:
+      create_snap:
+  smp: &smp_for_snapcopy
+    _properties:
+      lun_id: 11
+    _methods:
+      attach_snap:
+  vnx:
+    _properties:
+      serial: fake_serial
+    _methods:
+      get_lun:
+        _side_effect: [*lun_for_snapcopy, *lun_for_snapcopy,
+        *smp_for_snapcopy, *smp_for_snapcopy]
+      get_pool: *pool_base
+
+test_create_volume_from_snapshot:
+  lun: &lun_from_snapshot
+    _properties:
+      lun_id: 16
+    _methods:
+      create_mount_point:
+  smp: &smp_from_lun
+    _methods:
+      attach_snap:
+  vnx:
+    _properties:
+      serial: fake_serial
+    _methods:
+      get_lun:
+        _side_effect: [*lun_from_snapshot, *lun_from_snapshot, *smp_from_lun,
+        *dst_lun, *src_lun_1, *src_lun_1, *lun_not_existed]
+      get_pool: *pool_test_create_lun
+      get_migration_session: *session
+
+test_create_volume_from_snapshot_snapcopy:
+  snap: &snap_for_snapcopy
+    _methods:
+      copy:
+      modify:
+  vnx:
+    _properties:
+      serial: fake_serial
+    _methods:
+      get_snap:
+        _side_effect: [*snap_for_snapcopy, *snap_for_snapcopy]
+      get_lun:
+        _side_effect: [*lun_from_snapshot, *lun_from_snapshot, *smp_from_lun]
+
+test_parse_pools: &test_parse_pools
+  pool1: &pool_test_parse_pools_1
+    _properties:
+      <<: *pool_base_prop
+      name: 'pool5'
+  pool2: &pool_test_parse_pools_2
+    _properties:
+      <<: *pool_base_prop
+      name: 'pool6'
+  vnx:
+    _properties:
+      <<: *vnx_base_prop
+    _methods:
+      get_pool: [*pool_test_parse_pools_1, *pool_test_parse_pools_2]
+
+test_parse_pools_one_invalid_pool: *test_parse_pools
+
+test_parse_pools_all_invalid_pools: *test_parse_pools
+
+test_get_enabler_stats: &test_get_enabler_stats
+  vnx: &vnx_test_get_enabler_stats
+    _properties:
+      <<: *vnx_base_prop
+    _methods:
+      is_compression_enabled: True
+      is_dedup_enabled: True
+      is_fast_cache_enabled: True
+      is_thin_enabled: True
+      is_snap_enabled: True
+      is_auto_tiering_enabled: True
+
+test_get_pool_stats:
+  pool_feature: &pool_feature_test_get_pool_stats
+    _properties:
+      <<: *pool_feature_base_prop
+
+  vnx:
+    _properties:
+      <<: *vnx_base_prop
+    _methods:
+      get_pool: [*pool_test_parse_pools_1, *pool_test_parse_pools_2]
+      get_pool_feature: *pool_feature_test_get_pool_stats
+      is_auto_tiering_enabled: True
+
+test_get_pool_stats_max_luns_reached:
+  pool_feature: &pool_feature_test_get_pool_stats_max_luns_reached
+    _properties:
+      <<: *pool_feature_base_prop
+      total_pool_luns: 3001
+
+  vnx:
+    _properties:
+      <<: *vnx_base_prop
+    _methods:
+      get_pool: [*pool_test_parse_pools_1, *pool_test_parse_pools_2]
+      get_pool_feature: *pool_feature_test_get_pool_stats_max_luns_reached
+      is_auto_tiering_enabled: True
+
+test_get_pool_stats_with_reserved:
+  vnx:
+    _properties:
+      <<: *vnx_base_prop
+    _methods:
+      get_pool: [*pool_test_parse_pools_1, *pool_test_parse_pools_2]
+      get_pool_feature: *pool_feature_test_get_pool_stats
+      is_auto_tiering_enabled: True
+
+test_get_pool_stats_offline:
+  pool1: &pool_test_get_pool_stats_offline_1
+    _properties:
+      <<: *pool_base_prop
+      name: 'pool7'
+      state: 'Offline'
+  pool2: &pool_test_get_pool_stats_offline_2
+    _properties:
+      <<: *pool_base_prop
+      name: 'pool8'
+      state: 'Offline'
+  vnx:
+    _properties:
+      <<: *vnx_base_prop
+    _methods:
+      get_pool: [*pool_test_get_pool_stats_offline_1, *pool_test_get_pool_stats_offline_2]
+      get_pool_feature: *pool_feature_test_get_pool_stats
+      is_compression_enabled: False
+      is_dedup_enabled: True
+      is_fast_cache_enabled: False
+      is_thin_enabled: True
+      is_snap_enabled: False
+      is_auto_tiering_enabled: True
+
+test_update_volume_stats: *test_get_enabler_stats
+
+test_append_volume_stats:
+  vnx:
+    _properties:
+      serial: fake_serial
+
+test_delete_volume_not_force: *test_delete_lun
+test_delete_volume_force: *test_delete_lun
+
+test_enable_compression:
+  lun:
+    _properties:
+      <<: *lun_base_prop
+    _methods:
+      enable_compression:
+
+test_enable_compression_on_compressed_lun:
+  lun:
+    _properties:
+      <<: *lun_base_prop
+    _methods:
+      enable_compression:
+        _raise:
+            VNXCompressionAlreadyEnabledError:
+
+test_lun_has_snapshot_false:
+  lun:
+    _properties:
+      <<: *lun_base_prop
+    _methods:
+      get_snap: []
+
+test_lun_has_snapshot_true:
+  lun:
+    _properties:
+      <<: *lun_base_prop
+    _methods:
+      get_snap: ['fake_snap']
+
+test_get_vnx_enabler_status:
+  vnx:
+    _methods:
+      is_dedup_enabled: True
+      is_compression_enabled: False
+      is_thin_enabled: True
+      is_fast_cache_enabled: True
+      is_auto_tiering_enabled: False
+      is_snap_enabled: True
+
+test_retype_type_invalid:
+  vnx:
+    _methods:
+      is_dedup_enabled: True
+      is_compression_enabled: True
+      is_thin_enabled: True
+      is_fast_cache_enabled: True
+      is_auto_tiering_enabled: True
+      is_snap_enabled: True
+
+test_retype_need_migration:
+  lun: &lun_retype_need_migration
+    _properties:
+      <<: *lun_base_prop
+    _methods:
+      get_snap: []
+      with_no_poll: _context
+      update:
+  vnx:
+    _methods:
+      get_lun:
+        _side_effect: [*lun_retype_need_migration]
+
+test_retype_turn_on_compression_change_tier:
+  lun: &lun_retype_turn_on_compression_change_tier
+    _properties:
+      <<: *lun_base_prop
+      provision:
+        _enum:
+          VNXProvisionEnum: 'thin'
+      tier:
+        _enum:
+          VNXTieringEnum: 'auto'
+    _methods:
+      enable_compression:
+      get_snap: []
+      with_no_poll: _context
+      update:
+  vnx:
+    _methods:
+      get_lun: *lun_retype_turn_on_compression_change_tier
+
+test_retype_lun_has_snap:
+  lun: &lun_retype_lun_has_snap
+    _properties:
+      <<: *lun_base_prop
+      provision:
+        _enum:
+          VNXProvisionEnum: 'thick'
+      tier:
+        _enum:
+          VNXTieringEnum: 'auto'
+    _methods:
+      get_snap: ['fake_snap']
+      with_no_poll: _context
+      update:
+  vnx:
+    _methods:
+      get_lun: *lun_retype_lun_has_snap
+
+test_retype_change_tier:
+  lun: &lun_retype_change_tier
+    _properties:
+      <<: *lun_base_prop
+      provision:
+        _enum:
+          VNXProvisionEnum: 'thick'
+      tier:
+        _enum:
+          VNXTieringEnum: 'nomovement'
+    _methods:
+      with_no_poll: _context
+      update:
+  vnx:
+    _methods:
+      get_lun: *lun_retype_change_tier
+
+test_create_consistencygroup: *test_create_cg
+
+test_delete_consistencygroup:
+  vnx:
+    _methods:
+      get_cg: *cg_for_deletion
+
+test_delete_consistencygroup_with_volume:
+  vnx:
+    _methods:
+      get_cg: *cg_for_deletion
+      get_lun: *lun_test_delete_lun
+
+test_delete_consistencygroup_error:
+  cg: &cg_delete_error
+    _methods:
+      delete:
+        _raise:
+          VNXConsistencyGroupError: Unable to delete cg
+  vnx:
+    _methods:
+      get_cg: *cg_delete_error
+
+test_delete_consistencygroup_volume_error:
+  vnx:
+    _methods:
+      get_cg: *cg_for_deletion
+      get_lun: *lun_test_delete_lun_exception
+
+test_extend_volume: *test_expand_lun
+
+test_create_snapshot_adapter: *test_create_snapshot
+
+test_delete_snapshot_adapter: *test_delete_snapshot
+
+test_create_cgsnapshot: *test_create_cg_snapshot
+
+test_delete_cgsnapshot:
+  cg_snap: &cg_snap_delete
+    _methods:
+      delete:
+  vnx:
+    _methods:
+      get_snap: *cg_snap_delete
+
+test_create_cg_from_cgsnapshot:
+  snap: &copied_cg_snap
+    _methods:
+      copy:
+      modify:
+  smp: &smp_from_src_lun
+    _properties:
+      lun_id: 12
+    _methods:
+      attach_snap:
+
+  lun: &src_lun_in_cg
+    _methods:
+      create_mount_point: *smp_from_src_lun
+  vnx:
+    _properties:
+    _methods:
+      get_snap:
+        _side_effect: [*copied_cg_snap, *copied_cg_snap,
+        *snapshot_test_delete_snapshot]
+      get_lun:
+        _side_effect: [*src_lun_in_cg, *smp_from_src_lun,
+        *smp_from_src_lun, *lun_migrate, *src_lun, *dst_lun]
+      get_pool: *pool_test_create_lun
+      get_migration_session: *session_verify
+      create_cg: *cg_for_create
+
+test_create_cloned_cg:
+  vnx:
+    _properties:
+    _methods:
+      get_cg: *cg_test_create_cg_snapshot
+      get_snap: *snapshot_test_delete_snapshot
+      get_lun:
+        _side_effect: [*src_lun_in_cg, *smp_from_src_lun,
+        *smp_from_src_lun, *lun_migrate, *src_lun, *dst_lun]
+      get_pool: *pool_test_create_lun
+      get_migration_session: *session_verify
+      create_cg: *cg_for_create
+
+test_validate_ports_iscsi: &test_validate_ports_iscsi
+  iscsi_port_a-0-0: *iscsi_port_a-0-0
+  vnx:
+    <<: *vnx_base
+    _methods:
+      get_iscsi_port: *all_iscsi_ports
+
+test_validate_ports_iscsi_invalid: *test_validate_ports_iscsi
+
+test_validate_ports_iscsi_not_exist: *test_validate_ports_iscsi
+
+test_validate_ports_fc: &test_validate_ports_fc
+  fc_port_a-1: *fc_port_a-1
+  vnx:
+    <<: *vnx_base
+    _methods:
+      get_fc_port: *all_fc_ports
+
+test_validate_ports_fc_invalid: *test_validate_ports_fc
+
+test_validate_ports_fc_not_exist: *test_validate_ports_fc
+
+test_manage_existing_lun_no_exist:
+  lun: &lun_manage_lun_not_exist
+    _properties:
+      existed: False
+  vnx:
+    _methods:
+      get_lun: *lun_manage_lun_not_exist
+
+test_manage_existing_invalid_ref:
+  lun: *lun_manage_lun_not_exist
+
+test_manage_existing_invalid_pool:
+  lun: &lun_manage_in_other_pool
+    _properties:
+      existed: True
+      pool_name: 'unmanaged_pool'
+  vnx:
+    _methods:
+      get_lun: *lun_manage_in_other_pool
+
+test_manage_existing_get_size:
+  lun: &lun_manage_get_size
+    _properties:
+      existed: True
+      pool_name: 'unit_test_pool'
+      total_capacity_gb: 5
+  vnx:
+    _methods:
+      get_lun: *lun_manage_get_size
+
+test_manage_existing_type_mismatch:
+  lun: &lun_manage_type_mismatch
+    _properties:
+      existed: True
+      pool_name: 'unit_test_pool'
+      provision:
+        _enum:
+          VNXProvisionEnum: 'thick'
+      tier:
+        _enum:
+          VNXTieringEnum: 'highestavailable'
+      total_capacity_gb: 5
+  vnx:
+    _methods:
+      get_lun: *lun_manage_type_mismatch
+
+test_manage_existing:
+  lun: &lun_manage_existing
+    _properties:  &lun_manage_existing_props
+      lun_id: 1
+      existed: True
+      pool_name: 'unit_test_pool'
+      provision:
+        _enum:
+          VNXProvisionEnum: 'deduplicated'
+      tier:
+        _enum:
+          VNXTieringEnum: 'auto'
+      total_capacity_gb: 5
+      primary_lun: 'N/A'
+      is_snap_mount_point: False
+    _methods:
+      rename:
+
+test_manage_existing_smp:
+  lun: &manage_existing_smp
+    _properties:
+      lun_id: 2
+      existed: True
+      pool_name: 'unit_test_pool'
+      primary_lun: 'src_lun'
+      is_snap_mount_point: True
+    _methods:
+      rename:
+  vnx:
+    _methods:
+      get_lun: *manage_existing_smp
+
+test_assure_storage_group:
+  sg: &sg_test_assure_storage_group
+    _properties:
+      <<: *sg_base_prop
+      existed: True
+    _methods:
+      update:
+      with_poll: _context
+  vnx:
+    <<: *vnx_base
+    _methods:
+      get_sg: *sg_test_assure_storage_group
+
+test_assure_storage_group_create_new:
+  sg: &sg_test_assure_storage_group_create_new
+    _properties:
+      <<: *sg_base_prop
+      existed: False
+    _methods:
+      update:
+      with_poll: _context
+  vnx:
+    <<: *vnx_base
+    _methods:
+      get_sg: *sg_test_assure_storage_group_create_new
+      create_sg: *sg_test_assure_storage_group_create_new
+
+test_assure_host_access:
+  sg: &sg_test_assure_host_access
+    <<: *sg_base
+    _methods:
+      update:
+      with_poll: _context
+  lun: &lun_test_assure_host_access
+    <<: *lun_base
+  vnx:
+    <<: *vnx_base
+    _methods:
+      get_lun: *lun_test_assure_host_access
+
+test_assure_host_access_without_auto_register_new_sg: &test_assure_host_access_without_auto_register_new_sg
+  sg: &sg_test_assure_host_access_without_auto_register_new_sg
+    <<: *sg_base
+    _methods:
+      update:
+      connect_host:
+      with_poll: _context
+  lun: &lun_test_assure_host_access_without_auto_register_new_sg
+    <<: *lun_base
+  vnx:
+    <<: *vnx_base
+    _methods:
+      get_lun: *lun_test_assure_host_access_without_auto_register_new_sg
+
+test_assure_host_access_without_auto_register: *test_assure_host_access_without_auto_register_new_sg
+
+test_auto_register_initiator: &test_auto_register_initiator
+  allowed_ports: *all_iscsi_ports
+  reg_ports: [*iscsi_port_a-0-0]
+  sg: &sg_auto_register_initiator
+    _properties:
+      <<: *sg_base_prop
+      initiator_uid_list: ['iqn-reg-1', 'iqn-reg-2']
+    _methods:
+      get_ports: [*iscsi_port_a-0-0]
+  vnx:
+    <<: *vnx_base
+
+test_auto_register_initiator_no_white_list: *test_auto_register_initiator
+
+test_build_provider_location:
+  vnx:
+    _properties:
+      serial: 'vnx-serial'
+
+test_remove_host_access:
+  sg: &sg_remove_host_access
+    _properties:
+      existed: True
+    _methods:
+      detach_alu:
+  vnx:
+    _methods:
+      get_sg: *sg_remove_host_access
+      get_lun: *lun_base
+
+test_set_extra_spec_defaults:
+  vnx:
+    <<: *vnx_base
+    _methods:
+      is_auto_tiering_enabled: True
+
+test_remove_host_access_sg_absent:
+  sg: &sg_remove_host_access_sg_absent
+    _properties:
+      existed: False
+  vnx:
+    _methods:
+      get_sg: *sg_remove_host_access_sg_absent
+      get_lun: *lun_base
+
+test_setup_lun_replication:
+  vnx:
+    _properties:
+      serial: 'vnx-serial'
+  lun:
+    _properties:
+      lun_id: 222
+      wwn: fake_wwn
+
+test_cleanup_replication:
+  vnx:
+    _properties:
+      serial: 'vnx-serial'
+    _methods:
+      is_mirror_view_sync_enabled: True
+
+test_build_mirror_view:
+  vnx:
+    _properties:
+      serial: 'vnx-serial'
+    _methods:
+      is_mirror_view_sync_enabled: True
+
+test_build_mirror_view_no_device:
+  vnx:
+    _properties:
+      serial: 'vnx-serial'
+
+test_build_mirror_view_2_device:
+  vnx:
+    _properties:
+      serial: 'vnx-serial'
+    _methods:
+      is_mirror_view_sync_enabled: True
+
+test_build_mirror_view_no_enabler:
+  vnx:
+    _properties:
+      serial: 'vnx-serial'
+    _methods:
+      is_mirror_view_sync_enabled: False
+
+test_build_mirror_view_failover_false:
+  vnx:
+    _properties:
+      serial: 'vnx-serial'
+    _methods:
+      is_mirror_view_sync_enabled: True
+
+test_failover_host:
+  lun1:
+    _properties:
+      lun_id: 11
+
+test_failover_host_invalid_backend_id:
+
+test_failover_host_failback:
+  lun1:
+    _properties:
+      lun_id: 22
+
+test_get_pool_name:
+  lun: &lun_test_get_pool_name
+    _properties:
+      <<: *lun_base_prop
+      pool_name: pool_1
+    _methods:
+      with_no_poll: _context
+      update:
+
+  vnx:
+    _methods:
+      get_lun: *lun_test_get_pool_name
+
+test_normalize_config_naviseccli_path:
+
+test_normalize_config_naviseccli_path_none:
+
+test_normalize_config_pool_names:
+
+test_normalize_config_pool_names_none:
+
+test_normalize_config_pool_names_empty_list:
+
+test_normalize_config_io_port_list:
+
+test_normalize_config_io_port_list_none:
+
+test_normalize_config_io_port_list_empty_list:
+
+
+###########################################################
+# TestISCSIAdapter
+###########################################################
+test_parse_ports_iscsi: &test_parse_ports_iscsi
+  connection_port: &port_test_parse_ports_iscsi
+    _properties:
+      existed: False
+  vnx:
+    _methods:
+      get_sg: *sg_remove_host_access_sg_absent
+      get_lun: *lun_base
+
+test_remove_host_access_volume_not_in_sg:
+  sg: &remove_host_access_volume_not_in_sg
+    _properties: *sg_base_prop
+    _methods:
+      detach_alu:
+        _raise:
+          VNXDetachAluNotFoundError:
+  vnx:
+    _methods:
+      get_sg: *remove_host_access_volume_not_in_sg
+      get_lun: *lun_base
+
+test_terminate_connection_cleanup_remove_sg:
+  sg:
+    _properties: *sg_base_prop
+    _methods:
+      remove:
+      disconnect_host:
+      get_alu_hlu_map: {}
+
+test_terminate_connection_cleanup_sg_absent:
+  sg:
+    _properties:
+      existed: False
+
+test_terminate_connection_cleanup_deregister:
+  sg:
+    _properties: *sg_base_prop
+    _methods:
+      remove:
+      disconnect_host:
+      get_alu_hlu_map: {}
+  vnx:
+    _methods:
+      remove_hba:
+
+test_terminate_connection_cleanup_sg_is_not_empty:
+  sg:
+    _properties: *sg_base_prop
+    _methods:
+      get_alu_hlu_map: {'1': '1'}
+
+test_update_consistencygroup:
+
+test_update_migrated_volume:
+
+test_update_migrated_volume_smp:
+
+test_normalize_config_iscsi_initiators:
+
+test_normalize_config_iscsi_initiators_none:
+
+test_normalize_config_iscsi_initiators_empty_str:
+
+test_normalize_config_iscsi_initiators_not_dict:
+
+
+###########################################################
+# TestISCSIAdapter
+###########################################################
+test_update_volume_stats_iscsi:
+  vnx:
+    _properties:
+      <<: *vnx_base_prop
+    _methods:
+      get_iscsi_port: *all_iscsi_ports
+
+test_build_terminate_connection_return_data_iscsi:
+
+
+###########################################################
+# TestFCAdapter
+###########################################################
+test_build_terminate_connection_return_data_without_autozone:
+
+test_build_terminate_connection_return_data_sg_absent:
+  sg:
+    _properties:
+      <<: *sg_base_prop
+      existed: False
+
+test_build_terminate_connection_return_data_auto_zone:
+  sg:
+    _properties:
+      <<: *sg_base_prop
+      name: 'fake_host'
+      fc_ports:
+        - *fc_port_a-1
+    _methods:
+      get_alu_hlu_map: {}
+  vnx:
+    _methods:
+      get_fc_port: *all_fc_ports
+
+test_mock_vnx_objects_foo: *test_create_lun
+
+test_get_tgt_list_and_initiator_tgt_map_allow_port_only:
+  sg:
+    _properties:
+      <<: *sg_base_prop
+      fc_ports:
+        - *fc_port_a-1
+        - <<: *fc_port_base
+          _properties:
+            <<: *fc_port_base_prop
+            sp: *spb
+            port_id: 1
+            wwn: '50:06:01:60:B6:E0:1C:F4:50:06:01:66:36:E0:1C:B1'
+        - *fc_port_b-2
+  adapter:
+    _properties:
+      allowed_ports:
+        - <<: *fc_port_base
+          _properties:
+            <<: *fc_port_base_prop
+            sp: *spb
+            port_id: 1
+            wwn: '50:06:01:60:B6:E0:1C:F4:50:06:01:66:36:E0:1C:B1'
+        - *fc_port_b-2
+
+  vnx:
+    _methods:
+      get_fc_port: *all_fc_ports
+
+
+###########################################################
+# TestTaskflow
+###########################################################
+test_copy_snapshot_task:
+  vnx:
+    _methods:
+      get_snap: *snap_copy
+
+test_copy_snapshot_task_revert:
+  snap: &snap_copy_error
+    _methods:
+      copy:
+        _raise:
+          VNXSnapError: Unable to copy snap
+      delete:
+  vnx:
+    _methods:
+      get_snap: *snap_copy_error
+
+test_create_smp_task:
+  smp: &smp
+    _properties:
+      lun_id: 15
+  lun: &lun_create_smp
+    _methods:
+      create_mount_point: *smp
+  vnx:
+    _methods:
+      get_lun:
+        _side_effect: [*lun_create_smp, *smp]
+
+test_create_smp_task_revert:
+  lun: &lun_create_smp_error
+    _methods:
+      create_mount_point:
+        _raise:
+          VNXCreateLunError: 'Unable to create mount point'
+      delete:
+    _properties:
+      is_snap_mount_point: False
+  vnx:
+    _methods:
+      get_lun: *lun_create_smp_error
+
+test_attach_snap_task:
+  vnx:
+    _methods:
+      get_lun: *lun_attach_snap
+
+test_attach_snap_task_revert:
+  lun: &lun_attach_snap_error
+    _methods:
+      attach_snap:
+        _raise:
+          VNXAttachSnapError: 'Unable to attach snapshot'
+      detach_snap:
+  vnx:
+    _methods:
+      get_lun: *lun_attach_snap_error
+
+test_create_snapshot_task:
+  lun: &lun_snap
+    _methods:
+      create_snap:
+  vnx:
+    _methods:
+      get_lun: *lun_snap
+
+test_create_snapshot_task_revert:
+  snap: &snap_delete
+    _methods:
+      delete:
+  lun: &lun_snap_error
+    _methods:
+      create_snap:
+        _raise:
+          VNXCreateSnapError: 'Unable to create snap'
+  vnx:
+    _methods:
+      get_lun: *lun_snap_error
+      get_snap: *snap_delete
+
+test_allow_read_write_task:
+  vnx:
+    _methods:
+      get_snap: *snap_modify
+
+test_allow_read_write_task_revert:
+  snap: &snap_modify_error
+    _methods:
+      modify:
+        _raise:
+          VNXSnapError: Unable to modify snap
+  vnx:
+    _methods:
+      get_snap: *snap_modify_error
+
+test_wait_migrations_task:
+  vnx:
+
+test_create_consistency_group_task:
+  vnx:
+
+test_create_consistency_group_task_revert:
+  vnx:
+
+test_create_cg_snapshot_task: *test_create_cg_snapshot
+
+
+test_create_cg_snapshot_task_revert:
+  cg: &create_cg_snapshot_error
+    _methods:
+      create_snap:
+        _raise:
+          VNXCreateSnapError: 'Create failed'
+  snap: &snap_create_cg_revert
+    _methods:
+      delete:
+  vnx:
+    _methods:
+      get_cg: *create_cg_snapshot_error
+      get_snap: *snap_create_cg_revert
+
+###########################################################
+# TestExtraSpecs
+###########################################################
+test_generate_extra_specs_from_lun:
+  lun:
+    _properties:
+      provision:
+        _enum:
+          VNXProvisionEnum: 'compressed'
+      tier:
+        _enum:
+          VNXTieringEnum: 'highestavailable'
+
+  deduped_lun:
+    _properties:
+      provision:
+        _enum:
+          VNXProvisionEnum: 'deduplicated'
+      tier:
+        _enum:
+          VNXTieringEnum: 'auto'
+
+test_extra_specs_match_with_lun:
+  lun:
+    _properties:
+      provision:
+        _enum:
+          VNXProvisionEnum: 'thin'
+      tier:
+        _enum:
+          VNXTieringEnum: 'nomovement'
+
+  deduped_lun:
+    _properties:
+      provision:
+        _enum:
+          VNXProvisionEnum: 'deduplicated'
+      tier:
+        _enum:
+          VNXTieringEnum: 'nomovement'
+
+test_extra_specs_not_match_with_lun:
+  lun:
+    _properties:
+      provision:
+        _enum:
+          VNXProvisionEnum: 'thick'
+      tier:
+        _enum:
+          VNXTieringEnum: 'lowestavailable'
diff --git a/cinder/tests/unit/volume/drivers/emc/vnx/res_mock.py b/cinder/tests/unit/volume/drivers/emc/vnx/res_mock.py
new file mode 100644
index 00000000000..96d9998685f
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/emc/vnx/res_mock.py
@@ -0,0 +1,441 @@
+# Copyright (c) 2016 EMC Corporation, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import six
+
+from cinder.tests.unit.consistencygroup import fake_cgsnapshot
+from cinder.tests.unit.consistencygroup import fake_consistencygroup
+from cinder.tests.unit import fake_constants
+from cinder.tests.unit import fake_snapshot
+from cinder.tests.unit import fake_volume
+from cinder.tests.unit.volume.drivers.emc.vnx import fake_exception as lib_ex
+from cinder.tests.unit.volume.drivers.emc.vnx import fake_storops as storops
+from cinder.tests.unit.volume.drivers.emc.vnx import utils
+from cinder.volume.drivers.emc.vnx import adapter
+from cinder.volume.drivers.emc.vnx import client
+from cinder.volume.drivers.emc.vnx import common
+from cinder.volume.drivers.emc.vnx import utils as vnx_utils
+
+SYMBOL_TYPE = '_type'
+SYMBOL_PROPERTIES = '_properties'
+SYMBOL_METHODS = '_methods'
+SYMBOL_SIDE_EFFECT = '_side_effect'
+SYMBOL_RAISE = '_raise'
+SYMBOL_CONTEXT = '_context'
+UUID = '_uuid'
+SYMBOL_ENUM = '_enum'
+
+
+def _is_driver_object(obj_body):
+    return isinstance(obj_body, dict) and SYMBOL_PROPERTIES in obj_body
+
+
+class DriverResourceMock(dict):
+    fake_func_mapping = {}
+
+    def __init__(self, yaml_file):
+        yaml_dict = utils.load_yaml(yaml_file)
+        if not isinstance(yaml_dict, dict):
+            return
+        for case_name, case_res in yaml_dict.items():
+            if not isinstance(case_res, dict):
+                continue
+            self[case_name] = {}
+            for obj_name, obj_body in case_res.items():
+                self[case_name][obj_name] = self._parse_driver_object(obj_body)
+
+    def _parse_driver_object(self, obj_body):
+        if isinstance(obj_body, dict):
+            obj_body = {k: self._parse_driver_object(v)
+                        for k, v in obj_body.items()}
+            if _is_driver_object(obj_body):
+                return self._create_object(obj_body)
+            else:
+                return obj_body
+        elif isinstance(obj_body, list):
+            # build a list explicitly; py3's map() returns a one-shot iterator
+            return [self._parse_driver_object(each) for each in obj_body]
+        else:
+            return obj_body
+
+    def _create_object(self, obj_body):
+        props = obj_body[SYMBOL_PROPERTIES]
+        for prop_name, prop_value in props.items():
+            if isinstance(prop_value, dict) and prop_value:
+                # get the first key as the convert function
+                func_name = list(prop_value.keys())[0]
+                if func_name.startswith('_'):
+                    func = getattr(self, func_name)
+                    props[prop_name] = func(prop_value[func_name])
+
+        if (SYMBOL_TYPE in obj_body and
+                obj_body[SYMBOL_TYPE] in self.fake_func_mapping):
+            return self.fake_func_mapping[obj_body[SYMBOL_TYPE]](**props)
+        else:
+            return props
+
+    @staticmethod
+    def _uuid(uuid_key):
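+        # e.g. a YAML value of `_uuid: volume_id` resolves to
+        # fake_constants.VOLUME_ID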
+        uuid_key = uuid_key.upper()
+        return getattr(fake_constants, uuid_key)
+
+
+def _fake_volume_wrapper(*args, **kwargs):
+    expected_attrs_key = {'volume_attachment': 'volume_attachment',
+                          'volume_metadata': 'metadata'}
+    return fake_volume.fake_volume_obj(
+        None,
+        expected_attrs=[
+            v for (k, v) in expected_attrs_key.items() if k in kwargs],
+        **kwargs)
+
+
+def _fake_cg_wrapper(*args, **kwargs):
+    return fake_consistencygroup.fake_consistencyobject_obj(None, **kwargs)
+
+
+def _fake_snapshot_wrapper(*args, **kwargs):
+    return fake_snapshot.fake_snapshot_obj(None,
+                                           expected_attrs=(
+                                               ['volume'] if 'volume' in kwargs
+                                               else None),
+                                           **kwargs)
+
+
+def _fake_cg_snapshot_wrapper(*args, **kwargs):
+    return fake_cgsnapshot.fake_cgsnapshot_obj(None, **kwargs)
+
+
+class EnumBuilder(object):
+    def __init__(self, enum_dict):
+        enum_dict = enum_dict[SYMBOL_ENUM]
+        for k, v in six.iteritems(enum_dict):
+            self.klazz = k
+            self.value = v
+
+    def __call__(self, *args, **kwargs):
+        return getattr(storops, self.klazz).parse(self.value)
+
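+# For example, EnumBuilder({'_enum': {'VNXSPEnum': 'SP A'}})() evaluates
+# getattr(storops, 'VNXSPEnum').parse('SP A'); storops here is the
+# fake_storops test double imported above.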
+
+class CinderResourceMock(DriverResourceMock):
+    # fake_func in the mapping should be like func(*args, **kwargs)
+    fake_func_mapping = {'volume': _fake_volume_wrapper,
+                         'cg': _fake_cg_wrapper,
+                         'snapshot': _fake_snapshot_wrapper,
+                         'cg_snapshot': _fake_cg_snapshot_wrapper}
+
+    def __init__(self, yaml_file):
+        super(CinderResourceMock, self).__init__(yaml_file)
+
+    @staticmethod
+    def _build_provider_location(props):
+        return vnx_utils.build_provider_location(
+            props.get('system'), props.get('type'),
+            six.text_type(props.get('id')),
+            six.text_type(props.get('base_lun_name')),
+            props.get('version'))
+
+
+class ContextMock(object):
+    """Mocks the return value of a context function."""
+
+    def __enter__(self):
+        pass
+
+    def __exit__(self, exc_type, exc_value, exc_tb):
+        pass
+
+
+class MockBase(object):
+    """Base object of all the Mocks.
+
+    This mock convert the dict to object when the '_type' is
+    included in the dict
+    """
+
+    def _is_mock_object(self, yaml_info):
+        return (isinstance(yaml_info, dict) and
+                (SYMBOL_PROPERTIES in yaml_info or
+                 SYMBOL_METHODS in yaml_info))
+
+    def _is_object_with_type(self, yaml_dict):
+        return isinstance(yaml_dict, dict) and SYMBOL_TYPE in yaml_dict
+
+    def _is_object_with_enum(self, yaml_dict):
+        return isinstance(yaml_dict, dict) and SYMBOL_ENUM in yaml_dict
+
+    def _build_mock_object(self, yaml_dict):
+        if self._is_object_with_type(yaml_dict):
+            return FakePort(yaml_dict)
+        elif self._is_object_with_enum(yaml_dict):
+            return EnumBuilder(yaml_dict)()
+        elif self._is_mock_object(yaml_dict):
+            return StorageObjectMock(yaml_dict)
+        elif isinstance(yaml_dict, dict):
+            return {k: self._build_mock_object(v)
+                    for k, v in yaml_dict.items()}
+        elif isinstance(yaml_dict, list):
+            return [self._build_mock_object(each) for each in yaml_dict]
+        else:
+            return yaml_dict
+
+
+class StorageObjectMock(object):
+    PROPS = 'props'
+
+    def __init__(self, yaml_dict):
+        self.__dict__[StorageObjectMock.PROPS] = {}
+        props = yaml_dict.get(SYMBOL_PROPERTIES, None)
+        if props:
+            for k, v in props.items():
+                setattr(self, k, StoragePropertyMock(k, v)())
+
+        methods = yaml_dict.get(SYMBOL_METHODS, None)
+        if methods:
+            for k, v in methods.items():
+                setattr(self, k, StorageMethodMock(k, v))
+
+    def __setattr__(self, key, value):
+        self.__dict__[StorageObjectMock.PROPS][key] = value
+
+    def __getattr__(self, item):
+        try:
+            return self.__dict__[StorageObjectMock.PROPS][item]
+        except KeyError:
+            raise KeyError('%(item)s does not exist in mock object.'
+                           % {'item': item})
+
+
+class FakePort(StorageObjectMock):
+
+    def __eq__(self, other):
+        o_sp = other.sp
+        o_port_id = other.port_id
+        o_vport_id = other.vport_id
+
+        ret = True
+        ret &= self.sp == o_sp
+        ret &= self.port_id == o_port_id
+        ret &= self.vport_id == o_vport_id
+
+        return ret
+
+    def __hash__(self):
+        return hash((self.sp, self.port_id, self.vport_id))
+
+
+class StoragePropertyMock(mock.PropertyMock, MockBase):
+    def __init__(self, name, property_body):
+        return_value = property_body
+        side_effect = None
+
+        # only support return_value and side_effect for property
+        if (isinstance(property_body, dict) and
+                SYMBOL_SIDE_EFFECT in property_body):
+            side_effect = self._build_mock_object(
+                property_body[SYMBOL_SIDE_EFFECT])
+            return_value = None
+
+        if side_effect is not None:
+            super(StoragePropertyMock, self).__init__(
+                name=name,
+                side_effect=side_effect)
+        else:
+            return_value = self._build_mock_object(return_value)
+
+            super(StoragePropertyMock, self).__init__(
+                name=name,
+                return_value=return_value)
+
+
+class StorageMethodMock(mock.Mock, MockBase):
+    def __init__(self, name, method_body):
+        return_value = method_body
+        exception = None
+        side_effect = None
+
+        # support return_value, side_effect and exception for method
+        if isinstance(method_body, dict):
+            if (SYMBOL_SIDE_EFFECT in method_body or
+                    SYMBOL_RAISE in method_body):
+                exception = method_body.get(SYMBOL_RAISE, None)
+                side_effect = method_body.get(SYMBOL_SIDE_EFFECT, None)
+                return_value = None
+
+        if exception is not None:
+            ex = None
+            if isinstance(exception, dict) and exception:
+                ex_name = list(exception.keys())[0]
+                ex_tmp = [getattr(ex_module, ex_name, None)
+                          for ex_module in [lib_ex, common]]
+                try:
+                    ex = [each for each in ex_tmp if each is not None][0]
+                    super(StorageMethodMock, self).__init__(
+                        name=name,
+                        side_effect=ex(exception[ex_name]))
+                except IndexError:
+                    raise KeyError('Exception %(ex_name)s not found.'
+                                   % {'ex_name': ex_name})
+            else:
+                raise KeyError('Invalid Exception body, should be a dict.')
+        elif side_effect is not None:
+            super(StorageMethodMock, self).__init__(
+                name=name,
+                side_effect=self._build_mock_object(side_effect))
+        elif return_value is not None:
+            super(StorageMethodMock, self).__init__(
+                name=name,
+                return_value=(ContextMock() if return_value == SYMBOL_CONTEXT
+                              else self._build_mock_object(return_value)))
+        else:
+            super(StorageMethodMock, self).__init__(
+                name=name, return_value=None)
+
+
+class StorageResourceMock(dict, MockBase):
+    def __init__(self, yaml_file):
+        yaml_dict = utils.load_yaml(yaml_file)
+        if not isinstance(yaml_dict, dict):
+            return
+        for section, sec_body in yaml_dict.items():
+            if isinstance(sec_body, dict):
+                self[section] = {obj_name: self._build_mock_object(obj_body)
+                                 for obj_name, obj_body
+                                 in sec_body.items()}
+            else:
+                self[section] = {}
+
+
+cinder_res = CinderResourceMock('mocked_cinder.yaml')
+DRIVER_RES_MAPPING = {
+    'TestResMock': cinder_res,
+    'TestCommonAdapter': cinder_res,
+    'TestISCSIAdapter': cinder_res,
+    'TestFCAdapter': cinder_res,
+    'TestUtils': cinder_res,
+    'TestClient': cinder_res
+}
+
+
+def mock_driver_input(func):
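+    """Passes the test's mocked Cinder resources as an extra argument.
+
+    Resources are looked up by test class name and test method name in
+    mocked_cinder.yaml.
+    """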
+    @six.wraps(func)
+    def decorated(cls, *args, **kwargs):
+        return func(cls,
+                    DRIVER_RES_MAPPING[cls.__class__.__name__][func.__name__],
+                    *args, **kwargs)
+    return decorated
+
+
+vnx_res = StorageResourceMock('mocked_vnx.yaml')
+STORAGE_RES_MAPPING = {
+    'TestResMock': StorageResourceMock('test_res_mock.yaml'),
+    'TestCondition': vnx_res,
+    'TestClient': vnx_res,
+    'TestCommonAdapter': vnx_res,
+    'TestISCSIAdapter': vnx_res,
+    'TestFCAdapter': vnx_res,
+    'TestTaskflow': vnx_res,
+    'TestExtraSpecs': vnx_res,
+}
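+# A per-test section may define an object under this key; it is then
+# used as the mocked VNXSystem.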
+DEFAULT_STORAGE_RES = 'vnx'
+
+
+def _build_client():
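+    # Fake credentials only; no real array is contacted in unit tests.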
+    return client.Client(ip='192.168.1.2',
+                         username='sysadmin',
+                         password='sysadmin',
+                         scope='global',
+                         naviseccli=None,
+                         sec_file=None)
+
+
+def patch_client(func):
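+    """Passes a Client wired to the test's mocked VNX, plus resources."""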
+    @six.wraps(func)
+    @utils.patch_looping_call
+    def decorated(cls, *args, **kwargs):
+        storage_res = (
+            STORAGE_RES_MAPPING[cls.__class__.__name__][func.__name__])
+        with utils.patch_vnxsystem as patched_vnx:
+            if DEFAULT_STORAGE_RES in storage_res:
+                patched_vnx.return_value = storage_res[DEFAULT_STORAGE_RES]
+            client = _build_client()
+        return func(cls, client, storage_res, *args, **kwargs)
+    return decorated
+
+
+PROTOCOL_COMMON = 'Common'
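+# Maps a protocol name to the adapter class under test.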
+PROTOCOL_MAPPING = {
+    PROTOCOL_COMMON: adapter.CommonAdapter,
+    common.PROTOCOL_ISCSI: adapter.ISCSIAdapter,
+    common.PROTOCOL_FC: adapter.FCAdapter
+}
+
+
+def patch_adapter_init(protocol):
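+    """Passes an adapter constructed while the VNX system is mocked.
+
+    Used to exercise the adapter's __init__ against mocked resources.
+    """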
+    def inner_patch_adapter(func):
+        @six.wraps(func)
+        @utils.patch_looping_call
+        def decorated(cls, *args, **kwargs):
+            storage_res = (
+                STORAGE_RES_MAPPING[cls.__class__.__name__][func.__name__])
+            with utils.patch_vnxsystem as patched_vnx:
+                if DEFAULT_STORAGE_RES in storage_res:
+                    patched_vnx.return_value = storage_res[DEFAULT_STORAGE_RES]
+                adapter = PROTOCOL_MAPPING[protocol](cls.configuration)
+            return func(cls, adapter, storage_res, *args, **kwargs)
+        return decorated
+    return inner_patch_adapter
+
+
+def _patch_adapter_prop(adapter, client):
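+    # Fall back to a fake serial number when the mocked resources do not
+    # define one (get_serial then raises KeyError).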
+    try:
+        adapter.serial_number = client.get_serial()
+    except KeyError:
+        adapter.serial_number = 'faked_serial_number'
+
+
+def patch_adapter(protocol):
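+    """Passes a fully patched adapter plus its mocked VNX resources.
+
+    Unlike patch_adapter_init, the adapter is built first and its client
+    and serial number are patched in afterwards.
+    """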
+    def inner_patch_adapter(func):
+        @six.wraps(func)
+        @utils.patch_looping_call
+        def decorated(cls, *args, **kwargs):
+            storage_res = (
+                STORAGE_RES_MAPPING[cls.__class__.__name__][func.__name__])
+            with utils.patch_vnxsystem:
+                client = _build_client()
+                adapter = PROTOCOL_MAPPING[protocol](cls.configuration, None)
+            if DEFAULT_STORAGE_RES in storage_res:
+                client.vnx = storage_res[DEFAULT_STORAGE_RES]
+            adapter.client = client
+            _patch_adapter_prop(adapter, client)
+            return func(cls, adapter, storage_res, *args, **kwargs)
+        return decorated
+    return inner_patch_adapter
+
+
+patch_common_adapter = patch_adapter(PROTOCOL_COMMON)
+patch_iscsi_adapter = patch_adapter(common.PROTOCOL_ISCSI)
+patch_fc_adapter = patch_adapter(common.PROTOCOL_FC)
+
+
+def mock_storage_resources(func):
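+    """Passes the test's mocked storage resources as an extra argument."""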
+    @six.wraps(func)
+    def decorated(cls, *args, **kwargs):
+        storage_res = (
+            STORAGE_RES_MAPPING[cls.__class__.__name__][func.__name__])
+        return func(cls, storage_res, *args, **kwargs)
+    return decorated
diff --git a/cinder/tests/unit/volume/drivers/emc/vnx/test_adapter.py b/cinder/tests/unit/volume/drivers/emc/vnx/test_adapter.py
new file mode 100644
index 00000000000..617c57440d5
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/emc/vnx/test_adapter.py
@@ -0,0 +1,1304 @@
+# Copyright (c) 2016 EMC Corporation, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import re
+
+import mock
+
+from cinder import exception
+from cinder.objects import fields
+from cinder import test
+from cinder.tests.unit.volume.drivers.emc.vnx import fake_exception \
+    as storops_ex
+from cinder.tests.unit.volume.drivers.emc.vnx import fake_storops as storops
+from cinder.tests.unit.volume.drivers.emc.vnx import res_mock
+from cinder.tests.unit.volume.drivers.emc.vnx import utils
+from cinder.volume import configuration as conf
+from cinder.volume.drivers.emc.vnx import adapter
+from cinder.volume.drivers.emc.vnx import client
+from cinder.volume.drivers.emc.vnx import common
+from cinder.volume.drivers.emc.vnx import utils as vnx_utils
+
+
+class TestCommonAdapter(test.TestCase):
+
+    def setUp(self):
+        super(TestCommonAdapter, self).setUp()
+        self.configuration = conf.Configuration(None)
+        vnx_utils.init_ops(self.configuration)
+        self.configuration.san_ip = '192.168.1.1'
+        self.configuration.storage_vnx_authentication_type = 'global'
+
+    def tearDown(self):
+        super(TestCommonAdapter, self).tearDown()
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_create_volume(self, vnx_common, _ignore, mocked_input):
+        volume = mocked_input['volume']
+        model_update = vnx_common.create_volume(volume)
+        self.assertEqual('False', model_update.get('metadata')['snapcopy'])
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_create_volume_error(self, vnx_common, _ignore, mocked_input):
+        self.assertRaises(storops_ex.VNXCreateLunError,
+                          vnx_common.create_volume,
+                          mocked_input['volume'])
+
+    @utils.patch_extra_specs({'provisioning:type': 'thick'})
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_create_thick_volume(self, vnx_common, _ignore, mocked_input):
+        volume = mocked_input['volume']
+        expected_pool = volume.host.split('#')[1]
+        vnx_common.create_volume(volume)
+        vnx_common.client.vnx.get_pool.assert_called_with(
+            name=expected_pool)
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_migrate_volume(self, vnx_common, mocked, cinder_input):
+        volume = cinder_input['volume']
+        host = {'capabilities':
+                {'location_info': 'pool_name|fake_serial',
+                 'storage_protocol': 'iscsi'},
+                'host': 'hostname@backend_name#pool_name'}
+        vnx_common.serial_number = 'fake_serial'
+        migrated, update = vnx_common.migrate_volume(None, volume, host)
+        self.assertTrue(migrated)
+        self.assertEqual('False', update['metadata']['snapcopy'])
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_migrate_volume_host_assisted(self, vnx_common, mocked,
+                                          cinder_input):
+        volume1 = cinder_input['volume']
+        host = {
+            'capabilities': {
+                'location_info': 'pool_name|fake_serial',
+                'storage_protocol': 'iscsi'},
+            'host': 'hostname@backend_name#pool_name'}
+        vnx_common.serial_number = 'new_serial'
+        migrated, update = vnx_common.migrate_volume(None, volume1, host)
+        self.assertFalse(migrated)
+        self.assertIsNone(update)
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_create_cloned_volume(
+            self, vnx_common, mocked, cinder_input):
+        volume = cinder_input['volume']
+        src_vref = cinder_input['src_vref']
+        model_update = vnx_common.create_cloned_volume(volume, src_vref)
+        self.assertEqual('False', model_update['metadata']['snapcopy'])
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_create_cloned_volume_snapcopy(
+            self, vnx_common, mocked, cinder_input):
+        volume = cinder_input['volume']
+        volume.metadata = {'snapcopy': 'True'}
+        src_vref = cinder_input['src_vref']
+        model_update = vnx_common.create_cloned_volume(volume, src_vref)
+        self.assertEqual('True', model_update['metadata']['snapcopy'])
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_create_volume_from_snapshot(
+            self, vnx_common, mocked, cinder_input):
+        volume = cinder_input['volume']
+        snapshot = cinder_input['snapshot']
+        snapshot.volume = volume
+        update = vnx_common.create_volume_from_snapshot(volume, snapshot)
+        self.assertEqual('False', update['metadata']['snapcopy'])
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_create_volume_from_snapshot_snapcopy(
+            self, vnx_common, mocked, cinder_input):
+        volume = cinder_input['volume']
+        volume.metadata = {'snapcopy': 'True'}
+        snapshot = cinder_input['snapshot']
+        snapshot.volume = volume
+        update = vnx_common.create_volume_from_snapshot(volume, snapshot)
+        self.assertEqual('True', update['metadata']['snapcopy'])
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_create_cg_from_cgsnapshot(self, vnx_common, mocked,
+                                       cinder_input):
+        group = cinder_input['cg']
+        volumes = [cinder_input['vol1']]
+        cg_snap = cinder_input['cg_snap']
+        snaps = [cinder_input['snap1']]
+
+        model_update, volume_updates = vnx_common.create_cg_from_cgsnapshot(
+            None, group, volumes, cg_snap, snaps)
+        self.assertIsNone(model_update)
+        # re.search returns None on no match; the '^' is escaped because
+        # it is a literal field separator in provider_location.
+        self.assertIsNotNone(
+            re.search(r'id\^12',
+                      volume_updates[0]['provider_location']))
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_create_cloned_cg(self, vnx_common, mocked,
+                              cinder_input):
+        group = cinder_input['cg']
+        src_group = cinder_input['src_cg']
+        volumes = [cinder_input['vol1']]
+        src_volumes = [cinder_input['src_vol1']]
+        model_update, volume_updates = vnx_common.create_cloned_cg(
+            None, group, volumes, src_group, src_volumes)
+        self.assertIsNone(model_update)
+        self.assertIsNotNone(
+            re.search(r'id\^12',
+                      volume_updates[0]['provider_location']))
+
+    @res_mock.patch_common_adapter
+    def test_parse_pools(self, vnx_common, mocked):
+        vnx_common.config.storage_vnx_pool_names = ['pool5', 'pool6']
+        parsed = vnx_common.parse_pools()
+        self.assertEqual(
+            len(vnx_common.config.storage_vnx_pool_names),
+            len(parsed))
+        pools = vnx_common.client.get_pools()
+        self.assertEqual(pools, parsed)
+
+    @res_mock.patch_common_adapter
+    def test_parse_pools_one_invalid_pool(self, vnx_common, mocked):
+        vnx_common.config.storage_vnx_pool_names = ['pool5', 'pool7']
+        parsed = vnx_common.parse_pools()
+        pools = vnx_common.client.get_pools()
+        self.assertIn(parsed[0], pools)
+
+    @res_mock.patch_common_adapter
+    def test_parse_pools_all_invalid_pools(self, vnx_common, mocked):
+        vnx_common.config.storage_vnx_pool_names = ['pool7', 'pool8']
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          vnx_common.parse_pools)
+
+    @res_mock.patch_common_adapter
+    def test_get_enabler_stats(self, vnx_common, mocked):
+        stats = vnx_common.get_enabler_stats()
+        self.assertTrue(stats['compression_support'])
+        self.assertTrue(stats['fast_support'])
+        self.assertTrue(stats['deduplication_support'])
+        self.assertTrue(stats['thin_provisioning_support'])
+        self.assertTrue(stats['consistencygroup_support'])
+
+    @res_mock.patch_common_adapter
+    def test_get_pool_stats(self, vnx_common, mocked):
+        pools = vnx_common.client.vnx.get_pool()
+        vnx_common.config.storage_vnx_pool_names = [
+            pool.name for pool in pools]
+        stats = {
+            'compression_support': True,
+            'fast_support': True,
+            'deduplication_support': True,
+            'thin_provisioning_support': True,
+            'consistencygroup_support': True,
+        }
+        pool_stats = vnx_common.get_pool_stats(stats)
+        self.assertEqual(2, len(pool_stats))
+        for stat in pool_stats:
+            self.assertTrue(stat['fast_cache_enabled'])
+            self.assertTrue(stat['pool_name'] in [pools[0].name,
+                                                  pools[1].name])
+            self.assertFalse(stat['replication_enabled'])
+            self.assertEqual([], stat['replication_targets'])
+
+    @res_mock.patch_common_adapter
+    def test_get_pool_stats_offline(self, vnx_common, mocked):
+        vnx_common.config.storage_vnx_pool_names = []
+        pool_stats = vnx_common.get_pool_stats()
+        for stat in pool_stats:
+            self.assertTrue(stat['fast_cache_enabled'])
+            self.assertEqual(0, stat['free_capacity_gb'])
+
+    @res_mock.patch_common_adapter
+    def test_get_pool_stats_max_luns_reached(self, vnx_common, mocked):
+        pools = vnx_common.client.vnx.get_pool()
+        vnx_common.config.storage_vnx_pool_names = [
+            pool.name for pool in pools]
+        stats = {
+            'compression_support': True,
+            'fast_support': True,
+            'deduplication_support': True,
+            'thin_provisioning_support': True,
+            'consistencygroup_support': True,
+        }
+        pool_stats = vnx_common.get_pool_stats(stats)
+        for stat in pool_stats:
+            self.assertTrue(stat['fast_cache_enabled'])
+            self.assertEqual(0, stat['free_capacity_gb'])
+
+    @res_mock.patch_common_adapter
+    def test_get_pool_stats_with_reserved(self, vnx_common, mocked):
+        pools = vnx_common.client.vnx.get_pool()
+        vnx_common.config.storage_vnx_pool_names = [
+            pool.name for pool in pools]
+        stats = {
+            'compression_support': True,
+            'fast_support': True,
+            'deduplication_support': True,
+            'thin_provisioning_support': True,
+            'consistencygroup_support': True,
+        }
+        vnx_common.reserved_percentage = 15
+        pool_stats = vnx_common.get_pool_stats(stats)
+        for stat in pool_stats:
+            self.assertTrue(stat['fast_cache_enabled'])
+            self.assertNotEqual(0, stat['free_capacity_gb'])
+            self.assertEqual(15, stat['reserved_percentage'])
+
+    @res_mock.patch_common_adapter
+    def test_update_volume_stats(self, vnx_common, mocked):
+        with mock.patch.object(adapter.CommonAdapter, 'get_pool_stats'):
+            stats = vnx_common.update_volume_stats()
+        self.assertEqual(
+            adapter.CommonAdapter.VERSION, stats['driver_version'])
+        self.assertEqual(adapter.CommonAdapter.VENDOR, stats['vendor_name'])
+        pools_stats = stats['pools']
+        for stat in pools_stats:
+            self.assertFalse(stat['replication_enabled'])
+            self.assertEqual([], stat['replication_targets'])
+
+    @res_mock.patch_common_adapter
+    def test_append_volume_stats(self, vnx_common, mocked):
+        device = utils.get_replication_device()
+        vnx_common.config.replication_device = [device]
+        vnx_common.mirror_view = utils.build_fake_mirror_view()
+        stats = {}
+        vnx_common.append_replication_stats(stats)
+        self.assertTrue(stats['replication_enabled'])
+        self.assertEqual(1, stats['replication_count'])
+        self.assertEqual(['sync'], stats['replication_type'])
+        self.assertEqual([device['backend_id']],
+                         stats['replication_targets'])
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_delete_volume_not_force(self, vnx_common, mocked, mocked_input):
+        vnx_common.force_delete_lun_in_sg = False
+        vnx_common.delete_volume(mocked_input['volume'])
+        lun = vnx_common.client.vnx.get_lun()
+        lun.delete.assert_called_with(force_detach=True, detach_from_sg=False)
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_delete_volume_force(self, vnx_common, mocked, mocked_input):
+        vnx_common.force_delete_lun_in_sg = True
+        vnx_common.delete_volume(mocked_input['volume'])
+        lun = vnx_common.client.vnx.get_lun()
+        lun.delete.assert_called_with(force_detach=True, detach_from_sg=True)
+
+    @utils.patch_extra_specs_validate(side_effect=exception.InvalidVolumeType(
+        reason='fake_reason'))
+    @res_mock.patch_common_adapter
+    def test_retype_type_invalid(self, vnx_common, mocked):
+        self.assertRaises(exception.InvalidVolumeType,
+                          vnx_common.retype,
+                          None, None,
+                          {'extra_specs': 'fake_spec'},
+                          None, None)
+
+    @mock.patch.object(client.Client, 'get_vnx_enabler_status')
+    @utils.patch_extra_specs_validate(return_value=True)
+    @utils.patch_extra_specs({'storagetype:tiering': 'auto',
+                              'provisioning:type': 'thin'})
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_retype_need_migration(
+            self, vnx_common, mocked, driver_in,
+            enabler_status):
+        new_type = {
+            'extra_specs': {'provisioning:type': 'deduplicated',
+                            'storagetype:tiering': 'starthighthenauto'}}
+        volume = driver_in['volume']
+        host = driver_in['host']
+        fake_migrate_return = (True, ['fake_model_update'])
+        vnx_common._migrate_volume = mock.Mock(
+            return_value=fake_migrate_return)
+        ret = vnx_common.retype(None, volume, new_type, None, host)
+        self.assertEqual(fake_migrate_return, ret)
+        vnx_common._migrate_volume.assert_called_once_with(
+            volume, host, common.ExtraSpecs(new_type['extra_specs']))
+
+    @mock.patch.object(client.Client, 'get_vnx_enabler_status')
+    @utils.patch_extra_specs_validate(return_value=True)
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_retype_turn_on_compression_change_tier(
+            self, vnx_common, mocked, driver_in,
+            enabler_status):
+        new_type = {
+            'extra_specs': {'provisioning:type': 'compressed',
+                            'storagetype:tiering': 'starthighthenauto'}}
+        volume = driver_in['volume']
+        host = driver_in['host']
+        lun = mocked['lun']
+        vnx_common.client.get_lun = mock.Mock(return_value=lun)
+        ret = vnx_common.retype(None, volume, new_type, None, host)
+        self.assertTrue(ret)
+        lun.enable_compression.assert_called_once_with(ignore_thresholds=True)
+        self.assertEqual(storops.VNXTieringEnum.HIGH_AUTO, lun.tier)
+
+    @mock.patch.object(client.Client, 'get_vnx_enabler_status')
+    @utils.patch_extra_specs_validate(return_value=True)
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_retype_lun_has_snap(
+            self, vnx_common, mocked, driver_in,
+            enabler_status):
+        new_type = {
+            'extra_specs': {'provisioning:type': 'thin',
+                            'storagetype:tiering': 'auto'}}
+        volume = driver_in['volume']
+        host = driver_in['host']
+        ret = vnx_common.retype(None, volume, new_type, None, host)
+        self.assertFalse(ret)
+        new_type = {
+            'extra_specs': {'provisioning:type': 'compressed',
+                            'storagetype:tiering': 'auto'}}
+        ret = vnx_common.retype(None, volume, new_type, None, host)
+        self.assertFalse(ret)
+
+    @mock.patch.object(client.Client, 'get_vnx_enabler_status')
+    @utils.patch_extra_specs_validate(return_value=True)
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_retype_change_tier(
+            self, vnx_common, mocked, driver_in,
+            enabler_status):
+        new_type = {
+            'extra_specs': {'storagetype:tiering': 'auto'}}
+        volume = driver_in['volume']
+        host = driver_in['host']
+        lun = mocked['lun']
+        vnx_common.client.get_lun = mock.Mock(return_value=lun)
+        ret = vnx_common.retype(None, volume, new_type, None, host)
+        self.assertTrue(ret)
+        self.assertEqual(storops.VNXTieringEnum.AUTO, lun.tier)
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_create_consistencygroup(self, vnx_common, mocked, mocked_input):
+        cg = mocked_input['cg']
+        model_update = vnx_common.create_consistencygroup(None, group=cg)
+        self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE,
+                         model_update['status'])
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_delete_consistencygroup(self, vnx_common, mocked, mocked_input):
+        cg = mocked_input['cg']
+        model_update, vol_update_list = vnx_common.delete_consistencygroup(
+            None, group=cg, volumes=[])
+        self.assertEqual(cg.status,
+                         model_update['status'])
+        self.assertEqual([], vol_update_list)
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_delete_consistencygroup_with_volume(
+            self, vnx_common, mocked, mocked_input):
+        cg = mocked_input['cg']
+        vol1 = mocked_input['vol1']
+        vol2 = mocked_input['vol2']
+        model_update, vol_update_list = vnx_common.delete_consistencygroup(
+            None, group=cg, volumes=[vol1, vol2])
+        self.assertEqual(cg.status,
+                         model_update['status'])
+        for update in vol_update_list:
+            self.assertEqual(fields.ConsistencyGroupStatus.DELETED,
+                             update['status'])
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_delete_consistencygroup_error(self, vnx_common,
+                                           mocked, mocked_input):
+        cg = mocked_input['cg']
+        self.assertRaises(
+            storops_ex.VNXConsistencyGroupError,
+            vnx_common.delete_consistencygroup,
+            context=None, group=cg, volumes=[])
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_delete_consistencygroup_volume_error(self, vnx_common,
+                                                  mocked, mocked_input):
+        cg = mocked_input['cg']
+        vol1 = mocked_input['vol1']
+        vol2 = mocked_input['vol2']
+        model_update, vol_update_list = vnx_common.delete_consistencygroup(
+            None, group=cg, volumes=[vol1, vol2])
+        self.assertEqual(cg.status,
+                         model_update['status'])
+        for update in vol_update_list:
+            self.assertEqual(fields.ConsistencyGroupStatus.ERROR_DELETING,
+                             update['status'])
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_extend_volume(self, common_adapter, _ignore, mocked_input):
+        common_adapter.extend_volume(mocked_input['volume'], 10)
+
+        lun = common_adapter.client.vnx.get_lun()
+        lun.expand.assert_called_once_with(10, ignore_thresholds=True)
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_create_snapshot_adapter(self, common_adapter, _ignore,
+                                     mocked_input):
+        common_adapter.create_snapshot(mocked_input['snapshot'])
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_delete_snapshot_adapter(self, common_adapter, _ignore,
+                                     mocked_input):
+        common_adapter.delete_snapshot(mocked_input['snapshot'])
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_create_cgsnapshot(self, common_adapter, mocked, mocked_input):
+        cg_snap = mocked_input['cg_snap']
+        snap1 = mocked_input['snap1']
+        snap2 = mocked_input['snap2']
+        model_update, snapshots_model_update = (
+            common_adapter.create_cgsnapshot(None, cg_snap, [snap1, snap2]))
+        self.assertEqual('available', model_update['status'])
+        for update in snapshots_model_update:
+            self.assertEqual(fields.SnapshotStatus.AVAILABLE, update['status'])
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_delete_cgsnapshot(self, common_adapter, mocked, mocked_input):
+        model_update, snapshot_updates = (
+            common_adapter.delete_cgsnapshot(
+                None, mocked_input['cg_snap'],
+                [mocked_input['snap1'], mocked_input['snap2']]))
+        self.assertEqual('deleted', model_update['status'])
+        for snap in snapshot_updates:
+            self.assertEqual(fields.SnapshotStatus.DELETED, snap['status'])
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_manage_existing_lun_no_exist(
+            self, common_adapter, _ignore, mocked_input):
+        self.assertRaises(
+            exception.ManageExistingInvalidReference,
+            common_adapter.manage_existing_get_size,
+            mocked_input['volume'], {'source-name': 'fake'})
+        common_adapter.client.vnx.get_lun.assert_called_once_with(
+            name='fake', lun_id=None)
+
+    @res_mock.patch_common_adapter
+    def test_manage_existing_invalid_ref(
+            self, common_adapter, _ignore):
+        self.assertRaises(
+            exception.ManageExistingInvalidReference,
+            common_adapter.manage_existing_get_size,
+            None, {'invalidkey': 'fake'})
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_manage_existing_invalid_pool(
+            self, common_adapter, _ignore, mocked_input):
+        self.assertRaises(
+            exception.ManageExistingInvalidReference,
+            common_adapter.manage_existing_get_size,
+            mocked_input['volume'], {'source-id': '6'})
+        common_adapter.client.vnx.get_lun.assert_called_once_with(
+            lun_id='6', name=None)
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_manage_existing_get_size(
+            self, common_adapter, mocked_res, mocked_input):
+        size = common_adapter.manage_existing_get_size(
+            mocked_input['volume'], {'source-name': 'test_lun'})
+        self.assertEqual(mocked_res['lun'].total_capacity_gb, size)
+
+    @utils.patch_extra_specs({'provisioning:type': 'thin',
+                              'storagetype:tiering': 'auto'})
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_manage_existing_type_mismatch(
+            self, common_adapter, mocked_res, mocked_input):
+        self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
+                          common_adapter.manage_existing,
+                          mocked_input['volume'],
+                          {'source-name': 'test_lun'})
+
+    @utils.patch_extra_specs({'provisioning:type': 'deduplicated'})
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_manage_existing(
+            self, common_adapter, mocked_res, mocked_input):
+        test_lun = mocked_res['lun']
+        common_adapter.client.get_lun = mock.Mock(return_value=test_lun)
+        lun_name = mocked_input['volume'].name
+        common_adapter._build_provider_location = mock.Mock(
+            return_value="fake_pl")
+        pl = common_adapter.manage_existing(
+            mocked_input['volume'],
+            {'source-name': 'test_lun'})
+        common_adapter._build_provider_location.assert_called_with(
+            lun_type='lun',
+            lun_id=1,
+            base_lun_name=lun_name)
+        self.assertEqual('fake_pl', pl['provider_location'])
+        test_lun.rename.assert_called_once_with(
+            lun_name)
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_manage_existing_smp(
+            self, common_adapter, mocked_res, mocked_input):
+        common_adapter._build_provider_location = mock.Mock(
+            return_value="fake_pl")
+        pl = common_adapter.manage_existing(
+            mocked_input['volume'], {'source-name': 'test_lun'})
+        common_adapter._build_provider_location.assert_called_with(
+            lun_id=2, lun_type='smp', base_lun_name='src_lun')
+        self.assertEqual('fake_pl', pl['provider_location'])
+
+    @res_mock.patch_common_adapter
+    def test_assure_storage_group(self, common_adapter, mocked_res):
+        host = common.Host('host', ['initiators'])
+        common_adapter.assure_storage_group(host)
+
+    @res_mock.patch_common_adapter
+    def test_assure_storage_group_create_new(self, common_adapter, mocked_res):
+        host = common.Host('host', ['initiators'])
+        common_adapter.assure_storage_group(host)
+        common_adapter.client.vnx.create_sg.assert_called_once_with(host.name)
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_assure_host_access(self, common_adapter,
+                                mocked_res, mocked_input):
+        common_adapter.config.initiator_auto_registration = True
+        common_adapter.max_retries = 3
+        common_adapter.auto_register_initiator = mock.Mock()
+        common_adapter.client.add_lun_to_sg = mock.Mock()
+        sg = mocked_res['sg']
+        host = common.Host('host', ['initiators'])
+        cinder_volume = mocked_input['volume']
+        volume = common.Volume(cinder_volume.name, cinder_volume.id,
+                               common_adapter.client.get_lun_id(cinder_volume))
+        lun = common_adapter.client.get_lun()
+        common_adapter.assure_host_access(sg, host, volume, True)
+        common_adapter.auto_register_initiator.assert_called_once_with(
+            sg, host)
+        common_adapter.client.add_lun_to_sg.assert_called_once_with(
+            sg, lun, common_adapter.max_retries)
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_assure_host_access_without_auto_register_new_sg(
+            self, common_adapter, mocked_res, mocked_input):
+        common_adapter.config.initiator_auto_registration = False
+        common_adapter.max_retries = 3
+        common_adapter.client.add_lun_to_sg = mock.Mock()
+        sg = mocked_res['sg']
+        host = common.Host('host', ['initiators'])
+        cinder_volume = mocked_input['volume']
+        volume = common.Volume(cinder_volume.name, cinder_volume.id,
+                               common_adapter.client.get_lun_id(cinder_volume))
+        lun = common_adapter.client.get_lun()
+        common_adapter.assure_host_access(sg, host, volume, True)
+        sg.connect_host.assert_called_once_with(host.name)
+        common_adapter.client.add_lun_to_sg.assert_called_once_with(
+            sg, lun, common_adapter.max_retries)
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_assure_host_access_without_auto_register(
+            self, common_adapter, mocked_res, mocked_input):
+        common_adapter.config.initiator_auto_registration = False
+        common_adapter.max_retries = 3
+        common_adapter.client.add_lun_to_sg = mock.Mock()
+        sg = mocked_res['sg']
+        host = common.Host('host', ['initiators'])
+        cinder_volume = mocked_input['volume']
+        volume = common.Volume(cinder_volume.name, cinder_volume.id,
+                               common_adapter.client.get_lun_id(cinder_volume))
+        lun = common_adapter.client.get_lun()
+        common_adapter.assure_host_access(sg, host, volume, False)
+        sg.connect_host.assert_not_called()
+        common_adapter.client.add_lun_to_sg.assert_called_once_with(
+            sg, lun, common_adapter.max_retries)
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_auto_register_initiator(
+            self, common_adapter, mocked_res, mocked_input):
+        common_adapter.client.register_initiator = mock.Mock()
+
+        common_adapter.config.io_port_list = ['a-0-0', 'a-0-1', 'a-1-0',
+                                              'b-0-1']
+        allowed_ports = mocked_res['allowed_ports']
+        common_adapter.allowed_ports = allowed_ports
+        reg_ports = mocked_res['reg_ports']
+        sg = mocked_res['sg']
+        host = common.Host('host', ['iqn-host-1', 'iqn-reg-2'])
+        common_adapter.auto_register_initiator(sg, host)
+
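+        # iqn-host-1 is not yet registered, so it should be registered on
+        # every allowed port; iqn-reg-2 is already registered on reg_ports
+        # and only needs the remaining ports.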
+        initiator_port_map = {'iqn-host-1': set(allowed_ports),
+                              'iqn-reg-2': set(allowed_ports) - set(reg_ports)}
+        common_adapter.client.register_initiator.assert_called_once_with(
+            sg, host, initiator_port_map)
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_auto_register_initiator_no_white_list(
+            self, common_adapter, mocked_res, mocked_input):
+        common_adapter.client.register_initiator = mock.Mock()
+
+        common_adapter.config.io_port_list = None
+        allowed_ports = mocked_res['allowed_ports']
+        common_adapter.allowed_ports = allowed_ports
+        sg = mocked_res['sg']
+        host = common.Host('host', ['iqn-host-1', 'iqn-reg-2'])
+        common_adapter.auto_register_initiator(sg, host)
+
+        initiator_port_map = {'iqn-host-1': set(allowed_ports)}
+        common_adapter.client.register_initiator.assert_called_once_with(
+            sg, host, initiator_port_map)
+
+    @res_mock.patch_common_adapter
+    def test_build_provider_location(self, common_adapter, mocked_res):
+        common_adapter.serial_number = 'vnx-serial'
+        pl = common_adapter._build_provider_location(
+            lun_id='fake_id', lun_type='smp', base_lun_name='fake_name')
+        expected_pl = vnx_utils.build_provider_location(
+            system='vnx-serial',
+            lun_type='smp',
+            lun_id='fake_id',
+            base_lun_name='fake_name',
+            version=common_adapter.VERSION)
+        self.assertEqual(expected_pl, pl)
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_remove_host_access(
+            self, common_adapter, mocked_res, mocked_input):
+        host = common.Host('fake_host', ['fake_initiator'])
+        cinder_volume = mocked_input['volume']
+        volume = common.Volume(cinder_volume.name, cinder_volume.id,
+                               common_adapter.client.get_lun_id(cinder_volume))
+        sg = mocked_res['sg']
+        common_adapter.remove_host_access(volume, host, sg)
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_remove_host_access_sg_absent(
+            self, common_adapter, mocked_res, mocked_input):
+        host = common.Host('fake_host', ['fake_initiator'])
+        cinder_volume = mocked_input['volume']
+        volume = common.Volume(cinder_volume.name, cinder_volume.id,
+                               common_adapter.client.get_lun_id(cinder_volume))
+        sg = mocked_res['sg']
+        common_adapter.remove_host_access(volume, host, sg)
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_remove_host_access_volume_not_in_sg(
+            self, common_adapter, mocked_res, mocked_input):
+        host = common.Host('fake_host', ['fake_initiator'])
+        cinder_volume = mocked_input['volume']
+        volume = common.Volume(cinder_volume.name, cinder_volume.id,
+                               common_adapter.client.get_lun_id(cinder_volume))
+        sg = mocked_res['sg']
+        common_adapter.remove_host_access(volume, host, sg)
+
+    @res_mock.patch_common_adapter
+    def test_terminate_connection_cleanup_sg_absent(
+            self, common_adapter, mocked_res):
+        common_adapter.destroy_empty_sg = True
+        common_adapter.itor_auto_dereg = True
+        host = common.Host('fake_host', ['fake_initiator'])
+        sg = mocked_res['sg']
+        common_adapter.terminate_connection_cleanup(host, sg)
+
+    @res_mock.patch_common_adapter
+    def test_terminate_connection_cleanup_remove_sg(
+            self, common_adapter, mocked_res):
+        common_adapter.destroy_empty_sg = True
+        common_adapter.itor_auto_dereg = False
+        host = common.Host('fake_host', ['fake_initiator'])
+        sg = mocked_res['sg']
+        common_adapter.terminate_connection_cleanup(host, sg)
+
+    @res_mock.patch_common_adapter
+    def test_terminate_connection_cleanup_deregister(
+            self, common_adapter, mocked_res):
+        common_adapter.destroy_empty_sg = True
+        common_adapter.itor_auto_dereg = True
+        host = common.Host('fake_host', ['fake_initiator1', 'fake_initiator2'])
+        sg = mocked_res['sg']
+        common_adapter.terminate_connection_cleanup(host, sg)
+        common_adapter.client.vnx.remove_hba.assert_any_call(
+            'fake_initiator1')
+        common_adapter.client.vnx.remove_hba.assert_any_call(
+            'fake_initiator2')
+
+    @res_mock.patch_common_adapter
+    def test_terminate_connection_cleanup_sg_is_not_empty(
+            self, common_adapter, mocked_res):
+        common_adapter.destroy_empty_sg = True
+        common_adapter.itor_auto_dereg = True
+        host = common.Host('fake_host', ['fake_initiator'])
+        sg = mocked_res['sg']
+        common_adapter.terminate_connection_cleanup(host, sg)
+
+    @res_mock.patch_common_adapter
+    def test_set_extra_spec_defaults(self, common_adapter, mocked_res):
+        common_adapter.set_extra_spec_defaults()
+        self.assertEqual(storops.VNXTieringEnum.HIGH_AUTO,
+                         common.ExtraSpecs.TIER_DEFAULT)
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_update_consistencygroup(self, common_adapter, mocked_res,
+                                     mocked_input):
+        common_adapter.client.update_consistencygroup = mock.Mock()
+        cg = mocked_input['cg']
+        common_adapter.client.get_cg = mock.Mock(return_value=cg)
+
+        common_adapter.update_consistencygroup(None, cg,
+                                               [mocked_input['volume_add']],
+                                               [mocked_input['volume_remove']])
+
+        common_adapter.client.update_consistencygroup.assert_called_once_with(
+            cg, [1], [2])
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_create_export_snapshot(self, common_adapter, mocked_res,
+                                    mocked_input):
+        common_adapter.client.create_mount_point = mock.Mock()
+        snapshot = mocked_input['snapshot']
+        common_adapter.create_export_snapshot(None, snapshot, None)
+        common_adapter.client.create_mount_point.assert_called_once_with(
+            snapshot.volume_name, 'tmp-smp-' + snapshot.id)
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_remove_export_snapshot(self, common_adapter, mocked_res,
+                                    mocked_input):
+        common_adapter.client.delete_lun = mock.Mock()
+        snapshot = mocked_input['snapshot']
+        common_adapter.remove_export_snapshot(None, snapshot)
+        common_adapter.client.delete_lun.assert_called_once_with(
+            'tmp-smp-' + snapshot.id)
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_initialize_connection_snapshot(self, common_adapter, mocked_res,
+                                            mocked_input):
+        common_adapter.client.attach_snapshot = mock.Mock()
+        common_adapter._initialize_connection = mock.Mock()
+
+        snapshot = mocked_input['snapshot']
+        smp_name = 'tmp-smp-' + snapshot.id
+        common_adapter.initialize_connection_snapshot(snapshot, None)
+        common_adapter.client.attach_snapshot.assert_called_once_with(
+            smp_name, snapshot.name)
+        lun = mocked_res['lun']
+        called_volume = common_adapter._initialize_connection.call_args[0][0]
+        self.assertEqual((smp_name, snapshot.id, lun.lun_id),
+                         (called_volume.name, called_volume.id,
+                             called_volume.vnx_lun_id))
+        self.assertIsNone(
+            common_adapter._initialize_connection.call_args[0][1])
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_terminate_connection_snapshot(self, common_adapter, mocked_res,
+                                           mocked_input):
+        common_adapter.client.detach_snapshot = mock.Mock()
+        common_adapter._terminate_connection = mock.Mock()
+
+        snapshot = mocked_input['snapshot']
+        smp_name = 'tmp-smp-' + snapshot.id
+        common_adapter.terminate_connection_snapshot(snapshot, None)
+        lun = mocked_res['lun']
+        called_volume = common_adapter._terminate_connection.call_args[0][0]
+        self.assertEqual((smp_name, snapshot.id, lun.lun_id),
+                         (called_volume.name, called_volume.id,
+                             called_volume.vnx_lun_id))
+        self.assertIsNone(common_adapter._terminate_connection.call_args[0][1])
+        common_adapter.client.detach_snapshot.assert_called_once_with(
+            smp_name)
+
+    @utils.patch_extra_specs({'replication_enabled': '<is> True'})
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_setup_lun_replication(self, common_adapter,
+                                   mocked_res, mocked_input):
+        vol1 = mocked_input['vol1']
+        fake_mirror = utils.build_fake_mirror_view()
+        fake_mirror.secondary_client.create_lun.return_value = (
+            mocked_res['lun'])
+        common_adapter.mirror_view = fake_mirror
+        rep_update = common_adapter.setup_lun_replication(
+            vol1, 111)
+        fake_mirror.create_mirror.assert_called_once_with(
+            'mirror_' + vol1.id, 111)
+        fake_mirror.add_image.assert_called_once_with(
+            'mirror_' + vol1.id, mocked_res['lun'].lun_id)
+        self.assertEqual(fields.ReplicationStatus.ENABLED,
+                         rep_update['replication_status'])
+
+    @utils.patch_extra_specs({'replication_enabled': '<is> True'})
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_cleanup_replication(self, common_adapter,
+                                 mocked_res, mocked_input):
+        fake_mirror = utils.build_fake_mirror_view()
+        vol1 = mocked_input['vol1']
+        with mock.patch.object(common_adapter, 'build_mirror_view') as fake:
+            fake.return_value = fake_mirror
+            common_adapter.cleanup_lun_replication(vol1)
+            fake_mirror.destroy_mirror.assert_called_once_with(
+                'mirror_' + vol1.id, vol1.name)
+
+    @res_mock.patch_common_adapter
+    def test_build_mirror_view(self, common_adapter,
+                               mocked_res):
+        common_adapter.config.replication_device = [
+            utils.get_replication_device()]
+        with utils.patch_vnxsystem:
+            mirror = common_adapter.build_mirror_view(
+                common_adapter.config)
+        self.assertIsNotNone(mirror)
+
+    @res_mock.patch_common_adapter
+    def test_build_mirror_view_no_device(
+            self, common_adapter, mocked_res):
+        common_adapter.config.replication_device = []
+        mirror = common_adapter.build_mirror_view(
+            common_adapter.config)
+        self.assertIsNone(mirror)
+
+    @res_mock.patch_common_adapter
+    def test_build_mirror_view_2_device(self, common_adapter, mocked_res):
+        device = utils.get_replication_device()
+        device1 = device.copy()
+        common_adapter.config.replication_device = [device, device1]
+        self.assertRaises(exception.InvalidInput,
+                          common_adapter.build_mirror_view,
+                          common_adapter.config)
+
+    @res_mock.patch_common_adapter
+    def test_build_mirror_view_no_enabler(self, common_adapter, mocked_res):
+        common_adapter.config.replication_device = [
+            utils.get_replication_device()]
+        self.assertRaises(exception.InvalidInput,
+                          common_adapter.build_mirror_view,
+                          common_adapter.config)
+
+    @res_mock.patch_common_adapter
+    def test_build_mirror_view_failover_false(self, common_adapter,
+                                              mocked_res):
+        common_adapter.config.replication_device = [
+            utils.get_replication_device()]
+        with utils.patch_vnxsystem:
+            failover_mirror = common_adapter.build_mirror_view(
+                common_adapter.config, failover=False)
+        self.assertIsNotNone(failover_mirror)
+
+    @utils.patch_extra_specs({'replication_enabled': '<is> True'})
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_failover_host(self, common_adapter, mocked_res, mocked_input):
+        device = utils.get_replication_device()
+        common_adapter.config.replication_device = [device]
+        vol1 = mocked_input['vol1']
+        lun1 = mocked_res['lun1']
+        with mock.patch.object(common_adapter, 'build_mirror_view') as fake:
+            fake_mirror = utils.build_fake_mirror_view()
+            fake_mirror.secondary_client.get_lun.return_value = lun1
+            fake_mirror.secondary_client.get_serial.return_value = (
+                device['backend_id'])
+            fake.return_value = fake_mirror
+            backend_id, updates = common_adapter.failover_host(
+                None, [vol1], device['backend_id'])
+            fake_mirror.promote_image.assert_called_once_with(
+                'mirror_' + vol1.id)
+            fake_mirror.secondary_client.get_serial.assert_called_with()
+            fake_mirror.secondary_client.get_lun.assert_called_with(
+                name=vol1.name)
+        self.assertEqual(device['backend_id'], backend_id)
+        for update in updates:
+            self.assertEqual(fields.ReplicationStatus.FAILED_OVER,
+                             update['updates']['replication_status'])
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_failover_host_invalid_backend_id(self, common_adapter,
+                                              mocked_res, mocked_input):
+        common_adapter.config.replication_device = [
+            utils.get_replication_device()]
+        vol1 = mocked_input['vol1']
+        self.assertRaises(exception.InvalidInput,
+                          common_adapter.failover_host,
+                          None, [vol1], 'new_id')
+
+    @utils.patch_extra_specs({'replication_enabled': '<is> True'})
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_failover_host_failback(self, common_adapter, mocked_res,
+                                    mocked_input):
+        device = utils.get_replication_device()
+        common_adapter.config.replication_device = [device]
+        vol1 = mocked_input['vol1']
+        lun1 = mocked_res['lun1']
+        with mock.patch.object(common_adapter, 'build_mirror_view') as fake:
+            fake_mirror = utils.build_fake_mirror_view()
+            fake_mirror.secondary_client.get_lun.return_value = lun1
+            fake_mirror.secondary_client.get_serial.return_value = (
+                device['backend_id'])
+            fake.return_value = fake_mirror
+            backend_id, updates = common_adapter.failover_host(
+                None, [vol1], 'default')
+            fake_mirror.promote_image.assert_called_once_with(
+                'mirror_' + vol1.id)
+            fake_mirror.secondary_client.get_serial.assert_called_with()
+            fake_mirror.secondary_client.get_lun.assert_called_with(
+                name=vol1.name)
+        self.assertEqual('default', backend_id)
+        for update in updates:
+            self.assertEqual(fields.ReplicationStatus.ENABLED,
+                             update['updates']['replication_status'])
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_get_pool_name(self, common_adapter, mocked_res, mocked_input):
+        self.assertEqual(mocked_res['lun'].pool_name,
+                         common_adapter.get_pool_name(mocked_input['volume']))
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_update_migrated_volume(self, common_adapter, mocked_res,
+                                    mocked_input):
+        data = common_adapter.update_migrated_volume(
+            None, mocked_input['volume'], mocked_input['new_volume'])
+        self.assertEqual(mocked_input['new_volume'].provider_location,
+                         data['provider_location'])
+        self.assertEqual('False', data['metadata']['snapcopy'])
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_update_migrated_volume_smp(self, common_adapter, mocked_res,
+                                        mocked_input):
+        data = common_adapter.update_migrated_volume(
+            None, mocked_input['volume'], mocked_input['new_volume'])
+        self.assertEqual(mocked_input['new_volume'].provider_location,
+                         data['provider_location'])
+        self.assertEqual('True', data['metadata']['snapcopy'])
+
+    @res_mock.patch_common_adapter
+    def test_normalize_config_naviseccli_path(self, common_adapter,
+                                              mocked_res):
+        old_value = common_adapter.config.naviseccli_path
+        common_adapter._normalize_config()
+        self.assertEqual(old_value, common_adapter.config.naviseccli_path)
+
+    @res_mock.patch_common_adapter
+    def test_normalize_config_naviseccli_path_none(self, common_adapter,
+                                                   mocked_res):
+        common_adapter.config.naviseccli_path = ""
+        common_adapter._normalize_config()
+        self.assertIsNone(common_adapter.config.naviseccli_path)
+
+        common_adapter.config.naviseccli_path = "   "
+        common_adapter._normalize_config()
+        self.assertIsNone(common_adapter.config.naviseccli_path)
+
+        common_adapter.config.naviseccli_path = None
+        common_adapter._normalize_config()
+        self.assertIsNone(common_adapter.config.naviseccli_path)
+
+    @res_mock.patch_common_adapter
+    def test_normalize_config_pool_names(self, common_adapter,
+                                         mocked_res):
+        common_adapter.config.storage_vnx_pool_names = [
+            'pool_1', '  pool_2   ', '', '   ']
+        common_adapter._normalize_config()
+        self.assertEqual(['pool_1', 'pool_2'],
+                         common_adapter.config.storage_vnx_pool_names)
+
+    @res_mock.patch_common_adapter
+    def test_normalize_config_pool_names_none(self, common_adapter,
+                                              mocked_res):
+        common_adapter.config.storage_vnx_pool_names = None
+        common_adapter._normalize_config()
+        self.assertIsNone(common_adapter.config.storage_vnx_pool_names)
+
+    @res_mock.patch_common_adapter
+    def test_normalize_config_pool_names_empty_list(self, common_adapter,
+                                                    mocked_res):
+        common_adapter.config.storage_vnx_pool_names = []
+        self.assertRaises(exception.InvalidConfigurationValue,
+                          common_adapter._normalize_config)
+
+        common_adapter.config.storage_vnx_pool_names = ['  ', '']
+        self.assertRaises(exception.InvalidConfigurationValue,
+                          common_adapter._normalize_config)
+
+    @res_mock.patch_common_adapter
+    def test_normalize_config_io_port_list(self, common_adapter,
+                                           mocked_res):
+        common_adapter.config.io_port_list = [
+            'a-0-1', '  b-1   ', '', '   ']
+        common_adapter._normalize_config()
+        self.assertEqual(['A-0-1', 'B-1'],
+                         common_adapter.config.io_port_list)
+
+    @res_mock.patch_common_adapter
+    def test_normalize_config_io_port_list_none(self, common_adapter,
+                                                mocked_res):
+        common_adapter.config.io_port_list = None
+        common_adapter._normalize_config()
+        self.assertIsNone(common_adapter.config.io_port_list)
+
+    @res_mock.patch_common_adapter
+    def test_normalize_config_io_port_list_empty_list(self, common_adapter,
+                                                      mocked_res):
+        common_adapter.config.io_port_list = []
+        self.assertRaises(exception.InvalidConfigurationValue,
+                          common_adapter._normalize_config)
+
+        common_adapter.config.io_port_list = ['  ', '']
+        self.assertRaises(exception.InvalidConfigurationValue,
+                          common_adapter._normalize_config)
+
+
+class TestISCSIAdapter(test.TestCase):
+    STORAGE_PROTOCOL = common.PROTOCOL_ISCSI
+
+    def setUp(self):
+        super(TestISCSIAdapter, self).setUp()
+        self.configuration = conf.Configuration(None)
+        vnx_utils.init_ops(self.configuration)
+        self.configuration.storage_protocol = self.STORAGE_PROTOCOL
+
+    def tearDown(self):
+        super(TestISCSIAdapter, self).tearDown()
+
+    @res_mock.patch_iscsi_adapter
+    def test_validate_ports_iscsi(self, vnx_iscsi, mocked):
+        all_iscsi_ports = vnx_iscsi.client.get_iscsi_targets()
+        valid_ports = vnx_iscsi.validate_ports(all_iscsi_ports, ['A-0-0'])
+        self.assertEqual([mocked['iscsi_port_a-0-0']], valid_ports)
+
+    @res_mock.patch_iscsi_adapter
+    def test_validate_ports_iscsi_invalid(self, vnx_iscsi, mocked):
+        invalid_white_list = ['A-0-0', 'A-B-0']
+        all_iscsi_ports = vnx_iscsi.client.get_iscsi_targets()
+        self.assertRaisesRegex(
+            exception.VolumeBackendAPIException,
+            'Invalid iscsi ports %s specified for io_port_list.'
+            % 'A-B-0',
+            vnx_iscsi.validate_ports,
+            all_iscsi_ports,
+            invalid_white_list)
+
+    @res_mock.patch_iscsi_adapter
+    def test_validate_ports_iscsi_not_exist(self, vnx_iscsi, mocked):
+        nonexistent_ports = ['A-0-0', 'A-6-1']
+        all_iscsi_ports = vnx_iscsi.client.get_iscsi_targets()
+        self.assertRaisesRegex(
+            exception.VolumeBackendAPIException,
+            'Invalid iscsi ports %s specified for io_port_list'
+            % 'A-6-1',
+            vnx_iscsi.validate_ports,
+            all_iscsi_ports,
+            nonexistent_ports)
+
+    @res_mock.patch_iscsi_adapter
+    def test_update_volume_stats_iscsi(self, vnx_iscsi, mocked):
+        with mock.patch.object(adapter.CommonAdapter, 'update_volume_stats',
+                               return_value={'storage_protocol':
+                                             self.STORAGE_PROTOCOL}):
+            stats = vnx_iscsi.update_volume_stats()
+        self.assertEqual(self.STORAGE_PROTOCOL, stats['storage_protocol'])
+        self.assertEqual('VNXISCSIDriver', stats['volume_backend_name'])
+
+    @res_mock.patch_iscsi_adapter
+    def test_build_terminate_connection_return_data_iscsi(
+            self, vnx_iscsi, mocked):
+        re = vnx_iscsi.build_terminate_connection_return_data(None, None)
+        self.assertIsNone(re)
+
+    @res_mock.patch_iscsi_adapter
+    def test_normalize_config_iscsi_initiators(
+            self, vnx_iscsi, mocked):
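+        # iscsi_initiators is configured as a JSON string mapping host names
+        # to lists of IP addresses; _normalize_config() parses it to a dict.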
+        vnx_iscsi.config.iscsi_initiators = (
+            '{"host1":["10.0.0.1", "10.0.0.2"],"host2":["10.0.0.3"]}')
+        vnx_iscsi._normalize_config()
+        expected = {"host1": ["10.0.0.1", "10.0.0.2"],
+                    "host2": ["10.0.0.3"]}
+        self.assertEqual(expected, vnx_iscsi.config.iscsi_initiators)
+
+        vnx_iscsi.config.iscsi_initiators = '{}'
+        vnx_iscsi._normalize_config()
+        expected = {}
+        self.assertEqual(expected, vnx_iscsi.config.iscsi_initiators)
+
+    @res_mock.patch_iscsi_adapter
+    def test_normalize_config_iscsi_initiators_none(
+            self, vnx_iscsi, mocked):
+        vnx_iscsi.config.iscsi_initiators = None
+        vnx_iscsi._normalize_config()
+        self.assertIsNone(vnx_iscsi.config.iscsi_initiators)
+
+    @res_mock.patch_iscsi_adapter
+    def test_normalize_config_iscsi_initiators_empty_str(
+            self, vnx_iscsi, mocked):
+        vnx_iscsi.config.iscsi_initiators = ''
+        self.assertRaises(exception.InvalidConfigurationValue,
+                          vnx_iscsi._normalize_config)
+
+        vnx_iscsi.config.iscsi_initiators = '   '
+        self.assertRaises(exception.InvalidConfigurationValue,
+                          vnx_iscsi._normalize_config)
+
+    @res_mock.patch_iscsi_adapter
+    def test_normalize_config_iscsi_initiators_not_dict(
+            self, vnx_iscsi, mocked):
+        vnx_iscsi.config.iscsi_initiators = '["a", "b"]'
+        self.assertRaises(exception.InvalidConfigurationValue,
+                          vnx_iscsi._normalize_config)
+
+
+class TestFCAdapter(test.TestCase):
+    STORAGE_PROTOCOL = common.PROTOCOL_FC
+
+    def setUp(self):
+        super(TestFCAdapter, self).setUp()
+        self.configuration = conf.Configuration(None)
+        vnx_utils.init_ops(self.configuration)
+        self.configuration.storage_protocol = self.STORAGE_PROTOCOL
+
+    def tearDown(self):
+        super(TestFCAdapter, self).tearDown()
+
+    @res_mock.patch_fc_adapter
+    def test_validate_ports_fc(self, vnx_fc, mocked):
+        all_fc_ports = vnx_fc.client.get_fc_targets()
+        valid_ports = vnx_fc.validate_ports(all_fc_ports, ['A-1'])
+        self.assertEqual([mocked['fc_port_a-1']], valid_ports)
+
+    @res_mock.patch_fc_adapter
+    def test_validate_ports_fc_invalid(self, vnx_fc, mocked):
+        invalid_white_list = ['A-1', 'A-B']
+        all_fc_ports = vnx_fc.client.get_fc_targets()
+        self.assertRaisesRegex(
+            exception.VolumeBackendAPIException,
+            'Invalid fc ports %s specified for io_port_list.'
+            % 'A-B',
+            vnx_fc.validate_ports,
+            all_fc_ports,
+            invalid_white_list)
+
+    @res_mock.patch_fc_adapter
+    def test_validate_ports_fc_not_exist(self, vnx_fc, mocked):
+        nonexistent_ports = ['A-1', 'A-6']
+        all_fc_ports = vnx_fc.client.get_fc_targets()
+        self.assertRaisesRegex(
+            exception.VolumeBackendAPIException,
+            'Invalid fc ports %s specified for io_port_list'
+            % 'A-6',
+            vnx_fc.validate_ports,
+            all_fc_ports,
+            nonexistent_ports)
+
+    @res_mock.patch_fc_adapter
+    def test_update_volume_stats(self, vnx_fc, mocked):
+        with mock.patch.object(adapter.CommonAdapter, 'get_pool_stats'):
+            stats = vnx_fc.update_volume_stats()
+        self.assertEqual(self.STORAGE_PROTOCOL, stats['storage_protocol'])
+        self.assertEqual('VNXFCDriver', stats['volume_backend_name'])
+
+    @mock.patch.object(vnx_utils, 'convert_to_tgt_list_and_itor_tgt_map')
+    @res_mock.patch_fc_adapter
+    def test_build_terminate_connection_return_data_auto_zone(
+            self, vnx_fc, mocked, converter):
+        vnx_fc.lookup_service = mock.Mock()
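+        # get_device_mapping_from_network is the FC zone manager lookup
+        # service hook; the adapter uses its result to build the
+        # initiator_target_map handed back for zone removal.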
+        get_mapping = vnx_fc.lookup_service.get_device_mapping_from_network
+
+        itor_tgt_map = {
+            'wwn1': ['wwnt1', 'wwnt2', 'wwnt3'],
+            'wwn2': ['wwnt1', 'wwnt2']
+        }
+        converter.return_value = ([], itor_tgt_map)
+        host = common.Host('fake_host',
+                           ['fake_hba1'],
+                           wwpns=['wwn1', 'wwn2'])
+        sg = mocked['sg']
+        re = vnx_fc.build_terminate_connection_return_data(host, sg)
+        get_mapping.assert_called_once_with(
+            ['wwn1', 'wwn2'], ['5006016636E01CA1'])
+        self.assertEqual(itor_tgt_map,
+                         re['data']['initiator_target_map'])
+
+    @res_mock.patch_fc_adapter
+    def test_build_terminate_connection_return_data_sg_absent(
+            self, vnx_fc, mocked):
+        sg = mocked['sg']
+        re = vnx_fc.build_terminate_connection_return_data(None, sg)
+        self.assertEqual('fibre_channel', re['driver_volume_type'])
+        self.assertEqual({}, re['data'])
+
+    @res_mock.patch_fc_adapter
+    def test_build_terminate_connection_return_data_without_autozone(
+            self, vnx_fc, mocked):
+        vnx_fc.lookup_service = None
+        re = vnx_fc.build_terminate_connection_return_data(None, None)
+        self.assertEqual('fibre_channel', re['driver_volume_type'])
+        self.assertEqual({}, re['data'])
+
+    @res_mock.patch_fc_adapter
+    def test_get_tgt_list_and_initiator_tgt_map_allow_port_only(
+            self, vnx_fc, mocked):
+        sg = mocked['sg']
+        host = common.Host('fake_host',
+                           ['fake_hba1'],
+                           wwpns=['wwn1', 'wwn2'])
+        mapping = {
+            'san_1': {'initiator_port_wwn_list': ['wwn1'],
+                      'target_port_wwn_list': ['5006016636E01CB2']}}
+        vnx_fc.lookup_service = mock.Mock()
+        vnx_fc.lookup_service.get_device_mapping_from_network = mock.Mock(
+            return_value=mapping)
+        get_mapping = vnx_fc.lookup_service.get_device_mapping_from_network
+        vnx_fc.allowed_ports = mocked['adapter'].allowed_ports
+        targets, tgt_map = vnx_fc._get_tgt_list_and_initiator_tgt_map(
+            sg, host, True)
+        self.assertEqual(['5006016636E01CB2'], targets)
+        self.assertEqual({'wwn1': ['5006016636E01CB2']}, tgt_map)
+        get_mapping.assert_called_once_with(
+            ['wwn1', 'wwn2'], ['5006016636E01CB2'])
diff --git a/cinder/tests/unit/volume/drivers/emc/vnx/test_client.py b/cinder/tests/unit/volume/drivers/emc/vnx/test_client.py
new file mode 100644
index 00000000000..bb0b1fb7c8d
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/emc/vnx/test_client.py
@@ -0,0 +1,463 @@
+# Copyright (c) 2016 EMC Corporation, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from cinder import exception
+from cinder import test
+from cinder.tests.unit.volume.drivers.emc.vnx import fake_exception \
+    as storops_ex
+from cinder.tests.unit.volume.drivers.emc.vnx import fake_storops as storops
+from cinder.tests.unit.volume.drivers.emc.vnx import res_mock
+from cinder.tests.unit.volume.drivers.emc.vnx import utils
+from cinder.volume.drivers.emc.vnx import client as vnx_client
+from cinder.volume.drivers.emc.vnx import common as vnx_common
+
+
+class TestCondition(test.TestCase):
+    @res_mock.patch_client
+    def test_is_lun_io_ready_false(self, client, mocked):
+        r = vnx_client.Condition.is_lun_io_ready(mocked['lun'])
+        self.assertFalse(r)
+
+    @res_mock.patch_client
+    def test_is_lun_io_ready_true(self, client, mocked):
+        r = vnx_client.Condition.is_lun_io_ready(mocked['lun'])
+        self.assertTrue(r)
+
+    @res_mock.patch_client
+    def test_is_lun_io_ready_exception(self, client, mocked):
+        self.assertRaises(exception.VolumeBackendAPIException,
+                          vnx_client.Condition.is_lun_io_ready,
+                          mocked['lun'])
+
+
+class TestClient(test.TestCase):
+    def setUp(self):
+        super(TestClient, self).setUp()
+
+    def tearDown(self):
+        super(TestClient, self).tearDown()
+
+    @res_mock.patch_client
+    def test_create_lun(self, client, mocked):
+        client.create_lun(pool='pool1', name='test', size=1, provision=None,
+                          tier=None, cg_id=None, ignore_thresholds=False)
+        client.vnx.get_pool.assert_called_once_with(name='pool1')
+        pool = client.vnx.get_pool(name='pool1')
+        pool.create_lun.assert_called_with(lun_name='test',
+                                           size_gb=1,
+                                           provision=None,
+                                           tier=None,
+                                           ignore_thresholds=False)
+
+    @res_mock.patch_client
+    def test_create_lun_error(self, client, mocked):
+        self.assertRaises(storops_ex.VNXCreateLunError,
+                          client.create_lun,
+                          pool='pool1',
+                          name='test',
+                          size=1,
+                          provision=None,
+                          tier=None,
+                          cg_id=None,
+                          ignore_thresholds=False)
+        client.vnx.get_pool.assert_called_once_with(name='pool1')
+
+    @res_mock.patch_client
+    def test_create_lun_already_existed(self, client, mocked):
+        client.create_lun(pool='pool1', name='lun3', size=1, provision=None,
+                          tier=None, cg_id=None, ignore_thresholds=False)
+        client.vnx.get_lun.assert_called_once_with(name='lun3')
+
+    @res_mock.patch_client
+    def test_create_lun_in_cg(self, client, mocked):
+        client.create_lun(
+            pool='pool1', name='test', size=1, provision=None,
+            tier=None, cg_id='cg1', ignore_thresholds=False)
+
+    @res_mock.patch_client
+    def test_create_lun_compression(self, client, mocked):
+        client.create_lun(pool='pool1', name='lun2', size=1,
+                          provision=storops.VNXProvisionEnum.COMPRESSED,
+                          tier=None, cg_id=None,
+                          ignore_thresholds=False)
+
+    @res_mock.patch_client
+    def test_migrate_lun(self, client, mocked):
+        client.migrate_lun(src_id=1,
+                           dst_id=2)
+        lun = client.vnx.get_lun()
+        lun.migrate.assert_called_with(2, storops.VNXMigrationRate.HIGH)
+
+    @utils.patch_sleep
+    @res_mock.patch_client
+    def test_migrate_lun_with_retry(self, client, mocked, mock_sleep):
+        lun = client.vnx.get_lun()
+        self.assertRaises(storops_ex.VNXTargetNotReadyError,
+                          client.migrate_lun,
+                          src_id=4,
+                          dst_id=5)
+        lun.migrate.assert_called_with(5, storops.VNXMigrationRate.HIGH)
+        mock_sleep.assert_called_with(15)
+
+    @res_mock.patch_client
+    def test_session_finished_faulted(self, client, mocked):
+        lun = client.vnx.get_lun()
+        r = client.session_finished(lun)
+        self.assertTrue(r)
+
+    @res_mock.patch_client
+    def test_session_finished_migrating(self, client, mocked):
+        lun = client.vnx.get_lun()
+        r = client.session_finished(lun)
+        self.assertFalse(r)
+
+    @res_mock.patch_client
+    def test_session_finished_not_existed(self, client, mocked):
+        lun = client.vnx.get_lun()
+        r = client.session_finished(lun)
+        self.assertTrue(r)
+
+    @utils.patch_sleep
+    @res_mock.patch_client
+    def test_migrate_lun_error(self, client, mocked, mock_sleep):
+        lun = client.vnx.get_lun()
+        self.assertRaises(storops_ex.VNXMigrationError,
+                          client.migrate_lun,
+                          src_id=4,
+                          dst_id=5)
+        lun.migrate.assert_called_with(5, storops.VNXMigrationRate.HIGH)
+        mock_sleep.assert_not_called()
+
+    @res_mock.patch_client
+    def test_verify_migration(self, client, mocked):
+        r = client.verify_migration(1, 2, 'test_wwn')
+        self.assertTrue(r)
+
+    @res_mock.patch_client
+    def test_verify_migration_false(self, client, mocked):
+        r = client.verify_migration(1, 2, 'fake_wwn')
+        self.assertFalse(r)
+
+    @res_mock.patch_client
+    def test_cleanup_migration(self, client, mocked):
+        client.cleanup_migration(1, 2)
+
+    @res_mock.patch_client
+    def test_get_lun_by_name(self, client, mocked):
+        lun = client.get_lun(name='lun_name_test_get_lun_by_name')
+        self.assertEqual(888, lun.lun_id)
+
+    @res_mock.patch_client
+    def test_delete_lun(self, client, mocked):
+        client.delete_lun(mocked['lun'].name)
+
+    @res_mock.patch_client
+    def test_delete_smp(self, client, mocked):
+        client.delete_lun(mocked['lun'].name)
+
+    @res_mock.patch_client
+    def test_delete_lun_not_exist(self, client, mocked):
+        client.delete_lun(mocked['lun'].name)
+
+    @res_mock.patch_client
+    def test_delete_lun_exception(self, client, mocked):
+        self.assertRaisesRegex(storops_ex.VNXDeleteLunError,
+                               'General lun delete error.',
+                               client.delete_lun, mocked['lun'].name)
+
+    @res_mock.patch_client
+    def test_enable_compression(self, client, mocked):
+        lun_obj = mocked['lun']
+        client.enable_compression(lun_obj)
+        lun_obj.enable_compression.assert_called_with(ignore_thresholds=True)
+
+    @res_mock.patch_client
+    def test_enable_compression_on_compressed_lun(self, client, mocked):
+        lun_obj = mocked['lun']
+        client.enable_compression(lun_obj)
+
+    @res_mock.patch_client
+    def test_get_vnx_enabler_status(self, client, mocked):
+        re = client.get_vnx_enabler_status()
+        self.assertTrue(re.dedup_enabled)
+        self.assertFalse(re.compression_enabled)
+        self.assertTrue(re.thin_enabled)
+        self.assertFalse(re.fast_enabled)
+        self.assertTrue(re.snap_enabled)
+
+    @res_mock.patch_client
+    def test_lun_has_snapshot_true(self, client, mocked):
+        re = client.lun_has_snapshot(mocked['lun'])
+        self.assertTrue(re)
+
+    @res_mock.patch_client
+    def test_lun_has_snapshot_false(self, client, mocked):
+        re = client.lun_has_snapshot(mocked['lun'])
+        self.assertFalse(re)
+
+    @res_mock.patch_client
+    def test_create_cg(self, client, mocked):
+        cg = client.create_consistency_group('cg_name')
+        self.assertIsNotNone(cg)
+
+    @res_mock.patch_client
+    def test_create_cg_already_existed(self, client, mocked):
+        cg = client.create_consistency_group('cg_name_already_existed')
+        self.assertIsNotNone(cg)
+
+    @res_mock.patch_client
+    def test_delete_cg(self, client, mocked):
+        client.delete_consistency_group('deleted_name')
+
+    @res_mock.patch_client
+    def test_delete_cg_not_existed(self, client, mocked):
+        client.delete_consistency_group('not_existed')
+
+    @res_mock.patch_client
+    def test_expand_lun(self, client, _ignore):
+        client.expand_lun('lun', 10, poll=True)
+
+    @res_mock.patch_client
+    def test_expand_lun_not_poll(self, client, _ignore):
+        client.expand_lun('lun', 10, poll=False)
+
+    @res_mock.patch_client
+    def test_expand_lun_already_expanded(self, client, _ignore):
+        client.expand_lun('lun', 10)
+
+    @utils.patch_no_sleep
+    @res_mock.patch_client
+    def test_expand_lun_not_ops_ready(self, client, _ignore):
+        self.assertRaises(storops_ex.VNXLunPreparingError,
+                          client.expand_lun, 'lun', 10)
+        lun = client.vnx.get_lun()
+        lun.expand.assert_called_once_with(10, ignore_thresholds=True)
+
+    @res_mock.patch_client
+    def test_create_snapshot(self, client, _ignore):
+        client.create_snapshot('lun_test_create_snapshot',
+                               'snap_test_create_snapshot')
+
+        lun = client.vnx.get_lun()
+        lun.create_snap.assert_called_once_with('snap_test_create_snapshot',
+                                                allow_rw=True,
+                                                auto_delete=False)
+
+    @res_mock.patch_client
+    def test_create_snapshot_snap_name_exist_error(self, client, _ignore):
+        client.create_snapshot('lun_name', 'snapshot_name')
+
+    @res_mock.patch_client
+    def test_delete_snapshot(self, client, _ignore):
+        client.delete_snapshot('snapshot_name')
+
+    @res_mock.patch_client
+    def test_delete_snapshot_delete_attached_error(self, client, _ignore):
+        self.assertRaises(storops_ex.VNXDeleteAttachedSnapError,
+                          client.delete_snapshot, 'snapshot_name')
+
+    @res_mock.patch_client
+    def test_copy_snapshot(self, client, mocked):
+        client.copy_snapshot('old_name', 'new_name')
+
+    @res_mock.patch_client
+    def test_create_mount_point(self, client, mocked):
+        client.create_mount_point('lun_name', 'smp_name')
+
+    @res_mock.patch_client
+    def test_attach_mount_point(self, client, mocked):
+        client.attach_snapshot('smp_name', 'snap_name')
+
+    @res_mock.patch_client
+    def test_detach_mount_point(self, client, mocked):
+        client.detach_snapshot('smp_name')
+
+    @res_mock.patch_client
+    def test_modify_snapshot(self, client, mocked):
+        client.modify_snapshot('snap_name', True, True)
+
+    @utils.patch_no_sleep
+    @res_mock.patch_client
+    def test_create_cg_snapshot(self, client, mocked):
+        snap = client.create_cg_snapshot('cg_snap_name', 'cg_name')
+        self.assertIsNotNone(snap)
+
+    @utils.patch_no_sleep
+    @res_mock.patch_client
+    def test_create_cg_snapshot_already_existed(self, client, mocked):
+        snap = client.create_cg_snapshot('cg_snap_name', 'cg_name')
+        self.assertIsNotNone(snap)
+
+    @utils.patch_no_sleep
+    @res_mock.patch_client
+    def test_delete_cg_snapshot(self, client, mocked):
+        client.delete_cg_snapshot(cg_snap_name='test_snap')
+
+    @res_mock.patch_client
+    def test_create_sg(self, client, mocked):
+        client.create_storage_group('sg_name')
+
+    @res_mock.patch_client
+    def test_create_sg_name_in_use(self, client, mocked):
+        self.assertRaisesRegex(storops_ex.VNXStorageGroupNameInUseError,
+                               'Storage group sg_name already exists. '
+                               'Message: ',
+                               client.create_storage_group, 'sg_name')
+
+    @res_mock.patch_client
+    def test_get_storage_group(self, client, mocked):
+        sg = client.get_storage_group('sg_name')
+        self.assertEqual('sg_name', sg.name)
+
+    @res_mock.patch_client
+    def test_register_initiator(self, client, mocked):
+        host = vnx_common.Host('host_name', ['host_initiator'], 'host_ip')
+        client.register_initiator(mocked['sg'], host,
+                                  {'host_initiator': 'port_1'})
+
+    @res_mock.patch_client
+    def test_register_initiator_exception(self, client, mocked):
+        host = vnx_common.Host('host_name', ['host_initiator'], 'host_ip')
+        client.register_initiator(mocked['sg'], host,
+                                  {'host_initiator': 'port_1'})
+
+    @res_mock.patch_client
+    def test_ping_node(self, client, mocked):
+        self.assertTrue(client.ping_node(mocked['iscsi_port'], 'ip'))
+
+    @res_mock.patch_client
+    def test_ping_node_fail(self, client, mocked):
+        self.assertFalse(client.ping_node(mocked['iscsi_port'], 'ip'))
+
+    @res_mock.patch_client
+    def test_add_lun_to_sg(self, client, mocked):
+        lun = 'not_care'
+        self.assertEqual(1, client.add_lun_to_sg(mocked['sg'], lun, 3))
+
+    @res_mock.patch_client
+    def test_add_lun_to_sg_alu_already_attached(self, client, mocked):
+        lun = 'not_care'
+        self.assertEqual(1, client.add_lun_to_sg(mocked['sg'], lun, 3))
+
+    @res_mock.patch_client
+    def test_add_lun_to_sg_alu_in_use(self, client, mocked):
+        self.assertRaisesRegex(storops_ex.VNXNoHluAvailableError,
+                               'No HLU available.',
+                               client.add_lun_to_sg,
+                               mocked['sg'],
+                               mocked['lun'],
+                               3)
+
+    @res_mock.patch_client
+    def test_update_consistencygroup_no_lun_in_cg(self, client, mocked):
+        lun_1 = mocked['lun_1']
+        lun_2 = mocked['lun_2']
+
+        def _get_lun(lun_id):
+            return list(filter(
+                lambda x: x.lun_id == lun_id, (lun_1, lun_2)))[0]
+
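+        # Stub get_lun so update_consistencygroup resolves LUN ids to the
+        # mocked LUN objects above.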
+        client.get_lun = _get_lun
+        cg = mocked['cg']
+
+        client.update_consistencygroup(cg, [lun_1.lun_id, lun_2.lun_id], [])
+        cg.replace_member.assert_called_once_with(lun_1, lun_2)
+
+    @res_mock.patch_client
+    def test_update_consistencygroup_lun_in_cg(self, client, mocked):
+        lun_1 = mocked['lun_1']
+        lun_2 = mocked['lun_2']
+
+        def _get_lun(lun_id):
+            return list(filter(
+                lambda x: x.lun_id == lun_id, (lun_1, lun_2)))[0]
+
+        client.get_lun = _get_lun
+        cg = mocked['cg']
+
+        client.update_consistencygroup(cg, [lun_2.lun_id], [lun_1.lun_id])
+        cg.replace_member.assert_called_once_with(lun_2)
+
+    @res_mock.patch_client
+    def test_update_consistencygroup_remove_all(self, client, mocked):
+        lun_1 = mocked['lun_1']
+
+        def _get_lun(lun_id):
+            return list(filter(lambda x: x.lun_id == lun_id, (lun_1,)))[0]
+
+        client.get_lun = _get_lun
+        cg = mocked['cg']
+
+        client.update_consistencygroup(cg, [], [lun_1.lun_id])
+        cg.delete_member.assert_called_once_with(lun_1)
+
+    @res_mock.patch_client
+    def test_get_available_ip(self, client, mocked):
+        ip = client.get_available_ip()
+        self.assertEqual('192.168.1.5', ip)
+
+    @res_mock.patch_client
+    def test_create_mirror(self, client, mocked):
+        mv = client.create_mirror('test_mirror_name', 11)
+        self.assertIsNotNone(mv)
+
+    @res_mock.patch_client
+    def test_create_mirror_already_created(self, client, mocked):
+        mv = client.create_mirror('error_mirror', 12)
+        self.assertIsNotNone(mv)
+
+    @res_mock.patch_client
+    def test_delete_mirror(self, client, mocked):
+        client.delete_mirror('mirror_name')
+
+    @res_mock.patch_client
+    def test_delete_mirror_already_deleted(self, client, mocked):
+        client.delete_mirror('mirror_name_deleted')
+
+    @res_mock.patch_client
+    def test_add_image(self, client, mocked):
+        client.add_image('mirror_namex', '192.168.1.11', 31)
+
+    @res_mock.patch_client
+    def test_remove_image(self, client, mocked):
+        client.remove_image('mirror_remove')
+
+    @res_mock.patch_client
+    def test_fracture_image(self, client, mocked):
+        client.fracture_image('mirror_fracture')
+
+    @res_mock.patch_client
+    def test_sync_image(self, client, mocked):
+        client.sync_image('mirror_sync')
+
+    @res_mock.patch_client
+    def test_promote_image(self, client, mocked):
+        client.promote_image('mirror_promote')
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_client
+    def test_get_lun_id(self, client, mocked, cinder_input):
+        lun_id = client.get_lun_id(cinder_input['volume'])
+        self.assertEqual(1, lun_id)
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_client
+    def test_get_lun_id_without_provider_location(self, client, mocked,
+                                                  cinder_input):
+        lun_id = client.get_lun_id(cinder_input['volume'])
+        self.assertIsInstance(lun_id, int)
+        self.assertEqual(mocked['lun'].lun_id, lun_id)
diff --git a/cinder/tests/unit/volume/drivers/emc/vnx/test_common.py b/cinder/tests/unit/volume/drivers/emc/vnx/test_common.py
new file mode 100644
index 00000000000..47483833bfe
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/emc/vnx/test_common.py
@@ -0,0 +1,297 @@
+# Copyright (c) 2016 EMC Corporation, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from cinder import exception
+from cinder import test
+from cinder.tests.unit.volume.drivers.emc.vnx import fake_storops as storops
+from cinder.tests.unit.volume.drivers.emc.vnx import res_mock
+from cinder.volume.drivers.emc.vnx import client
+from cinder.volume.drivers.emc.vnx import common
+
+
+class TestExtraSpecs(test.TestCase):
+    def test_valid_extra_spec(self):
+        extra_spec = {
+            'provisioning:type': 'deduplicated',
+            'storagetype:tiering': 'nomovement',
+        }
+        spec_obj = common.ExtraSpecs(extra_spec)
+        self.assertEqual(storops.VNXProvisionEnum.DEDUPED,
+                         spec_obj.provision)
+        self.assertEqual(storops.VNXTieringEnum.NO_MOVE,
+                         spec_obj.tier)
+
+    def test_extra_spec_case_insensitive(self):
+        extra_spec = {
+            'provisioning:type': 'Thin',
+            'storagetype:tiering': 'StartHighThenAuto',
+        }
+        spec_obj = common.ExtraSpecs(extra_spec)
+        self.assertEqual(storops.VNXProvisionEnum.THIN,
+                         spec_obj.provision)
+        self.assertEqual(storops.VNXTieringEnum.HIGH_AUTO,
+                         spec_obj.tier)
+
+    def test_empty_extra_spec(self):
+        extra_spec = {}
+        common.ExtraSpecs.set_defaults(storops.VNXProvisionEnum.THICK,
+                                       storops.VNXTieringEnum.HIGH_AUTO)
+        spec_obj = common.ExtraSpecs(extra_spec)
+        self.assertEqual(storops.VNXProvisionEnum.THICK, spec_obj.provision)
+        self.assertEqual(storops.VNXTieringEnum.HIGH_AUTO, spec_obj.tier)
+
+    def test_invalid_provision(self):
+        extra_spec = {
+            'provisioning:type': 'invalid',
+        }
+        self.assertRaises(exception.InvalidVolumeType,
+                          common.ExtraSpecs,
+                          extra_spec)
+
+    def test_invalid_tiering(self):
+        extra_spec = {
+            'storagetype:tiering': 'invalid',
+        }
+        self.assertRaises(exception.InvalidVolumeType,
+                          common.ExtraSpecs,
+                          extra_spec)
+
+    def test_validate_extra_spec_dedup_and_tier_failed(self):
+        spec_obj = common.ExtraSpecs({
+            'storagetype:pool': 'fake_pool',
+            'provisioning:type': 'deduplicated',
+            'storagetype:tiering': 'auto',
+        })
+        enabler_status = common.VNXEnablerStatus(
+            dedup=True, fast=True, thin=True)
+        self.assertRaises(exception.InvalidVolumeType,
+                          spec_obj.validate,
+                          enabler_status)
+
+    def test_tier_is_not_set_to_default_for_dedup_provision(self):
+        common.ExtraSpecs.set_defaults(storops.VNXProvisionEnum.THICK,
+                                       storops.VNXTieringEnum.HIGH_AUTO)
+        spec_obj = common.ExtraSpecs({'provisioning:type': 'deduplicated'})
+        self.assertEqual(storops.VNXProvisionEnum.DEDUPED, spec_obj.provision)
+        self.assertIsNone(spec_obj.tier)
+
+    def test_validate_extra_spec_is_valid(self):
+        spec_obj = common.ExtraSpecs({
+            'storagetype:pool': 'fake_pool',
+            'provisioning:type': 'thin',
+            'storagetype:tiering': 'auto',
+        })
+        enabler_status = common.VNXEnablerStatus(
+            dedup=True, fast=True, thin=True)
+        re = spec_obj.validate(enabler_status)
+        self.assertTrue(re)
+
+    def test_validate_extra_spec_dedup_invalid(self):
+        spec_obj = common.ExtraSpecs({
+            'provisioning:type': 'deduplicated',
+        })
+        enabler_status = common.VNXEnablerStatus(dedup=False)
+        self.assertRaises(exception.InvalidVolumeType,
+                          spec_obj.validate,
+                          enabler_status)
+
+    def test_validate_extra_spec_compress_invalid(self):
+        spec_obj = common.ExtraSpecs({
+            'provisioning:type': 'compressed',
+        })
+        enabler_status = common.VNXEnablerStatus(compression=False)
+        self.assertRaises(exception.InvalidVolumeType,
+                          spec_obj.validate,
+                          enabler_status)
+
+    def test_validate_extra_spec_no_thin_invalid(self):
+        spec_obj = common.ExtraSpecs({
+            'provisioning:type': 'compressed',
+        })
+        enabler_status = common.VNXEnablerStatus(compression=True, thin=False)
+        self.assertRaises(exception.InvalidVolumeType,
+                          spec_obj.validate,
+                          enabler_status)
+
+    def test_validate_extra_spec_tier_invalid(self):
+        spec_obj = common.ExtraSpecs({
+            'storagetype:tiering': 'auto',
+        })
+        enabler_status = common.VNXEnablerStatus(
+            dedup=True, fast=False, compression=True, snap=True, thin=True)
+        self.assertRaises(exception.InvalidVolumeType,
+                          spec_obj.validate,
+                          enabler_status)
+
+    def test_get_raw_data(self):
+        spec_obj = common.ExtraSpecs({'key1': 'value1'})
+        self.assertIn('key1', spec_obj)
+        self.assertNotIn('key2', spec_obj)
+        self.assertEqual('value1', spec_obj['key1'])
+
+    @res_mock.mock_storage_resources
+    def test_generate_extra_specs_from_lun(self, mocked_res):
+        lun = mocked_res['lun']
+        spec = common.ExtraSpecs.from_lun(lun)
+        self.assertEqual(storops.VNXProvisionEnum.COMPRESSED, spec.provision)
+        self.assertEqual(storops.VNXTieringEnum.HIGH, spec.tier)
+
+        lun = mocked_res['deduped_lun']
+        spec = common.ExtraSpecs.from_lun(lun)
+        self.assertEqual(storops.VNXProvisionEnum.DEDUPED, spec.provision)
+        self.assertIsNone(spec.tier)
+
+    @res_mock.mock_storage_resources
+    def test_extra_specs_match_with_lun(self, mocked_res):
+        lun = mocked_res['lun']
+        spec_obj = common.ExtraSpecs({
+            'provisioning:type': 'thin',
+            'storagetype:tiering': 'nomovement',
+        })
+        self.assertTrue(spec_obj.match_with_lun(lun))
+
+        lun = mocked_res['deduped_lun']
+        spec_obj = common.ExtraSpecs({
+            'provisioning:type': 'deduplicated',
+        })
+        self.assertTrue(spec_obj.match_with_lun(lun))
+
+    @res_mock.mock_storage_resources
+    def test_extra_specs_not_match_with_lun(self, mocked_res):
+        lun = mocked_res['lun']
+        spec_obj = common.ExtraSpecs({
+            'provisioning:type': 'thick',
+            'storagetype:tiering': 'nomovement',
+        })
+        self.assertFalse(spec_obj.match_with_lun(lun))
+
+
+class FakeConfiguration(object):
+    def __init__(self):
+        self.replication_device = []
+
+
+class TestReplicationDeviceList(test.TestCase):
+    def setUp(self):
+        super(TestReplicationDeviceList, self).setUp()
+        self.configuration = FakeConfiguration()
+        replication_devices = []
+        device = {'backend_id': 'array_id_1',
+                  'san_ip': '192.168.1.1',
+                  'san_login': 'admin',
+                  'san_password': 'admin',
+                  'storage_vnx_authentication_type': 'global',
+                  'storage_vnx_security_file_dir': '/home/stack/'}
+        replication_devices.append(device)
+        self.configuration.replication_device = replication_devices
+
+    def test_get_device(self):
+        devices_list = common.ReplicationDeviceList(self.configuration)
+        device = devices_list.get_device('array_id_1')
+        self.assertIsNotNone(device)
+        self.assertEqual('192.168.1.1', device.san_ip)
+        self.assertEqual('admin', device.san_login)
+        self.assertEqual('admin', device.san_password)
+        self.assertEqual('global', device.storage_vnx_authentication_type)
+        self.assertEqual('/home/stack/', device.storage_vnx_security_file_dir)
+
+    def test_get_device_not_found(self):
+        devices_list = common.ReplicationDeviceList(self.configuration)
+        device = devices_list.get_device('array_id_not_existed')
+        self.assertIsNone(device)
+
+    def test_devices(self):
+        devices_list = common.ReplicationDeviceList(self.configuration)
+        self.assertEqual(1, len(devices_list.devices))
+        self.assertEqual(1, len(devices_list))
+        self.assertIsNotNone(devices_list[0])
+
+
+class TestVNXMirrorView(test.TestCase):
+    def setUp(self):
+        super(TestVNXMirrorView, self).setUp()
+        self.primary_client = mock.create_autospec(client.Client)
+        self.secondary_client = mock.create_autospec(client.Client)
+        self.mirror_view = common.VNXMirrorView(
+            self.primary_client, self.secondary_client)
+
+    def test_create_mirror(self):
+        self.mirror_view.create_mirror('mirror_test', 11)
+        self.primary_client.create_mirror.assert_called_once_with(
+            'mirror_test', 11)
+
+    def test_create_secondary_lun(self):
+        self.mirror_view.create_secondary_lun('pool_name', 'lun_name',
+                                              10, 'thick', 'auto')
+        self.secondary_client.create_lun.assert_called_once_with(
+            'pool_name', 'lun_name', 10, 'thick', 'auto')
+
+    def test_delete_secondary_lun(self):
+        self.mirror_view.delete_secondary_lun('lun_name')
+        self.secondary_client.delete_lun.assert_called_once_with('lun_name')
+
+    def test_delete_mirror(self):
+        self.mirror_view.delete_mirror('mirror_name')
+        self.primary_client.delete_mirror.assert_called_once_with(
+            'mirror_name')
+
+    def test_add_image(self):
+        self.secondary_client.get_available_ip.return_value = '192.168.1.2'
+        self.mirror_view.add_image('mirror_name', 111)
+        self.secondary_client.get_available_ip.assert_called_once_with()
+        self.primary_client.add_image.assert_called_once_with(
+            'mirror_name', '192.168.1.2', 111)
+
+    def test_remove_image(self):
+        self.mirror_view.remove_image('mirror_remove')
+        self.primary_client.remove_image.assert_called_once_with(
+            'mirror_remove')
+
+    def test_fracture_image(self):
+        self.mirror_view.fracture_image('mirror_fracture')
+        self.primary_client.fracture_image.assert_called_once_with(
+            'mirror_fracture')
+
+    def test_promote_image(self):
+        self.mirror_view.promote_image('mirror_promote')
+        self.secondary_client.promote_image.assert_called_once_with(
+            'mirror_promote')
+
+    def test_destroy_mirror(self):
+        mv = mock.Mock()
+        mv.existed = True
+        self.primary_client.get_mirror.return_value = mv
+        self.mirror_view.destroy_mirror('mirror_name', 'sec_lun_name')
+        self.primary_client.get_mirror.assert_called_once_with(
+            'mirror_name')
+        self.primary_client.fracture_image.assert_called_once_with(
+            'mirror_name')
+        self.primary_client.remove_image.assert_called_once_with(
+            'mirror_name')
+        self.primary_client.delete_mirror.assert_called_once_with(
+            'mirror_name')
+        self.secondary_client.delete_lun.assert_called_once_with(
+            'sec_lun_name')
+
+    def test_destroy_mirror_not_existed(self):
+        mv = mock.Mock()
+        mv.existed = False
+        self.primary_client.get_mirror.return_value = mv
+        self.mirror_view.destroy_mirror('mirror_name', 'sec_lun_name')
+        self.primary_client.get_mirror.assert_called_once_with(
+            'mirror_name')
+        self.assertFalse(self.primary_client.fracture_image.called)
diff --git a/cinder/tests/unit/volume/drivers/emc/vnx/test_driver.py b/cinder/tests/unit/volume/drivers/emc/vnx/test_driver.py
new file mode 100644
index 00000000000..53dc1df4e55
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/emc/vnx/test_driver.py
@@ -0,0 +1,71 @@
+# Copyright (c) 2016 EMC Corporation, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from cinder import test
+from cinder.volume import configuration as conf
+from cinder.volume.drivers.emc.vnx import driver
+
+
+class TestEMCVNXDriver(test.TestCase):
+    def setUp(self):
+        super(TestEMCVNXDriver, self).setUp()
+        self.configuration = conf.Configuration(None)
+        self.fc_adapter_patcher = mock.patch(
+            'cinder.volume.drivers.emc.vnx.adapter.FCAdapter',
+            autospec=True)
+        self.fc_adapter_patcher.start()
+        self.iscsi_adapter_patcher = mock.patch(
+            'cinder.volume.drivers.emc.vnx.adapter.ISCSIAdapter',
+            autospec=True)
+        self.iscsi_adapter_patcher.start()
+        self.driver = None
+        self.addCleanup(self.fc_adapter_patcher.stop)
+        self.addCleanup(self.iscsi_adapter_patcher.stop)
+
+    def _get_driver(self, protocol):
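+        # do_setup() instantiates the FC or iSCSI adapter (both mocked in
+        # setUp) based on the configured storage_protocol.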
+        self.configuration.storage_protocol = protocol
+        drv = driver.EMCVNXDriver(configuration=self.configuration,
+                                  active_backend_id=None)
+        drv.do_setup(None)
+        return drv
+
+    def test_init_iscsi_driver(self):
+        _driver = self._get_driver('iscsi')
+        driver_name = str(_driver.adapter)
+        self.assertIn('ISCSIAdapter', driver_name)
+
+    def test_init_fc_driver(self):
+        _driver = self._get_driver('FC')
+        driver_name = str(_driver.adapter)
+        self.assertIn('FCAdapter', driver_name)
+
+    def test_create_volume(self):
+        _driver = self._get_driver('iscsi')
+        _driver.create_volume('fake_volume')
+        _driver.adapter.create_volume.assert_called_once_with('fake_volume')
+
+    def test_initialize_connection(self):
+        _driver = self._get_driver('iscsi')
+        _driver.initialize_connection('fake_volume', {'host': 'fake_host'})
+        _driver.adapter.initialize_connection.assert_called_once_with(
+            'fake_volume', {'host': 'fake_host'})
+
+    def test_terminate_connection(self):
+        _driver = self._get_driver('iscsi')
+        _driver.terminate_connection('fake_volume', {'host': 'fake_host'})
+        _driver.adapter.terminate_connection.assert_called_once_with(
+            'fake_volume', {'host': 'fake_host'})
diff --git a/cinder/tests/unit/volume/drivers/emc/vnx/test_res_mock.py b/cinder/tests/unit/volume/drivers/emc/vnx/test_res_mock.py
new file mode 100644
index 00000000000..3f7231abffb
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/emc/vnx/test_res_mock.py
@@ -0,0 +1,90 @@
+# Copyright (c) 2016 EMC Corporation, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from cinder import test
+from cinder.tests.unit.volume.drivers.emc.vnx import res_mock
+from cinder.volume import configuration as conf
+from cinder.volume.drivers.emc.vnx import utils
+
+
+class TestResMock(test.TestCase):
+    def setUp(self):
+        super(TestResMock, self).setUp()
+
+    def tearDown(self):
+        super(TestResMock, self).tearDown()
+
+    def test_load_cinder_resource(self):
+        cinder_res = res_mock.CinderResourceMock('mocked_cinder.yaml')
+
+        volume = cinder_res['test_mock_driver_input_inner']['volume']
+
+        items = ['base_lun_name^test', 'version^07.00.00', 'type^lun',
+                 'system^fake_serial', 'id^1']
+        self.assertEqual(sorted(items),
+                         sorted(volume.provider_location.split('|')))
+
+    def test_mock_driver_input(self):
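+        # res_mock decorators look up their fixture data by the decorated
+        # function's name (here from mocked_cinder.yaml), hence the inner
+        # function.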
+        @res_mock.mock_driver_input
+        def test_mock_driver_input_inner(self, mocked_input):
+            items = ['base_lun_name^test', 'version^07.00.00', 'type^lun',
+                     'system^fake_serial', 'id^1']
+            mocked_items = mocked_input['volume'].provider_location.split('|')
+            self.assertEqual(sorted(items),
+                             sorted(mocked_items))
+
+        test_mock_driver_input_inner(self)
+
+    def test_load_storage_resource(self):
+        vnx_res = res_mock.StorageResourceMock('test_res_mock.yaml')
+        lun = vnx_res['test_load_storage_resource']['lun']
+        pool = vnx_res['test_load_storage_resource']['pool']
+        created_lun = pool.create_lun()
+        self.assertEqual(lun.lun_id, created_lun.lun_id)
+        self.assertEqual(lun.poll, created_lun.poll)
+        self.assertEqual(lun.state, created_lun.state)
+
+    def test_patch_client(self):
+        @res_mock.patch_client
+        def test_patch_client_inner(self, patched_client, mocked):
+            vnx = patched_client.vnx
+            self.assertEqual('fake_serial', vnx.serial)
+
+            pool = vnx.get_pool()
+            self.assertEqual('pool_name', pool.name)
+
+        test_patch_client_inner(self)
+
+    def test_patch_client_mocked(self):
+        @res_mock.patch_client
+        def test_patch_client_mocked_inner(self, patched_client, mocked):
+            lun = mocked['lun']
+            self.assertEqual('Offline', lun.state)
+
+        test_patch_client_mocked_inner(self)
+
+    def test_patch_adapter_common(self):
+        self.configuration = conf.Configuration(None)
+        utils.init_ops(self.configuration)
+        self.configuration.san_ip = '192.168.1.1'
+        self.configuration.storage_vnx_authentication_type = 'global'
+        self.configuration.storage_vnx_pool_names = 'pool1,unit_test_pool'
+
+        @res_mock.patch_common_adapter
+        def test_patch_common_adapter_inner(self, patched_adapter, mocked):
+            pool = patched_adapter.client.vnx.get_pool()
+            self.assertEqual('pool_name', pool.name)
+
+        test_patch_common_adapter_inner(self)
diff --git a/cinder/tests/unit/volume/drivers/emc/vnx/test_res_mock.yaml b/cinder/tests/unit/volume/drivers/emc/vnx/test_res_mock.yaml
new file mode 100644
index 00000000000..f835ed0f30d
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/emc/vnx/test_res_mock.yaml
@@ -0,0 +1,59 @@
+#################################################
+# Storage resource
+#################################################
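+#
+# res_mock convention (as exercised by test_res_mock.py): keys under
+# _properties become attributes of the mocked object, keys under _methods
+# become methods returning the listed value, and a _side_effect list yields
+# successive values on each access.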
+
+# Common
+lun_base:
+  _properties: &lun_base_prop
+    lun_id: lun_id
+    poll: False
+    operation: None
+    state: Ready
+
+pool_base:
+  _properties: &pool_base_prop
+    name: pool_name
+    pool_id: 0
+    state: Ready
+    user_capacity_gbs: 1311
+    total_subscribed_capacity_gbs: 131
+    available_capacity_gbs: 132
+    percent_full_threshold: 70
+    fast_cache: True
+
+vnx_base:
+  _properties: &vnx_base_prop
+    serial: fake_serial
+
+test_load_storage_resource: &test_load_storage_resource
+  lun: &lun1
+    _properties:
+      <<: *lun_base_prop
+      state: Offline
+    _methods:
+      update:
+
+  pool: &pool1
+    _properties:
+      <<: *pool_base_prop
+    _methods:
+      create_lun: *lun1
+
+  vnx:
+    _properties:
+      <<: *vnx_base_prop
+    _methods:
+      get_pool: *pool1
+
+test_patch_client_inner: *test_load_storage_resource
+
+test_patch_client_mocked_inner: *test_load_storage_resource
+
+test_patch_common_adapter_inner: *test_load_storage_resource
+
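+# total_capacity_gb evaluates to 5 on the first access and 10 on the second.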
+test_property_side_effect_inner:
+  lun:
+    _properties:
+      <<: *lun_base_prop
+      total_capacity_gb:
+        _side_effect: [5, 10]
diff --git a/cinder/tests/unit/volume/drivers/emc/vnx/test_taskflows.py b/cinder/tests/unit/volume/drivers/emc/vnx/test_taskflows.py
new file mode 100644
index 00000000000..74cd18f26c6
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/emc/vnx/test_taskflows.py
@@ -0,0 +1,181 @@
+# Copyright (c) 2016 EMC Corporation, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import taskflow.engines
+from taskflow.patterns import linear_flow
+from taskflow.types import failure
+
+from cinder import test
+from cinder.tests.unit.volume.drivers.emc.vnx import fake_exception as vnx_ex
+from cinder.tests.unit.volume.drivers.emc.vnx import res_mock
+import cinder.volume.drivers.emc.vnx.taskflows as vnx_taskflow
+
+
+class TestTaskflow(test.TestCase):
+    def setUp(self):
+        super(TestTaskflow, self).setUp()
+        self.work_flow = linear_flow.Flow('test_task')
+
+    @res_mock.patch_client
+    def test_copy_snapshot_task(self, client, mocked):
+        store_spec = {'client': client,
+                      'snap_name': 'original_name',
+                      'new_snap_name': 'new_name'
+                      }
+        self.work_flow.add(vnx_taskflow.CopySnapshotTask())
+        engine = taskflow.engines.load(self.work_flow,
+                                       store=store_spec)
+        engine.run()
+
+    @res_mock.patch_client
+    def test_copy_snapshot_task_revert(self, client, mocked):
+        store_spec = {'client': client,
+                      'snap_name': 'original_name',
+                      'new_snap_name': 'new_name'
+                      }
+        self.work_flow.add(vnx_taskflow.CopySnapshotTask())
+        engine = taskflow.engines.load(self.work_flow,
+                                       store=store_spec)
+        self.assertRaises(vnx_ex.VNXSnapError,
+                          engine.run)
+
+    @res_mock.patch_client
+    def test_create_smp_task(self, client, mocked):
+        store_spec = {
+            'client': client,
+            'smp_name': 'mount_point_name',
+            'base_lun_name': 'base_name'
+        }
+        self.work_flow.add(vnx_taskflow.CreateSMPTask())
+        engine = taskflow.engines.load(self.work_flow,
+                                       store=store_spec)
+        engine.run()
+        smp_id = engine.storage.fetch('smp_id')
+        self.assertEqual(15, smp_id)
+
+    @res_mock.patch_client
+    def test_create_smp_task_revert(self, client, mocked):
+        store_spec = {
+            'client': client,
+            'smp_name': 'mount_point_name',
+            'base_lun_name': 'base_name'
+        }
+        self.work_flow.add(vnx_taskflow.CreateSMPTask())
+        engine = taskflow.engines.load(self.work_flow,
+                                       store=store_spec)
+        self.assertRaises(vnx_ex.VNXCreateLunError,
+                          engine.run)
+        smp_id = engine.storage.fetch('smp_id')
+        self.assertIsInstance(smp_id, failure.Failure)
+
+    @res_mock.patch_client
+    def test_attach_snap_task(self, client, mocked):
+        store_spec = {
+            'client': client,
+            'smp_name': 'mount_point_name',
+            'snap_name': 'snap_name'
+        }
+        self.work_flow.add(vnx_taskflow.AttachSnapTask())
+        engine = taskflow.engines.load(self.work_flow,
+                                       store=store_spec)
+        engine.run()
+
+    @res_mock.patch_client
+    def test_attach_snap_task_revert(self, client, mocked):
+        store_spec = {
+            'client': client,
+            'smp_name': 'mount_point_name',
+            'snap_name': 'snap_name'
+        }
+        self.work_flow.add(vnx_taskflow.AttachSnapTask())
+        engine = taskflow.engines.load(self.work_flow,
+                                       store=store_spec)
+        self.assertRaises(vnx_ex.VNXAttachSnapError,
+                          engine.run)
+
+    @res_mock.patch_client
+    def test_create_snapshot_task(self, client, mocked):
+        store_spec = {
+            'client': client,
+            'lun_id': 12,
+            'snap_name': 'snap_name'
+        }
+        self.work_flow.add(vnx_taskflow.CreateSnapshotTask())
+        engine = taskflow.engines.load(self.work_flow,
+                                       store=store_spec)
+        engine.run()
+
+    @res_mock.patch_client
+    def test_create_snapshot_task_revert(self, client, mocked):
+        store_spec = {
+            'client': client,
+            'lun_id': 13,
+            'snap_name': 'snap_name'
+        }
+        self.work_flow.add(vnx_taskflow.CreateSnapshotTask())
+        engine = taskflow.engines.load(self.work_flow,
+                                       store=store_spec)
+        self.assertRaises(vnx_ex.VNXCreateSnapError,
+                          engine.run)
+
+    @res_mock.patch_client
+    def test_allow_read_write_task(self, client, mocked):
+        store_spec = {
+            'client': client,
+            'snap_name': 'snap_name'
+        }
+        self.work_flow.add(vnx_taskflow.AllowReadWriteTask())
+        engine = taskflow.engines.load(self.work_flow,
+                                       store=store_spec)
+        engine.run()
+
+    @res_mock.patch_client
+    def test_allow_read_write_task_revert(self, client, mocked):
+        store_spec = {
+            'client': client,
+            'snap_name': 'snap_name'
+        }
+        self.work_flow.add(vnx_taskflow.AllowReadWriteTask())
+        engine = taskflow.engines.load(self.work_flow,
+                                       store=store_spec)
+        self.assertRaises(vnx_ex.VNXSnapError,
+                          engine.run)
+
+    @res_mock.patch_client
+    def test_create_cg_snapshot_task(self, client, mocked):
+        store_spec = {
+            'client': client,
+            'cg_name': 'test_cg',
+            'cg_snap_name': 'my_snap_name'
+        }
+        self.work_flow.add(vnx_taskflow.CreateCGSnapshotTask())
+        engine = taskflow.engines.load(self.work_flow,
+                                       store=store_spec)
+        engine.run()
+        snap_name = engine.storage.fetch('new_cg_snap_name')
+        self.assertIsInstance(snap_name, res_mock.StorageObjectMock)
+
+    @res_mock.patch_client
+    def test_create_cg_snapshot_task_revert(self, client, mocked):
+        store_spec = {
+            'client': client,
+            'cg_name': 'test_cg',
+            'cg_snap_name': 'my_snap_name'
+        }
+        self.work_flow.add(vnx_taskflow.CreateCGSnapshotTask())
+        engine = taskflow.engines.load(self.work_flow,
+                                       store=store_spec)
+        self.assertRaises(vnx_ex.VNXCreateSnapError,
+                          engine.run)
diff --git a/cinder/tests/unit/volume/drivers/emc/vnx/test_utils.py b/cinder/tests/unit/volume/drivers/emc/vnx/test_utils.py
new file mode 100644
index 00000000000..7f4aa08fb97
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/emc/vnx/test_utils.py
@@ -0,0 +1,177 @@
+# Copyright (c) 2016 EMC Corporation, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from cinder import exception
+from cinder import test
+from cinder.tests.unit.volume.drivers.emc.vnx import fake_exception \
+    as storops_ex
+from cinder.tests.unit.volume.drivers.emc.vnx import fake_storops as storops
+from cinder.tests.unit.volume.drivers.emc.vnx import res_mock
+from cinder.tests.unit.volume.drivers.emc.vnx import utils as ut_utils
+from cinder.volume.drivers.emc.vnx import common
+from cinder.volume.drivers.emc.vnx import utils
+
+
+class TestUtils(test.TestCase):
+    def setUp(self):
+        super(TestUtils, self).setUp()
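+        # Shrink the module-level timeout so the wait_until() tests finish
+        # quickly; tearDown() restores the original value.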
+        self.origin_timeout = common.DEFAULT_TIMEOUT
+        common.DEFAULT_TIMEOUT = 0.05
+
+    def tearDown(self):
+        super(TestUtils, self).tearDown()
+        common.DEFAULT_TIMEOUT = self.origin_timeout
+
+    @ut_utils.patch_looping_call
+    def test_wait_until(self):
+        mock_testmethod = mock.Mock(side_effect=[False, True])
+        utils.wait_until(mock_testmethod)
+        mock_testmethod.assert_has_calls([mock.call(), mock.call()])
+
+    @ut_utils.patch_looping_call
+    def test_wait_until_with_exception(self):
+        mock_testmethod = mock.Mock(side_effect=[
+            False, storops_ex.VNXAttachSnapError('Unknown error')])
+        mock_testmethod.__name__ = 'test_method'
+        self.assertRaises(storops_ex.VNXAttachSnapError,
+                          utils.wait_until,
+                          mock_testmethod,
+                          timeout=20,
+                          reraise_arbiter=(
+                              lambda ex: not isinstance(
+                                  ex, storops_ex.VNXCreateLunError)))
+        mock_testmethod.assert_has_calls([mock.call(), mock.call()])
+
+    @ut_utils.patch_looping_call
+    def test_wait_until_with_params(self):
+        mock_testmethod = mock.Mock(side_effect=[False, True])
+        mock_testmethod.__name__ = 'test_method'
+        utils.wait_until(mock_testmethod,
+                         param1=1,
+                         param2='test')
+        mock_testmethod.assert_has_calls(
+            [mock.call(param1=1, param2='test'),
+             mock.call(param1=1, param2='test')])
+
+    @res_mock.mock_driver_input
+    def test_retype_need_migration_when_host_changed(self, driver_in):
+        volume = driver_in['volume']
+        another_host = driver_in['host']
+        re = utils.retype_need_migration(
+            volume, None, None, another_host)
+        self.assertTrue(re)
+
+    @res_mock.mock_driver_input
+    def test_retype_need_migration_for_smp_volume(self, driver_in):
+        volume = driver_in['volume']
+        host = driver_in['host']
+        re = utils.retype_need_migration(
+            volume, None, None, host)
+        self.assertTrue(re)
+
+    @res_mock.mock_driver_input
+    def test_retype_need_migration_when_provision_changed(
+            self, driver_in):
+        volume = driver_in['volume']
+        host = driver_in['host']
+        old_spec = common.ExtraSpecs({'provisioning:type': 'thin'})
+        new_spec = common.ExtraSpecs({'provisioning:type': 'deduplicated'})
+        re = utils.retype_need_migration(
+            volume, old_spec.provision, new_spec.provision, host)
+        self.assertTrue(re)
+
+    @res_mock.mock_driver_input
+    def test_retype_not_need_migration_when_provision_changed(
+            self, driver_in):
+        volume = driver_in['volume']
+        host = driver_in['host']
+        old_spec = common.ExtraSpecs({'provisioning:type': 'thick'})
+        new_spec = common.ExtraSpecs({'provisioning:type': 'compressed'})
+        re = utils.retype_need_migration(
+            volume, old_spec.provision, new_spec.provision, host)
+        self.assertFalse(re)
+
+    @res_mock.mock_driver_input
+    def test_retype_not_need_migration(self, driver_in):
+        volume = driver_in['volume']
+        host = driver_in['host']
+        old_spec = common.ExtraSpecs({'storagetype:tiering': 'auto'})
+        new_spec = common.ExtraSpecs(
+            {'storagetype:tiering': 'starthighthenauto'})
+        re = utils.retype_need_migration(
+            volume, old_spec.provision, new_spec.provision, host)
+        self.assertFalse(re)
+
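Taken together, the five retype cases above define the migration predicate: storage-assisted migration is needed when the destination host changes, when the volume is a snapshot mount point (SMP), or when the provisioning change cannot be applied in place (thin to deduplicated), while thick to compressed and tiering-only changes do not force a move. A rough sketch of that logic; is_volume_smp is a hypothetical helper, and the host comparison is an assumption about the real code:

.. code-block:: python

    def retype_need_migration(volume, old_provision, new_provision, host):
        # A different destination host always means migration.
        if volume.host != host['host']:
            return True
        # A snapshot mount point cannot be retyped in place.
        # (is_volume_smp is hypothetical; the driver derives this from
        # the volume's metadata.)
        if is_volume_smp(volume):
            return True
        if old_provision == new_provision:
            return False
        # Compression can be switched on in place for thin/thick LUNs;
        # any other provisioning change needs a new LUN.
        if (new_provision == storops.VNXProvisionEnum.COMPRESSED and
                old_provision in (storops.VNXProvisionEnum.THIN,
                                  storops.VNXProvisionEnum.THICK)):
            return False
        return True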
+    def test_retype_need_change_tier(self):
+        re = utils.retype_need_change_tier(
+            storops.VNXTieringEnum.AUTO, storops.VNXTieringEnum.HIGH_AUTO)
+        self.assertTrue(re)
+
+    def test_retype_need_turn_on_compression(self):
+        re = utils.retype_need_turn_on_compression(
+            storops.VNXProvisionEnum.THIN,
+            storops.VNXProvisionEnum.COMPRESSED)
+        self.assertTrue(re)
+        re = utils.retype_need_turn_on_compression(
+            storops.VNXProvisionEnum.THICK,
+            storops.VNXProvisionEnum.COMPRESSED)
+        self.assertTrue(re)
+
+    def test_retype_not_need_turn_on_compression(self):
+        re = utils.retype_need_turn_on_compression(
+            storops.VNXProvisionEnum.DEDUPED,
+            storops.VNXProvisionEnum.COMPRESSED)
+        self.assertFalse(re)
+
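The compression predicate is the in-place counterpart of the rule above: compression only needs to be turned on when a thin or thick LUN is retyped to compressed, while a deduplicated source is migrated instead. A sketch consistent with these assertions (not necessarily the driver's exact code):

.. code-block:: python

    def retype_need_turn_on_compression(old_provision, new_provision):
        return (new_provision == storops.VNXProvisionEnum.COMPRESSED and
                old_provision in (storops.VNXProvisionEnum.THIN,
                                  storops.VNXProvisionEnum.THICK))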
+    @ut_utils.patch_extra_specs({'provisioning:type': 'compressed'})
+    @res_mock.mock_driver_input
+    def test_validate_cg_type(self, mocked_input):
+        cg = mocked_input['cg']
+        self.assertRaises(exception.InvalidInput,
+                          utils.validate_cg_type,
+                          cg)
+
+    @res_mock.mock_driver_input
+    def test_get_base_lun_name(self, mocked):
+        volume = mocked['volume']
+        self.assertEqual(
+            'test',
+            utils.get_base_lun_name(volume))
+
+    def test_convert_to_tgt_list_and_itor_tgt_map(self):
+        zone_mapping = {
+            'san_1': {'initiator_port_wwn_list':
+                      ['wwn1_1'],
+                      'target_port_wwn_list':
+                      ['wwnt_1', 'wwnt_2']},
+            'san_2': {'initiator_port_wwn_list':
+                      ['wwn2_1', 'wwn2_2'],
+                      'target_port_wwn_list':
+                      ['wwnt_1', 'wwnt_3']},
+        }
+
+        tgt_wwns, itor_tgt_map = (
+            utils.convert_to_tgt_list_and_itor_tgt_map(zone_mapping))
+        self.assertEqual(set(['wwnt_1', 'wwnt_2', 'wwnt_3']), set(tgt_wwns))
+        self.assertEqual({'wwn1_1': ['wwnt_1', 'wwnt_2'],
+                          'wwn2_1': ['wwnt_1', 'wwnt_3'],
+                          'wwn2_2': ['wwnt_1', 'wwnt_3']},
+                         itor_tgt_map)
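The final case fixes the shape of the FC zoning conversion: the fabric-keyed map from the zone manager collapses into a flat, de-duplicated target WWN list plus an initiator-to-target map. A sketch that matches exactly these assertions (the driver's implementation may differ in details):

.. code-block:: python

    def convert_to_tgt_list_and_itor_tgt_map(zone_mapping):
        tgt_wwns = set()
        itor_tgt_map = {}
        for fabric in zone_mapping.values():
            tgt_wwns.update(fabric['target_port_wwn_list'])
            for initiator in fabric['initiator_port_wwn_list']:
                itor_tgt_map[initiator] = fabric['target_port_wwn_list']
        return list(tgt_wwns), itor_tgt_map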
diff --git a/cinder/tests/unit/volume/drivers/emc/vnx/utils.py b/cinder/tests/unit/volume/drivers/emc/vnx/utils.py
new file mode 100644
index 00000000000..508d9b75413
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/emc/vnx/utils.py
@@ -0,0 +1,93 @@
+# Copyright (c) 2016 EMC Corporation, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from os import path
+
+import mock
+import six
+import yaml
+
+from cinder.tests.unit import utils
+from cinder.volume.drivers.emc.vnx import client
+from cinder.volume.drivers.emc.vnx import common
+
+
+patch_looping_call = mock.patch(
+    'oslo_service.loopingcall.FixedIntervalLoopingCall',
+    new=utils.ZeroIntervalLoopingCall)
+
+patch_sleep = mock.patch('time.sleep')
+
+
+patch_vnxsystem = mock.patch('storops.VNXSystem')
+
+
+patch_no_sleep = mock.patch('time.sleep', new=lambda x: None)
+
+
+def load_yaml(file_name):
+    yaml_file = path.join(
+        path.dirname(path.abspath(__file__)), file_name)
+    with open(yaml_file) as f:
+        res = yaml.safe_load(f)
+    return res
+
+
+def patch_extra_specs(specs):
+    return _build_patch_decorator(
+        'cinder.volume.volume_types.get_volume_type_extra_specs',
+        return_value=specs)
+
+
+def patch_extra_specs_validate(return_value=None, side_effect=None):
+    return _build_patch_decorator(
+        'cinder.volume.drivers.emc.vnx.common.ExtraSpecs.validate',
+        return_value=return_value,
+        side_effect=side_effect)
+
+
+def _build_patch_decorator(module_str, return_value=None, side_effect=None):
+    def _inner_mock(func):
+        @six.wraps(func)
+        def decorator(*args, **kwargs):
+            with mock.patch(
+                    module_str,
+                    return_value=return_value,
+                    side_effect=side_effect):
+                return func(*args, **kwargs)
+        return decorator
+    return _inner_mock
+
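This factory backs the @patch_extra_specs(...) and @patch_extra_specs_validate(...) decorators used across the test modules: each call returns a decorator that runs the wrapped test inside the corresponding mock.patch context. A hypothetical usage, for illustration only:

.. code-block:: python

    @patch_extra_specs({'provisioning:type': 'thin'})
    def test_example(self):
        # Within this test, volume_types.get_volume_type_extra_specs()
        # returns {'provisioning:type': 'thin'} for any volume type.
        ...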
+
+def build_fake_mirror_view():
+    primary_client = mock.create_autospec(spec=client.Client)
+    secondary_client = mock.create_autospec(spec=client.Client)
+
+    mirror_view = mock.create_autospec(spec=common.VNXMirrorView)
+    mirror_view.primary_client = primary_client
+    mirror_view.secondary_client = secondary_client
+    return mirror_view
+
+
+def get_replication_device():
+    return {
+        'backend_id': 'fake_serial',
+        'san_ip': '192.168.1.12',
+        'san_login': 'admin',
+        'san_password': 'admin',
+        'storage_vnx_authentication_type': 'global',
+        'storage_vnx_security_file_dir': None,
+    }
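In a replication test, this dict would typically be injected into the driver configuration as a replication_device entry, alongside a mirror view built by the helper above. A hypothetical wiring, for illustration only:

.. code-block:: python

    # replication_device is Cinder's standard list-of-dicts option;
    # the exact attribute wiring here is illustrative.
    self.configuration.replication_device = [get_replication_device()]
    mirror_view = build_fake_mirror_view()
    # mirror_view.primary_client / .secondary_client are autospec'd
    # Clients whose methods can be stubbed per test.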
diff --git a/cinder/volume/driver.py b/cinder/volume/driver.py
index 0c1ba69526b..44383e78d00 100644
--- a/cinder/volume/driver.py
+++ b/cinder/volume/driver.py
@@ -252,6 +252,12 @@ volume_opts = [
                      'discard (aka. trim/unmap). This will not actually '
                      'change the behavior of the backend or the client '
                      'directly, it will only notify that it can be used.'),
+    cfg.StrOpt('storage_protocol',
+               ignore_case=True,
+               default='iscsi',
+               choices=['iscsi', 'fc'],
+               help='Protocol used to transfer data between the host '
+                    'and the storage back-end.'),
 ]
 
 # for backward compatibility
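With storage_protocol defined in the common volume_opts, a back-end section can select the transport declaratively rather than by loading a per-protocol driver class. An illustrative cinder.conf fragment (section name, address and credentials are placeholders):

.. code-block:: ini

    [vnx_backend]
    # New common option added above; accepts 'iscsi' (default) or 'fc'.
    storage_protocol = fc
    san_ip = 192.168.1.12
    san_login = admin
    san_password = admin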
diff --git a/cinder/volume/drivers/emc/emc_cli_iscsi.py b/cinder/volume/drivers/emc/emc_cli_iscsi.py
deleted file mode 100644
index fd0d67261a0..00000000000
--- a/cinder/volume/drivers/emc/emc_cli_iscsi.py
+++ /dev/null
@@ -1,296 +0,0 @@
-# Copyright (c) 2012 - 2015 EMC Corporation, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-"""iSCSI Drivers for EMC VNX array based on CLI."""
-
-from oslo_log import log as logging
-
-from cinder import interface
-from cinder.volume import driver
-from cinder.volume.drivers.emc import emc_vnx_cli
-
-LOG = logging.getLogger(__name__)
-
-
-@interface.volumedriver
-class EMCCLIISCSIDriver(driver.ISCSIDriver):
-    """EMC ISCSI Drivers for VNX using CLI.
-
-    Version history:
-
-    .. code-block:: none
-
-        1.0.0 - Initial driver
-        2.0.0 - Thick/thin provisioning, robust enhancement
-        3.0.0 - Array-based Backend Support, FC Basic Support,
-                Target Port Selection for MPIO,
-                Initiator Auto Registration,
-                Storage Group Auto Deletion,
-                Multiple Authentication Type Support,
-                Storage-Assisted Volume Migration,
-                SP Toggle for HA
-        3.0.1 - Security File Support
-        4.0.0 - Advance LUN Features (Compression Support,
-                Deduplication Support, FAST VP Support,
-                FAST Cache Support), Storage-assisted Retype,
-                External Volume Management, Read-only Volume,
-                FC Auto Zoning
-        4.1.0 - Consistency group support
-        5.0.0 - Performance enhancement, LUN Number Threshold Support,
-                Initiator Auto Deregistration,
-                Force Deleting LUN in Storage Groups,
-                robust enhancement
-        5.1.0 - iSCSI multipath enhancement
-        5.2.0 - Pool-aware scheduler support
-        5.3.0 - Consistency group modification support
-        6.0.0 - Over subscription support
-                Create consistency group from cgsnapshot support
-                Multiple pools support enhancement
-                Manage/unmanage volume revise
-                White list target ports support
-                Snap copy support
-                Support efficient non-disruptive backup
-        7.0.0 - Clone consistency group support
-                Replication v2 support(managed)
-                Configurable migration rate support
-    """
-
-    def __init__(self, *args, **kwargs):
-        super(EMCCLIISCSIDriver, self).__init__(*args, **kwargs)
-        self.cli = emc_vnx_cli.getEMCVnxCli(
-            'iSCSI',
-            configuration=self.configuration,
-            active_backend_id=kwargs.get('active_backend_id'))
-        self.VERSION = self.cli.VERSION
-
-    def check_for_setup_error(self):
-        pass
-
-    def create_volume(self, volume):
-        """Creates a VNX volume."""
-        return self.cli.create_volume(volume)
-
-    def create_volume_from_snapshot(self, volume, snapshot):
-        """Creates a volume from a snapshot."""
-        return self.cli.create_volume_from_snapshot(volume, snapshot)
-
-    def create_cloned_volume(self, volume, src_vref):
-        """Creates a cloned volume."""
-        return self.cli.create_cloned_volume(volume, src_vref)
-
-    def extend_volume(self, volume, new_size):
-        """Extend a volume."""
-        self.cli.extend_volume(volume, new_size)
-
-    def delete_volume(self, volume):
-        """Deletes a VNX volume."""
-        self.cli.delete_volume(volume)
-
-    def migrate_volume(self, ctxt, volume, host):
-        return self.cli.migrate_volume(ctxt, volume, host)
-
-    def retype(self, ctxt, volume, new_type, diff, host):
-        """Convert the volume to be of the new type."""
-        return self.cli.retype(ctxt, volume, new_type, diff, host)
-
-    def create_snapshot(self, snapshot):
-        """Creates a snapshot."""
-        self.cli.create_snapshot(snapshot)
-
-    def delete_snapshot(self, snapshot):
-        """Deletes a snapshot."""
-        self.cli.delete_snapshot(snapshot)
-
-    def ensure_export(self, context, volume):
-        """Driver entry point to get the export info for an existing volume."""
-        pass
-
-    def create_export(self, context, volume, connector):
-        """Driver entry point to get the export info for a new volume."""
-        pass
-
-    def remove_export(self, context, volume):
-        """Driver entry point to remove an export for a volume."""
-        pass
-
-    def check_for_export(self, context, volume_id):
-        """Make sure volume is exported."""
-        pass
-
-    def initialize_connection(self, volume, connector):
-        """Initializes the connection and returns connection info.
-
-        The iscsi driver returns a driver_volume_type of 'iscsi'.
-        the format of the driver data is defined in vnx_get_iscsi_properties.
-        Example return value (multipath is not enabled)::
-
-            {
-                'driver_volume_type': 'iscsi'
-                'data': {
-                    'target_discovered': True,
-                    'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
-                    'target_portal': '127.0.0.0.1:3260',
-                    'target_lun': 1,
-                }
-            }
-
-        Example return value (multipath is enabled)::
-
-            {
-                'driver_volume_type': 'iscsi'
-                'data': {
-                    'target_discovered': True,
-                    'target_iqns': ['iqn.2010-10.org.openstack:volume-00001',
-                                    'iqn.2010-10.org.openstack:volume-00002'],
-                    'target_portals': ['127.0.0.1:3260', '127.0.1.1:3260'],
-                    'target_luns': [1, 1],
-                }
-            }
-
-        """
-        return self.cli.initialize_connection(volume, connector)
-
-    def terminate_connection(self, volume, connector, **kwargs):
-        """Disallow connection from connector."""
-        self.cli.terminate_connection(volume, connector)
-
-    def get_volume_stats(self, refresh=False):
-        """Get volume status.
-
-        If 'refresh' is True, run update the stats first.
-        """
-        if refresh:
-            self.update_volume_stats()
-
-        return self._stats
-
-    def update_volume_stats(self):
-        """Retrieve status info from volume group."""
-        LOG.debug("Updating volume status.")
-        # retrieving the volume update from the VNX
-        data = self.cli.update_volume_stats()
-
-        backend_name = self.configuration.safe_get('volume_backend_name')
-        data['volume_backend_name'] = backend_name or 'EMCCLIISCSIDriver'
-        data['storage_protocol'] = 'iSCSI'
-
-        self._stats = data
-
-    def manage_existing(self, volume, existing_ref):
-        """Manage an existing lun in the array.
-
-        The lun should be in a manageable pool backend, otherwise
-        error would return.
-        Rename the backend storage object so that it matches the,
-        volume['name'] which is how drivers traditionally map between a
-        cinder volume and the associated backend storage object.
-
-        .. code-block:: none
-
-            manage_existing_ref:{
-                'source-id':<lun id in VNX>
-            }
-
-            or
-
-            manage_existing_ref:{
-                'source-name':<lun name in VNX>
-            }
-
-        """
-        return self.cli.manage_existing(volume, existing_ref)
-
-    def manage_existing_get_size(self, volume, existing_ref):
-        """Return size of volume to be managed by manage_existing."""
-        return self.cli.manage_existing_get_size(volume, existing_ref)
-
-    def create_consistencygroup(self, context, group):
-        """Creates a consistencygroup."""
-        return self.cli.create_consistencygroup(context, group)
-
-    def delete_consistencygroup(self, context, group, volumes):
-        """Deletes a consistency group."""
-        return self.cli.delete_consistencygroup(
-            context, group, volumes)
-
-    def create_cgsnapshot(self, context, cgsnapshot, snapshots):
-        """Creates a cgsnapshot."""
-        return self.cli.create_cgsnapshot(
-            context, cgsnapshot, snapshots)
-
-    def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
-        """Deletes a cgsnapshot."""
-        return self.cli.delete_cgsnapshot(
-            context, cgsnapshot, snapshots)
-
-    def get_pool(self, volume):
-        """Returns the pool name of a volume."""
-        return self.cli.get_pool(volume)
-
-    def update_consistencygroup(self, context, group,
-                                add_volumes,
-                                remove_volumes):
-        """Updates LUNs in consistency group."""
-        return self.cli.update_consistencygroup(context, group,
-                                                add_volumes,
-                                                remove_volumes)
-
-    def unmanage(self, volume):
-        """Unmanages a volume."""
-        self.cli.unmanage(volume)
-
-    def create_consistencygroup_from_src(self, context, group, volumes,
-                                         cgsnapshot=None, snapshots=None,
-                                         source_cg=None, source_vols=None):
-        """Creates a consistency group from source."""
-        return self.cli.create_consistencygroup_from_src(context,
-                                                         group,
-                                                         volumes,
-                                                         cgsnapshot,
-                                                         snapshots,
-                                                         source_cg,
-                                                         source_vols)
-
-    def update_migrated_volume(self, context, volume, new_volume,
-                               original_volume_status=None):
-        """Returns model update for migrated volume."""
-        return self.cli.update_migrated_volume(context, volume, new_volume,
-                                               original_volume_status)
-
-    def create_export_snapshot(self, context, snapshot, connector):
-        """Creates a snapshot mount point for snapshot."""
-        return self.cli.create_export_snapshot(context, snapshot, connector)
-
-    def remove_export_snapshot(self, context, snapshot):
-        """Removes snapshot mount point for snapshot."""
-        return self.cli.remove_export_snapshot(context, snapshot)
-
-    def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
-        """Allows connection to snapshot."""
-        return self.cli.initialize_connection_snapshot(snapshot,
-                                                       connector,
-                                                       **kwargs)
-
-    def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
-        """Disallows connection to snapshot."""
-        return self.cli.terminate_connection_snapshot(snapshot,
-                                                      connector,
-                                                      **kwargs)
-
-    def backup_use_temp_snapshot(self):
-        return True
-
-    def failover_host(self, context, volumes, secondary_id=None):
-        """Failovers volume from primary device to secondary."""
-        return self.cli.failover_host(context, volumes, secondary_id)
diff --git a/cinder/volume/drivers/emc/emc_vnx_cli.py b/cinder/volume/drivers/emc/emc_vnx_cli.py
deleted file mode 100644
index ead6cc8bf4e..00000000000
--- a/cinder/volume/drivers/emc/emc_vnx_cli.py
+++ /dev/null
@@ -1,5054 +0,0 @@
-# Copyright (c) 2012 - 2015 EMC Corporation, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-VNX CLI
-"""
-import copy
-import math
-import os
-import random
-import re
-import time
-import types
-
-import eventlet
-from oslo_concurrency import lockutils
-from oslo_concurrency import processutils
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_serialization import jsonutils as json
-from oslo_service import loopingcall
-from oslo_utils import excutils
-from oslo_utils import timeutils
-import six
-from six.moves import range
-import taskflow.engines
-from taskflow.patterns import linear_flow
-from taskflow import task
-from taskflow.types import failure
-
-from cinder import exception
-from cinder.i18n import _, _LE, _LI, _LW
-from cinder.objects import fields
-from cinder import utils
-from cinder.volume import configuration as config
-from cinder.volume.drivers.san import san
-from cinder.volume import manager
-from cinder.volume import utils as vol_utils
-from cinder.volume import volume_types
-
-CONF = cfg.CONF
-
-LOG = logging.getLogger(__name__)
-
-
-INTERVAL_5_SEC = 5
-INTERVAL_20_SEC = 20
-INTERVAL_30_SEC = 30
-INTERVAL_60_SEC = 60
-
-ENABLE_TRACE = False
-
-loc_opts = [
-    cfg.StrOpt('storage_vnx_authentication_type',
-               default='global',
-               help='VNX authentication scope type.'),
-    cfg.StrOpt('storage_vnx_security_file_dir',
-               help='Directory path that contains the VNX security file. '
-               'Make sure the security file is generated first.'),
-    cfg.StrOpt('naviseccli_path',
-               default='',
-               help='Naviseccli Path.'),
-    cfg.StrOpt('storage_vnx_pool_names',
-               deprecated_name='storage_vnx_pool_name',
-               help='Comma-separated list of storage pool names to be used.'),
-    cfg.StrOpt('san_secondary_ip',
-               help='VNX secondary SP IP Address.'),
-    cfg.IntOpt('default_timeout',
-               default=60 * 24 * 365,
-               help='Default timeout for CLI operations in minutes. '
-               'For example, LUN migration is a typical long '
-               'running operation, which depends on the LUN size and '
-               'the load of the array. '
-               'An upper bound in the specific deployment can be set to '
-               'avoid unnecessary long wait. '
-               'By default, it is 365 days long.'),
-    cfg.IntOpt('max_luns_per_storage_group',
-               default=255,
-               help='Default max number of LUNs in a storage group.'
-               ' By default, the value is 255.'),
-    cfg.BoolOpt('destroy_empty_storage_group',
-                default=False,
-                help='To destroy storage group '
-                'when the last LUN is removed from it. '
-                'By default, the value is False.'),
-    cfg.StrOpt('iscsi_initiators',
-               default='',
-               help='Mapping between hostname and '
-               'its iSCSI initiator IP addresses.'),
-    cfg.StrOpt('io_port_list',
-               default='*',
-               help='Comma separated iSCSI or FC ports '
-               'to be used in Nova or Cinder.'),
-    cfg.BoolOpt('initiator_auto_registration',
-                default=False,
-                help='Automatically register initiators. '
-                'By default, the value is False.'),
-    cfg.BoolOpt('initiator_auto_deregistration',
-                default=False,
-                help='Automatically deregister initiators after the related '
-                'storage group is destroyed. '
-                'By default, the value is False.'),
-    cfg.BoolOpt('check_max_pool_luns_threshold',
-                default=False,
-                help='Report free_capacity_gb as 0 when the limit to '
-                'maximum number of pool LUNs is reached. '
-                'By default, the value is False.'),
-    cfg.BoolOpt('force_delete_lun_in_storagegroup',
-                default=False,
-                help='Delete a LUN even if it is in Storage Groups.'),
-    cfg.BoolOpt('ignore_pool_full_threshold',
-                default=False,
-                help='Force LUN creation even if '
-                'the full threshold of pool is reached.')
-]
-
-CONF.register_opts(loc_opts)
-
-
-def decorate_all_methods(method_decorator):
-    """Applies decorator on the methods of a class.
-
-    This is a class decorator, which will apply method decorator referred
-    by method_decorator to all the public methods (without underscore as
-    the prefix) in a class.
-    """
-    if not ENABLE_TRACE:
-        return lambda cls: cls
-
-    def _decorate_all_methods(cls):
-        for attr_name, attr_val in cls.__dict__.items():
-            if (isinstance(attr_val, types.FunctionType) and
-                    not attr_name.startswith("_")):
-                setattr(cls, attr_name, method_decorator(attr_val))
-        return cls
-
-    return _decorate_all_methods
-
-
-def log_enter_exit(func):
-    if not CONF.debug:
-        return func
-
-    def inner(self, *args, **kwargs):
-        LOG.debug("Entering %(cls)s.%(method)s",
-                  {'cls': self.__class__.__name__,
-                   'method': func.__name__})
-        start = timeutils.utcnow()
-        ret = func(self, *args, **kwargs)
-        end = timeutils.utcnow()
-        LOG.debug("Exiting %(cls)s.%(method)s. "
-                  "Spent %(duration)s sec. "
-                  "Return %(return)s",
-                  {'cls': self.__class__.__name__,
-                   'duration': timeutils.delta_seconds(start, end),
-                   'method': func.__name__,
-                   'return': ret})
-        return ret
-    return inner
-
-
-class PropertyDescriptor(object):
-    def __init__(self, option, label, key=None, converter=None):
-        self.option = option
-        self._label = None
-        self._key = key
-        self.converter = converter
-
-        self.label = label
-
-    @property
-    def label(self):
-        return self._label
-
-    @label.setter
-    def label(self, value):
-        value = value.strip()
-        if value[-1] == ':':
-            value = value[:-1]
-        self._label = value
-
-    @property
-    def key(self):
-        if self._key is None:
-            self._key = '_'.join(self.label.lower().split())
-        return self._key
-
-
-class _Enum(object):
-    @classmethod
-    def get_all(cls):
-        return [getattr(cls, member) for member in dir(cls)
-                if cls._is_enum(member)]
-
-    @classmethod
-    def _is_enum(cls, name):
-        return (isinstance(name, str)
-                and hasattr(cls, name)
-                and name.isupper())
-
-    @classmethod
-    def get_opt(cls, tier):
-        option_map = getattr(cls, '_map', None)
-        if option_map is None:
-            raise NotImplementedError(
-                _('Option map (cls._map) is not defined.'))
-
-        ret = option_map.get(tier)
-        if ret is None:
-            raise ValueError(_("{} is not a valid option.").format(tier))
-        return ret
-
-
-class VNXError(_Enum):
-
-    GENERAL_NOT_FOUND = 'cannot find|may not exist|does not exist'
-
-    SG_NAME_IN_USE = 'Storage Group name already in use'
-
-    LUN_ALREADY_EXPANDED = 0x712d8e04
-    LUN_EXISTED = 0x712d8d04
-    LUN_IS_PREPARING = 0x712d8e0e
-    LUN_IN_SG = 'contained in a Storage Group|LUN mapping still exists'
-    LUN_NOT_MIGRATING = ('The specified source LUN is '
-                         'not currently migrating')
-    LUN_MIGRATION_STOPPED = 'STOPPED|FAULTED'
-    LUN_MIGRATION_MIGRATING = 'TRANSITIONING|MIGRATING'
-    LUN_IS_NOT_SMP = 'it is not a snapshot mount point'
-
-    CG_IS_DELETING = 0x712d8801
-    CG_EXISTED = 0x716d8021
-    CG_SNAP_NAME_EXISTED = 0x716d8005
-
-    SNAP_NAME_EXISTED = 0x716d8005
-    SNAP_NAME_IN_USE = 0x716d8003
-    SNAP_ALREADY_MOUNTED = 0x716d8055
-    SNAP_NOT_ATTACHED = ('The specified Snapshot mount point '
-                         'is not currently attached.')
-    MIRROR_NOT_FOUND = 'Mirror not found'
-    MIRROR_IN_USE = 'Mirror name already in use'
-
-    @staticmethod
-    def _match(output, error_code):
-        is_match = False
-        if VNXError._is_enum(error_code):
-            error_code = getattr(VNXError, error_code)
-
-        if isinstance(error_code, int):
-            error_code = hex(error_code)
-
-        if isinstance(error_code, str):
-            error_code = error_code.strip()
-            found = re.findall(error_code, output,
-                               flags=re.IGNORECASE)
-            is_match = len(found) > 0
-        return is_match
-
-    @classmethod
-    def has_error(cls, output, *error_codes):
-        if error_codes is None or len(error_codes) == 0:
-            error_codes = VNXError.get_all()
-        return any([cls._match(output, error_code)
-                    for error_code in error_codes])
-
-
-class VNXMigrationRate(_Enum):
-    LOW = 'low'
-    MEDIUM = 'medium'
-    HIGH = 'high'
-    ASAP = 'asap'
-
-
-class VNXProvisionEnum(_Enum):
-    THIN = 'thin'
-    THICK = 'thick'
-    COMPRESSED = 'compressed'
-    DEDUPED = 'deduplicated'
-
-    _map = {
-        THIN: ['-type', 'Thin'],
-        THICK: ['-type', 'NonThin'],
-        COMPRESSED: ['-type', 'Thin'],
-        DEDUPED: ['-type', 'Thin', '-deduplication', 'on']}
-
-
-class VNXTieringEnum(_Enum):
-    NONE = 'none'
-    HIGH_AUTO = 'starthighthenauto'
-    AUTO = 'auto'
-    HIGH = 'highestavailable'
-    LOW = 'lowestavailable'
-    NO_MOVE = 'nomovement'
-
-    _map = {
-        NONE: ['', ''],
-        HIGH_AUTO: [
-            '-initialTier', 'highestAvailable',
-            '-tieringPolicy', 'autoTier'],
-        AUTO: [
-            '-initialTier', 'optimizePool',
-            '-tieringPolicy', 'autoTier'],
-        HIGH: [
-            '-initialTier', 'highestAvailable',
-            '-tieringPolicy', 'highestAvailable'],
-        LOW: [
-            '-initialTier', 'lowestAvailable',
-            '-tieringPolicy', 'lowestAvailable'],
-        NO_MOVE: [
-            '-initialTier', 'optimizePool',
-            '-tieringPolicy', 'noMovement']
-    }
-
-    @classmethod
-    def get_tier(cls, initial, policy):
-        ret = None
-        for k, v in cls._map.items():
-            if len(v) >= 4:
-                v_initial, v_policy = v[1], v[3]
-                if (cls.match_option(initial, v_initial) and
-                        cls.match_option(policy, v_policy)):
-                    ret = k
-                    break
-                elif cls.match_option(policy, 'noMovement'):
-                    # no movement could have different initial tier
-                    ret = cls.NO_MOVE
-                    break
-        if ret is None:
-            raise ValueError(_('Initial tier: {}, policy: {} is not valid.')
-                             .format(initial, policy))
-        return ret
-
-    @staticmethod
-    def match_option(output, option):
-        return output.replace(' ', '').lower() == option.lower()
-
-
-class VNXLun(object):
-
-    DEFAULT_TIER = VNXTieringEnum.HIGH_AUTO
-    DEFAULT_PROVISION = VNXProvisionEnum.THICK
-
-    def __init__(self):
-        self._lun_id = -1
-        self._capacity = 0.0
-        self._pool_name = ''
-        self._tier = self.DEFAULT_TIER
-        self._provision = self.DEFAULT_PROVISION
-
-        self._const = VNXLunProperties
-
-    @property
-    def lun_id(self):
-        return self._lun_id
-
-    @lun_id.setter
-    def lun_id(self, data):
-        if isinstance(data, dict):
-            self._lun_id = self._get(data, self._const.LUN_ID)
-        elif isinstance(data, int):
-            self._lun_id = data
-        elif isinstance(data, str):
-            try:
-                self._lun_id = int(data)
-            except ValueError:
-                raise ValueError(
-                    _('LUN number ({}) is not an integer.').format(data))
-        else:
-            self._raise_type_error(data)
-
-        if self.lun_id < 0:
-            raise ValueError(_('LUN id({}) is not valid.')
-                             .format(self.lun_id))
-
-    @property
-    def pool_name(self):
-        return self._pool_name
-
-    @pool_name.setter
-    def pool_name(self, data):
-        if isinstance(data, dict):
-            self._pool_name = self._get(data, self._const.LUN_POOL)
-        elif isinstance(data, str):
-            self._pool_name = data
-        else:
-            self._raise_type_error(data)
-
-    @property
-    def capacity(self):
-        return self._capacity
-
-    @capacity.setter
-    def capacity(self, data):
-        if isinstance(data, dict):
-            self._capacity = self._get(data, self._const.LUN_CAPACITY)
-        elif isinstance(data, float):
-            self._capacity = data
-        elif isinstance(data, int):
-            self._capacity = float(data)
-        else:
-            self._raise_type_error(data)
-
-    @property
-    def tier(self):
-        return self._tier
-
-    @tier.setter
-    def tier(self, data):
-        if isinstance(data, dict):
-            initial = self._get(data, self._const.LUN_INITIAL_TIER)
-            policy = self._get(data, self._const.LUN_TIERING_POLICY)
-
-            self._tier = VNXTieringEnum.get_tier(initial, policy)
-        elif isinstance(data, str) and data in VNXTieringEnum.get_all():
-            self._tier = data
-        else:
-            self._raise_type_error(data)
-
-    @property
-    def provision(self):
-        return self._provision
-
-    @provision.setter
-    def provision(self, data):
-        self._provision = VNXProvisionEnum.THICK
-        if isinstance(data, dict):
-            is_thin = self._get(data, self._const.LUN_IS_THIN_LUN)
-            is_compressed = self._get(data, self._const.LUN_IS_COMPRESSED)
-            is_dedup = self._get(data, self._const.LUN_DEDUP_STATE)
-
-            if is_compressed:
-                self._provision = VNXProvisionEnum.COMPRESSED
-            elif is_dedup:
-                self._provision = VNXProvisionEnum.DEDUPED
-            elif is_thin:
-                self._provision = VNXProvisionEnum.THIN
-        elif isinstance(data, str) and data in VNXProvisionEnum.get_all():
-            self._provision = data
-        else:
-            self._raise_type_error(data)
-
-    @staticmethod
-    def _raise_type_error(data):
-        raise ValueError(_('Input type {} is not supported.')
-                         .format(type(data)))
-
-    def update(self, data):
-        self.lun_id = data
-        self.pool_name = data
-        self.capacity = data
-        self.provision = data
-        self.tier = data
-
-    @staticmethod
-    def get_lun_by_id(client, lun_id):
-        lun = VNXLun()
-        lun.lun_id = lun_id
-        lun.update(client)
-        return lun
-
-    @staticmethod
-    def _get(data, key):
-        if isinstance(key, PropertyDescriptor):
-            key = key.key
-        return data.get(key)
-
-    def __repr__(self):
-        return ('VNXLun ['
-                'lun_id: {}, '
-                'pool_name: {}, '
-                'capacity: {}, '
-                'provision: {}, '
-                'tier: {}]'
-                .format(self.lun_id,
-                        self.pool_name,
-                        self.capacity,
-                        self.provision,
-                        self.tier))
-
-
-class Converter(object):
-    @staticmethod
-    def str_to_boolean(str_input):
-        ret = False
-        if str_input.strip().lower() in ('yes', 'true', 'enabled', 'on'):
-            ret = True
-        return ret
-
-
-class Dict(dict):
-    def __getattr__(self, item):
-        try:
-            ret = super(Dict, self).__getattr__(item)
-        except AttributeError:
-            if item in self:
-                value = self.get(item)
-            else:
-                raise AttributeError(
-                    _("'{}' object has no attribute '{}'")
-                    .format(__name__, item))
-            ret = value
-        return ret
-
-
-class VNXCliParser(_Enum):
-    @classmethod
-    def get_all_property_descriptor(cls):
-        return (p for p in cls.get_all()
-                if isinstance(p, PropertyDescriptor))
-
-    @classmethod
-    def get_property_options(cls):
-        properties = cls.get_all_property_descriptor()
-        return [p.option for p in properties if p.option is not None]
-
-    @classmethod
-    def parse(cls, output, properties=None):
-        ret = Dict()
-        output = output.strip()
-
-        if properties is None:
-            properties = cls.get_all_property_descriptor()
-
-        for p in properties:
-            pattern = re.compile(
-                '^\s*{}\s*[:]?\s*(?P<value>.*)\s*$'.format(
-                    re.escape(p.label)),
-                re.MULTILINE | re.IGNORECASE)
-            matched = re.search(pattern, output)
-
-            if matched is not None:
-                value = matched.group('value')
-                if p.converter is not None and callable(p.converter):
-                    value = p.converter(value)
-                ret[p.key] = value
-            else:
-                ret[p.key] = None
-        return ret
-
-
-class VNXLunProperties(VNXCliParser):
-    LUN_STATE = PropertyDescriptor(
-        '-state',
-        'Current State',
-        'state')
-    LUN_STATUS = PropertyDescriptor(
-        '-status',
-        'Status')
-    LUN_OPERATION = PropertyDescriptor(
-        '-opDetails',
-        'Current Operation',
-        'operation')
-    LUN_CAPACITY = PropertyDescriptor(
-        '-userCap',
-        'User Capacity (GBs)',
-        'total_capacity_gb',
-        float)
-    LUN_OWNER = PropertyDescriptor(
-        '-owner',
-        'Current Owner',
-        'owner')
-    LUN_ATTACHEDSNAP = PropertyDescriptor(
-        '-attachedSnapshot',
-        'Attached Snapshot')
-    LUN_NAME = PropertyDescriptor(
-        None,
-        'Name',
-        'lun_name')
-    LUN_ID = PropertyDescriptor(
-        None,
-        'LOGICAL UNIT NUMBER',
-        'lun_id',
-        int)
-    LUN_POOL = PropertyDescriptor(
-        '-poolName',
-        'Pool Name',
-        'pool')
-    LUN_IS_THIN_LUN = PropertyDescriptor(
-        '-isThinLUN',
-        'Is Thin LUN',
-        converter=Converter.str_to_boolean)
-    LUN_IS_COMPRESSED = PropertyDescriptor(
-        '-isCompressed',
-        'Is Compressed',
-        converter=Converter.str_to_boolean)
-    LUN_DEDUP_STATE = PropertyDescriptor(
-        '-dedupState',
-        'Deduplication State',
-        'dedup_state',
-        Converter.str_to_boolean)
-    LUN_INITIAL_TIER = PropertyDescriptor(
-        '-initialTier',
-        'Initial Tier')
-    LUN_TIERING_POLICY = PropertyDescriptor(
-        '-tieringPolicy',
-        'Tiering Policy')
-
-    LUN_UID = PropertyDescriptor(
-        '-uid',
-        'UID',
-        'wwn')
-
-    lun_all = [LUN_STATE,
-               LUN_STATUS,
-               LUN_OPERATION,
-               LUN_CAPACITY,
-               LUN_OWNER,
-               LUN_ATTACHEDSNAP,
-               LUN_UID]
-
-    lun_with_pool = [LUN_STATE,
-                     LUN_CAPACITY,
-                     LUN_OWNER,
-                     LUN_ATTACHEDSNAP,
-                     LUN_POOL]
-
-
-class VNXPoolProperties(VNXCliParser):
-    POOL_ID = PropertyDescriptor(
-        None,
-        'Pool ID',
-        'pool_id',
-        int)
-    POOL_STATE = PropertyDescriptor(
-        '-state',
-        'State')
-    POOL_TOTAL_CAPACITY = PropertyDescriptor(
-        '-userCap',
-        'User Capacity (GBs)',
-        'total_capacity_gb',
-        float)
-    POOL_FREE_CAPACITY = PropertyDescriptor(
-        '-availableCap',
-        'Available Capacity (GBs)',
-        'free_capacity_gb',
-        float)
-    POOL_FAST_CACHE = PropertyDescriptor(
-        '-fastcache',
-        'FAST Cache',
-        'fast_cache_enabled',
-        Converter.str_to_boolean)
-    POOL_NAME = PropertyDescriptor(
-        None,
-        'Pool Name')
-    POOL_SUBSCRIBED_CAPACITY = PropertyDescriptor(
-        '-subscribedCap',
-        'Total Subscribed Capacity (GBs)',
-        'provisioned_capacity_gb',
-        float)
-    POOL_FULL_THRESHOLD = PropertyDescriptor(
-        '-prcntFullThreshold',
-        'Percent Full Threshold',
-        'pool_full_threshold',
-        int)
-
-    pool_all = [POOL_TOTAL_CAPACITY,
-                POOL_FREE_CAPACITY,
-                POOL_STATE,
-                POOL_FULL_THRESHOLD]
-
-
-class VNXPoolFeatureProperties(VNXCliParser):
-    MAX_POOL_LUNS = PropertyDescriptor(
-        '-maxPoolLUNs',
-        'Max. Pool LUNs',
-        'max_pool_luns',
-        int)
-    TOTAL_POOL_LUNS = PropertyDescriptor(
-        '-numPoolLUNs',
-        'Total Number of Pool LUNs',
-        'total_pool_luns',
-        int)
-
-    default = [MAX_POOL_LUNS, TOTAL_POOL_LUNS]
-
-
-@decorate_all_methods(log_enter_exit)
-class CommandLineHelper(object):
-    # extra spec constants
-    tiering_spec = 'storagetype:tiering'
-    provisioning_specs = [
-        'provisioning:type',
-        'storagetype:provisioning']
-    copytype_spec = 'copytype:snap'
-
-    def __init__(self, configuration):
-        configuration.append_config_values(san.san_opts)
-
-        self.timeout = configuration.default_timeout * INTERVAL_60_SEC
-        self.max_luns = configuration.max_luns_per_storage_group
-
-        # Checking for existence of naviseccli tool
-        navisecclipath = configuration.naviseccli_path
-        if not os.path.exists(navisecclipath):
-            err_msg = _('naviseccli_path: Could not find '
-                        'NAVISECCLI tool %(path)s.') % {'path': navisecclipath}
-            LOG.error(err_msg)
-            raise exception.VolumeBackendAPIException(data=err_msg)
-
-        self.command = (navisecclipath, '-address')
-        self.active_storage_ip = configuration.san_ip
-        self.primary_storage_ip = self.active_storage_ip
-        self.secondary_storage_ip = configuration.san_secondary_ip
-        if self.secondary_storage_ip == self.primary_storage_ip:
-            LOG.warning(_LW("san_secondary_ip is configured as "
-                            "the same value as san_ip."))
-            self.secondary_storage_ip = None
-        if not configuration.san_ip:
-            err_msg = _('san_ip: Mandatory field configuration. '
-                        'san_ip is not set.')
-            LOG.error(err_msg)
-            raise exception.VolumeBackendAPIException(data=err_msg)
-        # Lock file name for this specific back-end
-        self.toggle_lock_name = configuration.config_group
-        self.credentials = ()
-        storage_username = configuration.san_login
-        storage_password = configuration.san_password
-        storage_auth_type = configuration.storage_vnx_authentication_type
-        storage_vnx_security_file = configuration.storage_vnx_security_file_dir
-
-        if storage_auth_type is None:
-            storage_auth_type = 'global'
-        elif storage_auth_type.lower() not in ('ldap', 'local', 'global'):
-            err_msg = (_('Invalid VNX authentication type: %s')
-                       % storage_auth_type)
-            LOG.error(err_msg)
-            raise exception.VolumeBackendAPIException(data=err_msg)
-        # if there is security file path provided, use this security file
-        if storage_vnx_security_file:
-            self.credentials = ('-secfilepath', storage_vnx_security_file)
-            LOG.info(_LI("Using security file in %s for authentication"),
-                     storage_vnx_security_file)
-        # if there is a username/password provided, use those in the cmd line
-        elif storage_username is not None and len(storage_username) > 0 and\
-                storage_password is not None and len(storage_password) > 0:
-            self.credentials = ('-user', storage_username,
-                                '-password', storage_password,
-                                '-scope', storage_auth_type)
-            LOG.info(_LI("Plain text credentials are being used for "
-                         "authentication"))
-        else:
-            LOG.info(_LI("Neither security file nor plain "
-                         "text credentials are specified. Security file under "
-                         "home directory will be used for authentication "
-                         "if present."))
-
-        self.iscsi_initiator_map = None
-        if configuration.iscsi_initiators:
-            self.iscsi_initiator_map = \
-                json.loads(configuration.iscsi_initiators)
-            LOG.info(_LI("iscsi_initiators: %s"), self.iscsi_initiator_map)
-
-    def _raise_cli_error(self, cmd=None, rc=None, out='', **kwargs):
-        raise exception.EMCVnxCLICmdError(cmd=cmd,
-                                          rc=rc,
-                                          out=out.split('\n'),
-                                          **kwargs)
-
-    def create_lun_with_advance_feature(self, pool, name, size,
-                                        provisioning, tiering,
-                                        consistencygroup_id=None,
-                                        ignore_thresholds=False,
-                                        poll=True):
-        command_create_lun = ['lun', '-create',
-                              '-capacity', int(size),
-                              '-sq', 'gb',
-                              '-poolName', pool,
-                              '-name', name]
-        if not poll:
-            command_create_lun = ['-np'] + command_create_lun
-        # provisioning
-        if provisioning:
-            command_create_lun.extend(VNXProvisionEnum.get_opt(provisioning))
-        # tiering
-        if tiering and tiering != 'none':
-            command_create_lun.extend(VNXTieringEnum.get_opt(tiering))
-        if ignore_thresholds:
-            command_create_lun.append('-ignoreThresholds')
-
-        # create lun
-        data = self.create_lun_by_cmd(command_create_lun, name)
-
-        # handle compression
-        try:
-            if provisioning == 'compressed':
-                self.enable_or_disable_compression_on_lun(
-                    name, 'on')
-        except exception.EMCVnxCLICmdError as ex:
-            with excutils.save_and_reraise_exception():
-                self.delete_lun(name)
-                LOG.error(_LE("Error on enable compression on lun %s."),
-                          ex)
-
-        # handle consistency group
-        try:
-            if consistencygroup_id:
-                self.add_lun_to_consistency_group(
-                    consistencygroup_id, data['lun_id'])
-        except exception.EMCVnxCLICmdError as ex:
-            with excutils.save_and_reraise_exception():
-                self.delete_lun(name)
-                LOG.error(_LE("Error on adding lun to consistency"
-                              " group. %s"), ex)
-        return data
-
-    def create_lun_by_cmd(self, cmd, name):
-        out, rc = self.command_execute(*cmd)
-        if rc != 0:
-            # Ignore the error that due to retry
-            if VNXError.has_error(out, VNXError.LUN_EXISTED):
-                LOG.warning(_LW('LUN already exists, LUN name %(name)s. '
-                                'Message: %(msg)s'),
-                            {'name': name, 'msg': out})
-            else:
-                self._raise_cli_error(cmd, rc, out)
-
-        def _lun_state_validation(lun_data):
-            lun_state = lun_data[VNXLunProperties.LUN_STATE.key]
-            if lun_state == 'Initializing':
-                return False
-            # Lun in Ready or Faulted state is eligible for IO access,
-            # so if no lun operation, return success.
-            elif lun_state in ['Ready', 'Faulted']:
-                return lun_data[VNXLunProperties.LUN_OPERATION.key] == 'None'
-            # Raise exception if lun state is Offline, Invalid, Destroying
-            # or other unexpected states.
-            else:
-                msg = (_("Volume %(name)s was created in VNX, "
-                         "but in %(state)s state.")
-                       % {'name': lun_data[VNXLunProperties.LUN_NAME.key],
-                          'state': lun_state})
-                raise exception.VolumeBackendAPIException(data=msg)
-
-        def lun_is_ready():
-            try:
-                data = self.get_lun_by_name(name,
-                                            VNXLunProperties.lun_all,
-                                            False)
-            except exception.EMCVnxCLICmdError as ex:
-                orig_out = "\n".join(ex.kwargs["out"])
-                if VNXError.has_error(orig_out, VNXError.GENERAL_NOT_FOUND):
-                    return False
-                else:
-                    raise
-            return _lun_state_validation(data)
-
-        self._wait_for_a_condition(lun_is_ready,
-                                   interval=INTERVAL_5_SEC,
-                                   ignorable_exception_arbiter=lambda ex:
-                                   isinstance(ex, exception.EMCVnxCLICmdError))
-        lun = self.get_lun_by_name(name, VNXLunProperties.lun_all, False)
-        return lun
-
-    def delete_lun(self, name):
-        """Deletes a LUN or mount point."""
-        command_delete_lun = ['lun', '-destroy',
-                              '-name', name,
-                              '-forceDetach',
-                              '-o']
-        # executing cli command to delete volume
-        out, rc = self.command_execute(*command_delete_lun)
-        if rc != 0 or out.strip():
-            # Ignore the error that due to retry
-            if VNXError.has_error(out, VNXError.GENERAL_NOT_FOUND):
-                LOG.warning(_LW("LUN is already deleted, LUN name %(name)s. "
-                                "Message: %(msg)s"),
-                            {'name': name, 'msg': out})
-            else:
-                self._raise_cli_error(command_delete_lun, rc, out)
-
-    def get_hlus(self, lun_id, poll=True):
-        hlus = list()
-        command_storage_group_list = ('storagegroup', '-list')
-        out, rc = self.command_execute(*command_storage_group_list,
-                                       poll=poll)
-        if rc != 0:
-            self._raise_cli_error(command_storage_group_list, rc, out)
-        sg_name_p = re.compile(r'^\s*(?P<sg_name>[^\n\r]+)')
-        hlu_alu_p = re.compile(r'HLU/ALU Pairs:'
-                               r'\s*HLU Number\s*ALU Number'
-                               r'\s*[-\s]*'
-                               r'(\d|\s)*'
-                               r'\s+(?P<hlu>\d+)( |\t)+%s' % lun_id)
-        for sg_info in out.split('Storage Group Name:'):
-            hlu_alu_m = hlu_alu_p.search(sg_info)
-            if hlu_alu_m is None:
-                continue
-            sg_name_m = sg_name_p.search(sg_info)
-            if sg_name_m:
-                hlus.append((hlu_alu_m.group('hlu'),
-                             sg_name_m.group('sg_name')))
-        return hlus
-
-    def _wait_for_a_condition(self, testmethod, timeout=None,
-                              interval=INTERVAL_5_SEC,
-                              ignorable_exception_arbiter=lambda ex: True,
-                              *args, **kwargs):
-        start_time = time.time()
-        if timeout is None:
-            timeout = self.timeout
-
-        def _inner():
-            try:
-                test_value = testmethod(*args, **kwargs)
-            except Exception as ex:
-                test_value = False
-                with excutils.save_and_reraise_exception(
-                        reraise=not ignorable_exception_arbiter(ex)):
-                    LOG.debug('CommandLineHelper.'
-                              '_wait_for_a_condition: %(method_name)s '
-                              'execution failed for %(exception)s',
-                              {'method_name': testmethod.__name__,
-                               'exception': ex})
-            if test_value:
-                raise loopingcall.LoopingCallDone()
-
-            if int(time.time()) - start_time > timeout:
-                msg = (_('CommandLineHelper._wait_for_a_condition: %s timeout')
-                       % testmethod.__name__)
-                LOG.error(msg)
-                raise exception.VolumeBackendAPIException(data=msg)
-
-        timer = loopingcall.FixedIntervalLoopingCall(_inner)
-        timer.start(interval=interval).wait()
-
-    def expand_lun(self, name, new_size, poll=True):
-
-        command_expand_lun = ('lun', '-expand',
-                              '-name', name,
-                              '-capacity', new_size,
-                              '-sq', 'gb',
-                              '-o',
-                              '-ignoreThresholds')
-        out, rc = self.command_execute(*command_expand_lun,
-                                       poll=poll)
-        if rc != 0:
-            # Ignore the error that due to retry
-            if VNXError.has_error(out, VNXError.LUN_ALREADY_EXPANDED):
-                LOG.warning(_LW("LUN %(name)s is already expanded. "
-                                "Message: %(msg)s"),
-                            {'name': name, 'msg': out})
-            else:
-                self._raise_cli_error(command_expand_lun, rc, out)
-
-    def expand_lun_and_wait(self, name, new_size):
-        self.expand_lun(name, new_size, poll=False)
-
-        def lun_is_extented():
-            data = self.get_lun_by_name(name, poll=False)
-            return new_size == data[VNXLunProperties.LUN_CAPACITY.key]
-
-        self._wait_for_a_condition(lun_is_extented)
-
-    def rename_lun(self, lun_id, new_name, poll=False):
-        command_lun_rename = ('lun', '-modify',
-                              '-l', lun_id,
-                              '-newName', new_name,
-                              '-o')
-        out, rc = self.command_execute(*command_lun_rename,
-                                       poll=poll)
-        if rc != 0:
-            self._raise_cli_error(command_lun_rename, rc, out)
-
-    def modify_lun_tiering_by_id(self, lun_id, tiering):
-        """Modify the tiering policy of the LUN."""
-        command_modify_lun = ['lun', '-modify', '-l', lun_id, '-o']
-        self._modify_lun_tiering(command_modify_lun, tiering)
-
-    def modify_lun_tiering_by_name(self, name, tiering):
-        """This function used to modify a lun's tiering policy."""
-        command_modify_lun = ['lun', '-modify', '-name', name, '-o']
-        self._modify_lun_tiering(command_modify_lun, tiering)
-
-    def _modify_lun_tiering(self, command_modify_lun, tiering):
-        if tiering and tiering != 'none':
-            command_modify_lun.extend(VNXTieringEnum.get_opt(tiering))
-            out, rc = self.command_execute(*command_modify_lun)
-            if rc != 0:
-                self._raise_cli_error(command_modify_lun, rc, out)
-
-    def create_consistencygroup(self, cg_name, members=None, poll=False):
-        """create the consistency group."""
-        command_create_cg = ('snap', '-group',
-                             '-create',
-                             '-name', cg_name,
-                             '-allowSnapAutoDelete', 'no')
-        if members:
-            command_create_cg += ('-res', ','.join(map(six.text_type,
-                                                       members)))
-
-        out, rc = self.command_execute(*command_create_cg, poll=poll)
-        if rc != 0:
-            # Ignore the error if consistency group already exists
-            if VNXError.has_error(out, VNXError.CG_EXISTED):
-                LOG.warning(_LW('Consistency group %(name)s already '
-                                'exists. Message: %(msg)s'),
-                            {'name': cg_name, 'msg': out})
-            else:
-                self._raise_cli_error(command_create_cg, rc, out)
-
-        self._wait_for_a_condition(self.get_consistency_group_by_name,
-                                   cg_name=cg_name,
-                                   interval=INTERVAL_5_SEC,
-                                   ignorable_exception_arbiter=lambda ex:
-                                   isinstance(ex, exception.EMCVnxCLICmdError))
-
-    def get_consistency_group_by_name(self, cg_name):
-        cmd = ('snap', '-group', '-list', '-id', cg_name)
-        data = {
-            'Name': None,
-            'Luns': None,
-            'State': None
-        }
-        out, rc = self.command_execute(*cmd)
-        if rc == 0:
-            cg_pat = r"Name:(.*)\n"\
-                     r"Description:(.*)\n"\
-                     r"Allow auto delete:(.*)\n"\
-                     r"Member LUN ID\(s\):(.*)\n"\
-                     r"State:(.*)\n"
-            for m in re.finditer(cg_pat, out):
-                data['Name'] = m.groups()[0].strip()
-                data['State'] = m.groups()[4].strip()
-                # Handle the empty CG case: "Member LUN ID(s):  None"
-                luns_of_cg = m.groups()[3].replace('None', '').strip()
-                data['Luns'] = ([lun.strip() for lun in luns_of_cg.split(',')]
-                                if luns_of_cg else [])
-                LOG.debug("Found consistent group %s.", data['Name'])
-
-        else:
-            self._raise_cli_error(cmd, rc, out)
-        return data
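-
-    # Example (a minimal sketch of the returned structure; the values
-    # shown are illustrative):
-    #
-    #     data = cli.get_consistency_group_by_name('cg-1')
-    #     # data == {'Name': 'cg-1', 'Luns': ['1', '3'], 'State': 'Ready'}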
-
-    def add_lun_to_consistency_group(self, cg_name, lun_id, poll=False):
-        add_lun_to_cg_cmd = ('snap', '-group',
-                             '-addmember', '-id',
-                             cg_name, '-res', lun_id)
-
-        out, rc = self.command_execute(*add_lun_to_cg_cmd, poll=poll)
-        if rc != 0:
-            LOG.error(_LE("Can not add the lun %(lun)s to consistency "
-                          "group %(cg_name)s."), {'lun': lun_id,
-                                                  'cg_name': cg_name})
-            self._raise_cli_error(add_lun_to_cg_cmd, rc, out)
-
-    def remove_luns_from_consistencygroup(self, cg_name, remove_ids,
-                                          poll=False):
-        """Removes LUN(s) from cg"""
-        remove_luns_cmd = ('snap', '-group', '-rmmember',
-                           '-id', cg_name,
-                           '-res', ','.join(remove_ids))
-        out, rc = self.command_execute(*remove_luns_cmd, poll=poll)
-        if rc != 0:
-            LOG.error(_LE("Can not remove LUNs %(luns)s in consistency "
-                          "group %(cg_name)s."), {'luns': remove_ids,
-                                                  'cg_name': cg_name})
-            self._raise_cli_error(remove_luns_cmd, rc, out)
-
-    def replace_luns_in_consistencygroup(self, cg_name, new_ids,
-                                         poll=False):
-        """Replaces LUN(s) with new_ids for cg"""
-        replace_luns_cmd = ('snap', '-group', '-replmember',
-                            '-id', cg_name,
-                            '-res', ','.join(new_ids))
-        out, rc = self.command_execute(*replace_luns_cmd, poll=poll)
-        if rc != 0:
-            LOG.error(_LE("Can not place new LUNs %(luns)s in consistency "
-                          "group %(cg_name)s."), {'luns': new_ids,
-                                                  'cg_name': cg_name})
-            self._raise_cli_error(replace_luns_cmd, rc, out)
-
-    def delete_consistencygroup(self, cg_name):
-        delete_cg_cmd = ('-np', 'snap', '-group',
-                         '-destroy', '-id', cg_name)
-        out, rc = self.command_execute(*delete_cg_cmd)
-        if rc != 0:
-            # Ignore the error if CG doesn't exist
-            if VNXError.has_error(out, VNXError.GENERAL_NOT_FOUND):
-                LOG.warning(_LW("CG %(cg_name)s does not exist. "
-                                "Message: %(msg)s"),
-                            {'cg_name': cg_name, 'msg': out})
-            elif VNXError.has_error(out, VNXError.CG_IS_DELETING):
-                LOG.warning(_LW("CG %(cg_name)s is deleting. "
-                                "Message: %(msg)s"),
-                            {'cg_name': cg_name, 'msg': out})
-            else:
-                self._raise_cli_error(delete_cg_cmd, rc, out)
-        else:
-            LOG.info(_LI('Consistency group %s was deleted '
-                         'successfully.'), cg_name)
-
-    def create_cgsnapshot(self, cg_name, snap_name):
-        """Create a cgsnapshot (snap group)."""
-        create_cg_snap_cmd = ('-np', 'snap', '-create',
-                              '-res', cg_name,
-                              '-resType', 'CG',
-                              '-name', snap_name,
-                              '-allowReadWrite', 'yes',
-                              '-allowAutoDelete', 'no')
-
-        out, rc = self.command_execute(*create_cg_snap_cmd)
-        if rc != 0:
-            # Ignore the error if cgsnapshot already exists
-            if VNXError.has_error(out, VNXError.CG_SNAP_NAME_EXISTED):
-                LOG.warning(_LW('Cgsnapshot name %(name)s already '
-                                'exists. Message: %(msg)s'),
-                            {'name': snap_name, 'msg': out})
-            else:
-                self._raise_cli_error(create_cg_snap_cmd, rc, out)
-        self._wait_for_a_condition(self.check_snapshot,
-                                   snap_name=snap_name,
-                                   interval=INTERVAL_30_SEC,
-                                   ignorable_exception_arbiter=lambda ex:
-                                   isinstance(ex, exception.EMCVnxCLICmdError))
-
-    def check_snapshot(self, snap_name, poll=True):
-        """check if a snapshot/cgsnapshot is existed."""
-        cmd_get = ('snap', '-list', '-id', snap_name)
-        out, rc = self.command_execute(*cmd_get, poll=poll)
-        if rc == 0:
-            return True
-        else:
-            self._raise_cli_error(cmd_get, rc, out)
-
-    def delete_cgsnapshot(self, snap_name):
-        """Delete a cgsnapshot (snap group)."""
-        delete_cg_snap_cmd = ('-np', 'snap', '-destroy',
-                              '-id', snap_name, '-o')
-
-        out, rc = self.command_execute(*delete_cg_snap_cmd)
-        if rc != 0:
-            # Ignore the error if cgsnapshot does not exist.
-            if VNXError.has_error(out, VNXError.GENERAL_NOT_FOUND):
-                LOG.warning(_LW('Snapshot %(name)s for consistency group '
-                                'does not exist. Message: %(msg)s'),
-                            {'name': snap_name, 'msg': out})
-            else:
-                self._raise_cli_error(delete_cg_snap_cmd, rc, out)
-
-    def create_snapshot(self, lun_id, name):
-        if lun_id is not None:
-            command_create_snapshot = ('snap', '-create',
-                                       '-res', lun_id,
-                                       '-name', name,
-                                       '-allowReadWrite', 'yes',
-                                       '-allowAutoDelete', 'no')
-
-            out, rc = self.command_execute(*command_create_snapshot,
-                                           poll=False)
-            if rc != 0:
-                # Ignore the error caused by retry
-                if VNXError.has_error(out, VNXError.SNAP_NAME_EXISTED):
-                    LOG.warning(_LW('Snapshot %(name)s already exists. '
-                                    'Message: %(msg)s'),
-                                {'name': name, 'msg': out})
-                else:
-                    self._raise_cli_error(command_create_snapshot, rc, out)
-        else:
-            msg = _('Failed to create snapshot as no LUN ID is specified')
-            raise exception.VolumeBackendAPIException(data=msg)
-
-    def copy_snapshot(self, src_snap_name, new_name):
-        command_copy_snapshot = ('snap', '-copy',
-                                 '-id', src_snap_name,
-                                 '-name', new_name,
-                                 '-ignoreMigrationCheck',
-                                 '-ignoreDeduplicationCheck')
-
-        out, rc = self.command_execute(*command_copy_snapshot)
-        if rc != 0:
-            # Ignore the error if the snap already exists
-            if VNXError.has_error(out, VNXError.SNAP_NAME_EXISTED):
-                LOG.warning(_LW('Snapshot %(name)s already exists. '
-                                'Message: %(msg)s'),
-                            {'name': new_name, 'msg': out})
-            else:
-                self._raise_cli_error(command_copy_snapshot, rc, out)
-
-    def delete_snapshot(self, name):
-
-        def delete_snapshot_success():
-            command_delete_snapshot = ('snap', '-destroy',
-                                       '-id', name,
-                                       '-o')
-            out, rc = self.command_execute(*command_delete_snapshot,
-                                           poll=True)
-            if rc != 0:
-                # Ignore the error caused by retry
-                if rc == 5 and out.find("not exist") >= 0:
-                    LOG.warning(_LW("Snapshot %(name)s may deleted already. "
-                                    "Message: %(msg)s"),
-                                {'name': name, 'msg': out})
-                    return True
-                # The snapshot cannot be destroyed because it is
-                # attached to a snapshot mount point; wait and retry.
-                elif VNXError.has_error(out, VNXError.SNAP_NAME_IN_USE):
-                    LOG.warning(_LW("Snapshot %(name)s is in use, retry. "
-                                    "Message: %(msg)s"),
-                                {'name': name, 'msg': out})
-                    return False
-                else:
-                    self._raise_cli_error(command_delete_snapshot, rc, out)
-            else:
-                LOG.info(_LI('Snapshot %s was deleted successfully.'),
-                         name)
-                return True
-
-        self._wait_for_a_condition(delete_snapshot_success,
-                                   interval=INTERVAL_30_SEC,
-                                   timeout=INTERVAL_30_SEC * 3)
-
-    def create_mount_point(self, primary_lun_name, name):
-
-        command_create_mount_point = ('lun', '-create',
-                                      '-type', 'snap',
-                                      '-primaryLunName', primary_lun_name,
-                                      '-name', name)
-
-        out, rc = self.command_execute(*command_create_mount_point,
-                                       poll=False)
-        if rc != 0:
-            # Ignore the error caused by retry
-            if VNXError.has_error(out, VNXError.LUN_EXISTED):
-                LOG.warning(_LW("Mount point %(name)s already exists. "
-                                "Message: %(msg)s"),
-                            {'name': name, 'msg': out})
-            else:
-                self._raise_cli_error(command_create_mount_point, rc, out)
-
-        return rc
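-
-    # Example (a minimal sketch, assuming cli is a configured instance):
-    #
-    #     rc = cli.create_mount_point('volume-1', 'smp-volume-1')
-    #     # rc == 0 on success; a pre-existing mount point only logs
-    #     # a warning and the nonzero rc is returned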
-
-    def allow_snapshot_readwrite_and_autodelete(self, snap_name):
-
-        modify_cmd = ('snap', '-modify', '-id', snap_name,
-                      '-allowReadWrite', 'yes', '-allowAutoDelete', 'yes')
-
-        out, rc = self.command_execute(*modify_cmd)
-        if rc != 0:
-            self._raise_cli_error(modify_cmd, rc, out)
-
-    def attach_mount_point(self, name, snapshot_name):
-
-        command_attach_mount_point = ('lun', '-attach',
-                                      '-name', name,
-                                      '-snapName', snapshot_name)
-
-        out, rc = self.command_execute(*command_attach_mount_point)
-        if rc != 0:
-            # Ignore the error caused by retry
-            if VNXError.has_error(out, VNXError.SNAP_ALREADY_MOUNTED):
-                LOG.warning(_LW("Snapshot %(snapname)s is attached to "
-                                "snapshot mount point %(mpname)s already. "
-                                "Message: %(msg)s"),
-                            {'snapname': snapshot_name,
-                             'mpname': name,
-                             'msg': out})
-            else:
-                self._raise_cli_error(command_attach_mount_point, rc, out)
-
-        return rc
-
-    def detach_mount_point(self, smp_name):
-
-        command_detach_mount_point = ('lun', '-detach',
-                                      '-name', smp_name, '-o')
-
-        out, rc = self.command_execute(*command_detach_mount_point)
-        if rc != 0:
-            # Ignore the error caused by retry
-            if VNXError.has_error(out, VNXError.SNAP_NOT_ATTACHED):
-                LOG.warning(_LW("The specified Snapshot mount point %s is not "
-                                "currently attached."), smp_name)
-            else:
-                self._raise_cli_error(command_detach_mount_point, rc, out)
-
-        return rc
-
-    def migrate_start(self, src_id, dst_id, rate=VNXMigrationRate.HIGH):
-        command_migrate_lun = ('migrate', '-start',
-                               '-source', src_id,
-                               '-dest', dst_id,
-                               '-rate', rate,
-                               '-o')
-        # SP HA is not supported by LUN migration
-        out, rc = self.command_execute(*command_migrate_lun,
-                                       retry_disable=True,
-                                       poll=True)
-
-        if 0 != rc:
-            self._raise_cli_error(command_migrate_lun, rc, out)
-
-        return rc
-
-    def migrate_lun_without_verification(self, src_id, dst_id,
-                                         rate=VNXMigrationRate.HIGH):
-        """Start a migration session from src_id to dst_id.
-
-        :param src_id: source LUN id
-        :param dst_id: destination LUN id
-
-        NOTE: This method ignores any errors; the error handling is
-        done in the verification function.
-        """
-        try:
-            self.migrate_start(src_id, dst_id, rate)
-        except exception.EMCVnxCLICmdError as ex:
-            with excutils.save_and_reraise_exception(reraise=False):
-                # Here we just log that the migration command failed
-                # to start; the real error handling happens when this
-                # migration session is verified.
-                LOG.warning(_LW("Starting migration from %(src)s to %(dst)s "
-                                "failed. Will check whether migration was "
-                                "successful later."
-                                ": %(msg)s"), {'src': src_id,
-                                               'dst': dst_id,
-                                               'msg': ex.kwargs['out']})
-        return True
-
-    def verify_lun_migration(self, src_id, dst_id, dst_wwn, dst_name=None):
-        """Verify whether the migration session has finished for src_id
-
-        :param src_id: lun id of source LUN
-        :param dst_id: lun id of destination LUN
-        :param dst_wwn: original wwn for destination LUN
-        :param dst_name: destination LUN name, used for cleanup on error
-        """
-        cmd_list = ('migrate', '-list', '-source', src_id)
-
-        def migration_is_ready(poll=False):
-            mig_ready = False
-            out, rc = self.migration_list(src_id, poll=poll)
-            if rc == 0:
-                # Parse the current state and completion percentage
-                state = re.search(r'Current State:\s*([^\n]+)', out)
-                percentage = re.search(r'Percent Complete:\s*([^\n]+)', out)
-                current_state = state.group(1)
-                percentage_complete = percentage.group(1)
-                if VNXError.has_error(current_state,
-                                      VNXError.LUN_MIGRATION_STOPPED):
-                    reason = _("Migration of LUN %s has been stopped or"
-                               " faulted.") % src_id
-                    raise exception.VolumeBackendAPIException(data=reason)
-                if VNXError.has_error(current_state,
-                                      VNXError.LUN_MIGRATION_MIGRATING):
-                    LOG.debug("Migration of LUN %(src_id)s in process "
-                              "%(percentage)s %%.",
-                              {"src_id": src_id,
-                               "percentage": percentage_complete})
-            elif VNXError.has_error(out, VNXError.LUN_NOT_MIGRATING):
-                # Verify the wwn has changed for the LUN
-                new_wwn = None
-                try:
-                    new_wwn = self.get_lun_by_id(dst_id)['wwn']
-                except exception.EMCVnxCLICmdError:
-                    # Destination LUN disappeared as expected
-                    pass
-                if not new_wwn or dst_wwn != new_wwn:
-                    LOG.debug("Migration of LUN %s is finished.", src_id)
-                    mig_ready = True
-                else:
-                    # Migration may fail to start
-                    LOG.error(_LE("Migration verification failed: "
-                                  "wwn is not changed after migration."))
-                    self._raise_cli_error(cmd_list, rc, out)
-            else:
-                # Unexpected error occurred, raise it directly
-                self._raise_cli_error(cmd_list, rc, out)
-            return mig_ready
-
-        def migration_disappeared(poll=False):
-            out, rc = self.migration_list(src_id, poll=poll)
-            if rc != 0:
-                if VNXError.has_error(out, VNXError.LUN_NOT_MIGRATING):
-                    LOG.debug("Migration of LUN %s is finished.", src_id)
-                    return True
-                else:
-                    LOG.error(_LE("Failed to query migration status of LUN."),
-                              src_id)
-                    self._raise_cli_error(cmd_list, rc, out)
-            return False
-
-        try:
-            if migration_is_ready(True):
-                return True
-            self._wait_for_a_condition(
-                migration_is_ready,
-                interval=INTERVAL_30_SEC,
-                ignorable_exception_arbiter=lambda ex:
-                type(ex) is not exception.VolumeBackendAPIException)
-        except exception.EMCVnxCLICmdError as ex:
-            # Try to delete destination lun after migration failure
-            with excutils.save_and_reraise_exception():
-                LOG.warning(_LW("Start migration failed. Message: %s"),
-                            ex.kwargs["out"])
-                if dst_name is not None:
-                    LOG.warning(_LW("Deleting temp LUN after migration "
-                                    "start failed. LUN: %s"), dst_name)
-                    self.delete_lun(dst_name)
-        except exception.VolumeBackendAPIException:
-            # Migration cancellation for clean up
-            with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Migration of LUN %s failed to complete."),
-                          src_id)
-                self.migration_cancel(src_id)
-                self._wait_for_a_condition(migration_disappeared,
-                                           interval=INTERVAL_30_SEC)
-        return True
-
-    # Cancel the migration when its status is faulted or stopped
-    def migration_cancel(self, src_id):
-        LOG.info(_LI("Cancelling Migration from LUN %s."), src_id)
-        cmd_migrate_cancel = ('migrate', '-cancel', '-source', src_id,
-                              '-o')
-        out, rc = self.command_execute(*cmd_migrate_cancel)
-        if rc != 0:
-            self._raise_cli_error(cmd_migrate_cancel, rc, out)
-
-    def migration_list(self, src, poll=True):
-        """Lists migration status for LUN.
-
-        :param src: WWN or ID of source LUN
-        """
-        cmd_migrate_list = ('migrate', '-list', '-source', src)
-        return self.command_execute(*cmd_migrate_list,
-                                    poll=poll)
-
-    def migrate_lun_with_verification(self, src_id, dst_id,
-                                      dst_name=None,
-                                      dst_wwn=None,
-                                      rate=VNXMigrationRate.HIGH):
-        if not dst_wwn:
-            dst_wwn = self.get_lun_by_id(dst_id, poll=False)['wwn']
-        self.migrate_lun_without_verification(
-            src_id, dst_id, rate=rate)
-
-        eventlet.sleep(INTERVAL_30_SEC)
-        return self.verify_lun_migration(src_id, dst_id, dst_wwn, dst_name)
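-
-    # Example (a minimal sketch; the LUN ids and name are illustrative):
-    #
-    #     cli.migrate_lun_with_verification(src_id=10, dst_id=11,
-    #                                       dst_name='volume-1_dest')
-    #     # starts the session, then polls every 30 seconds until it
-    #     # finishes; if the start fails, the temp LUN is deleted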
-
-    def get_storage_group(self, name, poll=True):
-
-        # Map of ALU (key) to HLU (value)
-        lun_map = {}
-
-        data = {'storage_group_name': name,
-                'storage_group_uid': None,
-                'lunmap': lun_map,
-                'raw_output': ''}
-
-        command_get_storage_group = ('storagegroup', '-list',
-                                     '-gname', name, '-host',
-                                     '-iscsiAttributes')
-
-        out, rc = self.command_execute(*command_get_storage_group,
-                                       poll=poll)
-        if rc != 0:
-            self._raise_cli_error(command_get_storage_group, rc, out)
-
-        data['raw_output'] = out
-        re_storage_group_id = r'Storage Group UID:\s*(.*)\s*'
-        m = re.search(re_storage_group_id, out)
-        if m is not None:
-            data['storage_group_uid'] = m.group(1)
-
-        re_HLU_ALU_pair = 'HLU\/ALU Pairs:\s*HLU Number' \
-                          '\s*ALU Number\s*[-\s]*(?P<lun_details>(\d+\s*)+)'
-        m = re.search(re_HLU_ALU_pair, out)
-        if m is not None:
-            lun_details = m.group('lun_details').strip()
-            values = re.split(r'\s+', lun_details)
-            while len(values) >= 2:
-                key = values.pop()
-                value = values.pop()
-                lun_map[int(key)] = int(value)
-
-        return data
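-
-    # Example (a minimal sketch of the returned structure; the values
-    # shown are illustrative):
-    #
-    #     sg = cli.get_storage_group('host-1')
-    #     # sg['lunmap'] maps ALU to HLU, e.g. {22: 1, 23: 2}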
-
-    def create_storage_group(self, name):
-
-        command_create_storage_group = ('storagegroup', '-create',
-                                        '-gname', name)
-
-        out, rc = self.command_execute(*command_create_storage_group)
-        if rc != 0:
-            # Ignore the error caused by retry
-            if VNXError.has_error(out, VNXError.SG_NAME_IN_USE):
-                LOG.warning(_LW('Storage group %(name)s already exists. '
-                                'Message: %(msg)s'),
-                            {'name': name, 'msg': out})
-            else:
-                self._raise_cli_error(command_create_storage_group, rc, out)
-
-    def delete_storage_group(self, name):
-
-        command_delete_storage_group = ('storagegroup', '-destroy',
-                                        '-gname', name, '-o')
-
-        out, rc = self.command_execute(*command_delete_storage_group)
-        if rc != 0:
-            # Ignore the error caused by retry
-            if rc == 83 and out.find("group name or UID does not "
-                                     "match any storage groups") >= 0:
-                LOG.warning(_LW("Storage group %(name)s doesn't exist, "
-                                "may have already been deleted. "
-                                "Message: %(msg)s"),
-                            {'name': name, 'msg': out})
-            else:
-                self._raise_cli_error(command_delete_storage_group, rc, out)
-
-    def connect_host_to_storage_group(self, hostname, sg_name):
-
-        command_host_connect = ('storagegroup', '-connecthost',
-                                '-host', hostname,
-                                '-gname', sg_name,
-                                '-o')
-
-        out, rc = self.command_execute(*command_host_connect)
-        if rc != 0:
-            self._raise_cli_error(command_host_connect, rc, out)
-
-    def disconnect_host_from_storage_group(self, hostname, sg_name):
-        command_host_disconnect = ('storagegroup', '-disconnecthost',
-                                   '-host', hostname,
-                                   '-gname', sg_name,
-                                   '-o')
-
-        out, rc = self.command_execute(*command_host_disconnect)
-        if rc != 0:
-            # Ignore the error caused by retry
-            if rc == 116 and \
-                re.search("host is not.*connected to.*storage group",
-                          out) is not None:
-                LOG.warning(_LW("Host %(host)s has already disconnected from "
-                                "storage group %(sgname)s. Message: %(msg)s"),
-                            {'host': hostname, 'sgname': sg_name, 'msg': out})
-            else:
-                self._raise_cli_error(command_host_disconnect, rc, out)
-
-    def add_hlu_to_storage_group(self, hlu, alu, sg_name):
-        """Adds a lun into storage group as specified hlu number.
-
-        Return True if the hlu is as specified, otherwise False.
-        """
-
-        command_add_hlu = ('storagegroup', '-addhlu',
-                           '-hlu', hlu,
-                           '-alu', alu,
-                           '-gname', sg_name, '-o')
-
-        out, rc = self.command_execute(*command_add_hlu, poll=False)
-        if rc != 0:
-            # No need to handle retry for addhlu here;
-            # retry is handled by the caller
-            self._raise_cli_error(command_add_hlu, rc, out)
-
-        return True
-
-    def remove_hlu_from_storagegroup(self, hlu, sg_name, poll=False):
-
-        command_remove_hlu = ('storagegroup', '-removehlu',
-                              '-hlu', hlu,
-                              '-gname', sg_name,
-                              '-o')
-
-        out, rc = self.command_execute(*command_remove_hlu, poll=poll)
-        if rc != 0:
-            # Ignore the error caused by retry
-            if rc == 66 and\
-                    out.find("No such Host LUN in this Storage Group") >= 0:
-                LOG.warning(_LW("HLU %(hlu)s has already been removed from "
-                                "%(sgname)s. Message: %(msg)s"),
-                            {'hlu': hlu, 'sgname': sg_name, 'msg': out})
-            else:
-                self._raise_cli_error(command_remove_hlu, rc, out)
-
-    def get_lun_by_name(self,
-                        name,
-                        properties=VNXLunProperties.lun_all,
-                        poll=True):
-        data = self.get_lun_properties(('-name', name),
-                                       properties,
-                                       poll=poll)
-        return data
-
-    def get_lun_by_id(self,
-                      lunid,
-                      properties=VNXLunProperties.lun_all,
-                      poll=True):
-        data = self.get_lun_properties(('-l', lunid),
-                                       properties, poll=poll)
-        return data
-
-    def get_lun_current_ops_state(self, name, poll=False):
-        data = self.get_lun_by_name(name, poll=poll)
-        return data[VNXLunProperties.LUN_OPERATION.key]
-
-    def wait_until_lun_ready_for_ops(self, name):
-        def is_lun_ready_for_ops():
-            data = self.get_lun_current_ops_state(name, False)
-            return data == 'None'
-        # Get the volume's latest operation state by polling.
-        # Otherwise, the operation state may be out of date.
-        ops = self.get_lun_current_ops_state(name, True)
-        if ops != 'None':
-            self._wait_for_a_condition(is_lun_ready_for_ops)
-
-    def get_pool(self,
-                 name,
-                 properties=VNXPoolProperties.pool_all,
-                 poll=True):
-        data = self.get_pool_properties(('-name', name),
-                                        properties=properties,
-                                        poll=poll)
-        return data
-
-    def get_pool_properties(self,
-                            filter_option,
-                            properties=VNXPoolProperties.pool_all,
-                            poll=True):
-        module_list = ('storagepool', '-list')
-        data = self._get_obj_properties(
-            module_list, filter_option,
-            base_properties=(VNXPoolProperties.POOL_NAME,),
-            adv_properties=tuple(properties),
-            poll=poll)
-        return data
-
-    def get_lun_properties(self,
-                           filter_option,
-                           properties=VNXLunProperties.lun_all,
-                           poll=True):
-        module_list = ('lun', '-list')
-        data = self._get_obj_properties(
-            module_list, filter_option,
-            base_properties=(VNXLunProperties.LUN_NAME,
-                             VNXLunProperties.LUN_ID),
-            adv_properties=tuple(properties),
-            poll=poll)
-        return data
-
-    def get_pool_feature_properties(
-            self,
-            properties=VNXPoolFeatureProperties.default,
-            poll=True):
-        module_list = ("storagepool", '-feature', '-info')
-        data = self._get_obj_properties(
-            module_list, tuple(),
-            base_properties=(),
-            adv_properties=tuple(properties),
-            poll=poll)
-        return data
-
-    def _get_obj_properties(self, module_list,
-                            filter_option,
-                            base_properties=tuple(),
-                            adv_properties=tuple(),
-                            poll=True):
-        # TODO: add an instance check
-        command_get = module_list + filter_option
-        options = []
-        for prop in adv_properties:
-            option = prop.option
-            if option and option not in options:
-                options.append(option)
-        command_get += tuple(options)
-        out, rc = self.command_execute(*command_get, poll=poll)
-
-        if rc != 0:
-            self._raise_cli_error(command_get, rc, out)
-
-        data = VNXCliParser.parse(out,
-                                  list(base_properties) + list(adv_properties))
-        LOG.debug('Return Object properties. Data: %s', data)
-        return data
-
-    def _get_property_value(self, out, propertyDescriptor):
-        label = propertyDescriptor.label
-        m = re.search(label, out)
-        if m:
-            if (propertyDescriptor.converter is not None):
-                try:
-                    converter = propertyDescriptor.converter
-                    if isinstance(converter, staticmethod):
-                        converter = converter.__func__
-                    return converter(m.group(1))
-                except ValueError:
-                    LOG.error(_LE("Invalid value for %(key)s, "
-                                  "value is %(value)s."),
-                              {'key': propertyDescriptor.key,
-                               'value': m.group(1)})
-                    return None
-            else:
-                return m.group(1)
-        else:
-            LOG.debug('%s value is not found in the output.',
-                      propertyDescriptor.label)
-            return None
-
-    def check_lun_has_snap(self, lun_id):
-        cmd = ('snap', '-list', '-res', lun_id)
-        rc = self.command_execute(*cmd, poll=False)[1]
-        if rc == 0:
-            LOG.debug("Found snapshots for %s.", lun_id)
-            return True
-        else:
-            return False
-
-    def get_pool_list(self,
-                      properties=VNXPoolProperties.pool_all,
-                      poll=True):
-        temp_cache = []
-        list_cmd = ('storagepool', '-list')
-        for prop in properties:
-            list_cmd += (prop.option,)
-        output_properties = [VNXPoolProperties.POOL_NAME] + properties
-        out, rc = self.command_execute(*list_cmd, poll=poll)
-        if rc != 0:
-            self._raise_cli_error(list_cmd, rc, out)
-
-        try:
-            for pool in out.strip().split('\n\n'):
-                pool_data = VNXPoolProperties.parse(
-                    pool, output_properties)
-                temp_cache.append(pool_data)
-        except Exception as ex:
-            LOG.error(_LE("Error happened during storage pool querying, %s."),
-                      ex)
-            # NOTE: Do not re-raise the exception here,
-            # as the pools may be temporarily unavailable
-            pass
-        return temp_cache
-
-    def get_array_serial(self, poll=False):
-        """return array Serial No for pool backend."""
-        data = {'array_serial': 'unknown'}
-
-        command_get_array_serial = ('getagent', '-serial')
-        # Query the agent for the array serial number
-        out, rc = self.command_execute(*command_get_array_serial,
-                                       poll=poll)
-        if 0 == rc:
-            m = re.search(r'Serial No:\s+(\w+)', out)
-            if m:
-                data['array_serial'] = m.group(1)
-            else:
-                LOG.warning(_LW("No array serial number returned, "
-                                "set as unknown."))
-        else:
-            self._raise_cli_error(command_get_array_serial, rc, out)
-
-        return data
-
-    def get_status_up_ports(self, storage_group_name, io_ports=None,
-                            poll=True):
-        """Function to get ports whose status are up."""
-        cmd_get_hba = ('storagegroup', '-list', '-gname', storage_group_name)
-        out, rc = self.command_execute(*cmd_get_hba, poll=poll)
-        wwns = []
-        if 0 == rc:
-            _re_hba_sp_pair = re.compile('((\w\w:){15}(\w\w)\s*' +
-                                         '(SP\s[A-B]){1}\s*(\d*)\s*\n)')
-            _all_hba_sp_pairs = re.findall(_re_hba_sp_pair, out)
-            sps = [each[3] for each in _all_hba_sp_pairs]
-            portid = [each[4] for each in _all_hba_sp_pairs]
-            cmd_get_port = ('port', '-list', '-sp')
-            out, rc = self.command_execute(*cmd_get_port)
-            if 0 != rc:
-                self._raise_cli_error(cmd_get_port, rc, out)
-            for i, sp in enumerate(sps):
-                if io_ports:  # Skip ports which are not in io_ports
-                    if (sp.split()[1], int(portid[i])) not in io_ports:
-                        continue
-                wwn = self.get_port_wwn(sp, portid[i], out)
-                if (wwn is not None) and (wwn not in wwns):
-                    LOG.debug('Add wwn:%(wwn)s for sg:%(sg)s.',
-                              {'wwn': wwn,
-                               'sg': storage_group_name})
-                    wwns.append(wwn)
-        elif 83 == rc:
-            LOG.warning(_LW("Storage Group %s is not found."),
-                        storage_group_name)
-        else:
-            self._raise_cli_error(cmd_get_hba, rc, out)
-        return wwns
-
-    def get_login_ports(self, storage_group_name, connector_wwpns,
-                        io_ports=None):
-        cmd_list_hba = ('port', '-list', '-gname', storage_group_name)
-        out, rc = self.command_execute(*cmd_list_hba)
-        ports = []
-        wwns = []
-        connector_hba_list = []
-        if 0 == rc and out.find('Information about each HBA:') != -1:
-            hba_list = out.split('Information about each SPPORT:')[0].split(
-                'Information about each HBA:')[1:]
-            allports = out.split('Information about each SPPORT:')[1]
-            hba_uid_pat = re.compile('HBA\sUID:\s*((\w\w:){15}(\w\w))')
-            for each in hba_list:
-                obj_search = re.search(hba_uid_pat, each)
-                if obj_search and obj_search.group(1). \
-                        replace(':', '')[16:].lower() in connector_wwpns:
-                    connector_hba_list.append(each)
-            port_pat = re.compile('SP Name:\s*(SP\s\w)\n\s*' +
-                                  'SP Port ID:\s*(\w*)\n\s*' +
-                                  'HBA Devicename:.*\n\s*' +
-                                  'Trusted:.*\n\s*' +
-                                  'Logged In:\s*YES\n')
-
-            for each in connector_hba_list:
-                ports.extend(re.findall(port_pat, each))
-            ports = list(set(ports))
-            if io_ports:
-                ports = filter(lambda po:
-                               (po[0].split()[1], int(po[1])) in io_ports,
-                               ports)
-            for each in ports:
-                wwn = self.get_port_wwn(each[0], each[1], allports)
-                if wwn:
-                    wwns.append(wwn)
-        else:
-            self._raise_cli_error(cmd_list_hba, rc, out)
-        return wwns
-
-    def get_port_wwn(self, sp, port_id, allports=None):
-        """Returns wwn via sp and port_id
-
-        :param sp: should be in this format 'SP A'
-        :param port_id: '0' or 0
-        """
-        wwn = None
-        if allports is None:
-            allports, rc = self.get_port_output()
-        _re_port_wwn = re.compile('SP Name:\s*' + sp +
-                                  '\nSP Port ID:\s*' + str(port_id) +
-                                  '\nSP UID:\s*((\w\w:){15}(\w\w))' +
-                                  '\nLink Status:         Up' +
-                                  '\nPort Status:         Online')
-        _obj_search = re.search(_re_port_wwn, allports)
-        if _obj_search is not None:
-            wwn = _obj_search.group(1).replace(':', '')[16:]
-        return wwn
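-
-    # Example (a minimal sketch, assuming cli is a configured instance):
-    #
-    #     wwn = cli.get_port_wwn('SP A', 0)
-    #     # returns the 16-digit port WWN when the port is Up/Online,
-    #     # otherwise None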
-
-    def get_fc_targets(self):
-        out, rc = self.get_port_output()
-        fc_target_dict = {'A': [], 'B': []}
-
-        _fcport_pat = (r'SP Name:             SP\s(\w)\s*'
-                       r'SP Port ID:\s*(\w*)\n'
-                       r'SP UID:\s*((\w\w:){15}(\w\w))\s*'
-                       r'Link Status:         Up\n'
-                       r'Port Status:         Online\n')
-
-        for m in re.finditer(_fcport_pat, out):
-            sp = m.groups()[0]
-            sp_port_id = m.groups()[1]
-            fc_target_dict[sp].append({'SP': sp,
-                                       'Port ID': sp_port_id})
-        return fc_target_dict
-
-    def get_port_output(self):
-        cmd_get_port = ('port', '-list', '-sp')
-        out, rc = self.command_execute(*cmd_get_port)
-        if 0 != rc:
-            self._raise_cli_error(cmd_get_port, rc, out)
-        return out, rc
-
-    def get_connection_getport_output(self):
-        connection_getport_cmd = ('connection', '-getport', '-vlanid')
-        out, rc = self.command_execute(*connection_getport_cmd)
-        if 0 != rc:
-            self._raise_cli_error(connection_getport_cmd, rc, out)
-        return out, rc
-
-    def _filter_iscsi_ports(self, all_ports, io_ports):
-        """Filter ports in white list from all iSCSI ports."""
-        new_iscsi_ports = {'A': [], 'B': []}
-        valid_ports = []
-        for sp in all_ports:
-            for port in all_ports[sp]:
-                port_tuple = (port['SP'],
-                              port['Port ID'],
-                              port['Virtual Port ID'])
-                if port_tuple in io_ports:
-                    new_iscsi_ports[sp].append(port)
-                    valid_ports.append(port_tuple)
-        if len(io_ports) != len(valid_ports):
-            invalid_port_set = set(io_ports) - set(valid_ports)
-            for invalid in invalid_port_set:
-                LOG.warning(_LW('Invalid iSCSI port %(sp)s-%(port)s-%(vlan)s '
-                                'found in io_port_list, will be ignored.'),
-                            {'sp': invalid[0], 'port': invalid[1],
-                             'vlan': invalid[2]})
-        return new_iscsi_ports
-
-    def get_iscsi_targets(self, poll=False, io_ports=None):
-        cmd_getport = ('connection', '-getport', '-address', '-vlanid')
-        out, rc = self.command_execute(*cmd_getport, poll=poll)
-        if rc != 0:
-            self._raise_cli_error(cmd_getport, rc, out)
-
-        iscsi_target_dict = {'A': [], 'B': []}
-        iscsi_spport_pat = r'(A|B)\s*' + \
-                           r'Port ID:\s+(\d+)\s*' + \
-                           r'Port WWN:\s+(iqn\S+)'
-        iscsi_vport_pat = r'Virtual Port ID:\s+(\d+)\s*' + \
-                          r'VLAN ID:\s*\S*\s*' + \
-                          r'IP Address:\s+(\S+)'
-        for spport_content in re.split(r'^SP:\s+|\nSP:\s*', out):
-            m_spport = re.match(iscsi_spport_pat, spport_content,
-                                flags=re.IGNORECASE)
-            if not m_spport:
-                continue
-            sp = m_spport.group(1)
-            port_id = int(m_spport.group(2))
-            iqn = m_spport.group(3)
-            for m_vport in re.finditer(iscsi_vport_pat, spport_content):
-                vport_id = int(m_vport.group(1))
-                ip_addr = m_vport.group(2)
-                if ip_addr.find('N/A') != -1:
-                    LOG.debug("Skip port without IP Address: %s",
-                              m_spport.group(0) + m_vport.group(0))
-                    continue
-                iscsi_target_dict[sp].append({'SP': sp,
-                                              'Port ID': port_id,
-                                              'Port WWN': iqn,
-                                              'Virtual Port ID': vport_id,
-                                              'IP Address': ip_addr})
-        if io_ports:
-            return self._filter_iscsi_ports(iscsi_target_dict, io_ports)
-        return iscsi_target_dict
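-
-    # Example (a minimal sketch of the returned structure; the values
-    # shown are illustrative):
-    #
-    #     targets = cli.get_iscsi_targets()
-    #     # targets['A'][0] == {'SP': 'A', 'Port ID': 0,
-    #     #                     'Port WWN': 'iqn.1992-04.com.emc:cx...a0',
-    #     #                     'Virtual Port ID': 0,
-    #     #                     'IP Address': '192.168.1.10'}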
-
-    def get_registered_spport_set(self, initiator_iqn, sgname, sg_raw_out):
-        spport_set = set()
-        for m_spport in re.finditer(
-                r'\n\s+%s\s+SP\s.*\n.*\n\s*SPPort:\s+(A|B)-(\d+)v(\d+)\s*\n'
-                % initiator_iqn, sg_raw_out, flags=re.IGNORECASE):
-            spport_set.add((m_spport.group(1), int(m_spport.group(2)),
-                           int(m_spport.group(3))))
-        LOG.debug('See path %(path)s in %(sg)s.',
-                  {'path': spport_set,
-                   'sg': sgname})
-        return spport_set
-
-    def ping_node(self, target_portal, initiator_ip):
-        connection_pingnode = ('connection', '-pingnode', '-sp',
-                               target_portal['SP'], '-portid',
-                               target_portal['Port ID'], '-vportid',
-                               target_portal['Virtual Port ID'],
-                               '-address', initiator_ip,
-                               '-count', '1')
-        out, rc = self.command_execute(*connection_pingnode)
-        if rc == 0:
-            ping_ok = re.compile(r'Reply from %s' % initiator_ip)
-            if re.match(ping_ok, out) is not None:
-                LOG.debug("See available iSCSI target: %s",
-                          connection_pingnode)
-                return True
-        LOG.warning(_LW("See unavailable iSCSI target: %s"),
-                    connection_pingnode)
-        return False
-
-    def find_available_iscsi_targets(self, hostname,
-                                     registered_spport_set,
-                                     all_iscsi_targets):
-        """Finds available iscsi targets for a host.
-
-        When the iscsi_initiator_map is configured, the driver finds an
-        accessible portal and puts it first in the portal list, so that
-        an accessible portal is used when multipath is not in use. All
-        the registered portals are returned so that Nova can clean up
-        the unused devices for this LUN that were created by logging in
-        to these portals while attaching other LUNs on the VNX.
-        """
-
-        if self.iscsi_initiator_map and hostname in self.iscsi_initiator_map:
-            iscsi_initiator_ips = list(self.iscsi_initiator_map[hostname])
-            random.shuffle(iscsi_initiator_ips)
-        else:
-            iscsi_initiator_ips = None
-
-        target_portals = []
-
-        all_portals = all_iscsi_targets['A'] + all_iscsi_targets['B']
-        random.shuffle(all_portals)
-        for portal in all_portals:
-            spport = (portal['SP'],
-                      portal['Port ID'],
-                      portal['Virtual Port ID'])
-            if spport not in registered_spport_set:
-                LOG.debug(
-                    "Skip SP Port %(port)s since "
-                    "no path from %(host)s is through it.",
-                    {'port': spport,
-                     'host': hostname})
-                continue
-            target_portals.append(portal)
-
-        main_portal_index = None
-        if iscsi_initiator_ips:
-            for i, portal in enumerate(target_portals):
-                for initiator_ip in iscsi_initiator_ips:
-                    if self.ping_node(portal, initiator_ip):
-                        main_portal_index = i
-                        break
-                else:
-                    # Else for the for loop. If there is no main portal found,
-                    # continue to try next initiator IP.
-                    continue
-                break
-
-        if main_portal_index is not None:
-            target_portals.insert(0, target_portals.pop(main_portal_index))
-        return target_portals
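-
-    # Example (a minimal sketch; assumes all_targets came from
-    # get_iscsi_targets and the host's paths are registered):
-    #
-    #     portals = cli.find_available_iscsi_targets(
-    #         'host-1', registered_spport_set, all_targets)
-    #     # only registered portals are returned; when an initiator IP
-    #     # is mapped for 'host-1', a pingable portal is moved to the
-    #     # front of the list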
-
-    def _is_sp_unavailable_error(self, out):
-        error_pattern = ('(^Error.*Message.*End of data stream.*)|'
-                         '(.*Message.*connection refused.*)|'
-                         '(^Error.*Message.*Service Unavailable.*)|'
-                         '(^A network error occurred while trying to'
-                         ' connect.* )|'
-                         '(^Exception: Error occurred because of time out\s*)')
-        pattern = re.compile(error_pattern, re.DOTALL)
-        return pattern.match(out)
-
-    @utils.retry(exception.EMCSPUnavailableException, retries=5,
-                 interval=30, backoff_rate=1)
-    def command_execute(self, *command, **kwargs):
-        """Executes command against the VNX array.
-
-        When the named parameter poll=False is given, the command is
-        sent along with the -np option.
-        """
-        # NOTE: retry_disable needs to be removed from kwargs before
-        # it is passed to utils.execute, otherwise an exception is thrown
-        retry_disable = kwargs.pop('retry_disable', False)
-        # Get the active ip before executing the command
-        current_ip = self.active_storage_ip
-        out, rc = self._command_execute_on_active_ip(*command, **kwargs)
-        if not retry_disable and self._is_sp_unavailable_error(out):
-            # When the active SP is unavailable, switch to the other
-            # SP, set it as active, and force a poll
-            if self._toggle_sp(current_ip):
-                LOG.debug('EMC: Command Exception: %(rc)s %(result)s. '
-                          'Retry on another SP.', {'rc': rc,
-                                                   'result': out})
-                # Raise exception for retry
-                raise exception.EMCSPUnavailableException(
-                    cmd=command, rc=rc, out=out.split('\n'))
-
-        return out, rc
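-
-    # Example (a minimal sketch, assuming cli is a configured instance):
-    #
-    #     out, rc = cli.command_execute('lun', '-list', poll=False)
-    #     # runs the CLI with -np prepended; if the active SP turns out
-    #     # to be unavailable, the command is retried on the peer SP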
-
-    def _command_execute_on_active_ip(self, *command, **kwargs):
-        if "check_exit_code" not in kwargs:
-            kwargs["check_exit_code"] = True
-        rc = 0
-        out = ""
-        need_poll = kwargs.pop('poll', True)
-        if "-np" not in command and not need_poll:
-            command = ("-np",) + command
-
-        try:
-            active_ip = (self.active_storage_ip,)
-            out, err = utils.execute(
-                *(self.command
-                  + active_ip
-                  + self.credentials
-                  + command),
-                **kwargs)
-        except processutils.ProcessExecutionError as pe:
-            rc = pe.exit_code
-            out = pe.stdout
-            out = out.replace('\n', '\\n')
-
-        LOG.debug('EMC: Command: %(command)s. Result: %(result)s.',
-                  {'command': self.command + active_ip + command,
-                   'result': out.replace('\n', '\\n')})
-
-        return out, rc
-
-    def _toggle_sp(self, current_ip):
-        """Toggle the storage IP.
-
-        :param current_ip: active ip before the toggle
-        :returns: True if the toggle happened, False otherwise
-        """
-        @lockutils.synchronized(
-            'vnx-toggle-' + self.toggle_lock_name, 'vnx-toggle-', True)
-        def inner():
-            if self.secondary_storage_ip is None:
-                return False
-            self.active_storage_ip = (
-                self.secondary_storage_ip
-                if current_ip == self.primary_storage_ip
-                else self.primary_storage_ip)
-
-            LOG.info(_LI('Toggle san_ip from %(current)s to '
-                         '%(new)s.'),
-                     {'current': current_ip,
-                      'new': self.active_storage_ip})
-            return True
-        return inner()
-
-    def get_enablers_on_array(self, poll=False):
-        """The function would get all the enablers installed on array."""
-        enablers = []
-        cmd_list = ('ndu', '-list')
-        out, rc = self.command_execute(*cmd_list, poll=poll)
-
-        if rc != 0:
-            self._raise_cli_error(cmd_list, rc, out)
-        else:
-            enabler_pat = r'Name of the software package:\s*(\S+)\s*'
-            for m in re.finditer(enabler_pat, out):
-                enablers.append(m.groups()[0])
-
-        LOG.debug('Enablers on array %s.', enablers)
-        return enablers
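-
-    # Example (a minimal sketch; the enabler names shown are
-    # illustrative):
-    #
-    #     cli.get_enablers_on_array()
-    #     # e.g. ['-Compression', '-FAST', '-MirrorView/S']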
-
-    def enable_or_disable_compression_on_lun(self, volumename, compression):
-        """The function will enable or disable the compression on lun."""
-        lun_data = self.get_lun_by_name(volumename)
-
-        command_compression_cmd = ('compression', '-' + compression,
-                                   '-l', lun_data['lun_id'],
-                                   '-ignoreThresholds', '-o')
-
-        out, rc = self.command_execute(*command_compression_cmd)
-
-        if 0 != rc:
-            self._raise_cli_error(command_compression_cmd, rc, out)
-        return rc, out
-
-    def deregister_initiator(self, initiator_uid):
-        """This function tries to deregister initiators on VNX."""
-        command_deregister = ('port', '-removeHBA',
-                              '-hbauid', initiator_uid,
-                              '-o')
-        out, rc = self.command_execute(*command_deregister)
-        return rc, out
-
-
-@decorate_all_methods(log_enter_exit)
-class EMCVnxCliBase(object):
-    """This class defines the functions to use the native CLI functionality."""
-
-    VERSION = '07.00.00'
-    stats = {'driver_version': VERSION,
-             'storage_protocol': None,
-             'vendor_name': 'EMC',
-             'volume_backend_name': None,
-             'compression_support': 'False',
-             'fast_support': 'False',
-             'deduplication_support': 'False',
-             'thin_provisioning_support': False,
-             'thick_provisioning_support': True}
-    REPLICATION_KEYS = ['san_ip', 'san_login', 'san_password',
-                        'san_secondary_ip',
-                        'storage_vnx_authentication_type',
-                        'storage_vnx_security_file_dir']
-    enablers = []
-    tmp_snap_prefix = 'tmp-snap-'
-    tmp_smp_for_backup_prefix = 'tmp-smp-'
-    snap_as_vol_prefix = 'snap-as-vol-'
-
-    def __init__(self, prtcl, configuration=None, active_backend_id=None):
-        self.protocol = prtcl
-        self.configuration = configuration
-        self.max_luns_per_sg = self.configuration.max_luns_per_storage_group
-        self.destroy_empty_sg = self.configuration.destroy_empty_storage_group
-        self.itor_auto_reg = self.configuration.initiator_auto_registration
-        self.itor_auto_dereg = self.configuration.initiator_auto_deregistration
-        self.check_max_pool_luns_threshold = (
-            self.configuration.check_max_pool_luns_threshold)
-        # if zoning_mode is fabric, use lookup service to build itor_tgt_map
-        self.zonemanager_lookup_service = None
-        zm_conf = config.Configuration(manager.volume_manager_opts)
-        if (zm_conf.safe_get('zoning_mode') == 'fabric' or
-                self.configuration.safe_get('zoning_mode') == 'fabric'):
-            from cinder.zonemanager import fc_san_lookup_service as fc_service
-            self.zonemanager_lookup_service = \
-                fc_service.FCSanLookupService(configuration=configuration)
-        self.max_retries = 5
-        if self.destroy_empty_sg:
-            LOG.warning(_LW("destroy_empty_storage_group: True. "
-                            "Empty storage group will be deleted "
-                            "after volume is detached."))
-        if not self.itor_auto_reg:
-            LOG.info(_LI("initiator_auto_registration: False. "
-                         "Initiator auto registration is not enabled. "
-                         "Please register initiator manually."))
-        self.hlu_set = set(range(1, self.max_luns_per_sg + 1))
-        self._client = self._build_client(active_backend_id)
-        self._active_backend_id = active_backend_id
-        # Create connection to the secondary storage device
-        self._mirror = self._build_mirror_view()
-        self.update_enabler_in_volume_stats()
-        # Fail the driver if configuration is not correct
-        if self._mirror:
-            if '-MirrorView/S' not in self.enablers:
-                no_enabler_err = _('MirrorView/S enabler is not installed.')
-                raise exception.VolumeBackendAPIException(data=no_enabler_err)
-        else:
-            self._mirror = None
-
-        conf_pools = self.configuration.safe_get("storage_vnx_pool_names")
-        self.storage_pools = self._get_managed_storage_pools(conf_pools)
-        self.array_serial = None
-        self.io_ports = self._parse_ports(self.configuration.io_port_list,
-                                          self.protocol)
-        if self.protocol == 'iSCSI':
-            self.iscsi_targets = self._client.get_iscsi_targets(
-                poll=True, io_ports=self.io_ports)
-        self.hlu_cache = {}
-        self.force_delete_lun_in_sg = (
-            self.configuration.force_delete_lun_in_storagegroup)
-        if self.force_delete_lun_in_sg:
-            LOG.warning(_LW("force_delete_lun_in_storagegroup=True"))
-
-        self.max_over_subscription_ratio = (
-            self.configuration.max_over_subscription_ratio)
-        self.ignore_pool_full_threshold = (
-            self.configuration.ignore_pool_full_threshold)
-        if self.ignore_pool_full_threshold:
-            LOG.warning(_LW("ignore_pool_full_threshold: True. "
-                            "LUN creation will still be forced "
-                            "even if the pool full threshold is exceeded."))
-        self.reserved_percentage = self.configuration.reserved_percentage
-
-    def _get_managed_storage_pools(self, pools):
-        storage_pools = set()
-        if pools:
-            storage_pools = set([po.strip() for po in pools.split(",")])
-            array_pools = self._client.get_pool_list(
-                [VNXPoolProperties.POOL_STATE], False)
-            array_pools = set([po['pool_name'] for po in array_pools])
-            un_exist_pools = storage_pools.difference(array_pools)
-            storage_pools.difference_update(un_exist_pools)
-            if not storage_pools:
-                msg = _("All the specified storage pools to be managed "
-                        "do not exist. Please check your configuration. "
-                        "Non-existent pools: %s") % ",".join(un_exist_pools)
-                raise exception.VolumeBackendAPIException(data=msg)
-            if un_exist_pools:
-                LOG.warning(_LW("The following specified storage pools "
-                                "do not exist: %(unexist)s. "
-                                "This host will only manage the storage "
-                                "pools: %(exist)s"),
-                            {'unexist': ",".join(un_exist_pools),
-                             'exist': ",".join(storage_pools)})
-            else:
-                LOG.debug("This host will manage the storage pools: %s.",
-                          ",".join(storage_pools))
-        else:
-            LOG.debug("No storage pool is configured. This host will "
-                      "manage all the pools on the VNX system.")
-        return storage_pools
-
-    def _parse_ports(self, io_port_list, protocol):
-        """Validates IO port format, supported format is a-1, b-3, a-3-0."""
-        if not io_port_list or io_port_list == '*':
-            return None
-        ports = re.split('\s*,\s*', io_port_list)
-        valid_ports = []
-        invalid_ports = []
-        if 'iSCSI' == protocol:
-            out, rc = self._client.get_connection_getport_output()
-            for port in ports:
-                port_tuple = port.split('-')
-                if (re.match(r'[abAB]-\d+-\d+$', port) and
-                        self._validate_iscsi_port(
-                            port_tuple[0], port_tuple[1], port_tuple[2], out)):
-                    valid_ports.append(
-                        (port_tuple[0].upper(), int(port_tuple[1]),
-                         int(port_tuple[2])))
-                else:
-                    invalid_ports.append(port)
-        elif 'FC' == protocol:
-            out, rc = self._client.get_port_output()
-            for port in ports:
-                port_tuple = port.split('-')
-                if re.match(r'[abAB]-\d+$', port) and self._validate_fc_port(
-                        port_tuple[0], port_tuple[1], out):
-                    valid_ports.append(
-                        (port_tuple[0].upper(), int(port_tuple[1])))
-                else:
-                    invalid_ports.append(port)
-        if len(invalid_ports) > 0:
-            msg = _('Invalid %(protocol)s ports %(port)s specified '
-                    'for io_port_list.') % {'protocol': self.protocol,
-                                            'port': ','.join(invalid_ports)}
-            LOG.error(msg)
-            raise exception.VolumeBackendAPIException(data=msg)
-        return valid_ports
-
-    def _validate_iscsi_port(self, sp, port_id, vport_id, cmd_output):
-        """Validates whether the iSCSI port is existed on VNX."""
-        sp_port_pattern = (r'SP:\s+%(sp)s\nPort ID:\s+%(port_id)s\n' %
-                           {'sp': sp.upper(), 'port_id': port_id})
-        sp_port_fields = re.split(sp_port_pattern, cmd_output)
-        if len(sp_port_fields) < 2:
-            return False
-        sp_port_info = re.split(r'SP:\s+(A|B)', sp_port_fields[1])[0]
-        vport_pattern = r'\nVirtual Port ID:\s+%s\nVLAN ID:' % vport_id
-        return re.search(vport_pattern, sp_port_info) is not None
-
-    def _validate_fc_port(self, sp, port_id, cmd_output):
-        """Validates whether the FC port is existed on VNX"""
-        fc_pattern = ('SP Name:\s*SP\s*' + sp.upper() +
-                      '\nSP Port ID:\s*' + str(port_id) +
-                      '\nSP UID:\s*((\w\w:){15}(\w\w))')
-        return re.search(fc_pattern, cmd_output)
-
-    def get_array_serial(self):
-        if not self.array_serial:
-            self.array_serial = self._client.get_array_serial()
-        return self.array_serial['array_serial']
-
-    def _construct_store_spec(self, volume, snapshot):
-        if snapshot['cgsnapshot_id']:
-            # Snapshot is part of cg snapshot
-            snapshot_name = snapshot['cgsnapshot_id']
-        else:
-            snapshot_name = snapshot['name']
-        new_snap_name = snapshot_name
-        if self._is_snapcopy_enabled(volume):
-            new_snap_name = self._construct_snap_name(volume)
-        pool_name = self.get_target_storagepool(volume, snapshot['volume'])
-        volume_name = volume['name']
-        volume_size = snapshot['volume_size']
-        dest_volume_name = volume_name + '_dest'
-        specs = self.get_volumetype_extraspecs(volume)
-        provisioning, tiering = self._get_extra_spec_value(specs)
-        store_spec = {
-            'volume': volume,
-            'src_snap_name': snapshot_name,
-            'new_snap_name': new_snap_name,
-            'dest_vol_name': dest_volume_name,
-            'pool_name': pool_name,
-            'provisioning': provisioning,
-            'tiering': tiering,
-            'volume_size': volume_size,
-            'client': self._client,
-            'ignore_pool_full_threshold': self.ignore_pool_full_threshold
-        }
-        return store_spec
-
-    def _construct_snap_name(self, volume):
-        """Returns snapshot or cgsnapshot name."""
-        if self._is_snapcopy_enabled(volume):
-            return self.snap_as_vol_prefix + six.text_type(volume['name_id'])
-        else:
-            return self.tmp_snap_prefix + six.text_type(volume['name_id'])
-
-    def _construct_tmp_smp_name(self, snapshot):
-        return self.tmp_smp_for_backup_prefix + snapshot.id
-
-    def _construct_mirror_name(self, volume):
-        return 'mirror_' + volume.id
-
-    def create_volume(self, volume):
-        """Creates a EMC volume."""
-        volume_size = volume['size']
-        volume_name = volume['name']
-
-        self._volume_creation_check(volume)
-        volume_metadata = self._get_volume_metadata(volume)
-        # Collect the extra specs and target pool for the CLI command.
-        specs = self.get_volumetype_extraspecs(volume)
-        pool = self.get_target_storagepool(volume)
-        provisioning, tiering = self._get_extra_spec_value(specs)
-        if 'snapcopy' in volume_metadata:
-            # We ignore snapcopy metadata when creating volume
-            LOG.warning(_LW('snapcopy metadata is ignored when '
-                            'creating a volume.'))
-
-        volume_metadata['snapcopy'] = 'False'
-        LOG.info(_LI('Create Volume: %(volume)s Size: %(size)s '
-                     'pool: %(pool)s '
-                     'provisioning: %(provisioning)s '
-                     'tiering: %(tiering)s '),
-                 {'volume': volume_name,
-                  'size': volume_size,
-                  'pool': pool,
-                  'provisioning': provisioning,
-                  'tiering': tiering})
-
-        data = self._client.create_lun_with_advance_feature(
-            pool, volume_name, volume_size,
-            provisioning, tiering, volume['consistencygroup_id'],
-            ignore_thresholds=self.ignore_pool_full_threshold,
-            poll=False)
-        pl = self._build_provider_location(lun_id=data['lun_id'],
-                                           base_lun_name=volume['name'])
-        # Setup LUN Replication/MirrorView between devices,
-        # secondary LUN will inherit properties from primary LUN.
-        rep_update, metadata_update = self.setup_lun_replication(
-            volume, data['lun_id'], provisioning, tiering)
-        volume_metadata.update(metadata_update)
-        model_update = {'provider_location': pl,
-                        'metadata': volume_metadata}
-        model_update.update(rep_update)
-        return model_update
-
-    def _volume_creation_check(self, volume):
-        """Checks on extra spec before the volume can be created."""
-        specs = self.get_volumetype_extraspecs(volume)
-        self._get_and_validate_extra_specs(specs)
-
-    def _get_migration_rate(self, volume):
-        metadata = self._get_volume_metadata(volume)
-        rate = metadata.get('migrate_rate', VNXMigrationRate.HIGH)
-        if rate:
-            if rate.lower() in VNXMigrationRate.get_all():
-                return rate.lower()
-            else:
-                LOG.warning(_LW('Unknown migration rate specified, '
-                                'using [high] as migration rate.'))
-
-        return VNXMigrationRate.HIGH
-
-    def _get_and_validate_extra_specs(self, specs):
-        """Checks on extra specs combinations."""
-        if "storagetype:pool" in specs:
-            LOG.warning(_LW("Extra spec key 'storagetype:pool' is obsoleted "
-                            "since driver version 5.1.0. This key will be "
-                            "ignored."))
-
-        provisioning, tiering = self._get_extra_spec_value(specs)
-        # step 1: check extra spec value
-        if provisioning:
-            self._check_extra_spec_value(
-                provisioning,
-                VNXProvisionEnum.get_all())
-        if tiering:
-            self._check_extra_spec_value(
-                tiering,
-                VNXTieringEnum.get_all())
-
-        # step 2: check extra spec combination
-        self._check_extra_spec_combination([provisioning, tiering])
-        return provisioning, tiering
-
-    def _check_extra_spec_value(self, extra_spec, valid_values):
-        """Checks whether an extra spec's value is valid."""
-
-        if not extra_spec or not valid_values:
-            LOG.error(_LE('The given extra_spec or valid_values is None.'))
-        elif extra_spec not in valid_values:
-            msg = _("The extra_spec: %s is invalid.") % extra_spec
-            LOG.error(msg)
-            raise exception.VolumeBackendAPIException(data=msg)
-        return
-
-    def _get_extra_spec_value(self, extra_specs):
-        """Gets EMC extra spec values."""
-        provisioning = 'thick'
-        if self._client.provisioning_specs[0] in extra_specs:
-            provisioning = (
-                extra_specs[self._client.provisioning_specs[0]].lower())
-            if self._client.provisioning_specs[1] in extra_specs:
-                LOG.warning(_LW("Both 'storagetype:prvosioning' and "
-                                "'provisioning:type' are set in the "
-                                "extra specs, the value of "
-                                "'provisioning:type' will be used. The "
-                                "key 'storagetype:provisioning' may be "
-                                "deprecated in the next release."))
-        elif self._client.provisioning_specs[1] in extra_specs:
-            provisioning = (
-                extra_specs[self._client.provisioning_specs[1]].lower())
-            LOG.warning(_LW("Extra spec key 'storagetype:provisioning' may "
-                            "be deprecated in the next release. It is "
-                            "recommended to use extra spec key "
-                            "'provisioning:type' instead."))
-        tiering = extra_specs.get(
-            self._client.tiering_spec, 'None').lower()
-
-        return provisioning, tiering
-
-    def _check_extra_spec_combination(self, spec_values):
-        """Checks whether extra spec combination is valid."""
-        enablers = self.enablers
-        # Check provisioning and tiering:
-        # deduplicated provisioning and auto tiering cannot both be enabled.
-        provisioning, tiering = spec_values
-        if provisioning == 'deduplicated' and tiering != 'none':
-            msg = _("Deduplicated provisioning and auto tiering cannot "
-                    "both be enabled.")
-            LOG.error(msg)
-            raise exception.VolumeBackendAPIException(data=msg)
-        elif provisioning == 'compressed' and '-Compression' not in enablers:
-            msg = _("Compression Enabler is not installed. "
-                    "Cannot create a compressed volume.")
-            LOG.error(msg)
-            raise exception.VolumeBackendAPIException(data=msg)
-        elif (provisioning == 'deduplicated' and
-                '-Deduplication' not in enablers):
-            msg = _("Deduplication Enabler is not installed. "
-                    "Cannot create a deduplicated volume.")
-            LOG.error(msg)
-            raise exception.VolumeBackendAPIException(data=msg)
-        elif (provisioning in ['thin', 'deduplicated', 'compressed'] and
-                '-ThinProvisioning' not in enablers):
-            msg = _("ThinProvisioning Enabler is not installed. "
-                    "Cannot create a thin volume.")
-            LOG.error(msg)
-            raise exception.VolumeBackendAPIException(data=msg)
-        elif tiering != 'none' and '-FAST' not in enablers:
-            msg = _("FAST VP Enabler is not installed. "
-                    "Cannot set a tiering policy for the volume.")
-            LOG.error(msg)
-            raise exception.VolumeBackendAPIException(data=msg)
-        return
-
-    def delete_volume(self, volume, force_delete=False):
-        """Deletes an EMC volume."""
-        if self._is_replication_enabled(volume):
-            self.cleanup_lun_replication(volume)
-        try:
-            self._client.delete_lun(volume['name'])
-        except exception.EMCVnxCLICmdError as ex:
-            orig_out = "\n".join(ex.kwargs["out"])
-            if ((force_delete or self.force_delete_lun_in_sg) and
-                    VNXError.has_error(orig_out, VNXError.LUN_IN_SG)):
-                LOG.warning(_LW('LUN corresponding to %s is still in '
-                                'some storage groups. Removing the LUN '
-                                'from those storage groups and retrying '
-                                'the deletion.'),
-                            volume['name'])
-                lun_id = self.get_lun_id(volume)
-                for hlu, sg in self._client.get_hlus(lun_id):
-                    self._client.remove_hlu_from_storagegroup(hlu, sg)
-                self._client.delete_lun(volume['name'])
-            else:
-                with excutils.save_and_reraise_exception():
-                    # Reraise the original exception
-                    pass
-        if volume['provider_location']:
-            lun_type = self.extract_provider_location(
-                volume['provider_location'], 'type')
-            if lun_type == 'smp':
-                self._client.delete_snapshot(
-                    self._construct_snap_name(volume))
-
-    def extend_volume(self, volume, new_size):
-        """Extends an EMC volume."""
-
-        try:
-            self._client.expand_lun_and_wait(volume['name'], new_size)
-        except exception.EMCVnxCLICmdError as ex:
-            with excutils.save_and_reraise_exception(ex) as ctxt:
-                out = "\n".join(ex.kwargs["out"])
-                if VNXError.has_error(out, VNXError.LUN_IS_PREPARING):
-                    # The error means the operation cannot be performed
-                    # because the LUN is 'Preparing'. Wait for a while
-                    # so that the LUN may get out of the transitioning
-                    # state.
-                    LOG.warning(_LW("LUN %(name)s is not ready for extension: "
-                                    "%(out)s"),
-                                {'name': volume['name'], 'out': out})
-                    self._client.wait_until_lun_ready_for_ops(volume['name'])
-                    self._client.expand_lun_and_wait(volume['name'], new_size)
-                    ctxt.reraise = False
-                else:
-                    ctxt.reraise = True
-
-    def _get_original_status(self, volume):
-        if not volume['volume_attachment']:
-            return 'available'
-        else:
-            return 'in-use'
-
-    def _is_valid_for_storage_assisted_migration(
-            self, volume, host, new_type=None):
-        """Check the src and dest volume to decide the migration type."""
-        false_ret = (False, None)
-
-        if 'location_info' not in host['capabilities']:
-            LOG.warning(_LW("Failed to get target_pool_name and "
-                            "target_array_serial. 'location_info' "
-                            "is not in host['capabilities']."))
-            return false_ret
-
-        # mandatory info should be ok
-        info = host['capabilities']['location_info']
-        LOG.debug("Host for migration is %s.", info)
-        try:
-            info_detail = info.split('|')
-            target_pool_name = info_detail[0]
-            target_array_serial = info_detail[1]
-        except AttributeError:
-            LOG.warning(_LW("Error on parsing target_pool_name/"
-                            "target_array_serial."))
-            return false_ret
-
-        # source and destination should be on same array
-        array_serial = self.get_array_serial()
-        if target_array_serial != array_serial:
-            LOG.debug('Skip storage-assisted migration because the '
-                      'target and source backends are not managing '
-                      'the same array.')
-            return false_ret
-
-        if len(target_pool_name) == 0:
-            # Destination host is using a legacy driver
-            LOG.warning(_LW("Didn't get the pool information of the "
-                            "host %s. Storage assisted Migration is not "
-                            "supported. The host may be using a legacy "
-                            "driver."),
-                        host['name'])
-            return false_ret
-
-        # Same protocol should be used if volume is in-use
-        if host['capabilities']['storage_protocol'] != self.protocol \
-                and self._get_original_status(volume) == 'in-use':
-            LOG.debug('Skip storage-assisted migration because an '
-                      'in-use volume cannot be migrated between '
-                      'different protocols.')
-            return false_ret
-
-        return (True, target_pool_name)
-
-    def migrate_volume(self, ctxt, volume, host, new_type=None):
-        """Leverage the VNX on-array migration functionality.
-
-        This method is invoked at the source backend.
-        """
-        false_ret = (False, None)
-        is_valid, target_pool_name = \
-            self._is_valid_for_storage_assisted_migration(
-                volume, host, new_type)
-        if not is_valid:
-            return false_ret
-
-        specs = None
-        if new_type is not None:
-            specs = new_type.get('extra_specs')
-
-        return self._migrate_volume(volume, target_pool_name, specs)
-
-    def _migrate_volume(self,
-                        volume,
-                        target_pool_name,
-                        type_specs=None,
-                        src_id=None):
-        LOG.debug("Starting real storage-assisted migration...")
-        # First create a destination LUN of the same size as the source;
-        # its name is the source name plus a timestamp suffix.
-        volume_name = volume['name']
-        new_volume_name = "%(src)s-%(ts)s" % {'src': volume_name,
-                                              'ts': int(time.time())}
-
-        if src_id is None:
-            src_id = self.get_lun_id(volume)
-
-        if type_specs is not None:
-            provisioning, tiering = self._get_extra_spec_value(
-                type_specs)
-        else:
-            provisioning, tiering = self._get_extra_spec_value(
-                self.get_volumetype_extraspecs(volume))
-
-        data = self._client.create_lun_with_advance_feature(
-            target_pool_name, new_volume_name, volume['size'],
-            provisioning, tiering,
-            ignore_thresholds=self.ignore_pool_full_threshold)
-
-        dst_id = data['lun_id']
-        dst_wwn = data['wwn']
-        moved = self._client.migrate_lun_with_verification(
-            src_id, dst_id, new_volume_name, dst_wwn,
-            rate=self._get_migration_rate(volume))
-
-        lun_type = self.extract_provider_location(
-            volume['provider_location'], 'type')
-        # An SMP becomes a regular LUN after migration.
-        if lun_type == 'smp':
-            self._client.delete_snapshot(
-                self._construct_snap_name(volume))
-
-        pl = self._build_provider_location(src_id, 'lun',
-                                           base_lun_name=volume['name'])
-        volume_metadata = self._get_volume_metadata(volume)
-        volume_metadata['snapcopy'] = 'False'
-        model_update = {'provider_location': pl,
-                        'metadata': volume_metadata}
-        return moved, model_update
-
-    def update_migrated_volume(self, context, volume, new_volume,
-                               original_volume_status):
-        """Updates metadata after host-assisted migration."""
-
-        lun_type = self.extract_provider_location(
-            new_volume['provider_location'], 'type')
-        volume_metadata = self._get_volume_metadata(volume)
-        model_update = {'provider_location':
-                        new_volume['provider_location']}
-        if lun_type:
-            volume_metadata['snapcopy'] = (
-                'True' if lun_type == 'smp' else 'False')
-            model_update['metadata'] = volume_metadata
-        return model_update
-
-    def retype(self, ctxt, volume, new_type, diff, host):
-        new_specs = new_type['extra_specs']
-
-        new_provisioning, new_tiering = (
-            self._get_and_validate_extra_specs(new_specs))
-
-        # Check what changes are needed
-        changes = self.determine_changes_when_retype(volume, new_type, host)
-
-        if self._client.check_lun_has_snap(self.get_lun_id(volume)):
-            # Reject if volume has snapshot when migration is needed
-            if changes['migration']:
-                LOG.debug('Driver is not able to retype because the volume '
-                          '%s has a snapshot, and migrating a volume with '
-                          'snapshots is forbidden.', volume['id'])
-                return False
-            # Reject if volume has snapshot when trying to
-            # turn on compression
-            if changes['compression_on']:
-                LOG.debug('Driver is not able to retype because the volume '
-                          '%s has a snapshot, and turning on compression '
-                          'for such a volume is forbidden.', volume['id'])
-                return False
-
-        if changes['migration']:
-            # Check whether the migration is valid
-            is_valid, target_pool_name = (
-                self._is_valid_for_storage_assisted_migration(
-                    volume, host, new_type))
-            if is_valid:
-                specs = None
-                if new_type is not None:
-                    specs = new_type.get('extra_specs')
-                moved, model_update = self._migrate_volume(
-                    volume, target_pool_name, specs)
-                if moved:
-                    return moved, model_update
-                else:
-                    LOG.warning(_LW('Storage-assisted migration failed during '
-                                    'retype.'))
-                    return False
-            else:
-                # Migration is invalid
-                LOG.debug('Driver is not able to retype because '
-                          'storage-assisted migration is not valid '
-                          'in this situation.')
-                return False
-        if changes['compression_on']:
-            # Turn on compression feature on the volume
-            self._client.enable_or_disable_compression_on_lun(
-                volume['name'], 'on')
-        if changes['tiering']:
-            # Modify lun to change tiering policy
-            self._client.modify_lun_tiering_by_name(volume['name'],
-                                                    new_tiering)
-        return True
-
-    def determine_changes_when_retype(self, volume, new_type, host):
-        changes = {
-            'migration': False,
-            'tiering': False,
-            'compression_on': False
-        }
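-        # Decision sketch (as implemented below): a host change, or any
-        # provisioning change other than thin/thick -> compressed, forces
-        # a migration; thin/thick -> compressed only turns compression on;
-        # an SMP always migrates; a tiering change is applied in place.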
-
-        old_specs = self.get_volumetype_extraspecs(volume)
-        old_provisioning, old_tiering = (
-            self._get_extra_spec_value(old_specs))
-
-        new_specs = new_type['extra_specs']
-        new_provisioning, new_tiering = (
-            self._get_extra_spec_value(new_specs))
-
-        lun_type = self.extract_provider_location(
-            volume['provider_location'], 'type')
-
-        if volume['host'] != host['host']:
-            changes['migration'] = True
-        elif old_provisioning != new_provisioning:
-            if (old_provisioning in ['thin', 'thick'] and
-                    new_provisioning == 'compressed'):
-                changes['compression_on'] = True
-            else:
-                changes['migration'] = True
-        if lun_type == 'smp':
-            changes['migration'] = True
-
-        if new_tiering != old_tiering:
-            changes['tiering'] = True
-        return changes
-
-    def determine_all_enablers_exist(self, enablers):
-        """Determine all wanted enablers whether exist."""
-        wanted = ['-ThinProvisioning',
-                  '-Deduplication',
-                  '-FAST',
-                  '-Compression']
-        for each in wanted:
-            if each not in enablers:
-                return False
-        return True
-
-    def _build_pool_stats(self, pool, pool_feature=None):
-        pool_stats = {
-            'pool_name': pool['pool_name'],
-            'total_capacity_gb': pool['total_capacity_gb'],
-            'provisioned_capacity_gb': (pool['provisioned_capacity_gb'])
-        }
-
-        # Pool state can be Initializing, Ready, Faulted, Offline or
-        # Deleting; report zero free capacity for the unusable states.
-        if pool['state'] in ('Initializing', 'Offline', 'Deleting'):
-            pool_stats['free_capacity_gb'] = 0
-            LOG.warning(_LW("Storage Pool '%(pool)s' is '%(state)s'."),
-                        {'pool': pool_stats['pool_name'],
-                         'state': pool['state']})
-        else:
-            pool_stats['free_capacity_gb'] = pool['free_capacity_gb']
-            if self.check_max_pool_luns_threshold:
-                pool_feature = self._client.get_pool_feature_properties(
-                    poll=False) if not pool_feature else pool_feature
-                if (pool_feature['max_pool_luns'] <=
-                        pool_feature['total_pool_luns']):
-                    LOG.warning(_LW("Maximum number of Pool LUNs, %s, "
-                                    "have been created. "
-                                    "No more LUN creation can be done."),
-                                pool_feature['max_pool_luns'])
-                    pool_stats['free_capacity_gb'] = 0
-
-        if not self.reserved_percentage:
-            # When the admin has not set a value, the driver calculates
-            # a recommended one.
-
-            # Some extra capacity is consumed by the metadata of pool LUNs;
-            # the overhead is about LUN_Capacity * 0.02 + 3 GB.
-            # reserved_percentage makes sure the scheduler takes this
-            # overhead into consideration. Assuming that all the remaining
-            # capacity is used to create a thick LUN, reserved_percentage
-            # is estimated as follows:
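-            # Worked example (illustrative numbers only): with 100 GB free
-            # in a 200 GB pool, reserved = ((0.02 * 100 + 3) /
-            # (1.02 * 200)) * 100 ~= 2.45 before the pool-full-threshold
-            # term is added and the ceiling is applied.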
-            reserved = (((0.02 * pool['free_capacity_gb'] + 3) /
-                         (1.02 * pool['total_capacity_gb'])) * 100)
-            # Take pool full threshold into consideration
-            if not self.ignore_pool_full_threshold:
-                reserved += 100 - pool['pool_full_threshold']
-            pool_stats['reserved_percentage'] = int(math.ceil(min(reserved,
-                                                                  100)))
-        else:
-            pool_stats['reserved_percentage'] = self.reserved_percentage
-
-        array_serial = self.get_array_serial()
-        pool_stats['location_info'] = ('%(pool_name)s|%(array_serial)s' %
-                                       {'pool_name': pool['pool_name'],
-                                        'array_serial': array_serial})
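-        # location_info pairs with _is_valid_for_storage_assisted_migration,
-        # which splits it on '|' into (pool_name, array_serial).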
-        # Check if this pool's fast_cache is enabled
-        if 'fast_cache_enabled' not in pool:
-            pool_stats['fast_cache_enabled'] = 'False'
-        else:
-            pool_stats['fast_cache_enabled'] = pool['fast_cache_enabled']
-
-        # Copy advanced feature stats from backend stats
-        pool_stats['compression_support'] = self.stats['compression_support']
-        pool_stats['fast_support'] = self.stats['fast_support']
-        pool_stats['deduplication_support'] = (
-            self.stats['deduplication_support'])
-        # Thin provisioning is supported on VNX pools only when
-        # ThinProvisioning Enabler software is installed on VNX,
-        # and thick provisioning is always supported on VNX pools.
-        pool_stats['thin_provisioning_support'] = (
-            self.stats['thin_provisioning_support'])
-        pool_stats['thick_provisioning_support'] = True
-        pool_stats['consistencygroup_support'] = (
-            self.stats['consistencygroup_support'])
-        pool_stats['max_over_subscription_ratio'] = (
-            self.max_over_subscription_ratio)
-        # Add replication V2 support
-        targets = []
-        if self._mirror:
-            pool_stats['replication_enabled'] = True
-            pool_stats['replication_count'] = 1
-            pool_stats['replication_type'] = ['sync']
-            for device in self.configuration.replication_device:
-                targets.append(device['backend_id'])
-        else:
-            pool_stats['replication_enabled'] = False
-        pool_stats['replication_targets'] = targets
-        return pool_stats
-
-    def update_enabler_in_volume_stats(self):
-        """Updates the enabler information in stats."""
-        if not self.determine_all_enablers_exist(self.enablers):
-            self.enablers = self._client.get_enablers_on_array()
-
-        self.stats['compression_support'] = (
-            'True' if '-Compression' in self.enablers else 'False')
-
-        self.stats['fast_support'] = (
-            'True' if '-FAST' in self.enablers else 'False')
-
-        self.stats['deduplication_support'] = (
-            'True' if '-Deduplication' in self.enablers else 'False')
-
-        self.stats['thin_provisioning_support'] = (
-            '-ThinProvisioning' in self.enablers)
-
-        self.stats['consistencygroup_support'] = (
-            'True' if '-VNXSnapshots' in self.enablers else 'False')
-
-        return self.stats
-
-    def create_snapshot(self, snapshot):
-        """Creates a snapshot."""
-
-        snapshot_name = snapshot['name']
-        volume_name = snapshot['volume_name']
-        volume = snapshot['volume']
-        LOG.info(_LI('Create snapshot: %(snapshot)s: volume: %(volume)s'),
-                 {'snapshot': snapshot_name,
-                  'volume': volume_name})
-        lun_id = self.get_lun_id(volume)
-
-        try:
-            self._client.create_snapshot(lun_id, snapshot_name)
-        except exception.EMCVnxCLICmdError as ex:
-            with excutils.save_and_reraise_exception(ex) as ctxt:
-                out = "\n".join(ex.kwargs["out"])
-                if VNXError.has_error(out, VNXError.LUN_IS_PREPARING):
-                    # The error means the operation cannot be performed
-                    # because the LUN is 'Preparing'. Wait for a while
-                    # so that the LUN may get out of the transitioning
-                    # state.
-                    LOG.warning(_LW("LUN %(name)s is not ready for snapshot: "
-                                    "%(out)s"),
-                                {'name': volume_name, 'out': out})
-                    self._client.wait_until_lun_ready_for_ops(volume['name'])
-                    self._client.create_snapshot(lun_id, snapshot_name)
-                    ctxt.reraise = False
-                else:
-                    ctxt.reraise = True
-
-    def delete_snapshot(self, snapshot):
-        """Deletes a snapshot."""
-
-        snapshot_name = snapshot['name']
-
-        LOG.info(_LI('Delete Snapshot: %(snapshot)s'),
-                 {'snapshot': snapshot_name})
-
-        self._client.delete_snapshot(snapshot_name)
-
-    def create_volume_from_snapshot(self, volume, snapshot):
-        """Constructs a work flow to create a volume from snapshot.
-
-        This flow will do the following:
-
-        1. Create a snap mount point (SMP) for the snapshot.
-        2. Attach the snapshot to the SMP created in the first step.
-        3. Create a temporary lun to prepare for migration.
-           (Skipped if snapcopy='true')
-        4. Start a migration between the SMP and the temp lun.
-           (Skipped if snapcopy='true')
-        """
-        self._volume_creation_check(volume)
-        flow_name = 'create_volume_from_snapshot'
-        base_lun_name = self._get_base_lun_name(snapshot.volume)
-        work_flow = linear_flow.Flow(flow_name)
-        store_spec = self._construct_store_spec(volume, snapshot)
-        store_spec.update({'base_lun_name': base_lun_name})
-        volume_metadata = self._get_volume_metadata(volume)
-        rep_update = {}
-        if self._is_snapcopy_enabled(volume):
-            if self._is_replication_enabled(volume):
-                err_msg = _("Unable to enable replication "
-                            "and snapcopy at the same time.")
-                raise exception.VolumeBackendAPIException(data=err_msg)
-            work_flow.add(CopySnapshotTask(),
-                          AllowReadWriteOnSnapshotTask(),
-                          CreateSMPTask(),
-                          AttachSnapTask())
-            flow_engine = taskflow.engines.load(work_flow,
-                                                store=store_spec)
-            flow_engine.run()
-            new_lun_id = flow_engine.storage.fetch('new_smp_data')['lun_id']
-            pl = self._build_provider_location(
-                new_lun_id, 'smp', base_lun_name)
-            volume_metadata['snapcopy'] = 'True'
-        else:
-            store_spec.update({'rate': self._get_migration_rate(volume)})
-            work_flow.add(CreateSMPTask(),
-                          AttachSnapTask(),
-                          CreateDestLunTask(),
-                          MigrateLunTask())
-            flow_engine = taskflow.engines.load(work_flow,
-                                                store=store_spec)
-            flow_engine.run()
-            new_lun_id = flow_engine.storage.fetch('new_lun_id')
-            pl = self._build_provider_location(
-                new_lun_id, 'lun', volume['name'])
-            volume_metadata['snapcopy'] = 'False'
-            # Setup LUN Replication/MirrorView between devices,
-            # secondary LUN will inherit properties from primary LUN.
-            rep_update, metadata_update = self.setup_lun_replication(
-                volume, new_lun_id,
-                store_spec['provisioning'],
-                store_spec['tiering'])
-            volume_metadata.update(metadata_update)
-        model_update = {'provider_location': pl,
-                        'metadata': volume_metadata}
-        model_update.update(rep_update)
-        volume_host = volume['host']
-        host = vol_utils.extract_host(volume_host, 'backend')
-        host_and_pool = vol_utils.append_host(host, store_spec['pool_name'])
-        if volume_host != host_and_pool:
-            model_update['host'] = host_and_pool
-
-        return model_update
-
-    def create_cloned_volume(self, volume, src_vref):
-        """Creates a clone of the specified volume."""
-        self._volume_creation_check(volume)
-        base_lun_name = self._get_base_lun_name(src_vref)
-        source_lun_id = self.get_lun_id(src_vref)
-        volume_size = volume['size']
-        source_volume_name = src_vref['name']
-        consistencygroup_id = src_vref['consistencygroup_id']
-        cgsnapshot_name = None
-        if consistencygroup_id:
-            cgsnapshot_name = self._construct_snap_name(volume)
-
-        snapshot_name = self._construct_snap_name(volume)
-        snapshot = {
-            'name': snapshot_name,
-            'volume_name': source_volume_name,
-            'volume_size': volume_size,
-            'volume': src_vref,
-            'cgsnapshot_id': cgsnapshot_name,
-            'consistencygroup_id': consistencygroup_id,
-            'id': cgsnapshot_name
-        }
-        flow_name = 'create_cloned_volume'
-        store_spec = self._construct_store_spec(volume, snapshot)
-        work_flow = linear_flow.Flow(flow_name)
-        store_spec.update({'snapshot': snapshot})
-        store_spec.update({'source_lun_id': source_lun_id})
-        store_spec.update({'base_lun_name': base_lun_name})
-        volume_metadata = self._get_volume_metadata(volume)
-        rep_update = {}
-        if self._is_snapcopy_enabled(volume):
-            # snapcopy feature enabled
-            if self._is_replication_enabled(volume):
-                err_msg = _("Unable to enable replication "
-                            "and snapcopy at the same time.")
-                raise exception.VolumeBackendAPIException(data=err_msg)
-            work_flow.add(CreateSnapshotTask(),
-                          CreateSMPTask(),
-                          AttachSnapTask())
-            flow_engine = taskflow.engines.load(work_flow,
-                                                store=store_spec)
-            flow_engine.run()
-            new_lun_id = flow_engine.storage.fetch('new_smp_data')['lun_id']
-            pl = self._build_provider_location(
-                new_lun_id, 'smp', base_lun_name)
-        else:
-            # snapcopy feature disabled, need to migrate
-            store_spec.update({'rate': self._get_migration_rate(volume)})
-            work_flow.add(CreateSnapshotTask(),
-                          CreateSMPTask(),
-                          AttachSnapTask(),
-                          CreateDestLunTask(),
-                          MigrateLunTask())
-            flow_engine = taskflow.engines.load(work_flow,
-                                                store=store_spec)
-            flow_engine.run()
-            new_lun_id = flow_engine.storage.fetch('new_lun_id')
-            # Delete temp Snapshot
-            if consistencygroup_id:
-                self._client.delete_cgsnapshot(snapshot['id'])
-            else:
-                self.delete_snapshot(snapshot)
-            # After migration, volume's base lun is itself
-            pl = self._build_provider_location(
-                new_lun_id, 'lun', volume['name'])
-            volume_metadata['snapcopy'] = 'False'
-
-            # Setup LUN Replication/MirrorView between devices,
-            # secondary LUN will inherit properties from primary LUN.
-            rep_update, metadata_update = self.setup_lun_replication(
-                volume, new_lun_id,
-                store_spec['provisioning'],
-                store_spec['tiering'])
-            volume_metadata.update(metadata_update)
-
-        model_update = {'provider_location': pl,
-                        'metadata': volume_metadata}
-        model_update.update(rep_update)
-        volume_host = volume['host']
-        host = vol_utils.extract_host(volume_host, 'backend')
-        host_and_pool = vol_utils.append_host(host, store_spec['pool_name'])
-        if volume_host != host_and_pool:
-            model_update['host'] = host_and_pool
-
-        return model_update
-
-    @staticmethod
-    def _get_volume_metadata(volume):
-        # Since versionedobjects is only partially merged, metadata
-        # may come from either 'volume_metadata' or 'metadata', so
-        # we need to handle both of them.
-        volume_metadata = {}
-        if 'volume_metadata' in volume:
-            for metadata in volume['volume_metadata']:
-                volume_metadata[metadata['key']] = metadata['value']
-            return volume_metadata
-        return volume['metadata'] if 'metadata' in volume else {}
-
-    def _is_snapcopy_enabled(self, volume):
-        meta = self._get_volume_metadata(volume)
-        return 'snapcopy' in meta and meta['snapcopy'].lower() == 'true'
-
-    def _get_base_lun_name(self, volume):
-        """Returns base LUN name for SMP or LUN."""
-        base_name = self.extract_provider_location(
-            volume['provider_location'], 'base_lun_name')
-        if base_name is None or base_name == 'None':
-            return volume['name']
-        return base_name
-
-    def dumps_provider_location(self, pl_dict):
-        return '|'.join([k + '^' + pl_dict[k] for k in pl_dict])
-
-    def _build_provider_location(self, lun_id, type='lun', base_lun_name=None):
-        """Builds provider_location for volume or snapshot.
-
-        :param lun_id: LUN ID in VNX
-        :param type: 'lun' or 'smp'
-        :param base_lun_name: primary LUN name,
-                              it will be used when creating snap lun
-        """
-        pl_dict = {'system': self.get_array_serial(),
-                   'type': type,
-                   'id': six.text_type(lun_id),
-                   'base_lun_name': six.text_type(base_lun_name),
-                   'version': self.VERSION}
-        return self.dumps_provider_location(pl_dict)
-
-    def _update_provider_location(self, provider_location,
-                                  key=None, value=None):
-        pl_dict = {tp.split('^')[0]: tp.split('^')[1]
-                   for tp in provider_location.split('|')}
-        pl_dict[key] = value
-        return self.dumps_provider_location(pl_dict)
-
-    @staticmethod
-    def extract_provider_location(provider_location, key='id'):
-        """Extracts value of the specified field from provider_location string.
-
-        :param provider_location: provider_location string
-        :param key: field name of the value to be extracted
-        :return: value of the specified field if it exists, otherwise,
-                 None is returned
-        """
-        ret = None
-        if provider_location is not None:
-            kvps = provider_location.split('|')
-            for kvp in kvps:
-                fields = kvp.split('^')
-                if len(fields) == 2 and fields[0] == key:
-                    ret = fields[1]
-        return ret
-
-    def _consistencygroup_creation_check(self, group):
-        """Check extra spec for consistency group."""
-
-        if group.get('volume_type_id') is not None:
-            for type_id in group['volume_type_id'].split(","):
-                if type_id:
-                    provisioning, tiering = (
-                        self._get_extra_spec_value(
-                            volume_types.get_volume_type_extra_specs(
-                                type_id)))
-                    if provisioning == 'compressed':
-                        msg = _("Failed to create consistency group %s "
-                                "because VNX consistency group cannot "
-                                "accept compressed LUNs as members."
-                                ) % group['id']
-                        raise exception.VolumeBackendAPIException(data=msg)
-
-    def create_consistencygroup(self, context, group):
-        """Creates a consistency group."""
-        LOG.info(_LI('Start to create consistency group: %(group_name)s '
-                     'id: %(id)s'),
-                 {'group_name': group['name'], 'id': group['id']})
-
-        self._consistencygroup_creation_check(group)
-
-        model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
-        try:
-            self._client.create_consistencygroup(group['id'])
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Create consistency group %s failed.'),
-                          group['id'])
-
-        return model_update
-
-    def delete_consistencygroup(self, context, group, volumes):
-        """Deletes a consistency group."""
-        cg_name = group['id']
-
-        model_update = {}
-        volumes_model_update = []
-        model_update['status'] = group['status']
-        LOG.info(_LI('Start to delete consistency group: %(cg_name)s'),
-                 {'cg_name': cg_name})
-        try:
-            self._client.delete_consistencygroup(cg_name)
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Delete consistency group %s failed.'), cg_name)
-
-        for volume_ref in volumes:
-            try:
-                self._client.delete_lun(volume_ref['name'])
-                volumes_model_update.append(
-                    {'id': volume_ref['id'], 'status': 'deleted'})
-            except Exception:
-                volumes_model_update.append(
-                    {'id': volume_ref['id'], 'status': 'error_deleting'})
-
-        return model_update, volumes_model_update
-
-    def update_consistencygroup(self, context,
-                                group,
-                                add_volumes,
-                                remove_volumes):
-        """Adds or removes LUN(s) to/from an existing consistency group"""
-        model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
-        cg_name = group['id']
-        add_ids = [six.text_type(self.get_lun_id(vol))
-                   for vol in add_volumes] if add_volumes else []
-        remove_ids = [six.text_type(self.get_lun_id(vol))
-                      for vol in remove_volumes] if remove_volumes else []
-
-        data = self._client.get_consistency_group_by_name(cg_name)
-        ids_curr = data['Luns']
-        ids_later = []
-
-        if ids_curr:
-            ids_later.extend(ids_curr)
-        ids_later.extend(add_ids)
-        for remove_id in remove_ids:
-            if remove_id in ids_later:
-                ids_later.remove(remove_id)
-            else:
-                LOG.warning(_LW("LUN with id %(remove_id)s is not present "
-                                "in cg %(cg_name)s, skip it."),
-                            {'remove_id': remove_id, 'cg_name': cg_name})
-        # Remove all from cg
-        if not ids_later:
-            self._client.remove_luns_from_consistencygroup(cg_name,
-                                                           ids_curr)
-        else:
-            self._client.replace_luns_in_consistencygroup(cg_name,
-                                                          ids_later)
-        return model_update, None, None
-
-    def create_cgsnapshot(self, context, cgsnapshot, snapshots):
-        """Creates a cgsnapshot (snap group)."""
-        cgsnapshot_id = cgsnapshot['id']
-
-        model_update = {}
-        snapshots_model_update = []
-        LOG.info(_LI('Start to create cgsnapshot for consistency group'
-                     ': %(group_name)s'),
-                 {'group_name': cgsnapshot['consistencygroup_id']})
-
-        try:
-            self._client.create_cgsnapshot(cgsnapshot['consistencygroup_id'],
-                                           cgsnapshot['id'])
-            for snapshot in snapshots:
-                snapshots_model_update.append(
-                    {'id': snapshot['id'],
-                     'status': fields.SnapshotStatus.AVAILABLE})
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Create cg snapshot %s failed.'),
-                          cgsnapshot_id)
-
-        model_update['status'] = 'available'
-
-        return model_update, snapshots_model_update
-
-    def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
-        """Deletes a cgsnapshot (snap group)."""
-        cgsnapshot_id = cgsnapshot['id']
-
-        model_update = {}
-        snapshots_model_update = []
-        model_update['status'] = cgsnapshot['status']
-        LOG.info(_LI('Delete cgsnapshot %(snap_name)s for consistency '
-                     'group: %(group_name)s'),
-                 {'snap_name': cgsnapshot['id'],
-                  'group_name': cgsnapshot['consistencygroup_id']})
-
-        try:
-            self._client.delete_cgsnapshot(cgsnapshot['id'])
-            for snapshot in snapshots:
-                snapshots_model_update.append(
-                    {'id': snapshot['id'],
-                     'status': fields.SnapshotStatus.DELETED})
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                LOG.error(_LE('Delete cgsnapshot %s failed.'),
-                          cgsnapshot_id)
-
-        return model_update, snapshots_model_update
-
-    def get_lun_id(self, volume):
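-        # Prefers the LUN id cached in provider_location and falls back
-        # to a get_lun_by_name() query when it is absent or unparsable.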
-        lun_id = None
-        try:
-            provider_location = volume.get('provider_location')
-            if provider_location:
-                lun_id = self.extract_provider_location(
-                    provider_location,
-                    'id')
-            if lun_id:
-                lun_id = int(lun_id)
-            else:
-                LOG.debug('Lun id is not stored in provider location, '
-                          'query it.')
-                lun_id = self._client.get_lun_by_name(volume['name'])['lun_id']
-        except Exception as ex:
-            LOG.debug('Exception when getting lun id: %s.', six.text_type(ex))
-            lun_id = self._client.get_lun_by_name(volume['name'])['lun_id']
-        LOG.debug('Get lun_id: %s.', lun_id)
-        return lun_id
-
-    def get_lun_map(self, storage_group):
-        data = self._client.get_storage_group(storage_group)
-        return data['lunmap']
-
-    def assure_storage_group(self, storage_group):
-        self._client.create_storage_group(storage_group)
-
-    def assure_host_in_storage_group(self, hostname, storage_group):
-        try:
-            self._client.connect_host_to_storage_group(hostname, storage_group)
-        except exception.EMCVnxCLICmdError as ex:
-            if ex.kwargs["rc"] == 83:
-                # The SG was not created, or was destroyed by a concurrent
-                # operation before the connect call.
-                # Create the SG and try to connect again.
-                LOG.warning(_LW('Storage Group %s is not found. Create it.'),
-                            storage_group)
-                self.assure_storage_group(storage_group)
-                self._client.connect_host_to_storage_group(
-                    hostname, storage_group)
-            else:
-                raise
-        return hostname
-
-    def get_lun_owner(self, volume):
-        """Returns SP owner of the volume."""
-        data = self._client.get_lun_by_name(volume['name'],
-                                            poll=False)
-        owner_sp = data['owner']
-        LOG.debug('Owner SP : %s', owner_sp)
-        return owner_sp
-
-    def filter_available_hlu_set(self, used_hlus):
-        used_hlu_set = set(used_hlus)
-        return self.hlu_set - used_hlu_set
-
-    def _extract_iscsi_uids(self, connector):
-        if 'initiator' not in connector:
-            if self.protocol == 'iSCSI':
-                msg = (_('Host %s has no iSCSI initiator')
-                       % connector['host'])
-                LOG.error(msg)
-                raise exception.VolumeBackendAPIException(data=msg)
-            else:
-                return ()
-        return [connector['initiator']]
-
-    def _extract_fc_uids(self, connector):
-        if 'wwnns' not in connector or 'wwpns' not in connector:
-            if self.protocol == 'FC':
-                msg = _('Host %s has no FC initiators') % connector['host']
-                LOG.error(msg)
-                raise exception.VolumeBackendAPIException(data=msg)
-            else:
-                return ()
-        wwnns = connector['wwnns']
-        wwpns = connector['wwpns']
-        wwns = [(node + port).upper() for node, port in zip(wwnns, wwpns)]
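-        # The re.sub below inserts ':' after every byte except the last;
-        # e.g. (made-up WWN) '20000090FA534CD010000090FA534CD0' becomes
-        # '20:00:00:90:FA:53:4C:D0:10:00:00:90:FA:53:4C:D0'.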
-        return [re.sub(r'\S\S', lambda m: m.group(0) + ':',
-                       wwn, len(wwn) // 2 - 1)
-                for wwn in wwns]
-
-    def _exec_command_setpath(self, initiator_uid, sp, port_id,
-                              ip, host, vport_id=None):
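-        # Builds a naviseccli 'storagegroup -setpath' invocation; iSCSI
-        # additionally carries the virtual port. Illustrative call (values
-        # made up): storagegroup -setpath -gname host1 -hbauid iqn.1993-...
-        # -sp A -spport 4 -spvport 0 -ip 10.0.0.1 -host host1 -o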
-        gname = host
-        if vport_id is not None:
-            cmd_iscsi_setpath = ('storagegroup', '-setpath', '-gname', gname,
-                                 '-hbauid', initiator_uid, '-sp', sp,
-                                 '-spport', port_id, '-spvport', vport_id,
-                                 '-ip', ip, '-host', host, '-o')
-            out, rc = self._client.command_execute(*cmd_iscsi_setpath)
-        else:
-            cmd_fc_setpath = ('storagegroup', '-setpath', '-gname', gname,
-                              '-hbauid', initiator_uid, '-sp', sp,
-                              '-spport', port_id,
-                              '-ip', ip, '-host', host, '-o')
-            out, rc = self._client.command_execute(*cmd_fc_setpath)
-        if rc != 0:
-            LOG.warning(_LW("Failed to register %(itor)s to SP%(sp)s "
-                            "port %(portid)s because: %(msg)s."),
-                        {'itor': initiator_uid,
-                         'sp': sp,
-                         'portid': port_id,
-                         'msg': out})
-
-    def auto_register_with_io_port_filter(self, connector, sgdata,
-                                          io_port_filter):
-        """Automatically register specific IO ports to storage group."""
-        initiator = connector['initiator']
-        ip = connector['ip']
-        host = connector['host']
-        new_white = {'A': [], 'B': []}
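-        # new_white maps each SP to the ports to be registered, e.g.
-        # (illustrative) {'A': [{'SP': 'A', 'Port ID': 4,
-        # 'Virtual Port ID': 0}], 'B': []} for one iSCSI port on SP A.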
-        if self.protocol == 'iSCSI':
-            if sgdata:
-                sp_ports = self._client.get_registered_spport_set(
-                    initiator, host, sgdata['raw_output'])
-                # Normalize io_ports
-                for sp in ('A', 'B'):
-                    new_ports = [pt for pt in self.iscsi_targets[sp]
-                                 if (pt['SP'], pt['Port ID'],
-                                     pt['Virtual Port ID']) not in sp_ports]
-                    new_white[sp] = [{'SP': white['SP'],
-                                      'Port ID': white['Port ID'],
-                                      'Virtual Port ID':
-                                          white['Virtual Port ID']}
-                                     for white in new_ports]
-            else:
-                new_white = self.iscsi_targets
-            self._register_iscsi_initiator(ip, host, [initiator], new_white)
-
-        elif self.protocol == 'FC':
-            wwns = self._extract_fc_uids(connector)
-            ports_list = []
-            if sgdata:
-                for wwn in wwns:
-                    for port in io_port_filter:
-                        if ((port not in ports_list) and
-                                (not re.search(wwn + r'\s+SP\s+' + port[0] +
-                                               r'\s+' + str(port[1]),
-                                               sgdata['raw_output'],
-                                               re.IGNORECASE))):
-                            # Record ports to be added
-                            ports_list.append(port)
-                            new_white[port[0]].append({
-                                'SP': port[0],
-                                'Port ID': port[1]})
-            else:
-                # Need to translate to dict format
-                for fc_port in io_port_filter:
-                    new_white[fc_port[0]].append({'SP': fc_port[0],
-                                                  'Port ID': fc_port[1]})
-            self._register_fc_initiator(ip, host, wwns, new_white)
-        return new_white['A'] or new_white['B']
-
-    def _register_iscsi_initiator(self, ip, host, initiator_uids,
-                                  port_to_register=None):
-        iscsi_targets = (port_to_register if port_to_register else
-                         self.iscsi_targets)
-        for initiator_uid in initiator_uids:
-            LOG.info(_LI('Registering initiator %(in)s with iSCSI '
-                         'targets %(tg)s.'),
-                     {'tg': iscsi_targets,
-                      'in': initiator_uid})
-
-            target_portals_SPA = list(iscsi_targets['A'])
-            target_portals_SPB = list(iscsi_targets['B'])
-
-            for pa in target_portals_SPA:
-                sp = 'A'
-                port_id = pa['Port ID']
-                vport_id = pa['Virtual Port ID']
-                self._exec_command_setpath(initiator_uid, sp, port_id,
-                                           ip, host, vport_id)
-
-            for pb in target_portals_SPB:
-                sp = 'B'
-                port_id = pb['Port ID']
-                vport_id = pb['Virtual Port ID']
-                self._exec_command_setpath(initiator_uid, sp, port_id,
-                                           ip, host, vport_id)
-
-    def _register_fc_initiator(self, ip, host, initiator_uids,
-                               ports_to_register=None):
-        fc_targets = (ports_to_register if ports_to_register else
-                      self._client.get_fc_targets())
-        for initiator_uid in initiator_uids:
-            LOG.info(_LI('Registering initiator %(in)s with FC '
-                         'targets %(tg)s.'),
-                     {'tg': fc_targets,
-                      'in': initiator_uid})
-
-            target_portals_SPA = list(fc_targets['A'])
-            target_portals_SPB = list(fc_targets['B'])
-
-            for pa in target_portals_SPA:
-                sp = 'A'
-                port_id = pa['Port ID']
-                self._exec_command_setpath(initiator_uid, sp, port_id,
-                                           ip, host)
-
-            for pb in target_portals_SPB:
-                sp = 'B'
-                port_id = pb['Port ID']
-                self._exec_command_setpath(initiator_uid, sp, port_id,
-                                           ip, host)
-
-    def _deregister_initiators(self, connector):
-        initiator_uids = []
-        try:
-            if self.protocol == 'iSCSI':
-                initiator_uids = self._extract_iscsi_uids(connector)
-            elif self.protocol == 'FC':
-                initiator_uids = self._extract_fc_uids(connector)
-        except exception.VolumeBackendAPIException:
-            LOG.warning(_LW("Failed to extract initiators of %s, so ignore "
-                            "deregistration operation."),
-                        connector['host'])
-        if initiator_uids:
-            for initiator_uid in initiator_uids:
-                rc, out = self._client.deregister_initiator(initiator_uid)
-                if rc != 0:
-                    LOG.warning(_LW("Failed to deregister %(itor)s "
-                                    "because: %(msg)s."),
-                                {'itor': initiator_uid,
-                                 'msg': out})
-
-    def _filter_unregistered_initiators(self, initiator_uids, sgdata):
-        unregistered_initiators = []
-        if not initiator_uids:
-            return unregistered_initiators
-
-        out = sgdata['raw_output']
-
-        for initiator_uid in initiator_uids:
-            m = re.search(initiator_uid, out)
-            if m is None:
-                unregistered_initiators.append(initiator_uid)
-        return unregistered_initiators
-
-    def auto_register_initiator_to_all(self, connector, sgdata):
-        """Automatically registers available initiators.
-
-        Returns True if any initiator was registered, otherwise False.
-        """
-        initiator_uids = []
-        ip = connector['ip']
-        host = connector['host']
-        if self.protocol == 'iSCSI':
-            initiator_uids = self._extract_iscsi_uids(connector)
-            if sgdata is not None:
-                itors_toReg = self._filter_unregistered_initiators(
-                    initiator_uids,
-                    sgdata)
-            else:
-                itors_toReg = initiator_uids
-
-            if len(itors_toReg) == 0:
-                return False
-
-            LOG.info(_LI('iSCSI Initiators %(in)s of %(ins)s '
-                         'need registration.'),
-                     {'in': itors_toReg,
-                      'ins': initiator_uids})
-            self._register_iscsi_initiator(ip, host, itors_toReg)
-            return True
-
-        elif self.protocol == 'FC':
-            initiator_uids = self._extract_fc_uids(connector)
-            if sgdata is not None:
-                itors_toReg = self._filter_unregistered_initiators(
-                    initiator_uids,
-                    sgdata)
-            else:
-                itors_toReg = initiator_uids
-
-            if len(itors_toReg) == 0:
-                return False
-
-            LOG.info(_LI('FC Initiators %(in)s of %(ins)s '
-                         'need registration.'),
-                     {'in': itors_toReg,
-                      'ins': initiator_uids})
-            self._register_fc_initiator(ip, host, itors_toReg)
-            return True
-
-    def auto_register_initiator(self, connector, sgdata, io_ports_filter=None):
-        """Automatically register available initiators.
-
-        :returns: True if any initiator was registered, otherwise False
-        """
-        if io_ports_filter:
-            return self.auto_register_with_io_port_filter(connector, sgdata,
-                                                          io_ports_filter)
-        else:
-            return self.auto_register_initiator_to_all(connector, sgdata)
-
-    def assure_host_access(self, volume, connector):
-        hostname = connector['host']
-        volumename = volume['name']
-        auto_registration_done = False
-        try:
-            sgdata = self._client.get_storage_group(hostname,
-                                                    poll=False)
-        except exception.EMCVnxCLICmdError as ex:
-            if ex.kwargs["rc"] != 83:
-                raise
-            # Storage Group does not exist yet
-            self.assure_storage_group(hostname)
-            if self.itor_auto_reg:
-                self.auto_register_initiator(connector, None, self.io_ports)
-                auto_registration_done = True
-            else:
-                self._client.connect_host_to_storage_group(hostname, hostname)
-
-            sgdata = self._client.get_storage_group(hostname,
-                                                    poll=True)
-
-        if self.itor_auto_reg and not auto_registration_done:
-            new_registered = self.auto_register_initiator(connector, sgdata,
-                                                          self.io_ports)
-            if new_registered:
-                sgdata = self._client.get_storage_group(hostname,
-                                                        poll=True)
-
-        lun_id = self.get_lun_id(volume)
-        tried = 0
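-        # HLU assignment (summary): pick a random free HLU from the
-        # candidate set and try to add it to the storage group; on
-        # failure, re-poll the group, because a concurrent attach may
-        # already have mapped this LUN, in which case the refreshed
-        # lunmap supplies the HLU.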
-        while tried < self.max_retries:
-            tried += 1
-            lun_map = sgdata['lunmap']
-            used_hlus = lun_map.values()
-            candidate_hlus = self.filter_available_hlu_set(used_hlus)
-            candidate_hlus = list(candidate_hlus)
-
-            if len(candidate_hlus) != 0:
-                hlu = candidate_hlus[random.randint(0,
-                                                    len(candidate_hlus) - 1)]
-                try:
-                    self._client.add_hlu_to_storage_group(
-                        hlu,
-                        lun_id,
-                        hostname)
-
-                    if hostname not in self.hlu_cache:
-                        self.hlu_cache[hostname] = {}
-                    self.hlu_cache[hostname][lun_id] = hlu
-                    return hlu, sgdata
-                except exception.EMCVnxCLICmdError:
-                    LOG.debug("Failed to add the HLU to the storage group, "
-                              "retry %s", tried)
-            elif tried == 1:
-                # The first try may have read stale data,
-                # so a polled retry is needed
-                LOG.debug("Did not find candidate HLUs, retry %s",
-                          tried)
-            else:
-                msg = (_('Reached the limit set by the configuration '
-                         'option max_luns_per_storage_group. '
-                         'Operation to add %(vol)s into '
-                         'Storage Group %(sg)s is rejected.')
-                       % {'vol': volumename, 'sg': hostname})
-                LOG.error(msg)
-                raise exception.VolumeBackendAPIException(data=msg)
-
-            # Need a full poll to get the real-time data
-            # Query the storage group with poll=True for the retry
-            sgdata = self._client.get_storage_group(hostname, poll=True)
-            self.hlu_cache[hostname] = sgdata['lunmap']
-            if lun_id in sgdata['lunmap']:
-                hlu = sgdata['lunmap'][lun_id]
-                return hlu, sgdata
-
-        msg = _("Failed to add %(vol)s into %(sg)s "
-                "after %(retries)s tries.") % \
-            {'vol': volumename,
-             'sg': hostname,
-             'retries': tried}
-        LOG.error(msg)
-        raise exception.VolumeBackendAPIException(data=msg)
-
-    def vnx_get_iscsi_properties(self, volume, connector, hlu, sg_raw_output):
-        storage_group = connector['host']
-        registered_spports = self._client.get_registered_spport_set(
-            connector['initiator'],
-            storage_group,
-            sg_raw_output)
-        targets = self._client.find_available_iscsi_targets(
-            storage_group,
-            registered_spports,
-            self.iscsi_targets)
-        properties = {'target_discovered': False,
-                      'target_iqn': 'unknown',
-                      'target_iqns': None,
-                      'target_portal': 'unknown',
-                      'target_portals': None,
-                      'target_lun': 'unknown',
-                      'target_luns': None,
-                      'volume_id': volume['id']}
-        if targets:
-            properties['target_discovered'] = True
-            properties['target_iqns'] = [t['Port WWN'] for t in targets]
-            properties['target_iqn'] = properties['target_iqns'][0]
-            properties['target_portals'] = [
-                "%s:3260" % t['IP Address'] for t in targets]
-            properties['target_portal'] = properties['target_portals'][0]
-            properties['target_luns'] = [hlu] * len(targets)
-            properties['target_lun'] = hlu
-        else:
-            LOG.error(_LE('Failed to find available iSCSI targets for %s.'),
-                      storage_group)
-
-        LOG.debug('The iSCSI properties for %(host)s are %(properties)s.',
-                  {'host': storage_group,
-                   'properties': properties})
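-        # Illustrative result for one reachable target on SP A with HLU 1
-        # (all values hypothetical):
-        #     {'target_discovered': True,
-        #      'target_iqn': 'iqn.1992-04.com.emc:cx.apm00000000001.a0',
-        #      'target_iqns': ['iqn.1992-04.com.emc:cx.apm00000000001.a0'],
-        #      'target_portal': '192.168.1.2:3260',
-        #      'target_portals': ['192.168.1.2:3260'],
-        #      'target_lun': 1,
-        #      'target_luns': [1],
-        #      'volume_id': '<volume id>'}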
-        return properties
-
-    def vnx_get_fc_properties(self, connector, device_number):
-        fc_properties = {'target_lun': device_number,
-                         'target_discovered': True,
-                         'target_wwn': None}
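-        # Illustrative final shape once target_wwn is filled in below
-        # (hypothetical values):
-        #     {'target_lun': 1,
-        #      'target_discovered': True,
-        #      'target_wwn': ['5006016008600195', '5006016108600195']}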
-        if self.zonemanager_lookup_service is None:
-            fc_properties['target_wwn'] = self.get_login_ports(connector,
-                                                               self.io_ports)
-        else:
-            target_wwns, itor_tgt_map = self.get_initiator_target_map(
-                connector['wwpns'],
-                self.get_status_up_ports(connector, self.io_ports))
-            fc_properties['target_wwn'] = target_wwns
-            fc_properties['initiator_target_map'] = itor_tgt_map
-        return fc_properties
-
-    def initialize_connection(self, volume, connector):
-        """Initializes the connection and returns connection info."""
-        @lockutils.synchronized('emc-connection-' + connector['host'],
-                                "emc-connection-", True)
-        def do_initialize_connection():
-            return self.assure_host_access(
-                volume, connector)
-
-        data = {}
-        if self.protocol == 'iSCSI':
-            (device_number, sg_data) = do_initialize_connection()
-            iscsi_properties = self.vnx_get_iscsi_properties(
-                volume,
-                connector,
-                device_number,
-                sg_data['raw_output']
-            )
-            data = {'driver_volume_type': 'iscsi',
-                    'data': iscsi_properties}
-        elif self.protocol == 'FC':
-            (device_number, sg_data) = do_initialize_connection()
-            fc_properties = self.vnx_get_fc_properties(connector,
-                                                       device_number)
-            fc_properties['volume_id'] = volume['id']
-            data = {'driver_volume_type': 'fibre_channel',
-                    'data': fc_properties}
-
-        return data
-
-    def terminate_connection(self, volume, connector):
-        """Disallow connection from connector."""
-        @lockutils.synchronized('emc-connection-' + connector['host'],
-                                "emc-connection-", True)
-        def do_terminate_connection():
-            hostname = connector['host']
-            volume_name = volume['name']
-            lun_id = self.get_lun_id(volume)
-            lun_map = None
-            conn_info = None
-            if (hostname in self.hlu_cache and
-                    lun_id in self.hlu_cache[hostname] and
-                    not self.destroy_empty_sg and
-                    not self.zonemanager_lookup_service):
-                hlu = self.hlu_cache[hostname][lun_id]
-                self._client.remove_hlu_from_storagegroup(hlu, hostname,
-                                                          poll=True)
-                self.hlu_cache[hostname].pop(lun_id)
-            else:
-                try:
-                    lun_map = self.get_lun_map(hostname)
-                    self.hlu_cache[hostname] = lun_map
-                except exception.EMCVnxCLICmdError as ex:
-                    if ex.kwargs["rc"] == 83:
-                        LOG.warning(_LW("Storage Group %s is not found. "
-                                        "terminate_connection() is "
-                                        "unnecessary."),
-                                    hostname)
-                if lun_map and lun_id in lun_map:
-                    self._client.remove_hlu_from_storagegroup(
-                        lun_map[lun_id], hostname)
-                    lun_map.pop(lun_id)
-                else:
-                    LOG.warning(_LW("Volume %(vol)s was not in Storage Group"
-                                    " %(sg)s."),
-                                {'vol': volume_name, 'sg': hostname})
-
-            if self.protocol == 'FC':
-                conn_info = {'driver_volume_type': 'fibre_channel',
-                             'data': {}}
-                if self.zonemanager_lookup_service and not lun_map:
-                    target_wwns, itor_tgt_map = self.get_initiator_target_map(
-                        connector['wwpns'],
-                        self.get_status_up_ports(connector))
-                    conn_info['data']['initiator_target_map'] = itor_tgt_map
-
-            if self.destroy_empty_sg and not lun_map:
-                try:
-                    LOG.info(_LI("Storage Group %s was empty."), hostname)
-                    self._client.disconnect_host_from_storage_group(
-                        hostname, hostname)
-                    self._client.delete_storage_group(hostname)
-                    if self.itor_auto_dereg:
-                        self._deregister_initiators(connector)
-                except Exception:
-                    LOG.warning(_LW("Failed to destroy Storage Group %s."),
-                                hostname)
-                    try:
-                        self._client.connect_host_to_storage_group(
-                            hostname, hostname)
-                    except Exception:
-                        LOG.warning(_LW("Fail to connect host %(host)s "
-                                        "back to storage group %(sg)s."),
-                                    {'host': hostname, 'sg': hostname})
-            return conn_info
-        return do_terminate_connection()
-
-    def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
-        """Initializes connection for mount point."""
-        smp_name = self._construct_tmp_smp_name(snapshot)
-        self._client.attach_mount_point(smp_name, snapshot.name)
-        volume = {'name': smp_name, 'id': snapshot.id}
-        return self.initialize_connection(volume, connector)
-
-    def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
-        """Disallows connection for mount point."""
-        smp_name = self._construct_tmp_smp_name(snapshot)
-        volume = {'name': smp_name}
-        conn_info = self.terminate_connection(volume, connector)
-        self._client.detach_mount_point(smp_name)
-        return conn_info
-
-    def create_export_snapshot(self, context, snapshot, connector):
-        """Creates mount point for a snapshot."""
-        smp_name = self._construct_tmp_smp_name(snapshot)
-        primary_lun_name = snapshot.volume_name
-        self._client.create_mount_point(primary_lun_name, smp_name)
-        return None
-
-    def remove_export_snapshot(self, context, snapshot):
-        """Removes mount point for a snapshot."""
-        smp_name = self._construct_tmp_smp_name(snapshot)
-        volume = {'name': smp_name, 'provider_location': None,
-                  'volume_type_id': None}
-        self.delete_volume(volume, True)
-
-    def manage_existing_get_size(self, volume, existing_ref):
-        """Returns size of volume to be managed by manage_existing."""
-        if 'source-id' in existing_ref:
-            data = self._client.get_lun_by_id(
-                existing_ref['source-id'],
-                properties=VNXLunProperties.lun_with_pool)
-        elif 'source-name' in existing_ref:
-            data = self._client.get_lun_by_name(
-                existing_ref['source-name'],
-                properties=VNXLunProperties.lun_with_pool)
-        else:
-            reason = _('Reference must contain source-id or source-name key.')
-            raise exception.ManageExistingInvalidReference(
-                existing_ref=existing_ref, reason=reason)
-        target_pool = self.get_target_storagepool(volume)
-        if target_pool and data['pool'] != target_pool:
-            reason = (_('The imported lun %(lun_id)s is in pool %(lun_pool)s '
-                        'which is not managed by the host %(host)s.')
-                      % {'lun_id': data['lun_id'],
-                         'lun_pool': data['pool'],
-                         'host': volume['host']})
-            raise exception.ManageExistingInvalidReference(
-                existing_ref=existing_ref, reason=reason)
-        return data['total_capacity_gb']
-
-    def manage_existing(self, volume, manage_existing_ref):
-        """Imports the existing backend storage object as a volume.
-
-        .. code-block:: none
-
-            manage_existing_ref:{
-                'source-id':<lun id in VNX>
-            }
-
-            or
-
-            manage_existing_ref:{
-                'source-name':<lun name in VNX>
-            }
-
-        """
-        client = self._client
-
-        lun_id = self._get_lun_id(manage_existing_ref)
-
-        specs = self.get_volumetype_extraspecs(volume)
-        LOG.debug('Specs of the volume are: %s.', specs)
-
-        host = volume['host']
-        LOG.debug('Host of the volume is: %s.', host)
-
-        tar_pool = vol_utils.extract_host(volume['host'], 'pool')
-        LOG.debug("Target pool of LUN to manage is: %s.", tar_pool)
-
-        tar_type, tar_tier = self._get_extra_spec_value(specs)
-        vnx_lun = self._get_lun_pool_and_type(lun_id)
-        LOG.debug("LUN to manage: %s.", vnx_lun)
-        LOG.debug("Target info: pool: %(pool)s, type: %(type)s, "
-                  "tier: %(tier)s.", {'pool': tar_pool,
-                                      'type': tar_type,
-                                      'tier': tar_tier})
-
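-        # Decision summary: migrate when the target provisioning type or
-        # target pool differs from the existing LUN; retier only when no
-        # migration is needed; migration is refused if the LUN has snapshots.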
-        do_migration = (tar_type is not None and
-                        tar_type != vnx_lun.provision or
-                        tar_pool != vnx_lun.pool_name)
-        change_tier = (tar_tier is not None and
-                       not do_migration and
-                       tar_tier != vnx_lun.tier)
-
-        reason = None
-        if do_migration:
-            LOG.debug("Need migration during manage.")
-            if client.check_lun_has_snap(lun_id):
-                reason = _('Driver is not able to do retype because '
-                           'the volume (LUN {}) has a snapshot, which '
-                           'forbids migration.').format(lun_id)
-            else:
-                volume['size'] = vnx_lun.capacity
-                moved, empty = self._migrate_volume(volume,
-                                                    tar_pool,
-                                                    specs,
-                                                    src_id=lun_id)
-                if not moved:
-                    reason = _('Storage-assisted migration failed during '
-                               'manage volume.')
-
-        if reason is None and change_tier:
-            LOG.debug('Change LUN tier policy to: %s.', tar_tier)
-            client.modify_lun_tiering_by_id(lun_id, tar_tier)
-
-        if reason is not None:
-            raise exception.ManageExistingVolumeTypeMismatch(reason=reason)
-        else:
-            client.rename_lun(lun_id, volume['name'])
-
-        location = self._build_provider_location(lun_id, 'lun', volume['name'])
-        return {'provider_location': location}
-
-    def _get_lun_pool_and_type(self, lun_id):
-        client = self._client
-        data = client.get_lun_by_id(lun_id,
-                                    VNXLunProperties.get_all(),
-                                    poll=False)
-        lun = VNXLun()
-        lun.update(data)
-        return lun
-
-    def _get_lun_id(self, manage_existing_ref):
-        if 'source-id' in manage_existing_ref:
-            lun_id = manage_existing_ref['source-id']
-        elif 'source-name' in manage_existing_ref:
-            lun_id = self._client.get_lun_by_name(
-                manage_existing_ref['source-name'], poll=False)['lun_id']
-        else:
-            reason = _('Reference must contain source-id or source-name key.')
-            raise exception.ManageExistingInvalidReference(
-                existing_ref=manage_existing_ref, reason=reason)
-        return lun_id
-
-    def get_login_ports(self, connector, io_ports=None):
-        return self._client.get_login_ports(connector['host'],
-                                            connector['wwpns'],
-                                            io_ports)
-
-    def get_status_up_ports(self, connector, io_ports=None):
-        return self._client.get_status_up_ports(connector['host'],
-                                                io_ports=io_ports)
-
-    def get_initiator_target_map(self, fc_initiators, fc_targets):
-        target_wwns = []
-        itor_tgt_map = {}
-
-        if self.zonemanager_lookup_service:
-            mapping = (self.zonemanager_lookup_service
-                       .get_device_mapping_from_network(fc_initiators,
-                                                        fc_targets))
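-            # The lookup service returns one entry per fabric, e.g.
-            # (hypothetical fabric name and WWNs):
-            #     {'fabric_a': {'initiator_port_wwn_list': [...],
-            #                   'target_port_wwn_list': [...]}}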
-            for each in mapping:
-                map_d = mapping[each]
-                target_wwns.extend(map_d['target_port_wwn_list'])
-                for initiator in map_d['initiator_port_wwn_list']:
-                    itor_tgt_map[initiator] = map_d['target_port_wwn_list']
-        return list(set(target_wwns)), itor_tgt_map
-
-    def get_volumetype_extraspecs(self, volume):
-        specs = {}
-
-        type_id = volume['volume_type_id']
-        if type_id is not None:
-            specs = volume_types.get_volume_type_extra_specs(type_id)
-
-        return specs
-
-    def failover_host(self, context, volumes, secondary_id=None):
-        """Fails over the volume back and forth.
-
-        Driver needs to update following info for this volume:
-        2. provider_location: update serial number and lun id
-        """
-        volume_update_list = []
-
-        if secondary_id and secondary_id != 'default':
-            rep_status = 'failed-over'
-            backend_id = (
-                self.configuration.replication_device[0]['backend_id'])
-            if secondary_id != backend_id:
-                msg = (_('Invalid secondary_id specified. '
-                         'Valid backend id is %s.') % backend_id)
-                LOG.error(msg)
-                raise exception.VolumeBackendAPIException(data=msg)
-        else:
-            rep_status = 'enabled'
-
-        def failover_one(volume, new_status):
-            rep_data = json.loads(volume['replication_driver_data'])
-            is_primary = rep_data['is_primary']
-            mirror_name = self._construct_mirror_name(volume)
-            mirror_view = self._get_mirror_view(volume)
-            remote_client = mirror_view._secondary_client
-
-            provider_location = volume['provider_location']
-            try:
-                mirror_view.promote_image(mirror_name)
-            except exception.EMCVnxCLICmdError as ex:
-                msg = _LE(
-                    'Failed to fail over volume %(volume_id)s '
-                    'to %(target)s: %(error)s.')
-                LOG.error(msg, {'volume_id': volume.id,
-                                'target': secondary_id,
-                                'error': ex},)
-                new_status = 'error'
-            else:
-                rep_data.update({'is_primary': not is_primary})
-                # Transfer ownership to secondary_id and
-                # update provider_location field
-                provider_location = self._update_provider_location(
-                    provider_location,
-                    'system', remote_client.get_array_serial()['array_serial'])
-                provider_location = self._update_provider_location(
-                    provider_location,
-                    'id',
-                    six.text_type(
-                        remote_client.get_lun_by_name(volume.name)['lun_id'])
-                )
-            model_update = {'volume_id': volume.id,
-                            'updates':
-                                {'replication_driver_data':
-                                    json.dumps(rep_data),
-                                 'replication_status': new_status,
-                                 'provider_location': provider_location}}
-            volume_update_list.append(model_update)
-        for volume in volumes:
-            if self._is_replication_enabled(volume):
-                failover_one(volume, rep_status)
-            else:
-                volume_update_list.append({
-                    'volume_id': volume.id,
-                    'updates': {'status': 'error'}})
-        return secondary_id, volume_update_list
-
-    def _is_replication_enabled(self, volume):
-        """Return True if replication extra specs is specified.
-
-        If replication_enabled exists and value is '<is> True', return True.
-        If replication exists and value is 'True', return True.
-        Otherwise, return False.
-        """
-        specs = self.get_volumetype_extraspecs(volume)
-        return specs and specs.get('replication_enabled') == '<is> True'
-
-    def setup_lun_replication(self, volume, primary_lun_id,
-                              provisioning, tiering):
-        """Setup replication for LUN, this only happens in primary system."""
-        rep_update = {'replication_driver_data': None,
-                      'replication_status': 'disabled'}
-        metadata_update = {}
-        if self._is_replication_enabled(volume):
-            LOG.debug('Starting to set up replication '
-                      'for volume: %s.', volume.id)
-            lun_size = volume['size']
-            mirror_name = self._construct_mirror_name(volume)
-            pool_name = vol_utils.extract_host(volume.host, 'pool')
-            self._mirror.create_mirror_workflow(
-                mirror_name, primary_lun_id, pool_name,
-                volume.name, lun_size,
-                provisioning, tiering)
-
-            LOG.info(_LI('Successfully setup replication for %s.'), volume.id)
-            rep_update.update({'replication_driver_data':
-                              self.__class__._build_replication_driver_data(
-                                  self.configuration),
-                               'replication_status': 'enabled'})
-            metadata_update = {
-                'system': self.get_array_serial()}
-        return rep_update, metadata_update
-
-    def cleanup_lun_replication(self, volume):
-        if self._is_replication_enabled(volume):
-            LOG.debug('Starting to clean up replication for volume: '
-                      '%s.', volume.id)
-            mirror_name = self._construct_mirror_name(volume)
-            mirror_view = self._get_mirror_view(volume)
-            mv = mirror_view.get_image(mirror_name)
-            if mv:
-                mirror_view.destroy_mirror_view(mirror_name, volume.name, mv)
-
-    def _get_mirror_view(self, volume):
-        """Determines where to build a Mirror View operator."""
-        if volume['replication_driver_data']:
-            rep_data = json.loads(volume['replication_driver_data'])
-            is_primary = rep_data['is_primary']
-        else:
-            is_primary = True
-        if is_primary:
-            # if on primary, promote to configured array in conf
-            mirror_view = self._mirror
-        else:
-            # else promote to array according to volume data
-            mirror_view = self._build_mirror_view(volume)
-        return mirror_view
-
-    @staticmethod
-    def _build_replication_driver_data(configuration):
-        """Builds driver specific data for replication.
-
-        This data will be used by secondary backend to connect
-        primary device.
-        """
-        driver_data = dict()
-        driver_data['san_ip'] = configuration.san_ip
-        driver_data['san_login'] = configuration.san_login
-        driver_data['san_password'] = configuration.san_password
-        driver_data['san_secondary_ip'] = configuration.san_secondary_ip
-        driver_data['storage_vnx_authentication_type'] = (
-            configuration.storage_vnx_authentication_type)
-        driver_data['storage_vnx_security_file_dir'] = (
-            configuration.storage_vnx_security_file_dir)
-        driver_data['is_primary'] = True
-        return json.dumps(driver_data)
-
-    def _build_client(self, active_backend_id=None):
-        """Builds a client pointing to the right VNX."""
-        if not active_backend_id:
-            return CommandLineHelper(self.configuration)
-        else:
-            configuration = self.configuration
-            if not configuration.replication_device:
-                err_msg = (
-                    _('replication_device should be configured '
-                      'on backend: %s.') % configuration.config_group)
-                LOG.error(err_msg)
-                raise exception.VolumeBackendAPIException(data=err_msg)
-            current_target = None
-            for target in configuration.replication_device:
-                if target['backend_id'] == active_backend_id:
-                    current_target = target
-                    break
-            if not current_target:
-                err_msg = (
-                    _('replication_device with backend_id [%s] is missing.')
-                    % active_backend_id)
-                LOG.error(err_msg)
-                raise exception.VolumeBackendAPIException(data=err_msg)
-            target_conf = copy.copy(configuration)
-            for key in self.REPLICATION_KEYS:
-                if key in current_target:
-                    setattr(target_conf, key, current_target[key])
-            return CommandLineHelper(target_conf)
-
-    def _build_mirror_view(self, volume=None):
-        """Builds a client for remote storage device.
-
-        Currently, only support one remote, managed device.
-        :param volume: if volume is not None, then build a remote client
-                       from volume's replication_driver_data.
-        """
-        configuration = self.configuration
-        remote_info = None
-        if volume:
-            if volume['replication_driver_data']:
-                remote_info = json.loads(volume['replication_driver_data'])
-            else:
-                LOG.warning(
-                    _LW('No replication info from this volume: %s.'),
-                    volume.id)
-                return None
-        else:
-            if not configuration.replication_device:
-                LOG.info(_LI('Replication is not configured on backend: %s.'),
-                         configuration.config_group)
-                return None
-            remote_info = configuration.replication_device[0]
-        # Copy info to replica configuration for remote client
-        replica_conf = copy.copy(configuration)
-        for key in self.REPLICATION_KEYS:
-            if key in remote_info:
-                setattr(replica_conf, key, remote_info[key])
-        _remote_client = CommandLineHelper(replica_conf)
-        _mirror = MirrorView(self._client, _remote_client)
-        return _mirror
-
-    def get_pool(self, volume):
-        """Returns the pool name of a volume."""
-
-        data = self._client.get_lun_by_name(volume['name'],
-                                            [VNXLunProperties.LUN_POOL],
-                                            poll=False)
-        return data.get(VNXLunProperties.LUN_POOL.key)
-
-    def unmanage(self, volume):
-        """Unmanages a volume"""
-        pass
-
-    def create_consistencygroup_from_src(self, context, group, volumes,
-                                         cgsnapshot=None, snapshots=None,
-                                         source_cg=None, source_vols=None):
-        """Creates a consistency group from cgsnapshot."""
-        if cgsnapshot and snapshots and not source_cg:
-            return self._create_consisgroup_from_cgsnapshot(
-                group, volumes, cgsnapshot, snapshots)
-        elif source_cg and source_vols and not cgsnapshot:
-            return self._clone_consisgroup(
-                group, volumes, source_cg, source_vols)
-        else:
-            msg = _("create_consistencygroup_from_src supports a "
-                    "cgsnapshot source or a consistency group source. "
-                    "Multiple sources cannot be used.")
-            raise exception.InvalidInput(reason=msg)
-
-    def _clone_consisgroup(self, group, volumes, source_cg, source_vols):
-        temp_cgsnapshot_name = 'temp_snapshot_for_{}'.format(group.id)
-        store_spec = {
-            'group': group,
-            'snapshot': {'id': temp_cgsnapshot_name,
-                         'consistencygroup_id': source_cg.id},
-            'new_snap_name': temp_cgsnapshot_name,
-            'source_lun_id': None,
-            'client': self._client
-        }
-        flow_name = 'clone_consisgroup'
-        snap_build_tasks = [CreateSnapshotTask()]
-
-        volume_model_updates = self._create_cg_from_cgsnap_use_workflow(
-            flow_name, snap_build_tasks, store_spec,
-            volumes, source_vols)
-
-        self._delete_temp_cgsnap(temp_cgsnapshot_name)
-
-        LOG.info(_LI('Consistency group %(cg)s is created successfully.'),
-                 {'cg': group.id})
-
-        return None, volume_model_updates
-
-    def _create_consisgroup_from_cgsnapshot(self, group, volumes,
-                                            cgsnapshot, snapshots):
-        flow_name = 'create_consisgroup_from_cgsnapshot'
-        copied_snapshot_name = 'temp_snapshot_for_%s' % group.id
-        store_spec = {
-            'group': group,
-            'src_snap_name': cgsnapshot['id'],
-            'new_snap_name': copied_snapshot_name,
-            'client': self._client
-        }
-
-        snap_build_tasks = [CopySnapshotTask(),
-                            AllowReadWriteOnSnapshotTask()]
-
-        src_vols = map(lambda snap: snap.volume, snapshots)
-
-        volume_model_updates = self._create_cg_from_cgsnap_use_workflow(
-            flow_name, snap_build_tasks, store_spec, volumes, src_vols)
-
-        self._delete_temp_cgsnap(copied_snapshot_name)
-
-        LOG.info(_LI('Consistency group %(cg)s is created successfully.'),
-                 {'cg': group.id})
-
-        return None, volume_model_updates
-
-    def _delete_temp_cgsnap(self, snap):
-        try:
-            self._client.delete_cgsnapshot(snap)
-        except exception.EMCVnxCLICmdError as ex:
-            LOG.warning(_LW('Delete the temporary cgsnapshot %(name)s failed. '
-                            'This temporary cgsnapshot can be deleted '
-                            'manually. '
-                            'Message: %(msg)s'),
-                        {'name': snap,
-                         'msg': ex.kwargs['out']})
-
-    def _create_cg_from_cgsnap_use_workflow(self, flow_name, snap_build_tasks,
-                                            store_spec, volumes, source_vols):
-        work_flow = linear_flow.Flow(flow_name)
-        work_flow.add(*snap_build_tasks)
-        # Add tasks for each volume in the consistency group
-        lun_id_key_template = 'new_lun_id_%s'
-        lun_data_key_template = 'vol_%s'
-        volume_model_updates = []
-
-        for i, (volume, src_volume) in enumerate(zip(volumes, source_vols)):
-            specs = self.get_volumetype_extraspecs(volume)
-            provisioning, tiering = (
-                self._get_and_validate_extra_specs(specs))
-            pool_name = self.get_target_storagepool(volume, src_volume)
-            base_lun_name = self._get_base_lun_name(src_volume)
-            sub_store_spec = {
-                'volume': volume,
-                'base_lun_name': base_lun_name,
-                'pool_name': pool_name,
-                'dest_vol_name': volume['name'] + '_dest',
-                'volume_size': volume['size'],
-                'provisioning': provisioning,
-                'tiering': tiering,
-                'ignore_pool_full_threshold': self.ignore_pool_full_threshold,
-            }
-            work_flow.add(
-                CreateSMPTask(name="CreateSMPTask%s" % i,
-                              inject=sub_store_spec),
-                AttachSnapTask(name="AttachSnapTask%s" % i,
-                               inject=sub_store_spec),
-                CreateDestLunTask(name="CreateDestLunTask%s" % i,
-                                  provides=lun_data_key_template % i,
-                                  inject=sub_store_spec),
-                MigrateLunTask(name="MigrateLunTask%s" % i,
-                               provides=lun_id_key_template % i,
-                               inject=sub_store_spec,
-                               rebind={'lun_data': lun_data_key_template % i},
-                               wait_for_completion=False))
-
-            volume_model_updates.append({'id': volume['id']})
-            volume_host = volume['host']
-            host = vol_utils.extract_host(volume_host, 'backend')
-            host_and_pool = vol_utils.append_host(host, pool_name)
-            if volume_host != host_and_pool:
-                volume_model_updates[i]['host'] = host_and_pool
-
-        work_flow.add(WaitMigrationsCompleteTask(lun_id_key_template,
-                                                 lun_data_key_template,
-                                                 len(volumes)),
-                      CreateConsistencyGroupTask(lun_id_key_template,
-                                                 len(volumes)))
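-        # Resulting linear flow: snap_build_tasks first, then per member
-        # volume i: CreateSMPTask<i> -> AttachSnapTask<i> ->
-        # CreateDestLunTask<i> -> MigrateLunTask<i> (async), and finally
-        # WaitMigrationsCompleteTask and CreateConsistencyGroupTask over
-        # all members.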
-
-        flow_engine = taskflow.engines.load(work_flow, store=store_spec)
-        flow_engine.run()
-
-        for i, update in enumerate(volume_model_updates):
-            new_lun_id = flow_engine.storage.fetch(lun_id_key_template % i)
-            update['provider_location'] = (
-                self._build_provider_location(new_lun_id))
-        return volume_model_updates
-
-    def get_target_storagepool(self, volume, source_volume=None):
-        pool = vol_utils.extract_host(volume['host'], 'pool')
-
-        # For a newly created volume that is not from a snapshot or a clone,
-        # or when the pool is already a managed pool,
-        # just use the pool selected by the scheduler
-        if not source_volume or pool in self.storage_pools:
-            return pool
-
-        # For a volume created from a snapshot or cloned from another volume,
-        # the pool to use depends on the source volume version. If the source
-        # volume was created by an older version of the driver which doesn't
-        # support the pool scheduler, use the pool where the source volume
-        # is located. Otherwise, use the pool selected by the scheduler.
-        provider_location = source_volume.get('provider_location')
-
-        if (not provider_location or
-                not self.extract_provider_location(provider_location,
-                                                   'version')):
-            LOG.warning(_LW("The source volume is a legacy volume. "
-                            "Create volume in the pool where the source "
-                            "volume %s is created."),
-                        source_volume['name'])
-            data = self._client.get_lun_by_name(source_volume['name'],
-                                                [VNXLunProperties.LUN_POOL],
-                                                poll=False)
-            if data is None:
-                msg = (_("Failed to find storage pool for source volume %s.")
-                       % source_volume['name'])
-                LOG.error(msg)
-                raise exception.VolumeBackendAPIException(data=msg)
-            pool = data[VNXLunProperties.LUN_POOL.key]
-
-        if self.storage_pools and pool not in self.storage_pools:
-            msg = (_("The source volume %s is not in the pool which "
-                     "is managed by the current host.")
-                   % source_volume['name'])
-            LOG.error(msg)
-            raise exception.VolumeBackendAPIException(data=msg)
-        return pool
-
-    def update_volume_stats(self):
-        """Retrieves stats info."""
-        self.update_enabler_in_volume_stats()
-        if self.protocol == 'iSCSI':
-            self.iscsi_targets = self._client.get_iscsi_targets(
-                poll=False, io_ports=self.io_ports)
-
-        properties = [VNXPoolProperties.POOL_FREE_CAPACITY,
-                      VNXPoolProperties.POOL_TOTAL_CAPACITY,
-                      VNXPoolProperties.POOL_STATE,
-                      VNXPoolProperties.POOL_SUBSCRIBED_CAPACITY,
-                      VNXPoolProperties.POOL_FULL_THRESHOLD]
-        if '-FASTCache' in self.enablers:
-            properties.append(VNXPoolProperties.POOL_FAST_CACHE)
-
-        pool_list = self._client.get_pool_list(properties, False)
-
-        if self.storage_pools:
-            pool_list = filter(lambda a: a['pool_name'] in self.storage_pools,
-                               pool_list)
-        pool_feature = (self._client.get_pool_feature_properties(poll=False)
-                        if self.check_max_pool_luns_threshold else None)
-        self.stats['pools'] = [self._build_pool_stats(pool, pool_feature)
-                               for pool in pool_list]
-
-        return self.stats
-
-
-def getEMCVnxCli(prtcl, configuration=None, active_backend_id=None):
-    configuration.append_config_values(loc_opts)
-    return EMCVnxCliBase(prtcl, configuration=configuration,
-                         active_backend_id=active_backend_id)
-
-
-class CreateSMPTask(task.Task):
-    """Creates a snap mount point (SMP) for the source snapshot.
-
-    Reversion strategy: Delete the SMP.
-    """
-    def __init__(self, name=None, inject=None):
-        super(CreateSMPTask, self).__init__(name=name,
-                                            provides='new_smp_data',
-                                            inject=inject)
-
-    def execute(self, client, volume, base_lun_name, *args, **kwargs):
-        LOG.debug('CreateSMPTask.execute')
-        client.create_mount_point(base_lun_name, volume['name'])
-        return client.get_lun_by_name(volume['name'])
-
-    def revert(self, result, client, volume, *args, **kwargs):
-        LOG.debug('CreateSMPTask.revert')
-        if isinstance(result, failure.Failure):
-            return
-        else:
-            LOG.warning(_LW('CreateSMPTask.revert: delete mount point %s'),
-                        volume['name'])
-            client.delete_lun(volume['name'])
-
-
-class AttachSnapTask(task.Task):
-    """Attaches the snapshot to the SMP created before.
-
-    Reversion strategy: Detach the SMP.
-    """
-    def execute(self, client, volume, new_snap_name,
-                *args, **kwargs):
-        LOG.debug('AttachSnapTask.execute')
-        client.attach_mount_point(volume['name'], new_snap_name)
-
-    def revert(self, result, client, volume, *args, **kwargs):
-        LOG.debug('AttachSnapTask.revert')
-        if isinstance(result, failure.Failure):
-            return
-        else:
-            LOG.warning(_LW('AttachSnapTask.revert: detach mount point %s'),
-                        volume['name'])
-            try:
-                client.detach_mount_point(volume['name'])
-            except exception.EMCVnxCLICmdError as ex:
-                with excutils.save_and_reraise_exception() as ctxt:
-                    is_not_smp_err = (
-                        ex.kwargs["rc"] == 163 and
-                        VNXError.has_error("".join(ex.kwargs["out"]),
-                                           VNXError.LUN_IS_NOT_SMP))
-                    ctxt.reraise = not is_not_smp_err
-
-
-class CreateDestLunTask(task.Task):
-    """Creates a destination lun for migration.
-
-    Reversion strategy: Delete the temp destination lun.
-    """
-    def __init__(self, name=None, provides='lun_data', inject=None):
-        super(CreateDestLunTask, self).__init__(name=name,
-                                                provides=provides,
-                                                inject=inject)
-
-    def execute(self, client, pool_name, dest_vol_name, volume_size,
-                provisioning, tiering, ignore_pool_full_threshold,
-                *args, **kwargs):
-        LOG.debug('CreateDestLunTask.execute')
-        data = client.create_lun_with_advance_feature(
-            pool_name, dest_vol_name, volume_size,
-            provisioning, tiering,
-            ignore_thresholds=ignore_pool_full_threshold)
-        return data
-
-    def revert(self, result, client, dest_vol_name, *args, **kwargs):
-        LOG.debug('CreateDestLunTask.revert')
-        if isinstance(result, failure.Failure):
-            return
-        else:
-            LOG.warning(_LW('CreateDestLunTask.revert: delete temp lun %s'),
-                        dest_vol_name)
-            client.delete_lun(dest_vol_name)
-
-
-class MigrateLunTask(task.Task):
-    """Starts a migration between the SMP and the temp lun.
-
-    Reversion strategy: None
-    """
-    def __init__(self, name=None, provides='new_lun_id', inject=None,
-                 rebind=None, wait_for_completion=True):
-        super(MigrateLunTask, self).__init__(name=name,
-                                             provides=provides,
-                                             inject=inject,
-                                             rebind=rebind)
-        self.wait_for_completion = wait_for_completion
-
-    def execute(self, client, new_smp_data, lun_data,
-                rate=VNXMigrationRate.HIGH, *args, **kwargs):
-        LOG.debug('MigrateLunTask.execute')
-        dest_vol_lun_id = lun_data['lun_id']
-        dst_wwn = lun_data['wwn']
-        src_smp_id = new_smp_data['lun_id']
-        LOG.debug('Migrating Mount Point Volume ID: %s', src_smp_id)
-        if self.wait_for_completion:
-            migrated = client.migrate_lun_with_verification(
-                src_smp_id, dest_vol_lun_id, None, dst_wwn, rate)
-        else:
-            migrated = client.migrate_lun_without_verification(
-                src_smp_id, dest_vol_lun_id)
-        if not migrated:
-            msg = (_("Migrate volume failed between source vol %(src)s"
-                     " and dest vol %(dst)s.") %
-                   {'src': src_smp_id, 'dst': dest_vol_lun_id})
-            LOG.error(msg)
-            raise exception.VolumeBackendAPIException(data=msg)
-
-        return src_smp_id
-
-    def revert(self, *args, **kwargs):
-        pass
-
-
-class CreateSnapshotTask(task.Task):
-    """Creates a snapshot/cgsnapshot of a volume.
-
-    Reversion Strategy: Delete the created snapshot/cgsnapshot.
-    """
-    def execute(self, client, snapshot, source_lun_id, *args, **kwargs):
-        LOG.debug('CreateSnapshotTask.execute')
-        # Create temp Snapshot
-        if snapshot['consistencygroup_id']:
-            client.create_cgsnapshot(snapshot['consistencygroup_id'],
-                                     snapshot['id'])
-        else:
-            snapshot_name = snapshot['name']
-            volume_name = snapshot['volume_name']
-            LOG.info(_LI('Create snapshot: %(snapshot)s: volume: %(volume)s'),
-                     {'snapshot': snapshot_name,
-                      'volume': volume_name})
-            client.create_snapshot(source_lun_id, snapshot_name)
-
-    def revert(self, result, client, snapshot, *args, **kwargs):
-        LOG.debug('CreateSnapshotTask.revert')
-        if isinstance(result, failure.Failure):
-            return
-        else:
-            if snapshot['consistencygroup_id']:
-                LOG.warning(_LW('CreateSnapshotTask.revert: '
-                                'delete temp cgsnapshot %s'),
-                            snapshot['id'])
-                client.delete_cgsnapshot(snapshot['id'])
-            else:
-                LOG.warning(_LW('CreateSnapshotTask.revert: '
-                                'delete temp snapshot %s'),
-                            snapshot['name'])
-                client.delete_snapshot(snapshot['name'])
-
-
-class CopySnapshotTask(task.Task):
-    """Task to copy a volume snapshot/consistency group snapshot.
-
-    Reversion Strategy: Delete the copied snapshot/cgsnapshot
-    """
-    def execute(self, client, src_snap_name, new_snap_name, *args, **kwargs):
-        LOG.debug('CopySnapshotTask.execute')
-        client.copy_snapshot(src_snap_name,
-                             new_snap_name)
-
-    def revert(self, result, client, src_snap_name, new_snap_name,
-               *args, **kwargs):
-        LOG.debug('CopySnapshotTask.revert')
-        if isinstance(result, failure.Failure):
-            return
-        else:
-            LOG.warning(_LW('CopySnapshotTask.revert: delete the '
-                            'copied snapshot %(new_name)s of '
-                            '%(source_name)s.'),
-                        {'new_name': new_snap_name,
-                         'source_name': src_snap_name})
-            client.delete_snapshot(new_snap_name)
-
-
-class AllowReadWriteOnSnapshotTask(task.Task):
-    """Task to modify a Snapshot to allow ReadWrite on it."""
-    def execute(self, client, new_snap_name, *args, **kwargs):
-        LOG.debug('AllowReadWriteOnSnapshotTask.execute')
-        client.allow_snapshot_readwrite_and_autodelete(new_snap_name)
-
-
-class CreateConsistencyGroupTask(task.Task):
-    """Task to create a consistency group."""
-    def __init__(self, lun_id_key_template, num_of_members):
-        self.lun_id_keys = sorted(set(
-            [lun_id_key_template % i for i in range(num_of_members)]))
-        super(CreateConsistencyGroupTask, self).__init__(
-            requires=self.lun_id_keys)
-
-    def execute(self, client, group, *args, **kwargs):
-        LOG.debug('CreateConsistencyGroupTask.execute')
-        lun_ids = [kwargs[key] for key in self.lun_id_keys]
-        client.create_consistencygroup(group['id'], lun_ids,
-                                       poll=True)
-
-
-class WaitMigrationsCompleteTask(task.Task):
-    """Task to wait migrations to be completed."""
-    def __init__(self, lun_id_key_template, lun_data_key_template,
-                 num_of_members):
-        self.lun_id_keys = sorted(set(
-            [lun_id_key_template % i for i in range(num_of_members)]))
-        self.lun_data_keys = sorted(set(
-            [lun_data_key_template % i for i in range(num_of_members)]))
-        self.migrate_tuples = [
-            (lun_id_key_template % x, lun_data_key_template % x)
-            for x in range(num_of_members)]
-        super(WaitMigrationsCompleteTask, self).__init__(
-            requires=self.lun_id_keys + self.lun_data_keys)
-
-    def execute(self, client, *args, **kwargs):
-        LOG.debug('WaitMigrationsCompleteTask.execute')
-        for src_id_key, dst_data_key in self.migrate_tuples:
-            src_id = kwargs[src_id_key]
-            dst_data = kwargs[dst_data_key]
-            migrated = client.verify_lun_migration(src_id,
-                                                   dst_data['lun_id'],
-                                                   dst_data['wwn'],
-                                                   dst_name=None)
-            if not migrated:
-                msg = _("Migrate volume %(src)s failed.") % {'src': src_id}
-                raise exception.VolumeBackendAPIException(data=msg)
-
-
-class MirrorView(object):
-    """MirrorView synchronous/asynchronous operations.
-
-    This class supports operations for volume replication.
-    Each operation must ensure commands are sent to the correct
-    target device.
-
-    NOTE: currently, only synchronous mode is supported.
-    """
-    SYNCHRONIZE_MODE = ['sync']
-
-    SYNCHRONIZED_STATE = 'Synchronized'
-    CONSISTENT_STATE = 'Consistent'
-
-    def __init__(self, client, secondary_client, mode='sync'):
-        """Caller needs to initialize MirrorView via this method.
-
-        :param client: client connecting to primary system
-        :param secondary_client: client connecting to secondary system
-        :param mode: only 'sync' is allowed
-        """
-        self._client = client
-        self._secondary_client = secondary_client
-        if mode not in self.SYNCHRONIZE_MODE:
-            msg = _('Invalid synchronize mode specified, allowed '
-                    'mode is %s.') % self.SYNCHRONIZE_MODE
-            raise exception.VolumeBackendAPIException(
-                data=msg)
-        self.mode = '-sync'
-
-    def create_mirror_workflow(self, mirror_name, lun_id, pool_name,
-                               lun_name, lun_size, provisioning, tiering):
-        """Creates mirror view for LUN."""
-        store_spec = {'mirror': self}
-        work_flow = self._get_create_mirror_flow(
-            mirror_name, lun_id, pool_name,
-            lun_name, lun_size, provisioning, tiering)
-        flow_engine = taskflow.engines.load(work_flow, store=store_spec)
-        flow_engine.run()
-
-    def destroy_mirror_view(self, mirror_name, lun_name, mv=None):
-        self.fracture_image(mirror_name, mv)
-        self.remove_image(mirror_name, mv)
-        self.destroy_mirror(mirror_name)
-        self.delete_secondary_lun(lun_name)
-
-    def _get_create_mirror_flow(self, mirror_name, lun_id, pool_name,
-                                lun_name, lun_size, provisioning, tiering):
-        """Gets mirror create flow."""
-        flow_name = 'create_mirror_view'
-        work_flow = linear_flow.Flow(flow_name)
-        work_flow.add(MirrorCreateTask(mirror_name, lun_id),
-                      MirrorSecLunCreateTask(pool_name, lun_name, lun_size,
-                                             provisioning, tiering),
-                      MirrorAddImageTask(mirror_name))
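-        # The flow is strictly linear:
-        #     MirrorCreateTask -> MirrorSecLunCreateTask ->
-        #     MirrorAddImageTask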
-        return work_flow
-
-    def create_mirror(self, name, primary_lun_id, poll=False):
-        command_create = ('mirror', '-sync', '-create',
-                          '-name', name, '-lun', primary_lun_id,
-                          '-usewriteintentlog', '-o')
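-        # Roughly the resulting CLI invocation (illustrative), as issued
-        # through the CommandLineHelper:
-        #     naviseccli mirror -sync -create -name <name>
-        #                -lun <primary_lun_id> -usewriteintentlog -o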
-        out, rc = self._client.command_execute(*command_create, poll=poll)
-        if rc != 0 or out.strip():
-            if VNXError.has_error(out, VNXError.MIRROR_IN_USE):
-                LOG.warning(_LW('MirrorView already created, mirror name '
-                                '%(name)s. Message: %(msg)s'),
-                            {'name': name, 'msg': out})
-            else:
-                self._client._raise_cli_error(cmd=command_create,
-                                              rc=rc,
-                                              out=out)
-        return rc
-
-    def create_secondary_lun(self, pool_name, lun_name, lun_size,
-                             provisioning,
-                             tiering, poll=False):
-        """Creates secondary LUN in remote device."""
-        data = self._secondary_client.create_lun_with_advance_feature(
-            pool=pool_name,
-            name=lun_name,
-            size=lun_size,
-            provisioning=provisioning,
-            tiering=tiering,
-            consistencygroup_id=None,
-            ignore_thresholds=False,
-            poll=poll)
-        return data['lun_id']
-
-    def delete_secondary_lun(self, lun_name):
-        """Deletes secondary LUN in remote device."""
-        self._secondary_client.delete_lun(lun_name)
-
-    def destroy_mirror(self, name, poll=False):
-        command_destroy = ('mirror', '-sync', '-destroy',
-                           '-name', name, '-force', '-o')
-        out, rc = self._client.command_execute(*command_destroy, poll=poll)
-        if rc != 0 or out.strip():
-            if VNXError.has_error(out, VNXError.MIRROR_NOT_FOUND):
-                LOG.warning(_LW('MirrorView %(name)s was already deleted. '
-                                'Message: %(msg)s'),
-                            {'name': name, 'msg': out})
-            else:
-                self._client._raise_cli_error(cmd=command_destroy,
-                                              rc=rc,
-                                              out=out)
-        return out, rc
-
-    def add_image(self, name, secondary_lun_id, poll=False):
-        """Adds secondary image to mirror."""
-        secondary_array_ip = self._secondary_client.active_storage_ip
-        command_add = ('mirror', '-sync', '-addimage',
-                       '-name', name, '-arrayhost', secondary_array_ip,
-                       '-lun', secondary_lun_id, '-recoverypolicy', 'auto',
-                       '-syncrate', 'high')
-        out, rc = self._client.command_execute(*command_add, poll=poll)
-        if rc != 0:
-            self._client._raise_cli_error(cmd=command_add,
-                                          rc=rc,
-                                          out=out)
-        return out, rc
-
-    def remove_image(self, name, mirror_view=None, poll=False):
-        """Removes secondary image(s) from mirror."""
-        if not mirror_view:
-            mirror_view = self.get_image(name, poll=True)
-        image_uid = self._extract_image_uid(mirror_view, 'secondary')
-        command_remove = ('mirror', '-sync', '-removeimage',
-                          '-name', name, '-imageuid', image_uid,
-                          '-o')
-        out, rc = self._client.command_execute(*command_remove, poll=poll)
-        if rc != 0:
-            self._client._raise_cli_error(cmd=command_remove,
-                                          rc=rc,
-                                          out=out)
-        return out, rc
-
-    def get_image(self, name, use_secondary=False, poll=False):
-        """Returns mirror view properties.
-
-        :param name: mirror view name
-        :param use_secondary: get image info from secondary or not
-        :return: dict of mirror view properties as below:
-
-        .. code-block:: python
-
-            {
-                'MirrorView Name': 'mirror name',
-                'MirrorView Description': 'some description here',
-                {...},
-                'images': [
-                    {
-                        'Image UID': '50:06:01:60:88:60:08:0F',
-                        'Is Image Primary': 'YES',
-                        {...}
-                        'Preferred SP': 'A'
-                    },
-                    {
-                        'Image UID': '50:06:01:60:88:60:03:BA',
-                        'Is Image Primary': 'NO',
-                        {...},
-                        'Synchronizing Progress(%)': 100
-                    }
-                ]
-            }
-
-        """
-        if use_secondary:
-            client = self._secondary_client
-        else:
-            client = self._client
-        command_get = ('mirror', '-sync', '-list', '-name', name)
-        out, rc = client.command_execute(
-            *command_get, poll=poll)
-        if rc != 0:
-            if VNXError.has_error(out, VNXError.MIRROR_NOT_FOUND):
-                LOG.warning(_LW('Getting MirrorView %(name)s failed.'
-                                ' Message: %(msg)s.'),
-                            {'name': name, 'msg': out})
-                return None
-            else:
-                client._raise_cli_error(cmd=command_get,
-                                        rc=rc,
-                                        out=out)
-        mvs = {}
-        mvs_info, images_info = re.split('Images:\s*', out)
-        for line in mvs_info.strip('\n').split('\n'):
-            k, v = re.split(':\s*', line, 1)
-            mvs[k] = v if not v or re.search('\D', v) else int(v)
-        mvs['images'] = []
-        for image_raw in re.split('\n\n+', images_info.strip('\n')):
-            image = {}
-            for line in image_raw.split('\n'):
-                k, v = re.split(':\s*', line, 1)
-                image[k] = v if not v or re.search('\D', v) else int(v)
-            mvs['images'].append(image)
-        return mvs
-
-    def fracture_image(self, name, mirror_view=None, poll=False):
-        """Stops the synchronization between LUNs."""
-        if not mirror_view:
-            mirror_view = self.get_image(name, poll=True)
-        image_uid = self._extract_image_uid(mirror_view, 'secondary')
-        command_fracture = ('mirror', '-sync', '-fractureimage', '-name', name,
-                            '-imageuid', image_uid, '-o')
-        out, rc = self._client.command_execute(*command_fracture, poll=poll)
-        if rc != 0:
-            self._client._raise_cli_error(cmd=command_fracture,
-                                          rc=rc,
-                                          out=out)
-        return out, rc
-
-    def sync_image(self, name, mirror_view=None, poll=False):
-        """Synchronizes the secondary image and wait for completion."""
-        if not mirror_view:
-            mirror_view = self.get_image(name, poll=True)
-        image_state = mirror_view['images'][1]['Image State']
-        if image_state == self.SYNCHRONIZED_STATE:
-            LOG.debug('replication %(name)s is already in %(state)s.',
-                      {'name': name, 'state': image_state})
-            return "", 0
-        image_uid = self._extract_image_uid(mirror_view, 'secondary')
-        command_sync = ('mirror', '-sync', '-syncimage', '-name', name,
-                        '-imageuid', image_uid, '-o')
-        out, rc = self._client.command_execute(*command_sync, poll=poll)
-        if rc != 0:
-            self._client._raise_cli_error(cmd=command_sync,
-                                          rc=rc,
-                                          out=out)
-
-        def inner():
-            tmp_mirror = self.get_image(name)
-            for image in tmp_mirror['images']:
-                if 'Image State' in image:
-                    # Only secondary image contains this field
-                    return (image['Image State'] == self.SYNCHRONIZED_STATE and
-                            image['Synchronizing Progress(%)'] == 100)
-        self._client._wait_for_a_condition(inner)
-        return out, rc
-
-    def promote_image(self, name, mirror_view=None, poll=False):
-        """Promotes the secondary image on secondary system.
-
-        NOTE: Only when "Image State" in 'Consistent' or 'Synchnonized'
-              can be promoted.
-        """
-        if not mirror_view:
-            mirror_view = self.get_image(name, use_secondary=True, poll=True)
-        image_uid = self._extract_image_uid(mirror_view, 'secondary')
-
-        command_promote = ('mirror', '-sync', '-promoteimage', '-name', name,
-                           '-imageuid', image_uid, '-o')
-        out, rc = self._secondary_client.command_execute(
-            *command_promote, poll=poll)
-        if rc != 0:
-            self._client._raise_cli_error(command_promote, rc, out)
-        return out, rc
-
-    def _extract_image_uid(self, mirror_view, image_type='primary'):
-        """Returns primary or secondary image uid from mirror objects.
-
-        :param mirror_view: parsed mirror view.
-        :param image_type: 'primary' or 'secondary'.
-        """
-        images = mirror_view['images']
-        for image in images:
-            is_primary = image['Is Image Primary']
-            if image_type == 'primary' and is_primary == 'YES':
-                image_uid = image['Image UID']
-                break
-            if image_type == 'secondary' and is_primary == 'NO':
-                image_uid = image['Image UID']
-                break
-        return image_uid
-
-
-class MirrorCreateTask(task.Task):
-    """Creates a MirrorView with primary lun for replication.
-
-    Reversion strategy: Destroy the created MirrorView.
-    """
-    def __init__(self, mirror_name, primary_lun_id, **kwargs):
-        super(MirrorCreateTask, self).__init__()
-        self.mirror_name = mirror_name
-        self.primary_lun_id = primary_lun_id
-
-    def execute(self, mirror, *args, **kwargs):
-        LOG.debug('%s.execute', self.__class__.__name__)
-        mirror.create_mirror(self.mirror_name, self.primary_lun_id, poll=True)
-
-    def revert(self, result, mirror, *args, **kwargs):
-        method_name = '%s.revert' % self.__class__.__name__
-        LOG.debug(method_name)
-        if isinstance(result, failure.Failure):
-            return
-        else:
-            LOG.warning(_LW('%(method)s: destroying mirror '
-                            'view %(name)s.'),
-                        {'method': method_name,
-                         'name': self.mirror_name})
-            mirror.destroy_mirror(self.mirror_name, poll=True)
-
-
-class MirrorSecLunCreateTask(task.Task):
-    """Creates a secondary LUN on secondary system.
-
-    Reversion strategy: Delete secondary LUN.
-    """
-    def __init__(self, pool_name, lun_name, lun_size, provisioning, tiering):
-        super(MirrorSecLunCreateTask, self).__init__(provides='sec_lun_id')
-        self.pool_name = pool_name
-        self.lun_name = lun_name
-        self.lun_size = lun_size
-        self.provisioning = provisioning
-        self.tiering = tiering
-
-    def execute(self, mirror, *args, **kwargs):
-        LOG.debug('%s.execute', self.__class__.__name__)
-        sec_lun_id = mirror.create_secondary_lun(
-            self.pool_name, self.lun_name, self.lun_size,
-            self.provisioning, self.tiering)
-        return sec_lun_id
-
-    def revert(self, result, mirror, *args, **kwargs):
-        method_name = '%s.revert' % self.__class__.__name__
-        LOG.debug(method_name)
-        if isinstance(result, failure.Failure):
-            return
-        else:
-            LOG.warning(_LW('%(method)s: destroying secondary LUN '
-                            '%(name)s.'),
-                        {'method': method_name,
-                         'name': self.lun_name})
-            mirror.delete_secondary_lun(self.lun_name)
-
-
-class MirrorAddImageTask(task.Task):
-    """Add the secondary image to MirrorView.
-
-    Reversion strategy: Remove the secondary image.
-    """
-    def __init__(self, mirror_name):
-        super(MirrorAddImageTask, self).__init__()
-        self.mirror_name = mirror_name
-
-    def execute(self, mirror, sec_lun_id, *args, **kwargs):
-        LOG.debug('%s.execute', self.__class__.__name__)
-        mirror.add_image(self.mirror_name, sec_lun_id, poll=True)
-
-    def revert(self, result, mirror, *args, **kwargs):
-        method_name = '%s.revert' % self.__class__.__name__
-        LOG.debug(method_name)
-        if isinstance(result, failure.Failure):
-            return
-        else:
-            LOG.warning(_LW('%(method)s: removing secondary image '
-                            'from %(name)s.'),
-                        {'method': method_name,
-                         'name': self.mirror_name})
-            mirror.remove_image(self.mirror_name, poll=True)
diff --git a/cinder/volume/drivers/emc/vnx/__init__.py b/cinder/volume/drivers/emc/vnx/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/cinder/volume/drivers/emc/vnx/adapter.py b/cinder/volume/drivers/emc/vnx/adapter.py
new file mode 100644
index 00000000000..bfef035b91f
--- /dev/null
+++ b/cinder/volume/drivers/emc/vnx/adapter.py
@@ -0,0 +1,1466 @@
+# Copyright (c) 2016 EMC Corporation, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import math
+import random
+import re
+
+from oslo_log import log as logging
+from oslo_utils import importutils
+import six
+
+storops = importutils.try_import('storops')
+if storops:
+    from storops import exception as storops_ex
+
+from cinder import exception
+from cinder.i18n import _, _LI, _LE, _LW
+from cinder.objects import fields
+from cinder.volume.drivers.emc.vnx import client
+from cinder.volume.drivers.emc.vnx import common
+from cinder.volume.drivers.emc.vnx import taskflows as emc_taskflow
+from cinder.volume.drivers.emc.vnx import utils
+from cinder.zonemanager import utils as zm_utils
+
+
+LOG = logging.getLogger(__name__)
+
+
+class CommonAdapter(object):
+
+    VERSION = '08.00.00'
+    VENDOR = 'EMC'
+
+    def __init__(self, configuration, active_backend_id):
+        self.config = configuration
+        self.active_backend_id = active_backend_id
+        self.client = None
+        self.protocol = None
+        self.serial_number = None
+        self.mirror_view = None
+        self.storage_pools = None
+        self.max_retries = 5
+        self.allowed_ports = None
+        self.force_delete_lun_in_sg = None
+        self.max_over_subscription_ratio = None
+        self.ignore_pool_full_threshold = None
+        self.reserved_percentage = None
+        self.destroy_empty_sg = None
+        self.itor_auto_dereg = None
+
+    def do_setup(self):
+        self._normalize_config()
+        self.client = client.Client(
+            self.config.san_ip,
+            self.config.san_login,
+            self.config.san_password,
+            self.config.storage_vnx_authentication_type,
+            self.config.naviseccli_path,
+            self.config.storage_vnx_security_file_dir)
+        # Replication related
+        self.mirror_view = self.build_mirror_view(self.config, True)
+        self.serial_number = self.client.get_serial()
+        self.storage_pools = self.parse_pools()
+        self.force_delete_lun_in_sg = (
+            self.config.force_delete_lun_in_storagegroup)
+        self.max_over_subscription_ratio = (
+            self.config.max_over_subscription_ratio)
+        self.ignore_pool_full_threshold = (
+            self.config.ignore_pool_full_threshold)
+        self.reserved_percentage = self.config.reserved_percentage
+        self.protocol = self.config.storage_protocol
+        self.destroy_empty_sg = self.config.destroy_empty_storage_group
+        self.itor_auto_dereg = self.config.initiator_auto_deregistration
+        self.set_extra_spec_defaults()
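+        # Illustrative backend options consumed above (hypothetical values;
+        # the option names appear in this method):
+        #   san_ip = 192.168.1.50
+        #   san_login = sysadmin
+        #   san_password = sysadmin
+        #   storage_vnx_authentication_type = global
+        #   naviseccli_path = /opt/Navisphere/bin/naviseccli
+        #   storage_protocol = iscsi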
+
+    def _normalize_config(self):
+        # Check option `naviseccli_path`.
+        # Set to None (then pass to storops) if it is not set or set to an
+        # empty string.
+        naviseccli_path = self.config.naviseccli_path
+        if naviseccli_path is None or len(naviseccli_path.strip()) == 0:
+            LOG.warning(_LW('[%(group)s] naviseccli_path is not set or set to '
+                            'an empty string. None will be passed into '
+                            'storops.'), {'group': self.config.config_group})
+            self.config.naviseccli_path = None
+
+        # Check option `storage_vnx_pool_names`.
+        # Raise error if it is set to an empty list.
+        pool_names = self.config.storage_vnx_pool_names
+        if pool_names is not None:
+            # Filter out the empty string in the list.
+            pool_names = [name.strip()
+                          for name in filter(lambda x: len(x.strip()) != 0,
+                                             pool_names)]
+            if len(pool_names) == 0:
+                raise exception.InvalidConfigurationValue(
+                    option='[{group}] storage_vnx_pool_names'.format(
+                        group=self.config.config_group),
+                    value=pool_names)
+            self.config.storage_vnx_pool_names = pool_names
+
+        # Check option `io_port_list`.
+        # Raise error if it is set to an empty list.
+        io_port_list = self.config.io_port_list
+        if io_port_list is not None:
+            io_port_list = [port.strip().upper()
+                            for port in filter(lambda x: len(x.strip()) != 0,
+                                               io_port_list)]
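+            # Illustrative: ['a-1-0', ' b-3 ', ''] is normalized to
+            # ['A-1-0', 'B-3'].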
+            if len(io_port_list) == 0:
+                # An io_port_list that is effectively empty is invalid:
+                # it would mean no port could ever be used for registration.
+                raise exception.InvalidConfigurationValue(
+                    option='[{group}] io_port_list'.format(
+                        group=self.config.config_group),
+                    value=io_port_list)
+            self.config.io_port_list = io_port_list
+
+        if self.config.ignore_pool_full_threshold:
+            LOG.warning(_LW('[%(group)s] ignore_pool_full_threshold: True. '
+                            'LUN creation will still be forced even if the '
+                            'pool full threshold is exceeded.'),
+                        {'group': self.config.config_group})
+
+        if self.config.destroy_empty_storage_group:
+            LOG.warning(_LW('[%(group)s] destroy_empty_storage_group: True. '
+                            'Empty storage group will be deleted after volume '
+                            'is detached.'),
+                        {'group': self.config.config_group})
+
+        if not self.config.initiator_auto_registration:
+            LOG.info(_LI('[%(group)s] initiator_auto_registration: False. '
+                         'Initiator auto registration is not enabled. '
+                         'Please register initiator manually.'),
+                     {'group': self.config.config_group})
+
+        if self.config.force_delete_lun_in_storagegroup:
+            LOG.warning(_LW('[%(group)s] force_delete_lun_in_storagegroup: '
+                            'True. LUNs will be deleted even when they are '
+                            'attached to a storage group.'),
+                        {'group': self.config.config_group})
+
+    def _build_port_str(self, port):
+        raise NotImplementedError()
+
+    def validate_ports(self, all_ports, ports_whitelist):
+        # `ports_whitelist` has already been through _normalize_config, so
+        # it is either None or a valid list whose items are stripped and
+        # upper-cased.
+        result_ports = None
+        if ports_whitelist is None:
+            result_ports = all_ports
+        else:
+            # The white list was already stripped and upper-cased in
+            # _normalize_config; a set enables fast membership tests and
+            # removes duplicates.
+            port_strs_configed = set(ports_whitelist)
+            # For iSCSI port, the format is 'A-1-1',
+            # while for FC, it is 'A-2'.
+            valid_port_map = {self._build_port_str(port): port
+                              for port in all_ports}
+
+            invalid_port_strs = port_strs_configed - set(valid_port_map.keys())
+            if invalid_port_strs:
+                msg = (_('[%(group)s] Invalid %(protocol)s ports %(port)s '
+                         'specified for io_port_list.') % {
+                             'group': self.config.config_group,
+                             'protocol': self.config.storage_protocol,
+                             'port': ','.join(invalid_port_strs)})
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+            result_ports = [valid_port_map[port_str]
+                            for port_str in port_strs_configed]
+
+        if not result_ports:
+            raise exception.VolumeBackendAPIException(
+                data=_('No valid ports.'))
+        return result_ports
+
+    def set_extra_spec_defaults(self):
+        provision_default = storops.VNXProvisionEnum.THICK
+        tier_default = None
+        if self.client.is_fast_enabled():
+            tier_default = storops.VNXTieringEnum.HIGH_AUTO
+        common.ExtraSpecs.set_defaults(provision_default, tier_default)
+
+    def create_volume(self, volume):
+        """Creates a EMC volume."""
+        volume_size = volume['size']
+        volume_name = volume['name']
+
+        volume_metadata = utils.get_metadata(volume)
+        pool = utils.get_pool_from_host(volume.host)
+        specs = common.ExtraSpecs.from_volume(volume)
+
+        provision = specs.provision
+        tier = specs.tier
+
+        volume_metadata['snapcopy'] = 'False'
+        LOG.info(_LI('Create Volume: %(volume)s Size: %(size)s '
+                     'pool: %(pool)s '
+                     'provision: %(provision)s '
+                     'tier: %(tier)s '),
+                 {'volume': volume_name,
+                  'size': volume_size,
+                  'pool': pool,
+                  'provision': provision,
+                  'tier': tier})
+
+        lun = self.client.create_lun(
+            pool, volume_name, volume_size,
+            provision, tier, volume.consistencygroup_id,
+            ignore_thresholds=self.config.ignore_pool_full_threshold)
+        location = self._build_provider_location(
+            lun_type='lun',
+            lun_id=lun.lun_id,
+            base_lun_name=volume.name)
+        # Setup LUN Replication/MirrorView between devices.
+        # Secondary LUN will inherit properties from primary LUN.
+        rep_update = self.setup_lun_replication(
+            volume, lun.lun_id)
+        model_update = {'provider_location': location,
+                        'metadata': volume_metadata}
+        model_update.update(rep_update)
+        return model_update
+
+    def retype(self, ctxt, volume, new_type, diff, host):
+        """Changes volume from one type to another."""
+        new_specs = common.ExtraSpecs.from_volume_type(new_type)
+        new_specs.validate(self.client.get_vnx_enabler_status())
+        lun = self.client.get_lun(name=volume.name)
+        if volume.volume_type_id:
+            old_specs = common.ExtraSpecs.from_volume(volume)
+        else:
+            # Get extra specs from the LUN properties when the lun
+            # has no volume type.
+            utils.update_res_without_poll(lun)
+            old_specs = common.ExtraSpecs.from_lun(lun)
+        old_provision = old_specs.provision
+        old_tier = old_specs.tier
+        need_migration = utils.retype_need_migration(
+            volume, old_provision, new_specs.provision, host)
+        turn_on_compress = utils.retype_need_turn_on_compression(
+            old_provision, new_specs.provision)
+        change_tier = utils.retype_need_change_tier(
+            old_tier, new_specs.tier)
+
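+        # A rough decision sketch (semantics of the utils helpers assumed):
+        # a provisioning change generally requires storage-assisted
+        # migration, except thin -> compressed, which only needs compression
+        # enabled on the existing LUN; a tiering-only change is applied in
+        # place.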
+        if need_migration or turn_on_compress:
+            if self.client.lun_has_snapshot(lun):
+                LOG.debug('Driver is not able to do retype because the volume '
+                          '%s has a snapshot.',
+                          volume.id)
+                return False
+
+        if need_migration:
+            LOG.debug('Driver needs to use storage-assisted migration '
+                      'to retype the volume.')
+            return self._migrate_volume(volume, host, new_specs)
+        if turn_on_compress:
+            # Turn on compression feature on the volume
+            self.client.enable_compression(lun)
+        if change_tier:
+            # Modify lun to change tiering policy
+            lun.tier = new_specs.tier
+        return True
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Constructs a work flow to create a volume from snapshot.
+
+        :param volume: new volume
+        :param snapshot: base snapshot
+        This flow will do the following:
+
+        1. Create a snap mount point (SMP) for the snapshot.
+        2. Attach the snapshot to the SMP created in the first step.
+        3. Create a temporary LUN to prepare for migration.
+           (Skipped if snapcopy='true')
+        4. Start a migration between the SMP and the temp LUN.
+           (Skipped if snapcopy='true')
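+
+        Snapcopy is requested through volume metadata (an assumption
+        carried over from the legacy CLI driver), e.g. a volume created
+        with metadata {'snapcopy': 'True'} takes the fast path; otherwise
+        the data is fully migrated to a real LUN.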
+        """
+        volume_metadata = utils.get_metadata(volume)
+        pool = utils.get_pool_from_host(volume.host)
+
+        specs = common.ExtraSpecs.from_volume(volume)
+        provision = specs.provision
+        tier = specs.tier
+        base_lun_name = utils.get_base_lun_name(snapshot.volume)
+        rep_update = dict()
+        if utils.snapcopy_enabled(volume):
+            new_lun_id = emc_taskflow.fast_create_volume_from_snapshot(
+                client=self.client,
+                snap_name=snapshot.name,
+                new_snap_name=utils.construct_snap_name(volume),
+                lun_name=volume.name,
+                base_lun_name=base_lun_name,
+                pool_name=pool)
+            location = self._build_provider_location(
+                lun_type='smp',
+                lun_id=new_lun_id,
+                base_lun_name=base_lun_name)
+            volume_metadata['snapcopy'] = 'True'
+        else:
+            new_lun_id = emc_taskflow.create_volume_from_snapshot(
+                client=self.client,
+                snap_name=snapshot.name,
+                lun_name=volume.name,
+                lun_size=volume.size,
+                base_lun_name=base_lun_name,
+                pool_name=pool,
+                provision=provision,
+                tier=tier)
+            location = self._build_provider_location(
+                lun_type='lun',
+                lun_id=new_lun_id,
+                base_lun_name=volume.name)
+            volume_metadata['snapcopy'] = 'False'
+            rep_update = self.setup_lun_replication(volume, new_lun_id)
+
+        model_update = {'provider_location': location,
+                        'metadata': volume_metadata}
+        model_update.update(rep_update)
+        return model_update
+
+    def create_cloned_volume(self, volume, src_vref):
+        """Creates a clone of the specified volume."""
+        volume_metadata = utils.get_metadata(volume)
+        pool = utils.get_pool_from_host(volume.host)
+
+        specs = common.ExtraSpecs.from_volume(volume)
+        provision = specs.provision
+        tier = specs.tier
+        base_lun_name = utils.get_base_lun_name(src_vref)
+
+        source_lun_id = self.client.get_lun_id(src_vref)
+        snap_name = utils.construct_snap_name(volume)
+        rep_update = dict()
+        if utils.snapcopy_enabled(volume):
+            # snapcopy feature enabled
+            new_lun_id = emc_taskflow.fast_create_cloned_volume(
+                client=self.client,
+                snap_name=snap_name,
+                lun_id=source_lun_id,
+                lun_name=volume.name,
+                base_lun_name=base_lun_name
+            )
+            location = self._build_provider_location(
+                lun_type='smp',
+                lun_id=new_lun_id,
+                base_lun_name=base_lun_name)
+        else:
+            new_lun_id = emc_taskflow.create_cloned_volume(
+                client=self.client,
+                snap_name=snap_name,
+                lun_id=source_lun_id,
+                lun_name=volume.name,
+                lun_size=volume.size,
+                base_lun_name=base_lun_name,
+                pool_name=pool,
+                provision=provision,
+                tier=tier)
+            self.client.delete_snapshot(snap_name)
+            # After migration, volume's base lun is itself
+            location = self._build_provider_location(
+                lun_type='lun',
+                lun_id=new_lun_id,
+                base_lun_name=volume.name)
+            volume_metadata['snapcopy'] = 'False'
+            rep_update = self.setup_lun_replication(volume, new_lun_id)
+
+        model_update = {'provider_location': location,
+                        'metadata': volume_metadata}
+        model_update.update(rep_update)
+        return model_update
+
+    def migrate_volume(self, context, volume, host):
+        """Leverage the VNX on-array migration functionality.
+
+        This method is invoked at the source backend.
+        """
+        specs = common.ExtraSpecs.from_volume(volume)
+        return self._migrate_volume(volume, host, specs)
+
+    def _migrate_volume(self, volume, host, extra_specs):
+        """Migrates volume.
+
+        :param extra_specs: Instance of ExtraSpecs. The new volume will be
+            changed to align with the new extra specs.
+        """
+        r = utils.validate_storage_migration(
+            volume, host, self.serial_number, self.protocol)
+        if not r:
+            return r, None
+        rate = utils.get_migration_rate(volume)
+
+        new_pool = utils.get_pool_from_host(host['host'])
+        lun_id = self.client.get_lun_id(volume)
+        lun_name = volume.name
+        provision = extra_specs.provision
+        tier = extra_specs.tier
+
+        emc_taskflow.run_migration_taskflow(
+            self.client, lun_id, lun_name, volume.size,
+            new_pool, provision, tier, rate)
+
+        # An SMP becomes a LUN after migration.
+        if utils.is_volume_smp(volume):
+            self.client.delete_snapshot(
+                utils.construct_snap_name(volume))
+        volume_metadata = utils.get_metadata(volume)
+        pl = self._build_provider_location(
+            lun_type='lun',
+            lun_id=lun_id,
+            base_lun_name=volume.name)
+        volume_metadata['snapcopy'] = 'False'
+        model_update = {'provider_location': pl,
+                        'metadata': volume_metadata}
+        return True, model_update
+
+    def create_consistencygroup(self, context, group):
+        cg_name = group.id
+        utils.validate_cg_type(group)
+        model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
+        self.client.create_consistency_group(cg_name=cg_name)
+        return model_update
+
+    def delete_consistencygroup(self, context, group, volumes):
+        """Deletes a consistency group."""
+        cg_name = group.id
+
+        model_update = {}
+        volumes_model_update = []
+        model_update['status'] = group.status
+        LOG.info(_LI('Deleting consistency group: %(cg_name)s'),
+                 {'cg_name': cg_name})
+
+        self.client.delete_consistency_group(cg_name)
+
+        for volume in volumes:
+            try:
+                self.client.delete_lun(volume.name)
+                volumes_model_update.append(
+                    {'id': volume.id,
+                     'status': fields.ConsistencyGroupStatus.DELETED})
+            except storops_ex.VNXDeleteLunError:
+                volumes_model_update.append(
+                    {'id': volume.id,
+                     'status': fields.ConsistencyGroupStatus.ERROR_DELETING})
+
+        return model_update, volumes_model_update
+
+    def create_cgsnapshot(self, context, cgsnapshot, snapshots):
+        """Creates a CG snapshot(snap group)."""
+        model_update = {}
+        snapshots_model_update = []
+        LOG.info(_LI('Creating CG snapshot for consistency group'
+                     ': %(group_name)s'),
+                 {'group_name': cgsnapshot.consistencygroup_id})
+
+        self.client.create_cg_snapshot(cgsnapshot.id,
+                                       cgsnapshot.consistencygroup_id)
+        for snapshot in snapshots:
+            snapshots_model_update.append(
+                {'id': snapshot.id, 'status': 'available'})
+        model_update['status'] = 'available'
+
+        return model_update, snapshots_model_update
+
+    def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
+        """Deletes a CG snapshot(snap group)."""
+        model_update = {}
+        snapshots_model_update = []
+        model_update['status'] = cgsnapshot.status
+        LOG.info(_LI('Deleting CG snapshot %(snap_name)s for consistency '
+                     'group: %(group_name)s'),
+                 {'snap_name': cgsnapshot.id,
+                  'group_name': cgsnapshot.consistencygroup_id})
+
+        self.client.delete_cg_snapshot(cgsnapshot.id)
+        for snapshot in snapshots:
+            snapshots_model_update.append(
+                {'id': snapshot.id, 'status': 'deleted'})
+        model_update['status'] = 'deleted'
+
+        return model_update, snapshots_model_update
+
+    def create_cg_from_cgsnapshot(self, context, group,
+                                  volumes, cgsnapshot, snapshots):
+        # 1. Copy a temp CG snapshot from CG snapshot
+        #    and allow RW for it
+        # 2. Create SMPs from source volumes
+        # 3. Attach SMPs to the CG snapshot
+        # 4. Create migration target LUNs
+        # 5. Migrate from SMPs to LUNs one by one
+        # 6. Wait completion of migration
+        # 7. Create a new CG, add all LUNs to it
+        # 8. Delete the temp CG snapshot
+        cg_name = group.id
+        src_cg_snap_name = cgsnapshot.id
+        pool_name = utils.get_pool_from_host(group.host)
+        lun_sizes = []
+        lun_names = []
+        src_lun_names = []
+        specs_list = []
+        for volume, snapshot in zip(volumes, snapshots):
+            lun_sizes.append(volume.size)
+            lun_names.append(volume.name)
+            src_lun_names.append(snapshot.volume.name)
+            specs_list.append(common.ExtraSpecs.from_volume(volume))
+
+        lun_id_list = emc_taskflow.create_cg_from_cg_snapshot(
+            client=self.client,
+            cg_name=cg_name,
+            src_cg_name=None,
+            cg_snap_name=None,
+            src_cg_snap_name=src_cg_snap_name,
+            pool_name=pool_name,
+            lun_sizes=lun_sizes,
+            lun_names=lun_names,
+            src_lun_names=src_lun_names,
+            specs_list=specs_list)
+
+        volume_model_updates = []
+        for volume, lun_id in zip(volumes, lun_id_list):
+            model_update = {
+                'id': volume.id,
+                'provider_location':
+                    self._build_provider_location(
+                        lun_id=lun_id,
+                        lun_type='lun',
+                        base_lun_name=volume.name
+                    )}
+            volume_model_updates.append(model_update)
+        return None, volume_model_updates
+
+    def create_cloned_cg(self, context, group,
+                         volumes, source_cg, source_vols):
+        # 1. Create a temp CG snapshot from source_cg.
+        # Steps 2-8 are the same as in create_cg_from_cgsnapshot.
+        pool_name = utils.get_pool_from_host(group.host)
+        lun_sizes = []
+        lun_names = []
+        src_lun_names = []
+        specs_list = []
+        for volume, source_vol in zip(volumes, source_vols):
+            lun_sizes.append(volume.size)
+            lun_names.append(volume.name)
+            src_lun_names.append(source_vol.name)
+            specs_list.append(common.ExtraSpecs.from_volume(volume))
+
+        lun_id_list = emc_taskflow.create_cloned_cg(
+            client=self.client,
+            cg_name=group.id,
+            src_cg_name=source_cg.id,
+            pool_name=pool_name,
+            lun_sizes=lun_sizes,
+            lun_names=lun_names,
+            src_lun_names=src_lun_names,
+            specs_list=specs_list)
+
+        volume_model_updates = []
+        for volume, lun_id in zip(volumes, lun_id_list):
+            model_update = {
+                'id': volume.id,
+                'provider_location':
+                    self._build_provider_location(
+                        lun_id=lun_id,
+                        lun_type='lun',
+                        base_lun_name=volume.name
+                    )}
+            volume_model_updates.append(model_update)
+        return None, volume_model_updates
+
+    def parse_pools(self):
+        pool_names = self.config.storage_vnx_pool_names
+        array_pools = self.client.get_pools()
+        if pool_names:
+            pool_names = set([po.strip() for po in pool_names])
+            array_pool_names = set([po.name for po in array_pools])
+            nonexistent_pools = pool_names.difference(array_pool_names)
+            pool_names.difference_update(nonexistent_pools)
+            if not pool_names:
+                msg = _('None of the specified storage pools to be managed '
+                        'exist. Please check your configuration. '
+                        'Non-existent pools: %s') % ','.join(nonexistent_pools)
+                raise exception.VolumeBackendAPIException(data=msg)
+            if nonexistent_pools:
+                LOG.warning(_LW('The following specified storage pools '
+                                'do not exist: %(nonexistent)s. '
+                                'This host will only manage the storage '
+                                'pools: %(exist)s'),
+                            {'nonexistent': ','.join(nonexistent_pools),
+                             'exist': ','.join(pool_names)})
+            else:
+                LOG.debug('This host will manage the storage pools: %s.',
+                          ','.join(pool_names))
+        else:
+            pool_names = [p.name for p in array_pools]
+            LOG.info(_LI('No storage pool is configured. This host will '
+                         'manage all the pools on the VNX system.'))
+
+        return list(filter(lambda pool: pool.name in pool_names, array_pools))
+
+    def get_enabler_stats(self):
+        stats = dict()
+        stats['compression_support'] = self.client.is_compression_enabled()
+        stats['fast_support'] = self.client.is_fast_enabled()
+        stats['deduplication_support'] = self.client.is_dedup_enabled()
+        stats['thin_provisioning_support'] = self.client.is_thin_enabled()
+        stats['consistencygroup_support'] = self.client.is_snap_enabled()
+        stats['replication_enabled'] = bool(self.mirror_view)
+        return stats
+
+    def get_pool_stats(self, enabler_stats=None):
+        stats = enabler_stats if enabler_stats else self.get_enabler_stats()
+        self.storage_pools = self.parse_pools()
+        pool_feature = self.client.get_pool_feature()
+        pools_stats = list()
+        for pool in self.storage_pools:
+            pool_stats = {
+                'pool_name': pool.name,
+                'total_capacity_gb': pool.user_capacity_gbs,
+                'provisioned_capacity_gb': pool.total_subscribed_capacity_gbs
+            }
+
+            # Handle pool state Initializing, Ready, Faulted, Offline
+            # or Deleting.
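+            # Note: judging from the handling below, the states in
+            # `VALID_CREATE_LUN_STATE` are assumed to be those in which LUN
+            # creation is NOT possible, hence free capacity is reported as 0.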
+            if pool.state in common.PoolState.VALID_CREATE_LUN_STATE:
+                pool_stats['free_capacity_gb'] = 0
+                LOG.warning(_LW('Storage Pool [%(pool)s] is [%(state)s].'),
+                            {'pool': pool.name,
+                             'state': pool.state})
+            else:
+                pool_stats['free_capacity_gb'] = pool.available_capacity_gbs
+
+                if (pool_feature.max_pool_luns <=
+                        pool_feature.total_pool_luns):
+                    LOG.warning(_LW('The maximum number of pool LUNs '
+                                    '(%(max_luns)s) has been reached for '
+                                    '%(pool_name)s. No more LUNs can be '
+                                    'created.'),
+                                {'max_luns': pool_feature.max_pool_luns,
+                                 'pool_name': pool.name})
+                    pool_stats['free_capacity_gb'] = 0
+
+            if not self.reserved_percentage:
+                # Since the admin is not sure of what value is proper,
+                # the driver will calculate the recommended value.
+
+                # Some extra capacity will be used by meta data of pool LUNs.
+                # The overhead is about LUN_Capacity * 0.02 + 3 GB
+                # reserved_percentage will be used to make sure the scheduler
+                # takes the overhead into consideration.
+                # Assume that all the remaining capacity is to be used to
+                # create a thick LUN, reserved_percentage is estimated as
+                # follows:
+                reserved = (((0.02 * pool.available_capacity_gbs + 3) /
+                             (1.02 * pool.user_capacity_gbs)) * 100)
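+                # Worked example (hypothetical numbers): with 100 GB
+                # available in a 1000 GB pool, reserved is roughly
+                # ((0.02 * 100 + 3) / (1.02 * 1000)) * 100 ~= 0.49 (%).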
+                # Take pool full threshold into consideration
+                if not self.ignore_pool_full_threshold:
+                    reserved += 100 - pool.percent_full_threshold
+                pool_stats['reserved_percentage'] = int(math.ceil(min(reserved,
+                                                                      100)))
+            else:
+                pool_stats['reserved_percentage'] = self.reserved_percentage
+
+            array_serial = self.serial_number
+            pool_stats['location_info'] = ('%(pool_name)s|%(array_serial)s' %
+                                           {'pool_name': pool.name,
+                                            'array_serial': array_serial})
+            pool_stats['fast_cache_enabled'] = pool.fast_cache
+
+            # Copy advanced feature stats from backend stats
+            pool_stats['compression_support'] = stats['compression_support']
+            pool_stats['fast_support'] = stats['fast_support']
+            pool_stats['deduplication_support'] = (
+                stats['deduplication_support'])
+            pool_stats['thin_provisioning_support'] = (
+                stats['thin_provisioning_support'])
+            pool_stats['thick_provisioning_support'] = True
+            pool_stats['consistencygroup_support'] = (
+                stats['consistencygroup_support'])
+            pool_stats['max_over_subscription_ratio'] = (
+                self.max_over_subscription_ratio)
+            # Add replication v2.1 support
+            self.append_replication_stats(pool_stats)
+            pools_stats.append(pool_stats)
+        return pools_stats
+
+    def append_replication_stats(self, stats):
+        if self.mirror_view:
+            stats['replication_enabled'] = True
+            stats['replication_count'] = 1
+            stats['replication_type'] = ['sync']
+        else:
+            stats['replication_enabled'] = False
+        stats['replication_targets'] = [
+            device.backend_id for device in common.ReplicationDeviceList(
+                self.config)]
+
+    def update_volume_stats(self):
+        stats = self.get_enabler_stats()
+        stats['pools'] = self.get_pool_stats(stats)
+        stats['storage_protocol'] = self.config.storage_protocol
+        stats['driver_version'] = self.VERSION
+        stats['vendor_name'] = self.VENDOR
+        self.append_replication_stats(stats)
+        return stats
+
+    def delete_volume(self, volume):
+        """Deletes an EMC volume."""
+        self.cleanup_lun_replication(volume)
+        self.client.delete_lun(volume.name, force=self.force_delete_lun_in_sg)
+
+    def extend_volume(self, volume, new_size):
+        """Extends an EMC volume."""
+        self.client.expand_lun(volume.name, new_size, poll=False)
+
+    def create_snapshot(self, snapshot):
+        """Creates a snapshot."""
+        src_lun_id = self.client.get_lun_id(snapshot.volume)
+        self.client.create_snapshot(src_lun_id, snapshot.name)
+
+    def delete_snapshot(self, snapshot):
+        """Deletes a snapshot."""
+        self.client.delete_snapshot(snapshot.name)
+
+    def _get_referenced_lun(self, existing_ref):
+        lun = None
+        if 'source-id' in existing_ref:
+            lun = self.client.get_lun(lun_id=existing_ref['source-id'])
+        elif 'source-name' in existing_ref:
+            lun = self.client.get_lun(name=existing_ref['source-name'])
+        else:
+            reason = _('Reference must contain source-id or source-name key.')
+            raise exception.ManageExistingInvalidReference(
+                existing_ref=existing_ref, reason=reason)
+        if not lun.existed:
+            raise exception.ManageExistingInvalidReference(
+                existing_ref=existing_ref,
+                reason=_("LUN doesn't exist."))
+        return lun
+
+    def manage_existing_get_size(self, volume, existing_ref):
+        """Returns size of volume to be managed by manage_existing."""
+        lun = self._get_referenced_lun(existing_ref)
+        target_pool = utils.get_pool_from_host(volume.host)
+        if target_pool and lun.pool_name != target_pool:
+            reason = (_('The imported lun is in pool %(lun_pool)s '
+                        'which is not managed by the host %(host)s.')
+                      % {'lun_pool': lun.pool_name,
+                         'host': volume['host']})
+            raise exception.ManageExistingInvalidReference(
+                existing_ref=existing_ref, reason=reason)
+        return lun.total_capacity_gb
+
+    def manage_existing(self, volume, existing_ref):
+        """Imports the existing backend storage object as a volume.
+
+        manage_existing_ref:{
+            'source-id':<lun id in VNX>
+        }
+        or
+        manage_existing_ref:{
+            'source-name':<lun name in VNX>
+        }
+
+        When the volume has a volume_type, the driver inspects it and
+        compares it against the properties of the referenced backend
+        storage object. If they are incompatible, a
+        ManageExistingVolumeTypeMismatch exception is raised.
+        """
+        lun = self._get_referenced_lun(existing_ref)
+        if volume.volume_type_id:
+            type_specs = common.ExtraSpecs.from_volume(volume)
+            if not type_specs.match_with_lun(lun):
+                raise exception.ManageExistingVolumeTypeMismatch(
+                    reason=_("The volume to be managed is a %(provision)s LUN "
+                             "and the tiering setting is %(tier)s. This "
+                             "doesn't match with the type %(type)s.")
+                    % {'provision': lun.provision,
+                       'tier': lun.tier,
+                       'type': volume.volume_type_id})
+        lun.rename(volume.name)
+        if lun.is_snap_mount_point:
+            lun_type = 'smp'
+            base_lun_name = lun.primary_lun
+        else:
+            lun_type = 'lun'
+            base_lun_name = volume.name
+        pl = self._build_provider_location(
+            lun_id=lun.lun_id,
+            lun_type=lun_type,
+            base_lun_name=base_lun_name)
+        return {'provider_location': pl}
+
+    def unmanage(self, volume):
+        """Unmanages a volume."""
+        pass
+
+    def build_host(self, connector):
+        raise NotImplementedError()
+
+    def assure_storage_group(self, host):
+        """Assures that the storage group with name of `host` exists.
+
+        If the storage group doesn't exist, create a one.
+        """
+        sg = self.client.get_storage_group(host.name)
+        is_new_sg = False
+        if not sg.existed:
+            sg = self.client.create_storage_group(host.name)
+            is_new_sg = True
+        return (sg, is_new_sg)
+
+    def assure_host_access(self, storage_group, host, volume, is_new_sg):
+        """Assures that `host` is connected to the Array.
+
+        It first registers initiators to `storage_group`, then adds `volume`
+        to it.
+
+        :param storage_group: object of storops storage group to which the
+                              host access is registered.
+        :param host: `common.Host` object with initiator information.
+        :param volume: `common.Volume` object with volume information.
+        :param is_new_sg: flag indicating whether the `storage_group` is newly
+                          created or not.
+        """
+        if not self.config.initiator_auto_registration:
+            if is_new_sg:
+                # Invoke connect_host on storage group to register all
+                # host information.
+                # Call connect_host only once when sg is newly created.
+                storage_group.connect_host(host.name)
+        else:
+            self.auto_register_initiator(storage_group, host)
+
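+        # The value returned by add_lun_to_sg is the HLU (host LUN number)
+        # under which the LUN is exposed to the host; prepare_target_data
+        # consumes it later.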
+        return self.client.add_lun_to_sg(
+            storage_group,
+            self.client.get_lun(lun_id=volume.vnx_lun_id),
+            self.max_retries)
+
+    def auto_register_initiator(self, storage_group, host):
+        """Registers the initiators to storage group.
+
+        :param storage_group: storage group object to which the initiator is
+                              registered.
+        :param host: information of initiator, etc.
+
+        The behavior depends on the combination of the initiators already
+        registered to the SG and the configured white list of ports (that
+        is, `self.allowed_ports`).
+        Note that `self.allowed_ports` contains all the iSCSI/FC ports on
+        the array when no white list is configured.
+
+        1. Port white list is not configured (`io_port_list` is None):
+        a) Skip the initiators registered to any port.
+        b) For the not-registered initiators, register to `self.allowed_ports`.
+        2. Port white list is not empty:
+        a) For the initiators registered to port_n, register to
+        `self.allowed_ports` except port_n (could be a list).
+        b) For the not-registered initiators, register to `self.allowed_ports`.
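+
+        A minimal sketch of the resulting mapping, assuming two allowed
+        ports and hypothetical initiator names (plain strings stand in
+        for the storops port objects used by the driver):
+
+        .. code-block:: python
+
+            allowed_ports = {'A-1-0', 'B-1-0'}
+            # 'iqn.reg' is already registered to 'A-1-0';
+            # 'iqn.new' is not registered anywhere.
+            initiator_port_map = {
+                'iqn.new': allowed_ports,                # case 2.b
+                'iqn.reg': allowed_ports - {'A-1-0'},    # case 2.a
+            }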
+        """
+
+        host_initiators = set(host.initiators)
+        sg_initiators = set(storage_group.initiator_uid_list)
+        unreg_initiators = host_initiators - sg_initiators
+        # Case 1.b and case 2.b.
+        initiator_port_map = {unreg_id: set(self.allowed_ports)
+                              for unreg_id in unreg_initiators}
+
+        if self.config.io_port_list is not None:
+            reg_initiators = host_initiators & sg_initiators
+            # Case 2.a
+            initiator_port_map.update({
+                reg_id: (set(self.allowed_ports) -
+                         set(storage_group.get_ports(reg_id)))
+                for reg_id in reg_initiators})
+
+        self.client.register_initiator(
+            storage_group, host, initiator_port_map)
+
+    def prepare_target_data(self, storage_group, host, volume, hlu):
+        raise NotImplementedError()
+
+    def initialize_connection(self, cinder_volume, connector):
+        """Initializes the connection to `cinder_volume`."""
+        volume = common.Volume(
+            cinder_volume.name, cinder_volume.id,
+            vnx_lun_id=self.client.get_lun_id(cinder_volume))
+        return self._initialize_connection(volume, connector)
+
+    def _initialize_connection(self, volume, connector):
+        """Helps to initialize the connection.
+
+        Shares common code with initialize_connection_snapshot.
+
+        :param volume: `common.Volume` object with volume information.
+        :param connector: connector information from Nova.
+        """
+        host = self.build_host(connector)
+        sg, is_new_sg = self.assure_storage_group(host)
+        hlu = self.assure_host_access(sg, host, volume, is_new_sg)
+        return self.prepare_target_data(sg, host, volume, hlu)
+
+    def terminate_connection(self, cinder_volume, connector):
+        """Terminates the connection to `cinder_volume`."""
+        volume = common.Volume(
+            cinder_volume.name, cinder_volume.id,
+            vnx_lun_id=self.client.get_lun_id(cinder_volume))
+        return self._terminate_connection(volume, connector)
+
+    def _terminate_connection(self, volume, connector):
+        """Helps to terminate the connection.
+
+        Shares common code with terminate_connection_snapshot.
+
+        :param volume: `common.Volume` object with volume information.
+        :param connector: connector information from Nova.
+        """
+        host = self.build_host(connector)
+        sg = self.client.get_storage_group(host.name)
+        self.remove_host_access(volume, host, sg)
+
+        # The return data must be built before terminate_connection_cleanup
+        # runs: the storage group, which is needed to build the return
+        # data, may be deleted during the cleanup.
+        self.update_storage_group_if_required(sg)
+        ret = self.build_terminate_connection_return_data(host, sg)
+        self.terminate_connection_cleanup(host, sg)
+
+        return ret
+
+    def update_storage_group_if_required(self, sg):
+        if sg.existed and self.destroy_empty_sg:
+            utils.update_res_with_poll(sg)
+
+    def remove_host_access(self, volume, host, sg):
+        """Removes the host access from `volume`.
+
+        :param volume: `common.Volume` object with volume information.
+        :param host: `common.Host` object with host information.
+        :param sg: object of `storops` storage group.
+        """
+        lun = self.client.get_lun(lun_id=volume.vnx_lun_id)
+        hostname = host.name
+        if not sg.existed:
+            LOG.warning(_LW("Storage Group %s is not found. "
+                            "Nothing can be done in terminate_connection()."),
+                        hostname)
+        else:
+            try:
+                sg.detach_alu(lun)
+            except storops_ex.VNXDetachAluNotFoundError:
+                LOG.warning(_LW("Volume %(vol)s is not in Storage Group"
+                                " %(sg)s."),
+                            {'vol': volume.name, 'sg': hostname})
+
+    def build_terminate_connection_return_data(self, host, sg):
+        raise NotImplementedError()
+
+    def terminate_connection_cleanup(self, host, sg):
+        if not sg.existed:
+            return
+
+        if self.destroy_empty_sg:
+            if not self.client.sg_has_lun_attached(sg):
+                self._destroy_empty_sg(host, sg)
+
+    def _destroy_empty_sg(self, host, sg):
+        try:
+            LOG.info(_LI("Storage Group %s is empty."), sg.name)
+            sg.disconnect_host(sg.name)
+            sg.remove()
+            if self.itor_auto_dereg:
+                self._deregister_initiator(host)
+        except storops_ex.StoropsException:
+            LOG.warning(_LW("Failed to destroy Storage Group %s."),
+                        sg.name)
+            try:
+                sg.connect_host(sg.name)
+            except storops_ex.StoropsException:
+                LOG.warning(_LW("Failed to connect host %(host)s "
+                                "back to storage group %(sg)s."),
+                            {'host': sg.name, 'sg': sg.name})
+
+    def _deregister_initiator(self, host):
+        initiators = host.initiators
+        try:
+            self.client.deregister_initiators(initiators)
+        except storops_ex.StoropsException:
+            LOG.warning(_LW("Failed to deregister the initiators %s"),
+                        initiators)
+
+    def _is_allowed_port(self, port):
+        return port in self.allowed_ports
+
+    def _build_provider_location(
+            self, lun_id=None, lun_type=None, base_lun_name=None):
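+        # Assumed layout of the resulting string (illustrative values):
+        # 'system^<serial>|type^lun|id^4|base_lun_name^volume-1|...'
+        # -- caret-delimited key/value pairs joined by pipes.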
+        return utils.build_provider_location(
+            system=self.serial_number,
+            lun_type=lun_type,
+            lun_id=lun_id,
+            base_lun_name=base_lun_name,
+            version=self.VERSION)
+
+    def update_consistencygroup(self, context, group, add_volumes,
+                                remove_volumes):
+        cg = self.client.get_cg(name=group.id)
+        lun_ids_to_add = [self.client.get_lun_id(volume)
+                          for volume in add_volumes]
+        lun_ids_to_remove = [self.client.get_lun_id(volume)
+                             for volume in remove_volumes]
+        self.client.update_consistencygroup(cg, lun_ids_to_add,
+                                            lun_ids_to_remove)
+        return ({'status': fields.ConsistencyGroupStatus.AVAILABLE},
+                None,
+                None)
+
+    def create_export_snapshot(self, context, snapshot, connector):
+        self.client.create_mount_point(snapshot.volume_name,
+                                       utils.construct_smp_name(snapshot.id))
+
+    def remove_export_snapshot(self, context, snapshot):
+        self.client.delete_lun(utils.construct_smp_name(snapshot.id))
+
+    def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
+        """Initializes connection for snapshot mount point."""
+        smp_name = utils.construct_smp_name(snapshot.id)
+        self.client.attach_snapshot(smp_name, snapshot.name)
+        lun = self.client.get_lun(name=smp_name)
+        volume = common.Volume(smp_name, snapshot.id, vnx_lun_id=lun.lun_id)
+        return self._initialize_connection(volume, connector)
+
+    def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
+        """Terminates connection for snapshot mount point."""
+        smp_name = utils.construct_smp_name(snapshot.id)
+        lun = self.client.get_lun(name=smp_name)
+        volume = common.Volume(smp_name, snapshot.id, vnx_lun_id=lun.lun_id)
+        connection_info = self._terminate_connection(volume, connector)
+        self.client.detach_snapshot(smp_name)
+        return connection_info
+
+    def setup_lun_replication(self, volume, primary_lun_id):
+        """Setup replication for LUN, this only happens in primary system."""
+        specs = common.ExtraSpecs.from_volume(volume)
+        provision = specs.provision
+        tier = specs.tier
+        rep_update = {'replication_driver_data': None,
+                      'replication_status': fields.ReplicationStatus.DISABLED}
+        if specs.is_replication_enabled:
+            LOG.debug('Starting replication setup '
+                      'for volume: %s.', volume.id)
+            lun_size = volume.size
+            mirror_name = utils.construct_mirror_name(volume)
+            pool_name = utils.get_pool_from_host(volume.host)
+            emc_taskflow.create_mirror_view(
+                self.mirror_view, mirror_name,
+                primary_lun_id, pool_name,
+                volume.name, lun_size,
+                provision, tier)
+
+            LOG.info(_LI('Successfully set up replication for %s.'),
+                     volume.id)
+            rep_update.update({'replication_status':
+                               fields.ReplicationStatus.ENABLED})
+        return rep_update
+
+    def cleanup_lun_replication(self, volume):
+        specs = common.ExtraSpecs.from_volume(volume)
+        if specs.is_replication_enabled:
+            LOG.debug('Starting replication cleanup for volume: '
+                      '%s.', volume.id)
+            mirror_name = utils.construct_mirror_name(volume)
+            mirror_view = self.build_mirror_view(self.config, True)
+            mirror_view.destroy_mirror(mirror_name, volume.name)
+            LOG.info(
+                _LI('Successfully destroyed replication for volume: %s'),
+                volume.id)
+
+    def build_mirror_view(self, configuration, failover=True):
+        """Builds a mirror view operation class.
+
+        :param configuration: driver configuration
+        :param failover: True if from primary to configured array,
+                         False if from configured array to primary.
+        """
+        rep_devices = configuration.replication_device
+        if not rep_devices:
+            LOG.info(_LI('Replication is not configured on backend: %s.'),
+                     configuration.config_group)
+            return None
+        elif len(rep_devices) == 1:
+            if not self.client.is_mirror_view_enabled():
+                error_msg = _('Replication is configured, '
+                              'but no MirrorView/S enabler installed on VNX.')
+                raise exception.InvalidInput(reason=error_msg)
+            rep_list = common.ReplicationDeviceList(configuration)
+            device = rep_list[0]
+            secondary_client = client.Client(
+                ip=device.san_ip,
+                username=device.san_login,
+                password=device.san_password,
+                scope=device.storage_vnx_authentication_type,
+                naviseccli=self.client.naviseccli,
+                sec_file=device.storage_vnx_security_file_dir)
+            if failover:
+                mirror_view = common.VNXMirrorView(
+                    self.client, secondary_client)
+            else:
+                # For fail-back, we need to take care of reversed ownership.
+                mirror_view = common.VNXMirrorView(
+                    secondary_client, self.client)
+            return mirror_view
+        else:
+            error_msg = _('VNX Cinder driver does not support '
+                          'multiple replication targets.')
+            raise exception.InvalidInput(reason=error_msg)
+
+    def validate_backend_id(self, backend_id):
+        # Currently, the VNX driver supports only one remote device.
+        replication_device = common.ReplicationDeviceList(self.config)[0]
+        if backend_id not in (
+                'default', replication_device.backend_id):
+            raise exception.InvalidInput(
+                reason=_('Invalid backend_id specified.'))
+
+    def failover_host(self, context, volumes, secondary_backend_id):
+        """Fails over the volume back and forth.
+
+        Driver needs to update following info for failed-over volume:
+        1. provider_location: update serial number and lun id
+        2. replication_status: new status for replication-enabled volume
+        """
+        volume_update_list = []
+        self.validate_backend_id(secondary_backend_id)
+        if secondary_backend_id != 'default':
+            rep_status = fields.ReplicationStatus.FAILED_OVER
+            mirror_view = self.build_mirror_view(self.config, True)
+        else:
+            rep_status = fields.ReplicationStatus.ENABLED
+            mirror_view = self.build_mirror_view(self.config, False)
+
+        def failover_volume(volume, new_status):
+            mirror_name = utils.construct_mirror_name(volume)
+
+            provider_location = volume.provider_location
+            try:
+                mirror_view.promote_image(mirror_name)
+            except storops_ex.VNXMirrorException as ex:
+                msg = _LE(
+                    'Failed to failover volume %(volume_id)s '
+                    'to %(target)s: %(error)s.')
+                LOG.error(msg, {'volume_id': volume.id,
+                                'target': secondary_backend_id,
+                                'error': ex})
+                new_status = fields.ReplicationStatus.ERROR
+            else:
+                # Transfer ownership to secondary_backend_id and
+                # update provider_location field
+                secondary_client = mirror_view.secondary_client
+                updated = dict()
+                updated['system'] = secondary_client.get_serial()
+                updated['id'] = six.text_type(
+                    secondary_client.get_lun(name=volume.name).lun_id)
+                provider_location = utils.update_provider_location(
+                    provider_location, updated)
+            model_update = {'volume_id': volume.id,
+                            'updates':
+                                {'replication_status': new_status,
+                                 'provider_location': provider_location}}
+            volume_update_list.append(model_update)
+
+        for volume in volumes:
+            specs = common.ExtraSpecs.from_volume(volume)
+            if specs.is_replication_enabled:
+                failover_volume(volume, rep_status)
+            else:
+                # Since the array has been failed over, volumes without
+                # replication should be set to error.
+                volume_update_list.append({
+                    'volume_id': volume.id,
+                    'updates': {'status': 'error'}})
+        return secondary_backend_id, volume_update_list
+
+    def get_pool_name(self, volume):
+        return self.client.get_pool_name(volume.name)
+
+    def update_migrated_volume(self, context, volume, new_volume,
+                               original_volume_status=None):
+        """Updates metadata after host-assisted migration."""
+        metadata = utils.get_metadata(volume)
+        metadata['snapcopy'] = ('True' if utils.is_volume_smp(new_volume)
+                                else 'False')
+        return {'provider_location': new_volume.provider_location,
+                'metadata': metadata}
+
+
+class ISCSIAdapter(CommonAdapter):
+    def __init__(self, configuration, active_backend_id):
+        super(ISCSIAdapter, self).__init__(configuration, active_backend_id)
+        self.iscsi_initiator_map = None
+
+    def do_setup(self):
+        super(ISCSIAdapter, self).do_setup()
+
+        self.iscsi_initiator_map = self.config.iscsi_initiators
+        self.allowed_ports = self.validate_ports(
+            self.client.get_iscsi_targets(),
+            self.config.io_port_list)
+        LOG.debug('[%(group)s] allowed_ports are: [%(ports)s].',
+                  {'group': self.config.config_group,
+                   'ports': ','.join(
+                       [port.display_name for port in self.allowed_ports])})
+
+    def _normalize_config(self):
+        super(ISCSIAdapter, self)._normalize_config()
+
+        # Check option `iscsi_initiators`.
+        # Leave it as None if the option is not set.
+        # Raise an error if it is set to an empty string or invalid JSON.
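+        # A sample value (hypothetical host names and IPs):
+        # iscsi_initiators = {"host1": ["10.0.0.1", "10.0.0.2"]}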
+        iscsi_initiators = self.config.iscsi_initiators
+        option = '[{group}] iscsi_initiators'.format(
+            group=self.config.config_group)
+        if iscsi_initiators is None:
+            return
+        elif not iscsi_initiators.strip():
+            raise exception.InvalidConfigurationValue(option=option,
+                                                      value=iscsi_initiators)
+        else:
+            try:
+                self.config.iscsi_initiators = json.loads(iscsi_initiators)
+            except ValueError:
+                raise exception.InvalidConfigurationValue(
+                    option=option,
+                    value=iscsi_initiators)
+            if not isinstance(self.config.iscsi_initiators, dict):
+                raise exception.InvalidConfigurationValue(
+                    option=option,
+                    value=iscsi_initiators)
+            LOG.info(_LI("[%(group)s] iscsi_initiators is configured: "
+                         "%(value)s"),
+                     {'group': self.config.config_group,
+                      'value': self.config.iscsi_initiators})
+
+    def update_volume_stats(self):
+        """Retrieves stats info."""
+        stats = super(ISCSIAdapter, self).update_volume_stats()
+        self.allowed_ports = self.validate_ports(
+            self.client.get_iscsi_targets(),
+            self.config.io_port_list)
+        backend_name = self.config.safe_get('volume_backend_name')
+        stats['volume_backend_name'] = backend_name or 'VNXISCSIDriver'
+        return stats
+
+    def _build_port_str(self, port):
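+        # Produces a port string like 'A-0-0' (SP A, port 0, vport 0).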
+        return '%(sp)s-%(pid)s-%(vpid)s' % {
+            'sp': 'A' if port.sp == storops.VNXSPEnum.SP_A else 'B',
+            'pid': port.port_id,
+            'vpid': port.vport_id}
+
+    def build_host(self, connector):
+        return common.Host(connector['host'], [connector['initiator']],
+                           ip=connector['ip'])
+
+    def arrange_io_ports(self, reg_port_white_list, iscsi_initiator_ips):
+        """Arranges IO ports.
+
+        Shuffles the registered IO ports and moves a pingable port to the
+        front of the list to act as the main portal.
+        """
+
+        random.shuffle(reg_port_white_list)
+        random.shuffle(iscsi_initiator_ips)
+
+        main_portal_index = None
+        for index, port in enumerate(reg_port_white_list):
+            for initiator_ip in iscsi_initiator_ips:
+                if self.client.ping_node(port, initiator_ip):
+                    main_portal_index = index
+                    break
+            else:
+                # The inner loop found no pingable initiator IP for this
+                # port; try the next port.
+                continue
+            break
+
+        if main_portal_index is not None:
+            reg_port_white_list.insert(
+                0, reg_port_white_list.pop(main_portal_index))
+
+        return reg_port_white_list
+
+    def prepare_target_data(self, storage_group, host, volume, hlu):
+        """Prepares the target data for Nova.
+
+        :param storage_group: the `storops` storage group object.
+        :param host: `common.Host` object with initiator information.
+        :param volume: `common.Volume` object with volume information.
+        :param hlu: the HLU number assigned to the volume.
+        """
+
+        target_io_ports = utils.sift_port_white_list(
+            self.allowed_ports, storage_group.get_ports(host.initiators[0]))
+
+        if not target_io_ports:
+            msg = (_('Failed to find available iSCSI targets for %s.')
+                   % storage_group.name)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        if self.iscsi_initiator_map and host.name in self.iscsi_initiator_map:
+            iscsi_initiator_ips = list(self.iscsi_initiator_map[host.name])
+            target_io_ports = self.arrange_io_ports(target_io_ports,
+                                                    iscsi_initiator_ips)
+
+        iqns = [port.wwn for port in target_io_ports]
+        portals = ["%s:3260" % port.ip_address for port in target_io_ports]
+        iscsi_target_data = common.ISCSITargetData(
+            volume.id, True, iqn=iqns[0], iqns=iqns, portal=portals[0],
+            portals=portals, lun=hlu, luns=[hlu] * len(target_io_ports))
+        LOG.debug('Prepared iSCSI targets for %(host)s: %(target_data)s.',
+                  {'host': host.name, 'target_data': iscsi_target_data})
+
+        return iscsi_target_data.to_dict()
+
+    def build_terminate_connection_return_data(self, host, sg):
+        return None
+
+
+class FCAdapter(CommonAdapter):
+    def __init__(self, configuration, active_backend_id):
+        super(FCAdapter, self).__init__(configuration, active_backend_id)
+        self.lookup_service = None
+
+    def do_setup(self):
+        super(FCAdapter, self).do_setup()
+
+        self.lookup_service = zm_utils.create_lookup_service()
+        self.allowed_ports = self.validate_ports(
+            self.client.get_fc_targets(),
+            self.config.io_port_list)
+        LOG.debug('[%(group)s] allowed_ports are: [%(ports)s].',
+                  {'group': self.config.config_group,
+                   'ports': ','.join(
+                       [port.display_name for port in self.allowed_ports])})
+
+    def update_volume_stats(self):
+        """Retrieves stats info."""
+        stats = super(FCAdapter, self).update_volume_stats()
+        backend_name = self.config.safe_get('volume_backend_name')
+        stats['volume_backend_name'] = backend_name or 'VNXFCDriver'
+        return stats
+
+    def _build_port_str(self, port):
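+        # Produces a port string like 'A-0' (SP A, port 0).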
+        return '%(sp)s-%(pid)s' % {
+            'sp': 'A' if port.sp == storops.VNXSPEnum.SP_A else 'B',
+            'pid': port.port_id}
+
+    def build_host(self, connector):
+        if 'wwnns' not in connector or 'wwpns' not in connector:
+            msg = _('Host %s has no FC initiators') % connector['host']
+            LOG.error(msg)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        wwnns = connector['wwnns']
+        wwpns = connector['wwpns']
+        wwns = [(node + port).upper() for (node, port) in zip(wwnns, wwpns)]
+        # A WWN is like '20000090FA534CD110000090FA534CD1'; convert it to
+        # '20:00:00:90:FA:53:4C:D1:10:00:00:90:FA:53:4C:D1'.
+        # Note: use the // operator here because the behavior of the /
+        # operator changed to true division in Python 3.
+        wwns = [re.sub(r'\S\S', lambda m: m.group(0) + ':', wwn,
+                       len(wwn) // 2 - 1)
+                for wwn in wwns]
+
+        return common.Host(connector['host'], wwns, wwpns=wwpns)
+
+    def prepare_target_data(self, storage_group, host, volume, hlu):
+        """Prepares the target data for Nova.
+
+        :param storage_group: the `storops` storage group object.
+        :param host: `common.Host` object with initiator information.
+        :param volume: `common.Volume` object with volume information.
+        :param hlu: the HLU number assigned to the volume.
+        """
+
+        if self.lookup_service is None:
+            registered_ports = []
+            for wwn in host.initiators:
+                registered_ports.extend(storage_group.get_ports(wwn))
+
+            reg_port_white_list = utils.sift_port_white_list(
+                self.allowed_ports,
+                registered_ports)
+
+            if not reg_port_white_list:
+                msg = (_('Failed to find available FC targets for %s.')
+                       % storage_group.name)
+                raise exception.VolumeBackendAPIException(data=msg)
+
+            target_wwns = [utils.truncate_fc_port_wwn(port.wwn)
+                           for port in reg_port_white_list]
+            return common.FCTargetData(volume.id, True, wwn=target_wwns,
+                                       lun=hlu).to_dict()
+        else:
+            target_wwns, initiator_target_map = (
+                self._get_tgt_list_and_initiator_tgt_map(
+                    storage_group, host, True))
+            return common.FCTargetData(
+                volume.id, True, wwn=target_wwns, lun=hlu,
+                initiator_target_map=initiator_target_map).to_dict()
+
+    def update_storage_group_if_required(self, sg):
+        if sg.existed and (self.destroy_empty_sg or self.lookup_service):
+            utils.update_res_with_poll(sg)
+
+    def build_terminate_connection_return_data(self, host, sg):
+        conn_info = {'driver_volume_type': 'fibre_channel',
+                     'data': {}}
+        if self.lookup_service is None:
+            return conn_info
+
+        if not sg.existed or self.client.sg_has_lun_attached(sg):
+            return conn_info
+
+        itor_tgt_map = self._get_initiator_tgt_map(sg, host, False)
+        conn_info['data']['initiator_target_map'] = itor_tgt_map
+
+        return conn_info
+
+    def _get_initiator_tgt_map(
+            self, sg, host, allowed_port_only=False):
+        return self._get_tgt_list_and_initiator_tgt_map(
+            sg, host, allowed_port_only)[1]
+
+    def _get_tgt_list_and_initiator_tgt_map(
+            self, sg, host, allowed_port_only=False):
+        fc_initiators = host.wwpns
+        fc_ports_wwns = list(map(utils.truncate_fc_port_wwn,
+                                 self._get_wwns_of_online_fc_ports(
+                                     sg, allowed_port_only=allowed_port_only)))
+        mapping = self.lookup_service.get_device_mapping_from_network(
+            fc_initiators, fc_ports_wwns)
+        return utils.convert_to_tgt_list_and_itor_tgt_map(mapping)
+
+    def _get_wwns_of_online_fc_ports(self, sg, allowed_port_only=False):
+        ports = sg.fc_ports
+        if allowed_port_only:
+            ports = [po for po in ports if self._is_allowed_port(po)]
+
+        fc_port_wwns = self.client.get_wwn_of_online_fc_ports(ports)
+
+        return fc_port_wwns
diff --git a/cinder/volume/drivers/emc/vnx/client.py b/cinder/volume/drivers/emc/vnx/client.py
new file mode 100644
index 00000000000..a706fb6d1d9
--- /dev/null
+++ b/cinder/volume/drivers/emc/vnx/client.py
@@ -0,0 +1,552 @@
+# Copyright (c) 2016 EMC Corporation, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log as logging
+from oslo_utils import excutils
+from oslo_utils import importutils
+
+storops = importutils.try_import('storops')
+if storops:
+    from storops import exception as storops_ex
+
+from cinder import exception
+from cinder.i18n import _, _LW, _LE
+from cinder import utils as cinder_utils
+from cinder.volume.drivers.emc.vnx import common
+from cinder.volume.drivers.emc.vnx import const
+from cinder.volume.drivers.emc.vnx import utils
+
+
+LOG = logging.getLogger(__name__)
+
+
+class Condition(object):
+    """Defines some condition checker which are used in wait_until, .etc."""
+
+    @staticmethod
+    def is_lun_io_ready(lun):
+        utils.update_res_without_poll(lun)
+        if not lun.existed:
+            return False
+        lun_state = lun.state
+        if lun_state == common.LUNState.INITIALIZING:
+            return False
+        elif lun_state in [common.LUNState.READY,
+                           common.LUNState.FAULTED]:
+            return lun.operation == 'None'
+        else:
+            # Quick exit wait_until when the lun is other state to avoid
+            # long-time timeout.
+            msg = (_('Volume %(name)s was created in VNX, '
+                     'but in %(state)s state.')
+                   % {'name': lun.name,
+                      'state': lun_state})
+            raise exception.VolumeBackendAPIException(data=msg)
+
+    @staticmethod
+    def is_object_existed(vnx_obj):
+        utils.update_res_without_poll(vnx_obj)
+        return vnx_obj.existed
+
+    @staticmethod
+    def is_lun_ops_ready(lun):
+        utils.update_res_without_poll(lun)
+        return lun.operation == 'None'
+
+    @staticmethod
+    def is_lun_expanded(lun, new_size):
+        utils.update_res_without_poll(lun)
+        return new_size == lun.total_capacity_gb
+
+    @staticmethod
+    def is_mirror_synced(mirror):
+        utils.update_res_without_poll(mirror)
+        return (
+            mirror.secondary_image.state ==
+            storops.VNXMirrorImageState.SYNCHRONIZED)
+
+
+class Client(object):
+    def __init__(self, ip, username, password, scope,
+                 naviseccli, sec_file):
+        self.naviseccli = naviseccli
+        self.vnx = storops.VNXSystem(ip=ip,
+                                     username=username,
+                                     password=password,
+                                     scope=scope,
+                                     naviseccli=naviseccli,
+                                     sec_file=sec_file)
+
+    def create_lun(self, pool, name, size, provision,
+                   tier, cg_id=None, ignore_thresholds=False):
+        pool = self.vnx.get_pool(name=pool)
+        try:
+            lun = pool.create_lun(lun_name=name,
+                                  size_gb=size,
+                                  provision=provision,
+                                  tier=tier,
+                                  ignore_thresholds=ignore_thresholds)
+        except storops_ex.VNXLunNameInUseError:
+            lun = self.vnx.get_lun(name=name)
+
+        utils.wait_until(condition=Condition.is_lun_io_ready, lun=lun)
+        if cg_id:
+            cg = self.vnx.get_cg(name=cg_id)
+            cg.add_member(lun)
+        return lun
+
+    def get_lun(self, name=None, lun_id=None):
+        return self.vnx.get_lun(name=name, lun_id=lun_id)
+
+    def get_lun_id(self, volume):
+        """Retrieves the LUN ID of volume."""
+        if volume.provider_location:
+            return int(utils.extract_provider_location(
+                volume.provider_location, 'id'))
+        else:
+            # In some cases, Cinder does not update the volume info in the
+            # DB with the provider_location returned by the driver, so we
+            # need to retrieve the id from the array. For example, cinder
+            # backup-create doesn't use the provider_location returned from
+            # create_cloned_volume.
+            lun = self.get_lun(name=volume.name)
+            return lun.lun_id
+
+    def delete_lun(self, name, force=False):
+        """Deletes a LUN or mount point."""
+        lun = self.get_lun(name=name)
+        smp_attached_snap = (lun.attached_snapshot if lun.is_snap_mount_point
+                             else None)
+
+        try:
+            # Do not delete the snapshots of the lun.
+            lun.delete(force_detach=True, detach_from_sg=force)
+            if smp_attached_snap:
+                smp_attached_snap.delete()
+        except storops_ex.VNXLunNotFoundError as ex:
+            LOG.warning(_LW("LUN %(name)s is already deleted. "
+                            "Message: %(msg)s"),
+                        {'name': name, 'msg': ex.message})
+            # Ignore this failure; it is caused by a retry.
+
+    @cinder_utils.retry(const.VNXLunPreparingError, retries=1,
+                        backoff_rate=1)
+    def expand_lun(self, name, new_size, poll=True):
+
+        lun = self.get_lun(name=name)
+
+        try:
+            lun.poll = poll
+            lun.expand(new_size, ignore_thresholds=True)
+        except storops_ex.VNXLunExpandSizeError as ex:
+            LOG.warning(_LW("LUN %(name)s is already expanded. "
+                            "Message: %(msg)s."),
+                        {'name': name, 'msg': ex.message})
+
+        except storops_ex.VNXLunPreparingError as ex:
+            # The error means the operation cannot be performed because the
+            # LUN is 'Preparing'. Wait for a while so that the LUN may get
+            # out of the transitioning state.
+            with excutils.save_and_reraise_exception():
+                LOG.warning(_LW("LUN %(name)s is not ready for extension: "
+                                "%(msg)s"),
+                            {'name': name, 'msg': ex.message})
+
+                utils.wait_until(Condition.is_lun_ops_ready, lun=lun)
+
+        utils.wait_until(Condition.is_lun_expanded, lun=lun, new_size=new_size)
+
+    def modify_lun(self):
+        pass
+
+    @cinder_utils.retry(exceptions=const.VNXTargetNotReadyError,
+                        interval=15,
+                        retries=5, backoff_rate=1)
+    def migrate_lun(self, src_id, dst_id,
+                    rate=const.MIGRATION_RATE_HIGH):
+        src = self.vnx.get_lun(lun_id=src_id)
+        src.migrate(dst_id, rate)
+
+    def session_finished(self, src_lun):
+        session = self.vnx.get_migration_session(src_lun)
+        if not session.existed:
+            return True
+        elif session.current_state in ('FAULTED', 'STOPPED'):
+            LOG.warning(_LW('Session is in %s state; it needs to be '
+                            'handled manually.'),
+                        session.current_state)
+            return True
+        else:
+            return False
+
+    def verify_migration(self, src_id, dst_id, dst_wwn):
+        """Verify whether migration session finished successfully.
+
+        :param src_id:  source LUN id
+        :param dst_id:  destination LUN id
+        :param dst_wwn: destination LUN WWN
+        :returns Boolean: True or False
+        """
+        src_lun = self.vnx.get_lun(lun_id=src_id)
+
+        utils.wait_until(condition=self.session_finished,
+                         interval=common.INTERVAL_30_SEC,
+                         src_lun=src_lun)
+        new_lun = self.vnx.get_lun(lun_id=dst_id)
+        new_wwn = new_lun.wwn
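+        # After a successful VNX migration the destination LUN is consumed
+        # by the source LUN, so the LUN at dst_id either disappears or ends
+        # up with a different WWN; a missing or changed WWN means success.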
+        return not new_wwn or new_wwn != dst_wwn
+
+    def cleanup_migration(self, src_id, dst_id):
+        """Invoke when migration meets error.
+
+        :param src_id:  source LUN id
+        :param dst_id:  destination LUN id
+        """
+        # If the migration session still exists, we need to cancel it.
+        session = self.vnx.get_migration_session(src_id)
+        src_lun = self.vnx.get_lun(lun_id=src_id)
+        if session.existed:
+            LOG.warning(_LW('Cancelling migration session: '
+                            '%(src_id)s -> %(dst_id)s.'),
+                        {'src_id': src_id,
+                         'dst_id': dst_id})
+            src_lun.cancel_migrate()
+
+    def create_snapshot(self, lun_id, snap_name):
+        """Creates a snapshot."""
+
+        lun = self.get_lun(lun_id=lun_id)
+        try:
+            lun.create_snap(snap_name, allow_rw=True, auto_delete=False)
+        except storops_ex.VNXSnapNameInUseError as ex:
+            LOG.warning(_LW('Snapshot %(name)s already exists. '
+                            'Message: %(msg)s'),
+                        {'name': snap_name, 'msg': ex.message})
+
+    def delete_snapshot(self, snapshot_name):
+        """Deletes a snapshot."""
+
+        snap = self.vnx.get_snap(name=snapshot_name)
+        try:
+            snap.delete()
+        except storops_ex.VNXSnapNotExistsError as ex:
+            LOG.warning(_LW("Snapshot %(name)s may be deleted already. "
+                            "Message: %(msg)s"),
+                        {'name': snapshot_name, 'msg': ex.message})
+        except storops_ex.VNXDeleteAttachedSnapError as ex:
+            with excutils.save_and_reraise_exception():
+                LOG.warning(_LW("Failed to delete snapshot %(name)s "
+                                "which is in use. Message: %(msg)s"),
+                            {'name': snapshot_name, 'msg': ex.message})
+
+    def copy_snapshot(self, snap_name, new_snap_name):
+        snap = self.vnx.get_snap(name=snap_name)
+        snap.copy(new_name=new_snap_name)
+
+    def create_mount_point(self, lun_name, smp_name):
+        lun = self.vnx.get_lun(name=lun_name)
+        try:
+            return lun.create_mount_point(name=smp_name)
+        except storops_ex.VNXLunNameInUseError as ex:
+            LOG.warning(_LW('Mount point %(name)s already exists. '
+                            'Message: %(msg)s'),
+                        {'name': smp_name, 'msg': ex.message})
+            # Ignore the failure; it is caused by a retry.
+            return self.vnx.get_lun(name=smp_name)
+
+    def attach_snapshot(self, smp_name, snap_name):
+        lun = self.vnx.get_lun(name=smp_name)
+        try:
+            lun.attach_snap(snap=snap_name)
+        except storops_ex.VNXSnapAlreadyMountedError as ex:
+            LOG.warning(_LW("Snapshot %(snap_name)s is attached to "
+                            "snapshot mount point %(smp_name)s already. "
+                            "Message: %(msg)s"),
+                        {'snap_name': snap_name,
+                         'smp_name': smp_name,
+                         'msg': ex.message})
+
+    def detach_snapshot(self, smp_name):
+        lun = self.vnx.get_lun(name=smp_name)
+        try:
+            lun.detach_snap()
+        except storops_ex.VNXSnapNotAttachedError as ex:
+            LOG.warning(_LW("Snapshot mount point %(smp_name)s is not "
+                            "currently attached. Message: %(msg)s"),
+                        {'smp_name': smp_name, 'msg': ex.message})
+
+    def modify_snapshot(self, snap_name, allow_rw=None, auto_delete=None):
+        snap = self.vnx.get_snap(name=snap_name)
+        snap.modify(allow_rw=allow_rw, auto_delete=auto_delete)
+
+    def create_consistency_group(self, cg_name, lun_id_list=None):
+        try:
+            cg = self.vnx.create_cg(name=cg_name, members=lun_id_list)
+        except storops_ex.VNXConsistencyGroupNameInUseError:
+            cg = self.vnx.get_cg(name=cg_name)
+        # Wait until the CG is found on the VNX; otherwise a subsequent
+        # deletion would fail.
+        utils.wait_until(Condition.is_object_existed, vnx_obj=cg)
+        return cg
+
+    def delete_consistency_group(self, cg_name):
+        cg = self.vnx.get_cg(cg_name)
+        try:
+            cg.delete()
+        except storops_ex.VNXConsistencyGroupNotFoundError:
+            pass
+
+    def create_cg_snapshot(self, cg_snap_name, cg_name):
+        cg = self.vnx.get_cg(cg_name)
+        try:
+            snap = cg.create_snap(cg_snap_name, allow_rw=True)
+        except storops_ex.VNXSnapNameInUseError:
+            snap = self.vnx.get_snap(cg_snap_name)
+        utils.wait_until(Condition.is_object_existed,
+                         vnx_obj=snap)
+        return snap
+
+    def delete_cg_snapshot(self, cg_snap_name):
+        self.delete_snapshot(cg_snap_name)
+
+    def get_serial(self):
+        return self.vnx.serial
+
+    def get_pools(self):
+        return self.vnx.get_pool()
+
+    def get_pool(self, name):
+        return self.vnx.get_pool(name=name)
+
+    def get_iscsi_targets(self, sp=None, port_id=None, vport_id=None):
+        return self.vnx.get_iscsi_port(sp=sp, port_id=port_id,
+                                       vport_id=vport_id,
+                                       has_ip=True)
+
+    def get_fc_targets(self, sp=None, port_id=None):
+        return self.vnx.get_fc_port(sp=sp, port_id=port_id)
+
+    def get_enablers(self):
+        return self.vnx.get_ndu()
+
+    def is_fast_enabled(self):
+        return self.vnx.is_auto_tiering_enabled()
+
+    def is_compression_enabled(self):
+        return self.vnx.is_compression_enabled()
+
+    def is_dedup_enabled(self):
+        return self.vnx.is_dedup_enabled()
+
+    def is_fast_cache_enabled(self):
+        return self.vnx.is_fast_cache_enabled()
+
+    def is_thin_enabled(self):
+        return self.vnx.is_thin_enabled()
+
+    def is_snap_enabled(self):
+        return self.vnx.is_snap_enabled()
+
+    def is_mirror_view_enabled(self):
+        return self.vnx.is_mirror_view_sync_enabled()
+
+    def get_pool_feature(self):
+        return self.vnx.get_pool_feature()
+
+    def lun_has_snapshot(self, lun):
+        """Checks lun has snapshot.
+
+        :param lun: instance of VNXLun
+        """
+        snaps = lun.get_snap()
+        return len(snaps) != 0
+
+    def enable_compression(self, lun):
+        """Enables compression on lun.
+
+        :param lun: instance of VNXLun
+        """
+        try:
+            lun.enable_compression(ignore_thresholds=True)
+        except storops_ex.VNXCompressionAlreadyEnabledError:
+            LOG.warning(_LW("Compression has already been enabled on %s."),
+                        lun.name)
+
+    def get_vnx_enabler_status(self):
+        return common.VNXEnablerStatus(
+            dedup=self.is_dedup_enabled(),
+            compression=self.is_compression_enabled(),
+            thin=self.is_thin_enabled(),
+            fast=self.is_fast_enabled(),
+            snap=self.is_snap_enabled())
+
+    def create_storage_group(self, name):
+        try:
+            return self.vnx.create_sg(name)
+        except storops_ex.VNXStorageGroupNameInUseError as ex:
+            # Ignore the failure; it is caused by a retry.
+            LOG.warning(_LW('Storage group %(name)s already exists. '
+                            'Message: %(msg)s'),
+                        {'name': name, 'msg': ex.message})
+            return self.vnx.get_sg(name=name)
+
+    def get_storage_group(self, name):
+        return self.vnx.get_sg(name)
+
+    def register_initiator(self, storage_group, host, initiator_port_map):
+        """Registers the initiators of `host` to the `storage_group`.
+
+        :param storage_group: the storage group object.
+        :param host: the ip and name information of the initiator.
+        :param initiator_port_map: the dict specifying which initiators are
+                                   bound to which ports.
+        """
+        for (initiator_id, ports_to_bind) in initiator_port_map.items():
+            for port in ports_to_bind:
+                try:
+                    storage_group.connect_hba(port, initiator_id, host.name,
+                                              host_ip=host.ip)
+                except storops_ex.VNXStorageGroupError as ex:
+                    LOG.warning(_LW('Failed to set path to port %(port)s for '
+                                    'initiator %(hba_id)s. Message: %(msg)s'),
+                                {'port': port, 'hba_id': initiator_id,
+                                 'msg': ex.message})
+
+        if initiator_port_map:
+            utils.update_res_with_poll(storage_group)
+
+    def ping_node(self, port, ip_address):
+        iscsi_port = self.get_iscsi_targets(sp=port.sp,
+                                            port_id=port.port_id,
+                                            vport_id=port.vport_id)
+        try:
+            iscsi_port.ping_node(ip_address, count=1)
+            return True
+        except storops_ex.VNXPingNodeError:
+            return False
+
+    def add_lun_to_sg(self, storage_group, lun, max_retries):
+        """Adds the `lun` to `storage_group`."""
+        try:
+            return storage_group.attach_alu(lun, max_retries)
+        except storops_ex.VNXAluAlreadyAttachedError:
+            # Ignore the failure; it is caused by a retry.
+            return storage_group.get_hlu(lun)
+        except storops_ex.VNXNoHluAvailableError as ex:
+            with excutils.save_and_reraise_exception():
+                # Reach the max times of retry, fail the attach action.
+                LOG.error(_LE('Failed to add %(lun)s into %(sg)s after '
+                              '%(tried)s tries. Reach the max retry times. '
+                              'Message: %(msg)s'),
+                          {'lun': lun.lun_id, 'sg': storage_group.name,
+                           'tried': max_retries, 'msg': ex.message})
+
+    def get_wwn_of_online_fc_ports(self, ports):
+        """Returns wwns of online fc ports.
+
+        wwn of a certain port will not be included in the return list when it
+        is not present or down.
+        """
+        wwns = set()
+        ports_with_all_info = self.vnx.get_fc_port()
+        for po in ports:
+            online_list = list(
+                filter(lambda p: (p == po and p.link_status == 'Up'
+                                  and p.port_status == 'Online'),
+                       ports_with_all_info))
+
+            wwns.update([p.wwn for p in online_list])
+        return list(wwns)
+
+    def sg_has_lun_attached(self, sg):
+        return bool(sg.get_alu_hlu_map())
+
+    def deregister_initiators(self, initiators):
+        if not isinstance(initiators, list):
+            initiators = [initiators]
+        for initiator_uid in initiators:
+            self.vnx.remove_hba(initiator_uid)
+
+    def update_consistencygroup(self, cg, lun_ids_to_add, lun_ids_to_remove):
+        lun_ids_in_cg = (set(lun.lun_id for lun in cg.lun_list)
+                         if cg.lun_list else set())
+
+        # lun_ids_to_add and lun_ids_to_remove never overlap.
+        lun_ids_updated = ((lun_ids_in_cg | set(lun_ids_to_add)) -
+                           set(lun_ids_to_remove))
+
+        if lun_ids_updated:
+            cg.replace_member(*[self.get_lun(lun_id=lun_id)
+                                for lun_id in lun_ids_updated])
+        else:
+            # Need to remove all LUNs from cg. However, replace_member cannot
+            # handle empty list. So use delete_member.
+            cg.delete_member(*[self.get_lun(lun_id=lun_id)
+                               for lun_id in lun_ids_in_cg])
+
+    def get_cg(self, name):
+        return self.vnx.get_cg(name=name)
+
+    def get_available_ip(self):
+        return self.vnx.alive_sp_ip
+
+    def get_mirror(self, mirror_name):
+        return self.vnx.get_mirror_view(mirror_name)
+
+    def create_mirror(self, mirror_name, primary_lun_id):
+        src_lun = self.vnx.get_lun(lun_id=primary_lun_id)
+        try:
+            mv = self.vnx.create_mirror_view(mirror_name, src_lun)
+        except storops_ex.VNXMirrorNameInUseError:
+            mv = self.vnx.get_mirror_view(mirror_name)
+        return mv
+
+    def delete_mirror(self, mirror_name):
+        mv = self.vnx.get_mirror_view(mirror_name)
+        try:
+            mv.delete()
+        except storops_ex.VNXMirrorNotFoundError:
+            pass
+
+    def add_image(self, mirror_name, sp_ip, secondary_lun_id):
+        mv = self.vnx.get_mirror_view(mirror_name)
+        mv.add_image(sp_ip, secondary_lun_id)
+        # The secondary image info usually does not appear immediately,
+        # so poll to refresh it.
+        utils.update_res_with_poll(mv)
+        utils.wait_until(Condition.is_mirror_synced, mirror=mv)
+
+    def remove_image(self, mirror_name):
+        mv = self.vnx.get_mirror_view(mirror_name)
+        mv.remove_image()
+
+    def fracture_image(self, mirror_name):
+        mv = self.vnx.get_mirror_view(mirror_name)
+        mv.fracture_image()
+
+    def sync_image(self, mirror_name):
+        mv = self.vnx.get_mirror_view(mirror_name)
+        mv.sync_image()
+        utils.wait_until(Condition.is_mirror_synced, mirror=mv)
+
+    def promote_image(self, mirror_name):
+        mv = self.vnx.get_mirror_view(mirror_name)
+        mv.promote_image()
+
+    def get_pool_name(self, lun_name):
+        lun = self.get_lun(name=lun_name)
+        utils.update_res_without_poll(lun)
+        return lun.pool_name
diff --git a/cinder/volume/drivers/emc/vnx/common.py b/cinder/volume/drivers/emc/vnx/common.py
new file mode 100644
index 00000000000..81eb78f03b4
--- /dev/null
+++ b/cinder/volume/drivers/emc/vnx/common.py
@@ -0,0 +1,483 @@
+# Copyright (c) 2016 EMC Corporation, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+VNX Common Utils
+"""
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import importutils
+
+storops = importutils.try_import('storops')
+
+from cinder import exception
+from cinder.i18n import _, _LW
+from cinder.volume.drivers.emc.vnx import const
+from cinder.volume import volume_types
+
+CONF = cfg.CONF
+
+LOG = logging.getLogger(__name__)
+
+DEFAULT_TIMEOUT = 60 * 60 * 24 * 365
+
+INTERVAL_5_SEC = 5
+INTERVAL_20_SEC = 20
+INTERVAL_30_SEC = 30
+INTERVAL_60_SEC = 60
+
+EMC_VNX_OPTS = [
+    cfg.StrOpt('storage_vnx_authentication_type',
+               default='global',
+               help='VNX authentication scope type. '
+               'By default, the value is global.'),
+    cfg.StrOpt('storage_vnx_security_file_dir',
+               help='Directory path that contains the VNX security file. '
+               'Make sure the security file is generated first.'),
+    cfg.StrOpt('naviseccli_path',
+               help='Naviseccli Path.'),
+    cfg.ListOpt('storage_vnx_pool_names',
+                help='Comma-separated list of storage pool names to be used.'),
+    cfg.IntOpt('default_timeout',
+               default=DEFAULT_TIMEOUT,
+               help='Default timeout for CLI operations in minutes. '
+               'For example, LUN migration is a typical long-running '
+               'operation whose duration depends on the LUN size and '
+               'the load of the array. '
+               'An upper bound suited to the specific deployment can be '
+               'set to avoid unnecessarily long waits. '
+               'By default, it is 365 days long.'),
+    cfg.IntOpt('max_luns_per_storage_group',
+               default=255,
+               help='Default max number of LUNs in a storage group.'
+               ' By default, the value is 255.'),
+    cfg.BoolOpt('destroy_empty_storage_group',
+                default=False,
+                help='To destroy storage group '
+                'when the last LUN is removed from it. '
+                'By default, the value is False.'),
+    # iscsi_initiators is a dict whose keys are strings and whose values are
+    # lists. This could be a DictOpt, but unfortunately DictOpt doesn't
+    # support list values.
+    cfg.StrOpt('iscsi_initiators',
+               help='Mapping between hostname and '
+               'its iSCSI initiator IP addresses.'),
+    cfg.ListOpt('io_port_list',
+                help='Comma separated iSCSI or FC ports '
+                'to be used in Nova or Cinder.'),
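+    # A sample value (hypothetical ports): io_port_list = a-1,B-3,a-3-0
+    # ('sp-port' addresses an FC port; 'sp-port-vport' addresses an iSCSI
+    # port, matching _build_port_str in the adapters).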
+    cfg.BoolOpt('initiator_auto_registration',
+                default=False,
+                help='Automatically register initiators. '
+                'By default, the value is False.'),
+    cfg.BoolOpt('initiator_auto_deregistration',
+                default=False,
+                help='Automatically deregister initiators after the related '
+                'storage group is destroyed. '
+                'By default, the value is False.'),
+    cfg.BoolOpt('check_max_pool_luns_threshold',
+                default=False,
+                help='Report free_capacity_gb as 0 when the limit to '
+                'maximum number of pool LUNs is reached. '
+                'By default, the value is False.'),
+    cfg.BoolOpt('force_delete_lun_in_storagegroup',
+                default=False,
+                help='Delete a LUN even if it is in Storage Groups. '
+                'By default, the value is False.'),
+    cfg.BoolOpt('ignore_pool_full_threshold',
+                default=False,
+                help='Force LUN creation even if '
+                'the full threshold of pool is reached. '
+                'By default, the value is False.')
+]
+
+CONF.register_opts(EMC_VNX_OPTS)
+
+
+PROTOCOL_FC = 'fc'
+PROTOCOL_ISCSI = 'iscsi'
+
+
+class ExtraSpecs(object):
+    _provision_key = 'provisioning:type'
+    _tier_key = 'storagetype:tiering'
+    _replication_key = 'replication_enabled'
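+    # A sample volume-type extra-specs dict (hypothetical values):
+    # {'provisioning:type': 'thin', 'storagetype:tiering': 'Auto',
+    #  'replication_enabled': '<is> True'}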
+
+    PROVISION_DEFAULT = const.PROVISION_THICK
+    TIER_DEFAULT = None
+
+    def __init__(self, extra_specs):
+        self.specs = extra_specs
+        self._provision = self._get_provision()
+        self.provision = self._provision
+        self._tier = self._get_tier()
+        self.tier = self._tier
+        self.apply_default_values()
+
+    def apply_default_values(self):
+        self.provision = (ExtraSpecs.PROVISION_DEFAULT
+                          if self.provision is None
+                          else self.provision)
+        # The tier cannot be set when provisioning is deduped, so don't
+        # apply the tier default in that case.
+        if self.provision != storops.VNXProvisionEnum.DEDUPED:
+            self.tier = (ExtraSpecs.TIER_DEFAULT if self.tier is None
+                         else self.tier)
+
+    @classmethod
+    def set_defaults(cls, provision_default, tier_default):
+        cls.PROVISION_DEFAULT = provision_default
+        cls.TIER_DEFAULT = tier_default
+
+    def _get_provision(self):
+        value = self._parse_to_enum(self._provision_key,
+                                    storops.VNXProvisionEnum)
+        return value
+
+    def _get_tier(self):
+        return self._parse_to_enum(self._tier_key, storops.VNXTieringEnum)
+
+    @property
+    def is_replication_enabled(self):
+        value = self.specs.get(self._replication_key, '')
+        return value.lower() == '<is> true'
+
+    def _parse_to_enum(self, key, enum_class):
+        value = self.specs.get(key)
+        if value is not None:
+            try:
+                value = enum_class.parse(value)
+            except ValueError:
+                reason = (_("The value %(value)s for key %(key)s in extra "
+                            "specs is invalid.")
+                          % {'key': key, 'value': value})
+                raise exception.InvalidVolumeType(reason=reason)
+        return value
+
+    @classmethod
+    def from_volume(cls, volume):
+        specs = {}
+        type_id = volume['volume_type_id']
+        if type_id is not None:
+            specs = volume_types.get_volume_type_extra_specs(type_id)
+
+        return cls(specs)
+
+    @classmethod
+    def from_volume_type(cls, volume_type):
+        return cls(volume_type['extra_specs'])
+
+    @classmethod
+    def from_lun(cls, lun):
+        ex = cls({})
+        ex.provision = lun.provision
+        ex.tier = (lun.tier
+                   if lun.provision != storops.VNXProvisionEnum.DEDUPED
+                   else None)
+        return ex
+
+    def match_with_lun(self, lun):
+        ex = ExtraSpecs.from_lun(lun)
+        return (self.provision == ex.provision and
+                self.tier == ex.tier)
+
+    def validate(self, enabler_status):
+        """Checks whether the extra specs are valid.
+
+        :param enabler_status: Instance of VNXEnablerStatus
+        """
+        if "storagetype:pool" in self.specs:
+            LOG.warning(_LW("Extra spec key 'storagetype:pool' is obsoleted "
+                            "since driver version 5.1.0. This key will be "
+                            "ignored."))
+
+        if (self._provision == storops.VNXProvisionEnum.DEDUPED and
+                self._tier is not None):
+            msg = _("Can not set tiering policy for a deduplicated volume. "
+                    "Set the tiering policy on the pool where the "
+                    "deduplicated volume locates.")
+            raise exception.InvalidVolumeType(reason=msg)
+
+        if (self._provision == storops.VNXProvisionEnum.COMPRESSED
+                and not enabler_status.compression_enabled):
+            msg = _("Compression Enabler is not installed. "
+                    "Can not create compressed volume.")
+            raise exception.InvalidVolumeType(reason=msg)
+
+        if (self._provision == storops.VNXProvisionEnum.DEDUPED
+                and not enabler_status.dedup_enabled):
+            msg = _("Deduplication Enabler is not installed. "
+                    "Can not create deduplicated volume.")
+            raise exception.InvalidVolumeType(reason=msg)
+
+        if (self._provision in [storops.VNXProvisionEnum.THIN,
+                                storops.VNXProvisionEnum.COMPRESSED,
+                                storops.VNXProvisionEnum.DEDUPED]
+                and not enabler_status.thin_enabled):
+            msg = _("ThinProvisioning Enabler is not installed. "
+                    "Can not create thin volume.")
+            raise exception.InvalidVolumeType(reason=msg)
+
+        if (self._tier is not None
+                and not enabler_status.fast_enabled):
+            msg = _("FAST VP Enabler is not installed. "
+                    "Can not set tiering policy for the volume.")
+            raise exception.InvalidVolumeType(reason=msg)
+        return True
+
+    def __len__(self):
+        return len(self.specs)
+
+    def __getitem__(self, key):
+        return self.specs[key]
+
+    def __iter__(self):
+        return iter(self.specs)
+
+    def __contains__(self, item):
+        return item in self.specs
+
+    def __eq__(self, other):
+        if isinstance(other, ExtraSpecs):
+            return self.specs == other.specs
+        elif isinstance(other, dict):
+            return self.specs == other
+        else:
+            return False
+
+    def __hash__(self):
+        return self.specs.__hash__()
+
+
+class LUNState(object):
+    INITIALIZING = 'Initializing'
+    READY = 'Ready'
+    FAULTED = 'Faulted'
+
+
+class PoolState(object):
+    INITIALIZING = 'Initializing'
+    OFFLINE = 'Offline'
+    DELETING = 'Deleting'
+    VALID_CREATE_LUN_STATE = (INITIALIZING, OFFLINE, DELETING)
+
+
+class VNXEnablerStatus(object):
+
+    def __init__(self,
+                 dedup=False,
+                 compression=False,
+                 fast=False,
+                 thin=False,
+                 snap=False):
+        self.dedup_enabled = dedup
+        self.compression_enabled = compression
+        self.fast_enabled = fast
+        self.thin_enabled = thin
+        self.snap_enabled = snap
+
+
+class WaitUtilTimeoutException(exception.VolumeDriverException):
+    """Raised when timeout occurs in wait_until."""
+    # TODO(Ryan) put this exception under Cinder shared module.
+    pass
+
+
+class Host(object):
+    """The model of a host which acts as an initiator to access the storage."""
+
+    def __init__(self, name, initiators, ip=None, wwpns=None):
+        # ip and wwpns are optional.
+        self.name = name
+        if not self.name:
+            raise ValueError(_('Name of host cannot be empty.'))
+        self.initiators = initiators
+        if not self.initiators:
+            raise ValueError(_('Initiators of host cannot be empty.'))
+        self.ip = ip
+        self.wwpns = wwpns
+
+
+class Volume(object):
+    """The internal volume which is used to pass in method call."""
+
+    def __init__(self, name, id, vnx_lun_id=None):
+        self.name = name
+        self.id = id
+        self.vnx_lun_id = vnx_lun_id
+
+
+class ISCSITargetData(dict):
+    def __init__(self, volume_id, is_discovered, iqn='unknown', iqns=None,
+                 portal='unknown', portals=None, lun='unknown', luns=None):
+        data = {'volume_id': volume_id, 'target_discovered': is_discovered,
+                'target_iqn': iqn, 'target_iqns': iqns,
+                'target_portal': portal, 'target_portals': portals,
+                'target_lun': lun, 'target_luns': luns}
+        self['driver_volume_type'] = 'iscsi'
+        self['data'] = data
+
+    def to_dict(self):
+        """Converts to the dict.
+
+        It helps serialize and deserialize the data before returning it to
+        Nova.
+        """
+        return {key: value for (key, value) in self.items()}
+
+
+class FCTargetData(dict):
+    def __init__(self, volume_id, is_discovered, wwn=None, lun=None,
+                 initiator_target_map=None):
+        data = {'volume_id': volume_id, 'target_discovered': is_discovered,
+                'target_lun': lun, 'target_wwn': wwn,
+                'initiator_target_map': initiator_target_map}
+        self['driver_volume_type'] = 'fibre_channel'
+        self['data'] = data
+
+    def to_dict(self):
+        """Converts to the dict.
+
+        It helps serialize and deserialize the data before returning it to
+        Nova.
+        """
+        return {key: value for (key, value) in self.items()}
+
+
+class ReplicationDevice(object):
+    def __init__(self, replication_device):
+        self.replication_device = replication_device
+
+    @property
+    def backend_id(self):
+        return self.replication_device['backend_id']
+
+    @property
+    def san_ip(self):
+        return self.replication_device['san_ip']
+
+    @property
+    def san_login(self):
+        return self.replication_device['san_login']
+
+    @property
+    def san_password(self):
+        return self.replication_device['san_password']
+
+    @property
+    def storage_vnx_authentication_type(self):
+        return self.replication_device['storage_vnx_authentication_type']
+
+    @property
+    def storage_vnx_security_file_dir(self):
+        return self.replication_device['storage_vnx_security_file_dir']
+
+
+class ReplicationDeviceList(list):
+    """Replication devices configured in cinder.conf
+
+    Cinder supports multiple replication_device,  while VNX driver
+    only support one replication_device for now.
+    """
+
+    def __init__(self, configuration):
+        self.list = []
+        self.configuration = configuration
+        self._device_map = dict()
+        self.parse_configuration()
+
+    def parse_configuration(self):
+        if self.configuration.replication_device:
+            for replication_device in self.configuration.replication_device:
+                rd = ReplicationDevice(replication_device)
+                self._device_map[rd.backend_id] = rd
+                self.list.append(rd)
+        return self._device_map
+
+    def get_device(self, backend_id):
+        try:
+            device = self._device_map[backend_id]
+        except KeyError:
+            device = None
+            LOG.warning(_LW('Unable to find secondary device named: %s'),
+                        backend_id)
+        return device
+
+    @property
+    def devices(self):
+        return self._device_map.values()
+
+    def __len__(self):
+        return len(self.list)
+
+    def __iter__(self):
+        return iter(self.list)
+
+    def __getitem__(self, item):
+        return self.list[item]
+
+
+class VNXMirrorView(object):
+    def __init__(self, primary_client, secondary_client):
+        self.primary_client = primary_client
+        self.secondary_client = secondary_client
+
+    def create_mirror(self, name, primary_lun_id):
+        self.primary_client.create_mirror(name, primary_lun_id)
+
+    def create_secondary_lun(self, pool_name, lun_name, size, provision,
+                             tier):
+        return self.secondary_client.create_lun(
+            pool_name, lun_name, size, provision, tier)
+
+    def delete_secondary_lun(self, lun_name):
+        self.secondary_client.delete_lun(lun_name)
+
+    def delete_mirror(self, mirror_name):
+        self.primary_client.delete_mirror(mirror_name)
+
+    def add_image(self, mirror_name, secondary_lun_id):
+        sp_ip = self.secondary_client.get_available_ip()
+        self.primary_client.add_image(mirror_name, sp_ip, secondary_lun_id)
+
+    def remove_image(self, mirror_name):
+        self.primary_client.remove_image(mirror_name)
+
+    def fracture_image(self, mirror_name):
+        self.primary_client.fracture_image(mirror_name)
+
+    def promote_image(self, mirror_name):
+        self.secondary_client.promote_image(mirror_name)
+
+    def destroy_mirror(self, mirror_name, secondary_lun_name):
+        """Destroy the mirror view's related VNX objects.
+
+        NOTE: primary lun will not be deleted here.
+        :param mirror_name: name of mirror to be destroyed
+        :param secondary_lun_name: name of LUN name
+        """
+        mv = self.primary_client.get_mirror(mirror_name)
+        if not mv.existed:
+            # Skip the mirror operations if the mirror no longer exists.
+            LOG.warning(_LW('Mirror view %s was deleted already.'),
+                        mirror_name)
+            return
+        self.fracture_image(mirror_name)
+        self.remove_image(mirror_name)
+        self.delete_mirror(mirror_name)
+        self.delete_secondary_lun(lun_name=secondary_lun_name)
diff --git a/cinder/volume/drivers/emc/vnx/const.py b/cinder/volume/drivers/emc/vnx/const.py
new file mode 100644
index 00000000000..b17295a01ac
--- /dev/null
+++ b/cinder/volume/drivers/emc/vnx/const.py
@@ -0,0 +1,39 @@
+# Copyright (c) 2016 EMC Corporation, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+VNX Constants
+
+This module includes re-declaration from storops which directly used
+by driver in module scope. That's to say:
+If a constant from storops is used in class level, function signature,
+module level, a re-declaration is needed in this file to avoid some static
+import error when storops is not installed.
+"""
+
+from oslo_utils import importutils
+
+storops = importutils.try_import('storops')
+
+if storops:
+    from storops import exception as storops_ex
+    VNXLunPreparingError = storops_ex.VNXLunPreparingError
+    VNXTargetNotReadyError = storops_ex.VNXTargetNotReadyError
+    MIGRATION_RATE_HIGH = storops.VNXMigrationRate.HIGH
+    PROVISION_THICK = storops.VNXProvisionEnum.THICK
+else:
+    VNXLunPreparingError = None
+    MIGRATION_RATE_HIGH = None
+    PROVISION_THICK = None
+    VNXTargetNotReadyError = None
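+
+# Usage note: thanks to the fallbacks above, these names can safely appear
+# in module-scope contexts even when storops is absent; for example,
+# taskflows.run_migration_taskflow() uses const.MIGRATION_RATE_HIGH as a
+# default argument value. The None placeholders are only ever evaluated
+# when storops is missing, in which case the driver cannot run anyway.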
diff --git a/cinder/volume/drivers/emc/emc_cli_fc.py b/cinder/volume/drivers/emc/vnx/driver.py
similarity index 63%
rename from cinder/volume/drivers/emc/emc_cli_fc.py
rename to cinder/volume/drivers/emc/vnx/driver.py
index 00236e85530..d9193497d1c 100644
--- a/cinder/volume/drivers/emc/emc_cli_fc.py
+++ b/cinder/volume/drivers/emc/vnx/driver.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2012 - 2015 EMC Corporation, Inc.
+# Copyright (c) 2016 EMC Corporation, Inc.
 # All Rights Reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -12,13 +12,15 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
-"""Fibre Channel Driver for EMC VNX array based on CLI."""
+"""Cinder Driver for EMC VNX based on CLI."""
 
 from oslo_log import log as logging
 
 from cinder import interface
 from cinder.volume import driver
-from cinder.volume.drivers.emc import emc_vnx_cli
+from cinder.volume.drivers.emc.vnx import adapter
+from cinder.volume.drivers.emc.vnx import common
+from cinder.volume.drivers.emc.vnx import utils
 from cinder.zonemanager import utils as zm_utils
 
 
@@ -26,13 +28,17 @@ LOG = logging.getLogger(__name__)
 
 
 @interface.volumedriver
-class EMCCLIFCDriver(driver.FibreChannelDriver):
-    """EMC FC Driver for VNX using CLI.
+class EMCVNXDriver(driver.TransferVD,
+                   driver.ManageableVD,
+                   driver.ExtendVD,
+                   driver.SnapshotVD,
+                   driver.ManageableSnapshotsVD,
+                   driver.MigrateVD,
+                   driver.ConsistencyGroupVD,
+                   driver.BaseVD):
+    """EMC Cinder Driver for VNX using CLI.
 
     Version history:
-
-    .. code-block:: none
-
         1.0.0 - Initial driver
         2.0.0 - Thick/thin provisioning, robust enhancement
         3.0.0 - Array-based Backend Support, FC Basic Support,
@@ -66,54 +72,63 @@ class EMCCLIFCDriver(driver.FibreChannelDriver):
         7.0.0 - Clone consistency group support
                 Replication v2 support(managed)
                 Configurable migration rate support
+        8.0.0 - New VNX Cinder driver
     """
 
     def __init__(self, *args, **kwargs):
-        super(EMCCLIFCDriver, self).__init__(*args, **kwargs)
-        self.cli = emc_vnx_cli.getEMCVnxCli(
-            'FC',
-            configuration=self.configuration,
-            active_backend_id=kwargs.get('active_backend_id'))
-        self.VERSION = self.cli.VERSION
+        super(EMCVNXDriver, self).__init__(*args, **kwargs)
+        utils.init_ops(self.configuration)
+        self.protocol = self.configuration.storage_protocol.lower()
+        self.active_backend_id = kwargs.get('active_backend_id', None)
+        self.adapter = None
+
+    def do_setup(self, context):
+        if self.protocol == common.PROTOCOL_FC:
+            self.adapter = adapter.FCAdapter(self.configuration,
+                                             self.active_backend_id)
+        else:
+            self.adapter = adapter.ISCSIAdapter(self.configuration,
+                                                self.active_backend_id)
+        self.adapter.do_setup()
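+
+    # Example backend section in cinder.conf (illustrative values):
+    #     [vnx_backend]
+    #     volume_driver = cinder.volume.drivers.emc.vnx.driver.EMCVNXDriver
+    #     storage_protocol = fc  # or iscsi
+    #     san_ip = 192.168.1.1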
 
     def check_for_setup_error(self):
         pass
 
     def create_volume(self, volume):
         """Creates a volume."""
-        return self.cli.create_volume(volume)
+        return self.adapter.create_volume(volume)
 
     def create_volume_from_snapshot(self, volume, snapshot):
         """Creates a volume from a snapshot."""
-        return self.cli.create_volume_from_snapshot(volume, snapshot)
+        return self.adapter.create_volume_from_snapshot(volume, snapshot)
 
     def create_cloned_volume(self, volume, src_vref):
         """Creates a cloned volume."""
-        return self.cli.create_cloned_volume(volume, src_vref)
+        return self.adapter.create_cloned_volume(volume, src_vref)
 
     def extend_volume(self, volume, new_size):
         """Extend a volume."""
-        self.cli.extend_volume(volume, new_size)
+        self.adapter.extend_volume(volume, new_size)
 
     def delete_volume(self, volume):
         """Deletes a volume."""
-        self.cli.delete_volume(volume)
+        self.adapter.delete_volume(volume)
 
     def migrate_volume(self, ctxt, volume, host):
         """Migrate volume via EMC migration functionality."""
-        return self.cli.migrate_volume(ctxt, volume, host)
+        return self.adapter.migrate_volume(ctxt, volume, host)
 
     def retype(self, ctxt, volume, new_type, diff, host):
         """Convert the volume to be of the new type."""
-        return self.cli.retype(ctxt, volume, new_type, diff, host)
+        return self.adapter.retype(ctxt, volume, new_type, diff, host)
 
     def create_snapshot(self, snapshot):
         """Creates a snapshot."""
-        self.cli.create_snapshot(snapshot)
+        self.adapter.create_snapshot(snapshot)
 
     def delete_snapshot(self, snapshot):
         """Deletes a snapshot."""
-        self.cli.delete_snapshot(snapshot)
+        self.adapter.delete_snapshot(snapshot)
 
     def ensure_export(self, context, volume):
         """Driver entry point to get the export info for an existing volume."""
@@ -144,22 +159,8 @@ class EMCCLIFCDriver(driver.FibreChannelDriver):
         The initiator_target_map is a map that represents the remote wwn(s)
         and a list of wwns which are visible to the remote wwn(s).
         Example return values:
-
+        FC:
             {
-                'driver_volume_type': 'fibre_channel'
-                'data': {
-                    'target_discovered': True,
-                    'target_lun': 1,
-                    'target_wwn': '1234567890123',
-                    'initiator_target_map': {
-                        '1122334455667788': ['1234567890123']
-                    }
-                }
-            }
-
-            or
-
-             {
                 'driver_volume_type': 'fibre_channel'
                 'data': {
                     'target_discovered': True,
@@ -171,28 +172,44 @@ class EMCCLIFCDriver(driver.FibreChannelDriver):
                     }
                 }
             }
-
+        iSCSI:
+            {
+                'driver_volume_type': 'iscsi'
+                'data': {
+                    'target_discovered': True,
+                    'target_iqns': ['iqn.2010-10.org.openstack:volume-00001',
+                                    'iqn.2010-10.org.openstack:volume-00002'],
+                    'target_portals': ['127.0.0.1:3260', '127.0.1.1:3260'],
+                    'target_luns': [1, 1],
+                }
+            }
         """
-        conn_info = self.cli.initialize_connection(volume,
-                                                   connector)
+        LOG.debug("Entering initialize_connection"
+                  " - connector: %(connector)s.",
+                  {'connector': connector})
+        conn_info = self.adapter.initialize_connection(volume,
+                                                       connector)
         LOG.debug("Exit initialize_connection"
-                  " - Returning FC connection info: %(conn_info)s.",
+                  " - Returning connection info: %(conn_info)s.",
                   {'conn_info': conn_info})
         return conn_info
 
     @zm_utils.RemoveFCZone
     def terminate_connection(self, volume, connector, **kwargs):
         """Disallow connection from connector."""
-        conn_info = self.cli.terminate_connection(volume, connector)
+        LOG.debug("Entering terminate_connection"
+                  " - connector: %(connector)s.",
+                  {'connector': connector})
+        conn_info = self.adapter.terminate_connection(volume, connector)
         LOG.debug("Exit terminate_connection"
-                  " - Returning FC connection info: %(conn_info)s.",
+                  " - Returning connection info: %(conn_info)s.",
                   {'conn_info': conn_info})
         return conn_info
 
     def get_volume_stats(self, refresh=False):
         """Get volume stats.
 
-        If 'refresh' is True, run update the stats first.
+        :param refresh: True to get updated data
         """
         if refresh:
             self.update_volume_stats()
@@ -202,11 +219,7 @@ class EMCCLIFCDriver(driver.FibreChannelDriver):
     def update_volume_stats(self):
         """Retrieve stats info from volume group."""
         LOG.debug("Updating volume stats.")
-        data = self.cli.update_volume_stats()
-        backend_name = self.configuration.safe_get('volume_backend_name')
-        data['volume_backend_name'] = backend_name or 'EMCCLIFCDriver'
-        data['storage_protocol'] = 'FC'
-        self._stats = data
+        self._stats = self.adapter.update_volume_stats()
 
     def manage_existing(self, volume, existing_ref):
         """Manage an existing lun in the array.
@@ -217,101 +230,96 @@ class EMCCLIFCDriver(driver.FibreChannelDriver):
         volume['name'] which is how drivers traditionally map between a
         cinder volume and the associated backend storage object.
 
-        .. code-block:: none
-
-            manage_existing_ref:{
-                'source-id':<lun id in VNX>
-            }
-
-            or
-
-            manage_existing_ref:{
-                'source-name':<lun name in VNX>
-            }
-
+        manage_existing_ref:{
+            'source-id':<lun id in VNX>
+        }
+        or
+        manage_existing_ref:{
+            'source-name':<lun name in VNX>
+        }
         """
-        return self.cli.manage_existing(volume, existing_ref)
+        return self.adapter.manage_existing(volume, existing_ref)
 
     def manage_existing_get_size(self, volume, existing_ref):
         """Return size of volume to be managed by manage_existing."""
-        return self.cli.manage_existing_get_size(volume, existing_ref)
+        return self.adapter.manage_existing_get_size(volume, existing_ref)
 
     def create_consistencygroup(self, context, group):
         """Creates a consistencygroup."""
-        return self.cli.create_consistencygroup(context, group)
+        return self.adapter.create_consistencygroup(context, group)
 
     def delete_consistencygroup(self, context, group, volumes):
         """Deletes a consistency group."""
-        return self.cli.delete_consistencygroup(
+        return self.adapter.delete_consistencygroup(
             context, group, volumes)
 
     def create_cgsnapshot(self, context, cgsnapshot, snapshots):
         """Creates a cgsnapshot."""
-        return self.cli.create_cgsnapshot(
+        return self.adapter.create_cgsnapshot(
             context, cgsnapshot, snapshots)
 
     def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
         """Deletes a cgsnapshot."""
-        return self.cli.delete_cgsnapshot(
+        return self.adapter.delete_cgsnapshot(
             context, cgsnapshot, snapshots)
 
     def get_pool(self, volume):
         """Returns the pool name of a volume."""
-        return self.cli.get_pool(volume)
+        return self.adapter.get_pool_name(volume)
 
     def update_consistencygroup(self, context, group,
                                 add_volumes,
                                 remove_volumes):
         """Updates LUNs in consistency group."""
-        return self.cli.update_consistencygroup(context, group,
-                                                add_volumes,
-                                                remove_volumes)
+        return self.adapter.update_consistencygroup(context, group,
+                                                    add_volumes,
+                                                    remove_volumes)
 
     def unmanage(self, volume):
         """Unmanages a volume."""
-        return self.cli.unmanage(volume)
+        return self.adapter.unmanage(volume)
 
     def create_consistencygroup_from_src(self, context, group, volumes,
                                          cgsnapshot=None, snapshots=None,
                                          source_cg=None, source_vols=None):
         """Creates a consistency group from source."""
-        return self.cli.create_consistencygroup_from_src(context,
-                                                         group,
-                                                         volumes,
-                                                         cgsnapshot,
-                                                         snapshots,
-                                                         source_cg,
-                                                         source_vols)
+        if cgsnapshot:
+            return self.adapter.create_cg_from_cgsnapshot(
+                context, group, volumes, cgsnapshot, snapshots)
+        elif source_cg:
+            return self.adapter.create_cloned_cg(
+                context, group, volumes, source_cg, source_vols)
 
     def update_migrated_volume(self, context, volume, new_volume,
                                original_volume_status=None):
         """Returns model update for migrated volume."""
-        return self.cli.update_migrated_volume(context, volume, new_volume,
-                                               original_volume_status)
+        return self.adapter.update_migrated_volume(context, volume, new_volume,
+                                                   original_volume_status)
 
     def create_export_snapshot(self, context, snapshot, connector):
         """Creates a snapshot mount point for snapshot."""
-        return self.cli.create_export_snapshot(context, snapshot, connector)
+        return self.adapter.create_export_snapshot(
+            context, snapshot, connector)
 
     def remove_export_snapshot(self, context, snapshot):
         """Removes snapshot mount point for snapshot."""
-        return self.cli.remove_export_snapshot(context, snapshot)
+        return self.adapter.remove_export_snapshot(context, snapshot)
 
     def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
         """Allows connection to snapshot."""
-        return self.cli.initialize_connection_snapshot(snapshot,
-                                                       connector,
-                                                       **kwargs)
+        return self.adapter.initialize_connection_snapshot(snapshot,
+                                                           connector,
+                                                           **kwargs)
 
     def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
         """Disallows connection to snapshot."""
-        return self.cli.terminate_connection_snapshot(snapshot,
-                                                      connector,
-                                                      **kwargs)
+        return self.adapter.terminate_connection_snapshot(snapshot,
+                                                          connector,
+                                                          **kwargs)
 
     def backup_use_temp_snapshot(self):
         return True
 
     def failover_host(self, context, volumes, secondary_id=None):
-        """Failovers volume from primary device to secondary."""
-        return self.cli.failover_host(context, volumes, secondary_id)
+        """Fail-overs volumes from primary device to secondary."""
+        return self.adapter.failover_host(context, volumes, secondary_id)
diff --git a/cinder/volume/drivers/emc/vnx/taskflows.py b/cinder/volume/drivers/emc/vnx/taskflows.py
new file mode 100644
index 00000000000..15b061616d3
--- /dev/null
+++ b/cinder/volume/drivers/emc/vnx/taskflows.py
@@ -0,0 +1,579 @@
+# Copyright (c) 2016 EMC Corporation, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_log import log as logging
+from oslo_utils import importutils
+
+storops = importutils.try_import('storops')
+
+import taskflow.engines
+from taskflow.patterns import linear_flow
+from taskflow import task
+from taskflow.types import failure
+
+from cinder import exception
+from cinder.i18n import _, _LI, _LW
+from cinder.volume.drivers.emc.vnx import const
+from cinder.volume.drivers.emc.vnx import utils
+
+LOG = logging.getLogger(__name__)
+
+
+class MigrateLunTask(task.Task):
+    """Starts a migration between two LUNs/SMPs.
+
+    Reversion strategy: Cleanup the migration session
+    """
+    def __init__(self, name=None, provides=None, inject=None,
+                 rebind=None, wait_for_completion=True):
+        super(MigrateLunTask, self).__init__(name=name,
+                                             provides=provides,
+                                             inject=inject,
+                                             rebind=rebind)
+        self.wait_for_completion = wait_for_completion
+
+    def execute(self, client, src_id, dst_id, *args, **kwargs):
+        LOG.debug('%s.execute', self.__class__.__name__)
+        dst_lun = client.get_lun(lun_id=dst_id)
+        dst_wwn = dst_lun.wwn
+        client.migrate_lun(src_id, dst_id)
+        if self.wait_for_completion:
+            migrated = client.verify_migration(src_id, dst_id, dst_wwn)
+            if not migrated:
+                msg = _("Failed to migrate volume between source vol %(src)s"
+                        " and dest vol %(dst)s.") % {
+                            'src': src_id, 'dst': dst_id}
+                LOG.error(msg)
+                raise exception.VolumeBackendAPIException(data=msg)
+
+    def revert(self, result, client, src_id, dst_id, *args, **kwargs):
+        method_name = '%s.revert' % self.__class__.__name__
+        LOG.warning(_LW('%(method)s: cleanup migration session: '
+                        '%(src_id)s -> %(dst_id)s.'),
+                    {'method': method_name,
+                     'src_id': src_id,
+                     'dst_id': dst_id})
+        client.cleanup_migration(src_id, dst_id)
+
+
+class CreateLunTask(task.Task):
+    """Creates a new lun task.
+
+    Reversion strategy: Delete the lun.
+    """
+    def __init__(self, name=None, provides=('new_lun_id', 'new_lun_wwn'),
+                 inject=None):
+        super(CreateLunTask, self).__init__(name=name,
+                                            provides=provides,
+                                            inject=inject)
+        if provides and not isinstance(provides, tuple):
+            raise ValueError('Only tuple is allowed for [provides].')
+
+    def execute(self, client, pool_name, lun_name, lun_size,
+                provision, tier, ignore_thresholds=False,
+                *args, **kwargs):
+        LOG.debug('%s.execute', self.__class__.__name__)
+        lun = client.create_lun(pool=pool_name,
+                                name=lun_name,
+                                size=lun_size,
+                                provision=provision,
+                                tier=tier,
+                                ignore_thresholds=ignore_thresholds)
+        return lun.lun_id, lun.wwn
+
+    def revert(self, result, client, lun_name, *args, **kwargs):
+        method_name = '%s.revert' % self.__class__.__name__
+        if isinstance(result, failure.Failure):
+            return
+        else:
+            LOG.warning(_LW('%(method_name)s: delete lun %(lun_name)s'),
+                        {'method_name': method_name, 'lun_name': lun_name})
+            client.delete_lun(lun_name)
+
+
+class CopySnapshotTask(task.Task):
+    """Task to copy a volume snapshot/consistency group snapshot.
+
+    Reversion Strategy: Delete the copied snapshot/cgsnapshot
+    """
+    def execute(self, client, snap_name, new_snap_name,
+                *args, **kwargs):
+        LOG.debug('%s.execute', self.__class__.__name__)
+        client.copy_snapshot(snap_name,
+                             new_snap_name)
+
+    def revert(self, result, client, snap_name, new_snap_name,
+               *args, **kwargs):
+        method_name = '%s.revert' % self.__class__.__name__
+        LOG.warning(_LW('%(method_name)s: delete the '
+                        'copied snapshot %(new_name)s of '
+                        '%(source_name)s.'),
+                    {'method_name': method_name,
+                     'new_name': new_snap_name,
+                     'source_name': snap_name})
+        client.delete_snapshot(new_snap_name)
+
+
+class CreateSMPTask(task.Task):
+    """Creates a snap mount point (SMP) for the source snapshot.
+
+    Reversion strategy: Delete the SMP.
+    """
+    def __init__(self, name=None, provides='smp_id', inject=None):
+        super(CreateSMPTask, self).__init__(name=name,
+                                            provides=provides,
+                                            inject=inject)
+
+    def execute(self, client, smp_name, base_lun_name,
+                *args, **kwargs):
+        LOG.debug('%s.execute', self.__class__.__name__)
+
+        client.create_mount_point(base_lun_name, smp_name)
+        lun = client.get_lun(name=smp_name)
+        return lun.lun_id
+
+    def revert(self, result, client, smp_name, *args, **kwargs):
+        method_name = '%s.revert' % self.__class__.__name__
+        LOG.warning(_LW('%(method_name)s: delete mount point %(name)s'),
+                    {'method_name': method_name,
+                     'name': smp_name})
+        client.delete_lun(smp_name)
+
+
+class AttachSnapTask(task.Task):
+    """Attaches the snapshot to the SMP created before.
+
+    Reversion strategy: Detach the SMP.
+    """
+    def execute(self, client, smp_name, snap_name,
+                *args, **kwargs):
+        LOG.debug('%s.execute', self.__class__.__name__)
+        client.attach_snapshot(smp_name, snap_name)
+
+    def revert(self, result, client, smp_name, *args, **kwargs):
+        method_name = '%s.revert' % self.__class__.__name__
+        LOG.warning(_LW('%(method_name)s: detach mount point %(smp_name)s'),
+                    {'method_name': method_name,
+                     'smp_name': smp_name})
+        client.detach_snapshot(smp_name)
+
+
+class CreateSnapshotTask(task.Task):
+    """Creates a snapshot of a volume.
+
+    Reversion Strategy: Delete the created snapshot.
+    """
+    def execute(self, client, snap_name, lun_id, *args, **kwargs):
+        LOG.debug('%s.execute', self.__class__.__name__)
+        LOG.info(_LI('Create snapshot: %(snapshot)s: lun: %(lun)s'),
+                 {'snapshot': snap_name,
+                  'lun': lun_id})
+        client.create_snapshot(lun_id, snap_name)
+
+    def revert(self, result, client, snap_name, *args, **kwargs):
+        method_name = '%s.revert' % self.__class__.__name__
+        LOG.warning(_LW('%(method_name)s: '
+                        'delete temp snapshot %(snap_name)s'),
+                    {'method_name': method_name,
+                     'snap_name': snap_name})
+        client.delete_snapshot(snap_name)
+
+
+class AllowReadWriteTask(task.Task):
+    """Task to modify a Snapshot to allow ReadWrite on it."""
+    def execute(self, client, snap_name, *args, **kwargs):
+        LOG.debug('%s.execute', self.__class__.__name__)
+        client.modify_snapshot(snap_name, allow_rw=True)
+
+    def revert(self, result, client, snap_name, *args, **kwargs):
+        method_name = '%s.revert' % self.__class__.__name__
+        LOG.warning(_LW('%(method_name)s: '
+                        'setting snapshot %(snap_name)s to read-only.'),
+                    {'method_name': method_name,
+                     'snap_name': snap_name})
+        client.modify_snapshot(snap_name, allow_rw=False)
+
+
+class WaitMigrationsTask(task.Task):
+    """Task to wait migrations to be completed."""
+    def __init__(self, src_id_template, dst_id_template,
+                 dst_wwn_template, num_of_members, *args, **kwargs):
+        self.migrate_tuples = [
+            (src_id_template % x, dst_id_template % x, dst_wwn_template % x)
+            for x in range(num_of_members)]
+        src_id_keys = sorted(set(
+            [src_id_template % i for i in range(num_of_members)]))
+        dst_id_keys = sorted(set(
+            [dst_id_template % i for i in range(num_of_members)]))
+        dst_wwn_keys = sorted(set(
+            [dst_wwn_template % i for i in range(num_of_members)]))
+
+        super(WaitMigrationsTask, self).__init__(
+            requires=(src_id_keys + dst_id_keys + dst_wwn_keys),
+            *args, **kwargs)
+
+    def execute(self, client, *args, **kwargs):
+        LOG.debug('%s.execute', self.__class__.__name__)
+        for src_id_key, dst_id_key, dst_wwn_key in self.migrate_tuples:
+            src_id = kwargs[src_id_key]
+            dst_id = kwargs[dst_id_key]
+            dst_wwn = kwargs[dst_wwn_key]
+            migrated = client.verify_migration(src_id,
+                                               dst_id,
+                                               dst_wwn)
+            if not migrated:
+                msg = _("Failed to migrate volume %(src)s.") % {'src': src_id}
+                raise exception.VolumeBackendAPIException(data=msg)
+
+
+class CreateConsistencyGroupTask(task.Task):
+    """Task to create a consistency group."""
+    def __init__(self, lun_id_key_template, num_of_members,
+                 *args, **kwargs):
+        self.lun_id_keys = sorted(set(
+            [lun_id_key_template % i for i in range(num_of_members)]))
+        super(CreateConsistencyGroupTask, self).__init__(
+            requires=self.lun_id_keys, *args, **kwargs)
+
+    def execute(self, client, new_cg_name, *args, **kwargs):
+        LOG.debug('%s.execute', self.__class__.__name__)
+        lun_ids = [kwargs[key] for key in self.lun_id_keys]
+        client.create_consistency_group(new_cg_name,
+                                        lun_ids)
+
+
+class CreateCGSnapshotTask(task.Task):
+    """Task to create a CG snapshot."""
+    def __init__(self, provides='new_cg_snap_name', *args, **kwargs):
+        super(CreateCGSnapshotTask, self).__init__(
+            provides=provides, *args, **kwargs)
+
+    def execute(self, client, cg_snap_name, cg_name, *args, **kwargs):
+        LOG.debug('%s.execute', self.__class__.__name__)
+        return client.create_cg_snapshot(cg_snap_name, cg_name)
+
+    def revert(self, client, cg_snap_name, cg_name, *args, **kwargs):
+        method_name = '%s.revert' % self.__class__.__name__
+        LOG.warning(_LW('%(method_name)s: '
+                        'deleting CG snapshot %(snap_name)s.'),
+                    {'method_name': method_name,
+                     'snap_name': cg_snap_name})
+        client.delete_cg_snapshot(cg_snap_name)
+
+
+class CreateMirrorTask(task.Task):
+    """Creates a MirrorView with primary lun for replication.
+
+    Reversion strategy: Destroy the created MirrorView.
+    """
+    def execute(self, mirror, mirror_name, primary_lun_id,
+                *args, **kwargs):
+        LOG.debug('%s.execute', self.__class__.__name__)
+        mirror.create_mirror(mirror_name, primary_lun_id)
+
+    def revert(self, result, mirror, mirror_name,
+               *args, **kwargs):
+        method_name = '%s.revert' % self.__class__.__name__
+        LOG.warning(_LW('%(method)s: removing mirror '
+                        'view %(name)s.'),
+                    {'method': method_name,
+                     'name': mirror_name})
+        mirror.delete_mirror(mirror_name)
+
+
+class AddMirrorImageTask(task.Task):
+    """Add the secondary image to MirrorView.
+
+    Reversion strategy: Remove the secondary image.
+    """
+    def execute(self, mirror, mirror_name, secondary_lun_id,
+                *args, **kwargs):
+        LOG.debug('%s.execute', self.__class__.__name__)
+        mirror.add_image(mirror_name, secondary_lun_id)
+
+    def revert(self, result, mirror, mirror_name,
+               *args, **kwargs):
+        method_name = '%s.revert' % self.__class__.__name__
+        LOG.warning(_LW('%(method)s: removing secondary image '
+                        'from %(name)s.'),
+                    {'method': method_name,
+                     'name': mirror_name})
+        mirror.remove_image(mirror_name)
+
+
+def run_migration_taskflow(client,
+                           lun_id,
+                           lun_name,
+                           lun_size,
+                           pool_name,
+                           provision,
+                           tier,
+                           rate=const.MIGRATION_RATE_HIGH):
+    # Step 1: create the target LUN
+    # Step 2: start the migration session and wait for it to complete
+    tmp_lun_name = utils.construct_tmp_lun_name(lun_name)
+    flow_name = 'migrate_lun'
+    store_spec = {'client': client,
+                  'pool_name': pool_name,
+                  'lun_name': tmp_lun_name,
+                  'lun_size': lun_size,
+                  'provision': provision,
+                  'tier': tier,
+                  'ignore_thresholds': True,
+                  'src_id': lun_id,
+                  }
+    work_flow = linear_flow.Flow(flow_name)
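+    # 'new_lun_id', which CreateLunTask provides, is rebound to the 'dst_id'
+    # requirement of MigrateLunTask, so the store only needs 'src_id'.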
+    work_flow.add(CreateLunTask(),
+                  MigrateLunTask(rebind={'dst_id': 'new_lun_id'}))
+    engine = taskflow.engines.load(
+        work_flow, store=store_spec)
+    engine.run()
+
+
+def fast_create_volume_from_snapshot(client,
+                                     snap_name,
+                                     new_snap_name,
+                                     lun_name,
+                                     base_lun_name,
+                                     pool_name):
+    # Step 1: copy snapshot
+    # Step 2: allow read/write for snapshot
+    # Step 3: create smp LUN
+    # Step 4: attach the snapshot
+    flow_name = 'create_snapcopy_volume_from_snapshot'
+
+    store_spec = {'client': client,
+                  'snap_name': snap_name,
+                  'new_snap_name': new_snap_name,
+                  'pool_name': pool_name,
+                  'smp_name': lun_name,
+                  'base_lun_name': base_lun_name,
+                  'ignore_thresholds': True,
+                  }
+    work_flow = linear_flow.Flow(flow_name)
+    work_flow.add(CopySnapshotTask(),
+                  AllowReadWriteTask(rebind={'snap_name': 'new_snap_name'}),
+                  CreateSMPTask(),
+                  AttachSnapTask(rebind={'snap_name': 'new_snap_name'}))
+    engine = taskflow.engines.load(
+        work_flow, store=store_spec)
+    engine.run()
+    lun_id = engine.storage.fetch('smp_id')
+    return lun_id
+
+
+def create_volume_from_snapshot(client, snap_name, lun_name,
+                                lun_size, base_lun_name, pool_name,
+                                provision, tier):
+    # Step 1: create smp from base lun
+    # Step 2: attach snapshot to smp
+    # Step 3: Create new LUN
+    # Step 4: migrate the smp to new LUN
+    tmp_lun_name = '%s_dest' % lun_name
+    flow_name = 'create_volume_from_snapshot'
+    store_spec = {'client': client,
+                  'snap_name': snap_name,
+                  'smp_name': lun_name,
+                  'lun_name': tmp_lun_name,
+                  'lun_size': lun_size,
+                  'base_lun_name': base_lun_name,
+                  'pool_name': pool_name,
+                  'provision': provision,
+                  'tier': tier,
+                  }
+    work_flow = linear_flow.Flow(flow_name)
+    work_flow.add(CreateSMPTask(),
+                  AttachSnapTask(),
+                  CreateLunTask(),
+                  MigrateLunTask(
+                      rebind={'src_id': 'smp_id',
+                              'dst_id': 'new_lun_id'}))
+    engine = taskflow.engines.load(
+        work_flow, store=store_spec)
+    engine.run()
+    lun_id = engine.storage.fetch('smp_id')
+    return lun_id
+
+
+def fast_create_cloned_volume(client, snap_name, lun_id,
+                              lun_name, base_lun_name):
+    flow_name = 'create_cloned_snapcopy_volume'
+    store_spec = {
+        'client': client,
+        'snap_name': snap_name,
+        'lun_id': lun_id,
+        'smp_name': lun_name,
+        'base_lun_name': base_lun_name}
+    work_flow = linear_flow.Flow(flow_name)
+    work_flow.add(CreateSnapshotTask(),
+                  CreateSMPTask(),
+                  AttachSnapTask())
+    engine = taskflow.engines.load(work_flow, store=store_spec)
+    engine.run()
+    lun_id = engine.storage.fetch('smp_id')
+    return lun_id
+
+
+def create_cloned_volume(client, snap_name, lun_id, lun_name,
+                         lun_size, base_lun_name, pool_name,
+                         provision, tier):
+    tmp_lun_name = '%s_dest' % lun_name
+    flow_name = 'create_cloned_volume'
+    store_spec = {'client': client,
+                  'snap_name': snap_name,
+                  'lun_id': lun_id,
+                  'smp_name': lun_name,
+                  'lun_name': tmp_lun_name,
+                  'lun_size': lun_size,
+                  'base_lun_name': base_lun_name,
+                  'pool_name': pool_name,
+                  'provision': provision,
+                  'tier': tier,
+                  }
+    work_flow = linear_flow.Flow(flow_name)
+    work_flow.add(
+        CreateSnapshotTask(),
+        CreateSMPTask(),
+        AttachSnapTask(),
+        CreateLunTask(),
+        MigrateLunTask(
+            rebind={'src_id': 'smp_id', 'dst_id': 'new_lun_id'}))
+    engine = taskflow.engines.load(
+        work_flow, store=store_spec)
+    engine.run()
+    lun_id = engine.storage.fetch('smp_id')
+    return lun_id
+
+
+def create_cg_from_cg_snapshot(client, cg_name, src_cg_name,
+                               cg_snap_name, src_cg_snap_name,
+                               pool_name, lun_sizes, lun_names,
+                               src_lun_names, specs_list, copy_snap=True):
+    prepare_tasks = []
+    store_spec = {}
+
+    if copy_snap:
+        flow_name = 'create_cg_from_cg_snapshot'
+        temp_cg_snap = utils.construct_tmp_cg_snap_name(cg_name)
+        snap_name = temp_cg_snap
+        store_spec.update({'snap_name': src_cg_snap_name,
+                           'new_snap_name': snap_name})
+        prepare_tasks.append(
+            CopySnapshotTask())
+        prepare_tasks.append(
+            AllowReadWriteTask(rebind={'snap_name': 'new_snap_name'}))
+    else:
+        flow_name = 'create_cg_from_cg'
+        snap_name = cg_snap_name
+        store_spec.update({'cg_name': src_cg_name,
+                           'cg_snap_name': snap_name})
+        prepare_tasks.append(CreateCGSnapshotTask())
+
+    work_flow = linear_flow.Flow(flow_name)
+    work_flow.add(*prepare_tasks)
+    new_src_id_template = 'new_src_id_%s'
+    new_dst_id_template = 'new_dst_id_%s'
+    new_dst_wwn_template = 'new_dst_wwn_%s'
+
+    common_store_spec = {
+        'client': client,
+        'pool_name': pool_name,
+        'ignore_thresholds': True,
+        'new_cg_name': cg_name
+    }
+    store_spec.update(common_store_spec)
+
+    # Create LUNs for CG
+    for i, lun_name in enumerate(lun_names):
+        sub_store_spec = {
+            'lun_name': utils.construct_tmp_lun_name(lun_name),
+            'lun_size': lun_sizes[i],
+            'provision': specs_list[i].provision,
+            'tier': specs_list[i].tier,
+            'base_lun_name': src_lun_names[i],
+            'smp_name': lun_name,
+            'snap_name': snap_name,
+        }
+        work_flow.add(CreateSMPTask(name="CreateSMPTask_%s" % i,
+                                    inject=sub_store_spec,
+                                    provides=new_src_id_template % i),
+                      AttachSnapTask(name="AttachSnapTask_%s" % i,
+                                     inject=sub_store_spec),
+                      CreateLunTask(name="CreateLunTask_%s" % i,
+                                    inject=sub_store_spec,
+                                    provides=(new_dst_id_template % i,
+                                              new_dst_wwn_template % i)),
+                      MigrateLunTask(
+                          name="MigrateLunTask_%s" % i,
+                          inject=sub_store_spec,
+                          rebind={'src_id': new_src_id_template % i,
+                                  'dst_id': new_dst_id_template % i},
+                          wait_for_completion=False))
+
+    # Wait until all migration sessions finish
+    work_flow.add(WaitMigrationsTask(new_src_id_template,
+                                     new_dst_id_template,
+                                     new_dst_wwn_template,
+                                     len(lun_names)),
+                  CreateConsistencyGroupTask(new_src_id_template,
+                                             len(lun_names)))
+    engine = taskflow.engines.load(work_flow, store=store_spec)
+    engine.run()
+    # Fetch the IDs of the newly created LUNs (already added into the CG)
+    lun_id_list = []
+    for i, lun_name in enumerate(lun_names):
+        lun_id = engine.storage.fetch(new_src_id_template % i)
+        lun_id_list.append(lun_id)
+
+    client.delete_cg_snapshot(snap_name)
+    return lun_id_list
+
+
+def create_cloned_cg(client, cg_name, src_cg_name,
+                     pool_name, lun_sizes, lun_names,
+                     src_lun_names, specs_list):
+    cg_snap_name = utils.construct_tmp_cg_snap_name(cg_name)
+    return create_cg_from_cg_snapshot(
+        client, cg_name, src_cg_name,
+        cg_snap_name, None,
+        pool_name, lun_sizes, lun_names,
+        src_lun_names, specs_list, copy_snap=False)
+
+
+def create_mirror_view(mirror_view, mirror_name,
+                       primary_lun_id, pool_name,
+                       lun_name, lun_size, provision, tier):
+    flow_name = 'create_mirror_view'
+    store_specs = {
+        'mirror': mirror_view,
+        'mirror_name': mirror_name,
+        'primary_lun_id': primary_lun_id,
+        'pool_name': pool_name,
+        'lun_name': lun_name,
+        'lun_size': lun_size,
+        'provision': provision,
+        'tier': tier,
+        'ignore_thresholds': True
+    }
+    # NOTE: the secondary LUN must be created on the secondary device/array,
+    # so the secondary client is injected into CreateLunTask.
+    work_flow = linear_flow.Flow(flow_name)
+    work_flow.add(CreateMirrorTask(),
+                  CreateLunTask(
+                      name='CreateSecondaryLunTask',
+                      provides=('secondary_lun_id', 'secondary_lun_wwn'),
+                      inject={'client': mirror_view.secondary_client}),
+                  AddMirrorImageTask())
+    engine = taskflow.engines.load(work_flow, store=store_specs)
+    engine.run()
diff --git a/cinder/volume/drivers/emc/vnx/utils.py b/cinder/volume/drivers/emc/vnx/utils.py
new file mode 100644
index 00000000000..46fb7b0bbae
--- /dev/null
+++ b/cinder/volume/drivers/emc/vnx/utils.py
@@ -0,0 +1,339 @@
+# Copyright (c) 2016 EMC Corporation, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import time
+
+import six
+
+from oslo_log import log as logging
+from oslo_service import loopingcall
+from oslo_utils import excutils
+from oslo_utils import importutils
+
+storops = importutils.try_import('storops')
+
+from cinder import exception
+from cinder.i18n import _, _LW
+from cinder.volume.drivers.emc.vnx import common
+from cinder.volume.drivers.san.san import san_opts
+from cinder.volume import utils as vol_utils
+from cinder.volume import volume_types
+
+LOG = logging.getLogger(__name__)
+
+
+def init_ops(configuration):
+    configuration.append_config_values(common.EMC_VNX_OPTS)
+    configuration.append_config_values(san_opts)
+
+
+def get_metadata(volume):
+    # Since versionedobjects is only partially merged, metadata
+    # may come from either 'volume_metadata' or 'metadata', so
+    # both of them need to be handled here.
+    volume_metadata = {}
+    if 'volume_metadata' in volume:
+        for metadata in volume['volume_metadata']:
+            volume_metadata[metadata['key']] = metadata['value']
+        return volume_metadata
+    return volume['metadata'] if 'metadata' in volume else {}
+
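+# Illustrative example: both of the volume shapes below would yield
+# {'snapcopy': 'True'} from get_metadata():
+#     {'volume_metadata': [{'key': 'snapcopy', 'value': 'True'}]}
+#     {'metadata': {'snapcopy': 'True'}}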
+
+def dump_provider_location(location_dict):
+    return '|'.join([k + '^' + v for k, v in location_dict.items()])
+
+
+def build_provider_location(system, lun_type, lun_id, base_lun_name, version):
+    """Builds provider_location for volume or snapshot.
+
+    :param system: VNX serial number
+    :param lun_id: LUN ID in VNX
+    :param lun_type: 'lun' or 'smp'
+    :param base_lun_name: primary LUN name,
+                          it will be used when creating snap lun
+    :param version: driver version
+    """
+    location_dict = {'system': system,
+                     'type': lun_type,
+                     'id': six.text_type(lun_id),
+                     'base_lun_name': six.text_type(base_lun_name),
+                     'version': version}
+    return dump_provider_location(location_dict)
+
+
+def extract_provider_location(provider_location, key):
+    """Extracts value of the specified field from provider_location string.
+
+    :param provider_location: provider_location string
+    :param key: field name of the value to be extracted
+    :return: value of the specified field if it exists, otherwise,
+             None is returned
+    """
+    if not provider_location:
+        return None
+
+    kvps = provider_location.split('|')
+    for kvp in kvps:
+        fields = kvp.split('^')
+        if len(fields) == 2 and fields[0] == key:
+            return fields[1]
+
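+# Round-trip sketch (hypothetical values):
+#     pl = build_provider_location('sn', 'lun', 10, 'vol-x', '08.00.00')
+# gives a string like (key order may vary)
+#     'system^sn|type^lun|id^10|base_lun_name^vol-x|version^08.00.00'
+# and extract_provider_location(pl, 'id') then returns '10'.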
+
+def update_provider_location(provider_location, items):
+    """Updates provider_location with new dict items.
+
+    :param provider_location: volume's provider_location.
+    :param items: dict items for updating.
+    """
+    location_dict = {tp.split('^')[0]: tp.split('^')[1]
+                     for tp in provider_location.split('|')}
+    for key, value in items.items():
+        location_dict[key] = value
+    return dump_provider_location(location_dict)
+
+
+def get_pool_from_host(host):
+    return vol_utils.extract_host(host, 'pool')
+
+
+def wait_until(condition, timeout=None, interval=common.INTERVAL_5_SEC,
+               reraise_arbiter=lambda ex: True, *args, **kwargs):
+    start_time = time.time()
+    if not timeout:
+        timeout = common.DEFAULT_TIMEOUT
+
+    def _inner():
+        try:
+            test_value = condition(*args, **kwargs)
+        except Exception as ex:
+            test_value = False
+            with excutils.save_and_reraise_exception(
+                    reraise=reraise_arbiter(ex)):
+                LOG.debug('Exception raised when executing %(condition_name)s'
+                          ' in wait_until. Message: %(msg)s',
+                          {'condition_name': condition.__name__,
+                           'msg': six.text_type(ex)})
+        if test_value:
+            raise loopingcall.LoopingCallDone()
+
+        if int(time.time()) - start_time > timeout:
+            msg = (_('Timeout waiting for %(condition_name)s in wait_until.')
+                   % {'condition_name': condition.__name__})
+            LOG.error(msg)
+            raise common.WaitUtilTimeoutException(msg)
+
+    timer = loopingcall.FixedIntervalLoopingCall(_inner)
+    timer.start(interval=interval).wait()
+
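+# Usage sketch (hypothetical call site; assumes the vnx ``const`` module is
+# imported there): poll a migration session, re-raising immediately unless
+# the array reports the LUN as still preparing:
+#     wait_until(lambda: client.verify_migration(src_id, dst_id, dst_wwn),
+#                timeout=3600,
+#                reraise_arbiter=lambda ex: not isinstance(
+#                    ex, const.VNXLunPreparingError))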
+
+def validate_storage_migration(volume, target_host, src_serial, src_protocol):
+    if 'location_info' not in target_host['capabilities']:
+        LOG.warning(_LW("Failed to get pool name and "
+                        "serial number. 'location_info' "
+                        "from %s."), target_host['host'])
+        return False
+    info = target_host['capabilities']['location_info']
+    LOG.debug("Host for migration is %s.", info)
+    try:
+        serial_number = info.split('|')[1]
+    except (AttributeError, IndexError):
+        LOG.warning(_LW('Error on getting serial number '
+                        'from %s.'), target_host['host'])
+        return False
+    if serial_number != src_serial:
+        LOG.debug('Skip storage-assisted migration because '
+                  'target and source backends are not managing '
+                  'the same array.')
+        return False
+    if (target_host['capabilities']['storage_protocol'] != src_protocol
+            and get_original_status(volume) == 'in-use'):
+        LOG.debug('Skip storage-assisted migration because '
+                  'an in-use volume cannot be migrated '
+                  'between different protocols.')
+        return False
+    return True
+
+
+def retype_need_migration(volume, old_provision, new_provision, host):
+    if volume['host'] != host['host']:
+        return True
+
+    lun_type = extract_provider_location(volume['provider_location'], 'type')
+    if lun_type == 'smp':
+        return True
+
+    if old_provision != new_provision:
+        if retype_need_turn_on_compression(old_provision, new_provision):
+            return False
+        else:
+            return True
+
+    return False
+
+
+def retype_need_turn_on_compression(old_provision, new_provision):
+    return (old_provision in [storops.VNXProvisionEnum.THIN,
+                              storops.VNXProvisionEnum.THICK]
+            and new_provision == storops.VNXProvisionEnum.COMPRESSED)
+
+
+def retype_need_change_tier(old_tier, new_tier):
+    return new_tier is not None and old_tier != new_tier
+
+
+def get_original_status(volume):
+    if not volume['volume_attachment']:
+        return 'available'
+    else:
+        return 'in-use'
+
+
+def construct_snap_name(volume):
+    """Return snapshot name."""
+    if snapcopy_enabled(volume):
+        return 'snap-as-vol-' + six.text_type(volume.name_id)
+    else:
+        return 'tmp-snap-' + six.text_type(volume.name_id)
+
+
+def construct_mirror_name(volume):
+    """Constructs MirrorView name for volume."""
+    return 'mirror_' + six.text_type(volume.id)
+
+
+def construct_tmp_cg_snap_name(cg_name):
+    """Return CG snapshot name."""
+    return 'tmp-snap-' + six.text_type(cg_name)
+
+
+def construct_tmp_lun_name(lun_name):
+    """Constructs a time-based temporary LUN name."""
+    return '%(src)s-%(ts)s' % {'src': lun_name,
+                               'ts': int(time.time())}
+
+
+def construct_smp_name(snap_id):
+    return 'tmp-smp-' + six.text_type(snap_id)
+
+
+def snapcopy_enabled(volume):
+    meta = get_metadata(volume)
+    return 'snapcopy' in meta and meta['snapcopy'].lower() == 'true'
+
+
+def get_migration_rate(volume):
+    metadata = get_metadata(volume)
+    rate = metadata.get('migrate_rate', None)
+    if rate:
+        if rate.lower() in storops.VNXMigrationRate.values():
+            return storops.VNXMigrationRate.parse(rate.lower())
+        else:
+            LOG.warning(_LW('Unknown migration rate specified, '
+                            'using [high] as migration rate.'))
+
+            return storops.VNXMigrationRate.HIGH
+
+
+def validate_cg_type(group):
+    if group.get('volume_type_id') is None:
+        return
+    for type_id in group['volume_type_id'].split(","):
+        if type_id:
+            specs = volume_types.get_volume_type_extra_specs(type_id)
+            extra_specs = common.ExtraSpecs(specs)
+            if extra_specs.provision == storops.VNXProvisionEnum.COMPRESSED:
+                msg = _("Failed to create consistency group %s "
+                        "because VNX consistency group cannot "
+                        "accept compressed LUNs as members."
+                        ) % group['id']
+                raise exception.InvalidInput(reason=msg)
+
+
+def update_res_without_poll(res):
+    with res.with_no_poll():
+        res.update()
+
+
+def update_res_with_poll(res):
+    with res.with_poll():
+        res.update()
+
+
+def get_base_lun_name(volume):
+    """Returns base LUN name for LUN/snapcopy LUN."""
+    base_name = extract_provider_location(
+        volume.provider_location, 'base_lun_name')
+    if base_name is None or base_name == 'None':
+        return volume.name
+    return base_name
+
+
+def sift_port_white_list(port_white_list, registered_io_ports):
+    """Filters out the unregistered ports.
+
+    Goes through the `port_white_list`, and filters out the ones not
+    registered (that is not in `registered_io_ports`).
+    """
+    valid_port_list = []
+    LOG.debug('Filter ports in [%(white)s] but not in [%(reg_ports)s].',
+              {'white': ','.join(
+                  [port.display_name for port in port_white_list]),
+               'reg_ports': ','.join(
+                   [port.display_name for port in registered_io_ports])})
+    for io_port in port_white_list:
+        if io_port not in registered_io_ports:
+            LOG.debug('Skipped SP port %(port)s because it is not '
+                      'registered. '
+                      'The registered IO ports: %(reg_ports)s.',
+                      {'port': io_port, 'reg_ports': registered_io_ports})
+        else:
+            valid_port_list.append(io_port)
+
+    return valid_port_list
+
+
+def convert_to_tgt_list_and_itor_tgt_map(zone_mapping):
+    """Function to process data from lookup service.
+
+    :param zone_mapping: the data from the zone lookup service, in the
+        format below:
+        {
+             <San name>: {
+                 'initiator_port_wwn_list':
+                 ('200000051e55a100', '200000051e55a121'..)
+                 'target_port_wwn_list':
+                 ('100000051e55a100', '100000051e55a121'..)
+             }
+        }
+    """
+    target_wwns = []
+    itor_tgt_map = {}
+    for san_name in zone_mapping:
+        one_map = zone_mapping[san_name]
+        for target in one_map['target_port_wwn_list']:
+            if target not in target_wwns:
+                target_wwns.append(target)
+        for initiator in one_map['initiator_port_wwn_list']:
+            itor_tgt_map[initiator] = one_map['target_port_wwn_list']
+    LOG.debug("target_wwns: %(tgt_wwns)s\n init_targ_map: %(itor_tgt_map)s",
+              {'tgt_wwns': target_wwns,
+               'itor_tgt_map': itor_tgt_map})
+    return target_wwns, itor_tgt_map
+
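+# Illustrative example with shortened WWNs: an input of
+#     {'san_1': {'initiator_port_wwn_list': ('2000a',),
+#                'target_port_wwn_list': ('1000a', '1000b')}}
+# yields (['1000a', '1000b'], {'2000a': ('1000a', '1000b')}).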
+
+def truncate_fc_port_wwn(wwn):
+    return wwn.replace(':', '')[16:]
+
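+# For example (made-up WWN), truncate_fc_port_wwn(
+#     '50:06:01:60:B6:E0:1C:F4:50:06:01:64:36:E0:1C:F4')
+# strips the colons and keeps the last 16 characters: '5006016436E01CF4'.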
+
+def is_volume_smp(volume):
+    return 'smp' == extract_provider_location(volume.provider_location, 'type')
diff --git a/releasenotes/notes/vnx-new-driver-7e96934c2d3a6edc.yaml b/releasenotes/notes/vnx-new-driver-7e96934c2d3a6edc.yaml
new file mode 100644
index 00000000000..c6858fa1359
--- /dev/null
+++ b/releasenotes/notes/vnx-new-driver-7e96934c2d3a6edc.yaml
@@ -0,0 +1,16 @@
+---
+features:
+  - Adds the new VNX Cinder driver, which is based on storops,
+    a library released to PyPI.
+
+upgrade:
+  - For EMC VNX backends, please upgrade to use
+    'cinder.volume.drivers.emc.vnx.driver.EMCVNXDriver'.
+    A new option 'storage_protocol' is introduced.
+    For the FC driver, add 'storage_protocol = fc' to the driver section.
+    For the iSCSI driver, add 'storage_protocol = iscsi' to the driver
+    section.
+deprecations:
+  - The old VNX FC (cinder.volume.drivers.emc.emc_cli_fc.EMCCLIFCDriver)
+    and iSCSI (cinder.volume.drivers.emc.emc_cli_iscsi.EMCCLIISCSIDriver)
+    drivers are deprecated. Please refer to the upgrade section for
+    information about the new driver.