Replication, hard-coding, and dict.name issues in K2
Replication was originally tested with self-replication because separate K2 arrays were unavailable, so the source and target K2 arrays were the same. When replication was tested with distinct source and target K2 arrays, _delete_volume_replica and _get_replica_status did not work. The hard-coded RPO value (500) raises an exception during retype: "rpo must be either 1 min or multiple of 5 min". Accessing vol_type.name raises the following exception: AttributeError: 'dict' object has no attribute 'name'. Change-Id: I86e840f9fbc886ef661ddf1de77d433adb901ae5 Closes-Bug: #1612602 Co-Authored-By: VenkataKrishna Reddy <Venkata.Krishna.ctr@kaminario.com>
This commit is contained in:
parent
ba7d9c63a9
commit
b7dcc4ae19
@ -486,7 +486,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
|
|||||||
|
|
||||||
LOG.debug("Searching and deleting snapshots for volume groups:"
|
LOG.debug("Searching and deleting snapshots for volume groups:"
|
||||||
"%(vg1)s, %(vg2)s in K2.", {'vg1': vg_name, 'vg2': rvg_name})
|
"%(vg1)s, %(vg2)s in K2.", {'vg1': vg_name, 'vg2': rvg_name})
|
||||||
vg = self.target.search('volume_groups', name=vg_name).hits
|
vg = self.client.search('volume_groups', name=vg_name).hits
|
||||||
rvg = self.target.search('volume_groups', name=rvg_name).hits
|
rvg = self.target.search('volume_groups', name=rvg_name).hits
|
||||||
snaps = self.client.search('snapshots', volume_group=vg).hits
|
snaps = self.client.search('snapshots', volume_group=vg).hits
|
||||||
for s in snaps:
|
for s in snaps:
|
||||||
@ -637,9 +637,9 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
|
|||||||
return "cview-{0}".format(vid)
|
return "cview-{0}".format(vid)
|
||||||
|
|
||||||
@kaminario_logger
|
@kaminario_logger
|
||||||
def get_rep_name(self, sname):
|
def get_rep_name(self, name):
|
||||||
"""Return the replication session name."""
|
"""Return the corresponding replication names."""
|
||||||
return "r{0}".format(sname)
|
return "r{0}".format(name)
|
||||||
|
|
||||||
@kaminario_logger
|
@kaminario_logger
|
||||||
def _delete_host_by_name(self, name):
|
def _delete_host_by_name(self, name):
|
||||||
@ -758,7 +758,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
|
|||||||
'kaminario:thin_prov_type')
|
'kaminario:thin_prov_type')
|
||||||
if specs_val == 'nodedup':
|
if specs_val == 'nodedup':
|
||||||
return False
|
return False
|
||||||
elif CONF.kaminario_nodedup_substring in vol_type.name:
|
elif CONF.kaminario_nodedup_substring in vol_type.get('name'):
|
||||||
LOG.info(_LI("'kaminario_nodedup_substring' option is "
|
LOG.info(_LI("'kaminario_nodedup_substring' option is "
|
||||||
"deprecated in favour of 'kaminario:thin_prov_"
|
"deprecated in favour of 'kaminario:thin_prov_"
|
||||||
"type' in extra-specs and will be removed in "
|
"type' in extra-specs and will be removed in "
|
||||||
@ -779,12 +779,12 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
|
|||||||
return replica
|
return replica
|
||||||
|
|
||||||
def _get_replica_status(self, vg_name):
|
def _get_replica_status(self, vg_name):
|
||||||
status = False
|
vg = self.client.search("volume_groups", name=vg_name).hits[0]
|
||||||
rvg = self.client.search("replication/peer_volume_groups",
|
if self.client.search("replication/sessions",
|
||||||
name=vg_name)
|
local_volume_group=vg).total != 0:
|
||||||
if rvg.total != 0:
|
return True
|
||||||
status = True
|
else:
|
||||||
return status
|
return False
|
||||||
|
|
||||||
def manage_existing(self, volume, existing_ref):
|
def manage_existing(self, volume, existing_ref):
|
||||||
vol_name = existing_ref['source-name']
|
vol_name = existing_ref['source-name']
|
||||||
@ -885,7 +885,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
|
|||||||
LOG.debug("Searching volume with name: %(name)s",
|
LOG.debug("Searching volume with name: %(name)s",
|
||||||
{'name': vol_name})
|
{'name': vol_name})
|
||||||
vol = self.client.search("volumes", name=vol_name).hits[0]
|
vol = self.client.search("volumes", name=vol_name).hits[0]
|
||||||
self._create_volume_replica(volume, vg, vol, 500)
|
self._create_volume_replica(volume, vg, vol, self.replica.rpo)
|
||||||
|
|
||||||
def _delete_replication(self, volume):
|
def _delete_replication(self, volume):
|
||||||
vg_name = self.get_volume_group_name(volume.id)
|
vg_name = self.get_volume_group_name(volume.id)
|
||||||
|
@ -0,0 +1,7 @@
|
|||||||
|
---
|
||||||
|
fixes:
|
||||||
|
- Fixed _delete_volume_replica and _get_replica_status in
|
||||||
|
Kaminario K2 iSCSI and FC Cinder drivers with different source
|
||||||
|
and target K2 arrays while testing replication.
|
||||||
|
Removed hard-coding of RPO and fixed volume_type.name issue.
|
||||||
|
|
Loading…
x
Reference in New Issue
Block a user