diff --git a/cinder/tests/unit/volume/drivers/ibm/test_ds8k_proxy.py b/cinder/tests/unit/volume/drivers/ibm/test_ds8k_proxy.py
index adbdbbd641b..236285af01e 100644
--- a/cinder/tests/unit/volume/drivers/ibm/test_ds8k_proxy.py
+++ b/cinder/tests/unit/volume/drivers/ibm/test_ds8k_proxy.py
@@ -2893,6 +2893,24 @@ class DS8KProxyTest(test.TestCase):
         self.assertRaises(exception.VolumeDriverException,
                           self.driver.create_group, self.ctxt, group)
 
+    @ddt.data('group_replication_enabled',
+              'consistent_group_replication_enabled')
+    def test_create_replication_group_update_replication_status(self, key):
+        """Creating a replication group should update replication_status."""
+        self.configuration.lss_range_for_cg = '20-23'
+        self.configuration.replication_device = [TEST_REPLICATION_DEVICE]
+        self.driver = FakeDS8KProxy(self.storage_info, self.logger,
+                                    self.exception, self)
+        self.driver.setup(self.ctxt)
+
+        group_type = group_types.create(self.ctxt, 'group', {key: '<is> True'})
+        group = self._create_group(host=TEST_GROUP_HOST,
+                                   group_type_id=group_type.id)
+        model_update = self.driver.create_group(self.ctxt, group)
+        self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status'])
+        self.assertEqual(fields.ReplicationStatus.ENABLED,
+                         model_update['replication_status'])
+
     def test_delete_consistency_group_sucessfully(self):
         """test a successful consistency group deletion."""
         self.driver = FakeDS8KProxy(self.storage_info, self.logger,
@@ -2955,7 +2973,8 @@ class DS8KProxyTest(test.TestCase):
         vol_type = volume_types.create(self.ctxt, 'VOL_TYPE',
                                        {'replication_enabled': '<is> True'})
         location = six.text_type({'vol_hex_id': TEST_VOLUME_ID})
-        data = json.dumps({'vol_hex_id': TEST_VOLUME_ID})
+        data = json.dumps(
+            {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}})
         volume = self._create_volume(volume_type_id=vol_type.id,
                                      provider_location=location,
                                      replication_driver_data=data,
@@ -2981,7 +3000,8 @@ class DS8KProxyTest(test.TestCase):
         vol_type = volume_types.create(self.ctxt, 'VOL_TYPE',
                                        {'replication_enabled': '<is> True'})
         location = six.text_type({'vol_hex_id': TEST_VOLUME_ID})
-        data = json.dumps({'vol_hex_id': TEST_VOLUME_ID})
+        data = json.dumps(
+            {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}})
         volume = self._create_volume(volume_type_id=vol_type.id,
                                      provider_location=location,
                                      replication_driver_data=data,
@@ -3222,8 +3242,7 @@ class DS8KProxyTest(test.TestCase):
                 self.ctxt, group, [volume], None, None, src_group, [src_vol]))
         self.assertEqual('2200',
                          volumes_model_update[0]['metadata']['vol_hex_id'])
-        self.assertEqual(fields.GroupStatus.AVAILABLE,
-                         model_update['status'])
+        self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status'])
 
     @mock.patch.object(eventlet, 'sleep')
     @mock.patch.object(helper.DS8KCommonHelper, '_create_lun')
@@ -3263,11 +3282,13 @@ class DS8KProxyTest(test.TestCase):
                 [snapshot], None, None))
         self.assertEqual(
             '2200', volumes_model_update[0]['metadata']['vol_hex_id'])
-        self.assertEqual(fields.GroupStatus.AVAILABLE,
-                         model_update['status'])
+        self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status'])
 
-    def test_create_group_from_generic_group(self):
-        """create group from generic group is not implemented."""
+    @mock.patch.object(eventlet, 'sleep')
+    @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy')
+    def test_create_group_from_replication_group(self, mock_get_flashcopy,
+                                                 mock_sleep):
+        """create group from replication group."""
         self.configuration.replication_device = [TEST_REPLICATION_DEVICE]
         self.driver = FakeDS8KProxy(self.storage_info, self.logger,
                                     self.exception, self)
@@ -3283,7 +3304,8 @@ class DS8KProxyTest(test.TestCase):
         vol_type = volume_types.create(self.ctxt, 'VOL_TYPE',
                                        {'replication_enabled': '<is> True'})
         location = six.text_type({'vol_hex_id': TEST_VOLUME_ID})
-        data = json.dumps({'vol_hex_id': TEST_VOLUME_ID})
+        data = json.dumps(
+            {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}})
         src_volume = self._create_volume(volume_type_id=vol_type.id,
                                          provider_location=location,
                                          replication_driver_data=data,
@@ -3292,11 +3314,12 @@ class DS8KProxyTest(test.TestCase):
         group = self._create_group(host=TEST_GROUP_HOST,
                                    group_type_id=group_type.id)
         volume = self._create_volume(group_id=group.id)
-
-        self.assertRaises(NotImplementedError,
-                          self.driver.create_group_from_src,
-                          self.ctxt, group, [volume],
-                          None, None, src_group, [src_volume])
+        mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}]
+        model_update, volumes_model_update = self.driver.create_group_from_src(
+            self.ctxt, group, [volume], None, None, src_group, [src_volume])
+        self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status'])
+        self.assertEqual(fields.ReplicationStatus.ENABLED,
+                         model_update['replication_status'])
 
     @mock.patch.object(eventlet, 'sleep')
     @mock.patch.object(helper.DS8KCommonHelper, 'get_pprc_pairs')
diff --git a/cinder/volume/drivers/ibm/ibm_storage/ds8k_proxy.py b/cinder/volume/drivers/ibm/ibm_storage/ds8k_proxy.py
index 2aa34e32637..5f3fc03b4f5 100644
--- a/cinder/volume/drivers/ibm/ibm_storage/ds8k_proxy.py
+++ b/cinder/volume/drivers/ibm/ibm_storage/ds8k_proxy.py
@@ -1035,7 +1035,9 @@ class DS8KProxy(proxy.IBMStorageProxy):
     @proxy.logger
     def create_group(self, ctxt, group):
         """Create consistency group of FlashCopy or RemoteCopy."""
+        model_update = {}
         grp = Group(group)
+        # verify replication.
         if (grp.group_replication_enabled or
                 grp.consisgroup_replication_enabled):
             for volume_type in group.volume_types:
@@ -1046,25 +1048,28 @@ class DS8KProxy(proxy.IBMStorageProxy):
                              'is for replication type, but volume '
                              '%(vtype)s is a non-replication one.'
                              % {'grp': grp.id, 'vtype': volume_type.id})
+            model_update['replication_status'] = (
+                fields.ReplicationStatus.ENABLED)
+        # verify consistency group.
         if (grp.consisgroup_snapshot_enabled or
                 grp.consisgroup_replication_enabled):
             self._assert(self._helper.backend['lss_ids_for_cg'],
                          'No LSS(s) for CG, please make sure you have '
                          'reserved LSS for CG via param lss_range_for_cg.')
-            model_update = {}
             if grp.consisgroup_replication_enabled:
                 self._helper.verify_rest_version_for_pprc_cg()
                 target_helper = self._replication.get_target_helper()
                 target_helper.verify_rest_version_for_pprc_cg()
-                model_update['replication_status'] = (
-                    fields.ReplicationStatus.ENABLED)
+
+        # The driver creates the replication group itself because base
+        # cinder does not update the group's replication_status when it
+        # handles creation; otherwise base cinder could take it over.
+        if (grp.consisgroup_snapshot_enabled or
+                grp.consisgroup_replication_enabled or
+                grp.group_replication_enabled):
             model_update.update(self._helper.create_group(group))
             return model_update
         else:
-            # NOTE(jiamin): If grp.group_replication_enabled is True, the
-            # default implementation will handle the creation of the group
-            # and driver just makes sure each volume type in group has
-            # enabled replication.
             raise NotImplementedError()
 
     @proxy.logger
@@ -1229,12 +1234,19 @@ class DS8KProxy(proxy.IBMStorageProxy):
         """Create volume group from volume group or volume group snapshot."""
         grp = Group(group)
         if (not grp.consisgroup_snapshot_enabled and
-                not grp.consisgroup_replication_enabled):
+                not grp.consisgroup_replication_enabled and
+                not grp.group_replication_enabled):
             raise NotImplementedError()
 
-        model_update = {'status': fields.GroupStatus.AVAILABLE}
+        model_update = {
+            'status': fields.GroupStatus.AVAILABLE,
+            'replication_status': fields.ReplicationStatus.DISABLED
+        }
+        if (grp.group_replication_enabled or
+                grp.consisgroup_replication_enabled):
+            model_update['replication_status'] = (
+                fields.ReplicationStatus.ENABLED)
         volumes_model_update = []
-
         if group_snapshot and sorted_snapshots:
             src_luns = [Lun(snapshot, is_snapshot=True)
                         for snapshot in sorted_snapshots]
@@ -1267,7 +1279,8 @@ class DS8KProxy(proxy.IBMStorageProxy):
             volume_model_update = tgt_lun.get_volume_update()
             volume_model_update.update({
                 'id': tgt_lun.os_id,
-                'status': model_update['status']
+                'status': model_update['status'],
+                'replication_status': model_update['replication_status']
             })
             volumes_model_update.append(volume_model_update)