From 67a1b5a224f1ac239fbb42c9a6d1206cb765e897 Mon Sep 17 00:00:00 2001 From: zhangbailin Date: Fri, 25 Aug 2017 04:15:33 -0700 Subject: [PATCH] Modify some spelling mistakes in cinder While I am reading this code, I found some spelling mistakes in the comments and corrected them here. Change-Id: If76bac8b763257edb5ebb106a8d4ae74dfa2a710 --- cinder/image/image_utils.py | 2 +- cinder/scheduler/filters/capacity_filter.py | 2 +- cinder/volume/api.py | 2 +- cinder/volume/drivers/nimble.py | 2 +- cinder/volume/drivers/qnap.py | 2 +- cinder/volume/manager.py | 4 ++-- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/cinder/image/image_utils.py b/cinder/image/image_utils.py index 8c5988f653e..25f241734eb 100644 --- a/cinder/image/image_utils.py +++ b/cinder/image/image_utils.py @@ -408,7 +408,7 @@ def fetch_to_volume_format(context, image_service, % {'fmt': fmt, 'backing_file': backing_file, }) # NOTE(e0ne): check for free space in destination directory before - # image convertion. + # image conversion. 
check_available_space(dest, virt_size, image_id) # NOTE(jdg): I'm using qemu-img convert to write diff --git a/cinder/scheduler/filters/capacity_filter.py b/cinder/scheduler/filters/capacity_filter.py index 9789ea84a8f..20e4b6941eb 100644 --- a/cinder/scheduler/filters/capacity_filter.py +++ b/cinder/scheduler/filters/capacity_filter.py @@ -156,7 +156,7 @@ class CapacityFilter(filters.BaseBackendFilter): "grouping": grouping, "grouping_name": backend_state.backend_id} LOG.warning("Insufficient free virtual space " - "(%(available)sGB) to accomodate thin " + "(%(available)sGB) to accommodate thin " "provisioned %(size)sGB volume on %(grouping)s" " %(grouping_name)s.", msg_args) return res diff --git a/cinder/volume/api.py b/cinder/volume/api.py index 7490d826bc8..257c4f1c60f 100644 --- a/cinder/volume/api.py +++ b/cinder/volume/api.py @@ -1951,7 +1951,7 @@ class API(base.Base): # FIXME(JDG): We want to be able to do things here like reserve a # volume for Nova to do BFV WHILE the volume may be in the process of # downloading image, we add downloading here; that's easy enough but - we've got a race inbetween with the attaching/detaching that we do + we've got a race in between with the attaching/detaching that we do locally on the Cinder node. Just come up with an easy way to # determine if we're attaching to the Cinder host for some work or if # we're being used by the outside world. 
diff --git a/cinder/volume/drivers/nimble.py b/cinder/volume/drivers/nimble.py index be72ef71448..f578a429b02 100644 --- a/cinder/volume/drivers/nimble.py +++ b/cinder/volume/drivers/nimble.py @@ -1397,7 +1397,7 @@ class NimbleRestAPIExecutor(object): filter = {'id': snap_id} r = self.get_query(api, filter) if not r.json()['data']: - raise NimbleAPIException(_("Snapshot: %s doesnt exist") % snap_id) + raise NimbleAPIException(_("Snapshot: %s doesn't exist") % snap_id) return r.json()['data'][0] @utils.retry(NimbleAPIException, 2, 3) diff --git a/cinder/volume/drivers/qnap.py b/cinder/volume/drivers/qnap.py index 4edbb48e752..0d5152552c5 100644 --- a/cinder/volume/drivers/qnap.py +++ b/cinder/volume/drivers/qnap.py @@ -751,7 +751,7 @@ class QnapISCSIDriver(san.SanISCSIDriver): _metadata = self._get_volume_metadata(new_volume) - # metadata will not be swap after migration wiht liberty version + # metadata will not be swapped after migration with liberty version # , and the metadata of new volume is diifferent with the metadata # of original volume. Therefore, we need to update the migrated volume if not hasattr(new_volume, '_orig_metadata'): diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py index 7f1a90104df..4f3ce049a74 100644 --- a/cinder/volume/manager.py +++ b/cinder/volume/manager.py @@ -2149,7 +2149,7 @@ class VolumeManager(manager.CleanableManager, # NOTE(jdg): Things get a little hairy in here and we do a lot of # things based on volume previous-status and current-status. 
At some # point this should all be reworked but for now we need to maintain - # backward compatability and NOT change the API so we're going to try + # backward compatibility and NOT change the API so we're going to try # and make this work best we can LOG.debug("migrate_volume_completion: completing migration for " @@ -2266,7 +2266,7 @@ class VolumeManager(manager.CleanableManager, 'vol %(vol)s: %(err)s', {'vol': volume.id, 'err': ex}) - # For the new flow this is realy the key part. We just use the + # For the new flow this is really the key part. We just use the # attachments to the worker/destination volumes that we created and # used for the libvirt migration and we'll just swap their volume_id # entries to coorespond with the volume.id swap we did