From cd103b187b2a0865583d368308ecf6cd45fd6d48 Mon Sep 17 00:00:00 2001
From: shaofeng_cheng
Date: Sat, 1 Apr 2017 18:00:23 +0800
Subject: [PATCH] Fix ceph role to set target_max_bytes for cache pools

Ceph is not able to determine the size of a cache pool automatically,
so an absolute size must be configured here; otherwise flush/evict
will not work.

See http://docs.ceph.com/docs/master/rados/operations/cache-tiering/

Change-Id: I4d4abb50787093a292e1ee6eb790c10ecf5ccb94
Closes-Bug: #1654375
---
 ansible/group_vars/all.yml   |  6 ++++++
 ansible/roles/ceph_pools.yml | 20 ++++++++++++++++++++
 etc/kolla/globals.yml        |  6 ++++++
 3 files changed, 32 insertions(+)

diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml
index 5ee68ea295..0f83c3a9bf 100644
--- a/ansible/group_vars/all.yml
+++ b/ansible/group_vars/all.yml
@@ -521,6 +521,12 @@ octavia_amp_flavor_id:
 
 # Ceph can be setup with a caching to improve performance. To use the cache you
 # must provide separate disks than those for the OSDs
 ceph_enable_cache: "no"
+
+# Ceph is not able to determine the size of a cache pool automatically, so an
+# absolute size must be configured here; otherwise flush/evict will not work.
+ceph_target_max_bytes: ""
+ceph_target_max_objects: ""
+
 # Valid options are [ forward, none, writeback ]
 ceph_cache_mode: "writeback"
diff --git a/ansible/roles/ceph_pools.yml b/ansible/roles/ceph_pools.yml
index ae496e9569..c9afb79897 100644
--- a/ansible/roles/ceph_pools.yml
+++ b/ansible/roles/ceph_pools.yml
@@ -78,3 +78,23 @@
   failed_when: False
   run_once: True
   when: "{{ ceph_enable_cache | bool }}"
+
+- name: Setting cache target_max_bytes
+  command: docker exec ceph_mon ceph osd pool set {{ pool_name }}-cache target_max_bytes {{ ceph_target_max_bytes }}
+  delegate_to: "{{ groups['ceph-mon'][0] }}"
+  changed_when: False
+  failed_when: False
+  run_once: True
+  when:
+    - ceph_enable_cache | bool
+    - ceph_target_max_bytes != ''
+
+- name: Setting cache target_max_objects
+  command: docker exec ceph_mon ceph osd pool set {{ pool_name }}-cache target_max_objects {{ ceph_target_max_objects }}
+  delegate_to: "{{ groups['ceph-mon'][0] }}"
+  changed_when: False
+  failed_when: False
+  run_once: True
+  when:
+    - ceph_enable_cache | bool
+    - ceph_target_max_objects != ''
diff --git a/etc/kolla/globals.yml b/etc/kolla/globals.yml
index 408bf347e7..6a1c598947 100644
--- a/etc/kolla/globals.yml
+++ b/etc/kolla/globals.yml
@@ -200,6 +200,12 @@ kolla_internal_vip_address: "10.10.10.254"
 
 # Ceph can be setup with a caching to improve performance. To use the cache you
 # must provide separate disks than those for the OSDs
 #ceph_enable_cache: "no"
+
+# Ceph is not able to determine the size of a cache pool automatically, so an
+# absolute size must be configured here; otherwise flush/evict will not work.
+#ceph_target_max_bytes: ""
+#ceph_target_max_objects: ""
+
 # Valid options are [ forward, none, writeback ]
 #ceph_cache_mode: "writeback"
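
Usage sketch (illustrative only; the pool name "images" and the limit values
below are hypothetical examples, not values taken from the patch). With the
cache tier enabled, an operator sets absolute limits in /etc/kolla/globals.yml:

  ceph_enable_cache: "yes"
  # Start flushing/evicting once the cache pool holds ~100 GiB of data...
  ceph_target_max_bytes: "107374182400"
  # ...or ~1 million objects, whichever threshold is reached first.
  ceph_target_max_objects: "1000000"

For a pool named "images", the new tasks then run the equivalent of:

  docker exec ceph_mon ceph osd pool set images-cache target_max_bytes 107374182400
  docker exec ceph_mon ceph osd pool set images-cache target_max_objects 1000000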