
ceph: optimizing cap reservation

Message ID 1519468562-68172-1-git-send-email-cgxu519@icloud.com (mailing list archive)
State New, archived
Headers show

Commit Message

Chengguang Xu Feb. 24, 2018, 10:36 a.m. UTC
When caps_avail_count is at a low level, most newly
trimmed caps will probably go into ->caps_list and
caps_avail_count will be increased. Hence, after trimming
we should recheck caps_avail_count to effectively reuse
the newly trimmed caps. Also, when releasing unnecessary
caps, follow the same rule as ceph_put_cap().

Signed-off-by: Chengguang Xu <cgxu519@icloud.com>
---
Only compile tested.

 fs/ceph/caps.c | 41 +++++++++++++++++++++++++++++++++++++----
 1 file changed, 37 insertions(+), 4 deletions(-)
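
For readers skimming the thread, here is a stand-alone, user-space model of the two accounting rules the commit message describes: after a trim, pull newly available caps back into the reservation before allocating more, and only free a surplus cap when the available pool already covers reserve + min (the ceph_put_cap() rule). The struct and helper names below are illustrative only, not the kernel's; this is a sketch of the arithmetic, not the actual fs/ceph/caps.c code.

/*
 * Stand-alone model (user space, illustrative only) of the cap pool
 * accounting this patch touches.  The field names mirror the kernel's
 * ceph_mds_client counters, but the structure and helpers here are
 * hypothetical and simplified: no locking, no real cap objects.
 */
#include <stdio.h>

struct cap_pool {
	int total;	/* caps_total_count   */
	int use;	/* caps_use_count     */
	int reserve;	/* caps_reserve_count */
	int avail;	/* caps_avail_count   */
	int min;	/* caps_min_count     */
};

/* After trimming, recheck the available pool before allocating more. */
static int reuse_trimmed(struct cap_pool *p, int need, int have)
{
	int more;

	if (!p->avail)
		return have;

	/* Take only what is still missing, capped by what is available. */
	more = (p->avail + have >= need) ? need - have : p->avail;
	p->avail -= more;
	p->reserve += more;
	return have + more;
}

/*
 * Release one surplus cap, following the same rule as ceph_put_cap():
 * keep it in the pool unless avail already covers reserve + min_count.
 */
static void put_surplus(struct cap_pool *p)
{
	if (p->avail >= p->reserve + p->min) {
		/* would be kmem_cache_free() in the kernel */
	} else {
		p->avail++;
		p->total++;
	}
}

int main(void)
{
	struct cap_pool p = { .total = 10, .use = 4, .reserve = 2,
			      .avail = 4, .min = 2 };
	int have = 1;

	/* Pretend a trim just returned caps to the pool, then recheck. */
	have = reuse_trimmed(&p, 5, have);
	printf("have=%d avail=%d reserve=%d\n", have, p.avail, p.reserve);

	put_surplus(&p);
	printf("avail=%d total=%d\n", p.avail, p.total);
	return 0;
}

With the sample numbers this prints "have=5 avail=0 reserve=6" and then "avail=1 total=11": the shortfall is covered from the pool first, and the surplus cap is kept in the pool rather than freed.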

Comments

Yan, Zheng March 5, 2018, 3:05 a.m. UTC | #1
> On 24 Feb 2018, at 18:36, Chengguang Xu <cgxu519@icloud.com> wrote:
> 
> When caps_avail_count is at a low level, most newly
> trimmed caps will probably go into ->caps_list and
> caps_avail_count will be increased. Hence, after trimming
> we should recheck caps_avail_count to effectively reuse
> the newly trimmed caps. Also, when releasing unnecessary
> caps, follow the same rule as ceph_put_cap().
> 
> Signed-off-by: Chengguang Xu <cgxu519@icloud.com>
> ---
> Only compile tested.
> 
> fs/ceph/caps.c | 41 +++++++++++++++++++++++++++++++++++++----
> 1 file changed, 37 insertions(+), 4 deletions(-)
> 
> diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
> index 6582c45..ba58422 100644
> --- a/fs/ceph/caps.c
> +++ b/fs/ceph/caps.c
> @@ -165,6 +165,7 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
> 	int have;
> 	int alloc = 0;
> 	int max_caps;
> +	int more_avail_caps;
> 	bool trimmed = false;
> 	struct ceph_mds_session *s;
> 	LIST_HEAD(newcaps);
> @@ -204,6 +205,24 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
> 					mutex_lock(&mdsc->mutex);
> 				}
> 				trimmed = true;
> +
> +				spin_lock(&mdsc->caps_list_lock);
> +				if (mdsc->caps_avail_count) {
> +					if (mdsc->caps_avail_count + have + alloc >= need)
> +						more_avail_caps = need - alloc - have;
> +					else
> +						more_avail_caps = mdsc->caps_avail_count;
> +
> +					i +=  more_avail_caps;
> +					have += more_avail_caps;
> +					mdsc->caps_avail_count -= more_avail_caps;
> +					mdsc->caps_reserve_count += more_avail_caps;
> +				}
> +				spin_unlock(&mdsc->caps_list_lock);
> +
> +				if (i >= need)
> +					break;
> +
> 				goto retry;
> 			} else {
> 				pr_warn("reserve caps ctx=%p ENOMEM "
> @@ -234,16 +253,30 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
> 	return 0;
> 
> out_nomem:
> +
> +	spin_lock(&mdsc->caps_list_lock);
> +	mdsc->caps_avail_count += have;
> +	mdsc->caps_reserve_count -= have;
> +
> 	while (!list_empty(&newcaps)) {
> 		cap = list_first_entry(&newcaps,
> 				struct ceph_cap, caps_item);
> 		list_del(&cap->caps_item);
> -		kmem_cache_free(ceph_cap_cachep, cap);
> +
> +		/*
> +		 * Keep some preallocated caps around (ceph_min_count), to
> +		 * avoid lots of free/alloc churn.
> +		 */
> +		if (mdsc->caps_avail_count >= mdsc->caps_reserve_count +
> +				      mdsc->caps_min_count) {
> +			kmem_cache_free(ceph_cap_cachep, cap);
> +		} else {
> +			mdsc->caps_avail_count++;
> +			mdsc->caps_total_count++;
> +			list_add(&cap->caps_item, &mdsc->caps_list);
> +		}
> 	}
> 
> -	spin_lock(&mdsc->caps_list_lock);
> -	mdsc->caps_avail_count += have;
> -	mdsc->caps_reserve_count -= have;
> 	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
> 					 mdsc->caps_reserve_count +
> 					 mdsc->caps_avail_count);
> -- 
> 1.8.3.1

Some lines exceed 80 characters. I made some modifications to the patch.

Regards
Yan, Zheng

> 

Chengguang Xu March 5, 2018, 4:19 a.m. UTC | #2
> On 5 Mar 2018, at 11:05, Yan, Zheng <zyan@redhat.com> wrote:
> 
> 
> 
>> On 24 Feb 2018, at 18:36, Chengguang Xu <cgxu519@icloud.com> wrote:
>> 
>> When caps_avail_count is at a low level, most newly
>> trimmed caps will probably go into ->caps_list and
>> caps_avail_count will be increased. Hence, after trimming
>> we should recheck caps_avail_count to effectively reuse
>> the newly trimmed caps. Also, when releasing unnecessary
>> caps, follow the same rule as ceph_put_cap().
>> 
>> Signed-off-by: Chengguang Xu <cgxu519@icloud.com>
>> ---
>> Only compile tested.
>> 
>> fs/ceph/caps.c | 41 +++++++++++++++++++++++++++++++++++++----
>> 1 file changed, 37 insertions(+), 4 deletions(-)
>> 
>> diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
>> index 6582c45..ba58422 100644
>> --- a/fs/ceph/caps.c
>> +++ b/fs/ceph/caps.c
>> @@ -165,6 +165,7 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
>> 	int have;
>> 	int alloc = 0;
>> 	int max_caps;
>> +	int more_avail_caps;
>> 	bool trimmed = false;
>> 	struct ceph_mds_session *s;
>> 	LIST_HEAD(newcaps);
>> @@ -204,6 +205,24 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
>> 					mutex_lock(&mdsc->mutex);
>> 				}
>> 				trimmed = true;
>> +
>> +				spin_lock(&mdsc->caps_list_lock);
>> +				if (mdsc->caps_avail_count) {
>> +					if (mdsc->caps_avail_count + have + alloc >= need)
>> +						more_avail_caps = need - alloc - have;
>> +					else
>> +						more_avail_caps = mdsc->caps_avail_count;
>> +
>> +					i +=  more_avail_caps;
>> +					have += more_avail_caps;
>> +					mdsc->caps_avail_count -= more_avail_caps;
>> +					mdsc->caps_reserve_count += more_avail_caps;
>> +				}
>> +				spin_unlock(&mdsc->caps_list_lock);
>> +
>> +				if (i >= need)
>> +					break;
>> +
>> 				goto retry;
>> 			} else {
>> 				pr_warn("reserve caps ctx=%p ENOMEM "
>> @@ -234,16 +253,30 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
>> 	return 0;
>> 
>> out_nomem:
>> +
>> +	spin_lock(&mdsc->caps_list_lock);
>> +	mdsc->caps_avail_count += have;
>> +	mdsc->caps_reserve_count -= have;
>> +
>> 	while (!list_empty(&newcaps)) {
>> 		cap = list_first_entry(&newcaps,
>> 				struct ceph_cap, caps_item);
>> 		list_del(&cap->caps_item);
>> -		kmem_cache_free(ceph_cap_cachep, cap);
>> +
>> +		/*
>> +		 * Keep some preallocated caps around (ceph_min_count), to
>> +		 * avoid lots of free/alloc churn.
>> +		 */
>> +		if (mdsc->caps_avail_count >= mdsc->caps_reserve_count +
>> +				      mdsc->caps_min_count) {
>> +			kmem_cache_free(ceph_cap_cachep, cap);
>> +		} else {
>> +			mdsc->caps_avail_count++;
>> +			mdsc->caps_total_count++;
>> +			list_add(&cap->caps_item, &mdsc->caps_list);
>> +		}
>> 	}
>> 
>> -	spin_lock(&mdsc->caps_list_lock);
>> -	mdsc->caps_avail_count += have;
>> -	mdsc->caps_reserve_count -= have;
>> 	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
>> 					 mdsc->caps_reserve_count +
>> 					 mdsc->caps_avail_count);
>> -- 
>> 1.8.3.1
> 
> Some lines exceed 80 characters. I made some modifications to the patch.

I’ll pay more attention to it next time.

Thanks,
Chengguang.



Patch

diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 6582c45..ba58422 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -165,6 +165,7 @@  int ceph_reserve_caps(struct ceph_mds_client *mdsc,
 	int have;
 	int alloc = 0;
 	int max_caps;
+	int more_avail_caps;
 	bool trimmed = false;
 	struct ceph_mds_session *s;
 	LIST_HEAD(newcaps);
@@ -204,6 +205,24 @@  int ceph_reserve_caps(struct ceph_mds_client *mdsc,
 					mutex_lock(&mdsc->mutex);
 				}
 				trimmed = true;
+
+				spin_lock(&mdsc->caps_list_lock);
+				if (mdsc->caps_avail_count) {
+					if (mdsc->caps_avail_count + have + alloc >= need)
+						more_avail_caps = need - alloc - have;
+					else
+						more_avail_caps = mdsc->caps_avail_count;
+
+					i +=  more_avail_caps;
+					have += more_avail_caps;
+					mdsc->caps_avail_count -= more_avail_caps;
+					mdsc->caps_reserve_count += more_avail_caps;
+				}
+				spin_unlock(&mdsc->caps_list_lock);
+
+				if (i >= need)
+					break;
+
 				goto retry;
 			} else {
 				pr_warn("reserve caps ctx=%p ENOMEM "
@@ -234,16 +253,30 @@  int ceph_reserve_caps(struct ceph_mds_client *mdsc,
 	return 0;
 
 out_nomem:
+
+	spin_lock(&mdsc->caps_list_lock);
+	mdsc->caps_avail_count += have;
+	mdsc->caps_reserve_count -= have;
+
 	while (!list_empty(&newcaps)) {
 		cap = list_first_entry(&newcaps,
 				struct ceph_cap, caps_item);
 		list_del(&cap->caps_item);
-		kmem_cache_free(ceph_cap_cachep, cap);
+
+		/*
+		 * Keep some preallocated caps around (ceph_min_count), to
+		 * avoid lots of free/alloc churn.
+		 */
+		if (mdsc->caps_avail_count >= mdsc->caps_reserve_count +
+				      mdsc->caps_min_count) {
+			kmem_cache_free(ceph_cap_cachep, cap);
+		} else {
+			mdsc->caps_avail_count++;
+			mdsc->caps_total_count++;
+			list_add(&cap->caps_item, &mdsc->caps_list);
+		}
 	}
 
-	spin_lock(&mdsc->caps_list_lock);
-	mdsc->caps_avail_count += have;
-	mdsc->caps_reserve_count -= have;
 	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
 					 mdsc->caps_reserve_count +
 					 mdsc->caps_avail_count);