diff mbox series

[3/6] ceph: refactor error handling code in ceph_reserve_caps()

Message ID 20180728151540.11253-4-cgxu519@gmx.com (mailing list archive)
State New, archived
Headers show
Series code cleanup and minor optimization for cap reservation | expand

Commit Message

Chengguang Xu July 28, 2018, 3:15 p.m. UTC
Call new helper __ceph_unreserve_caps() to reduce duplicated code.

Signed-off-by: Chengguang Xu <cgxu519@gmx.com>
---
 fs/ceph/caps.c | 40 +++++++++-------------------------------
 1 file changed, 9 insertions(+), 31 deletions(-)

Comments

Yan, Zheng July 31, 2018, 9:53 a.m. UTC | #1
On Sat, Jul 28, 2018 at 11:19 PM Chengguang Xu <cgxu519@gmx.com> wrote:
>
> Call new helper __ceph_unreserve_caps() to reduce duplicated code.
>
> Signed-off-by: Chengguang Xu <cgxu519@gmx.com>
> ---
>  fs/ceph/caps.c | 40 +++++++++-------------------------------
>  1 file changed, 9 insertions(+), 31 deletions(-)
>
> diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
> index 8129f6b39147..22eb70742c0b 100644
> --- a/fs/ceph/caps.c
> +++ b/fs/ceph/caps.c
> @@ -198,6 +198,7 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
>         int have;
>         int alloc = 0;
>         int max_caps;
> +       int err = 0;
>         bool trimmed = false;
>         struct ceph_mds_session *s;
>         LIST_HEAD(newcaps);
> @@ -264,10 +265,12 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
>
>                 pr_warn("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
>                         ctx, need, have + alloc);
> +               err = -ENOMEM;
>                 goto out_nomem;
>         }
>         BUG_ON(have + alloc != need);
>
> +out_nomem:
>         spin_lock(&mdsc->caps_list_lock);
>         mdsc->caps_total_count += alloc;
>         mdsc->caps_reserve_count += alloc;
> @@ -276,42 +279,17 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
>         BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
>                                          mdsc->caps_reserve_count +
>                                          mdsc->caps_avail_count);
> +
> +       if (err)
> +               __ceph_unreserve_caps(mdsc, have + alloc);
> +       else
> +               ctx->count = need;
>         spin_unlock(&mdsc->caps_list_lock);
>
> -       ctx->count = need;
>         dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
>              ctx, mdsc->caps_total_count, mdsc->caps_use_count,
>              mdsc->caps_reserve_count, mdsc->caps_avail_count);
> -       return 0;
> -
> -out_nomem:
> -
> -       spin_lock(&mdsc->caps_list_lock);
> -       mdsc->caps_avail_count += have;
> -       mdsc->caps_reserve_count -= have;
> -
> -       while (!list_empty(&newcaps)) {
> -               cap = list_first_entry(&newcaps,
> -                               struct ceph_cap, caps_item);
> -               list_del(&cap->caps_item);
> -
> -               /* Keep some preallocated caps around (ceph_min_count), to
> -                * avoid lots of free/alloc churn. */
> -               if (mdsc->caps_avail_count >=
> -                   mdsc->caps_reserve_count + mdsc->caps_min_count) {
> -                       kmem_cache_free(ceph_cap_cachep, cap);
> -               } else {
> -                       mdsc->caps_avail_count++;
> -                       mdsc->caps_total_count++;
> -                       list_add(&cap->caps_item, &mdsc->caps_list);

you remove code that handles newly allocated caps. That's wrong

> -               }
> -       }
> -
> -       BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
> -                                        mdsc->caps_reserve_count +
> -                                        mdsc->caps_avail_count);
> -       spin_unlock(&mdsc->caps_list_lock);
> -       return -ENOMEM;
> +       return err;
>  }
>
>  int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
> --
> 2.17.1
>
> --
> To unsubscribe from this list: send the line "unsubscribe ceph-devel" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
--
To unsubscribe from this list: send the line "unsubscribe ceph-devel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Chengguang Xu July 31, 2018, 10:22 a.m. UTC | #2
> Sent: Tuesday, July 31, 2018 at 5:53 PM
> From: "Yan, Zheng" <ukernel@gmail.com>
> To: "Chengguang Xu" <cgxu519@gmx.com>
> Cc: ceph-devel <ceph-devel@vger.kernel.org>, "Zheng Yan" <zyan@redhat.com>, "Ilya Dryomov" <idryomov@gmail.com>
> Subject: Re: [PATCH 3/6] ceph: refactor error handling code in ceph_reserve_caps()
>
> On Sat, Jul 28, 2018 at 11:19 PM Chengguang Xu <cgxu519@gmx.com> wrote:
> >
> > Call new helper __ceph_unreserve_caps() to reduce duplicated code.
> >
> > Signed-off-by: Chengguang Xu <cgxu519@gmx.com>
> > ---
> >  fs/ceph/caps.c | 40 +++++++++-------------------------------
> >  1 file changed, 9 insertions(+), 31 deletions(-)
> >
> > diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
> > index 8129f6b39147..22eb70742c0b 100644
> > --- a/fs/ceph/caps.c
> > +++ b/fs/ceph/caps.c
> > @@ -198,6 +198,7 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
> >         int have;
> >         int alloc = 0;
> >         int max_caps;
> > +       int err = 0;
> >         bool trimmed = false;
> >         struct ceph_mds_session *s;
> >         LIST_HEAD(newcaps);
> > @@ -264,10 +265,12 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
> >
> >                 pr_warn("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
> >                         ctx, need, have + alloc);
> > +               err = -ENOMEM;
> >                 goto out_nomem;
> >         }
> >         BUG_ON(have + alloc != need);
> >
> > +out_nomem:
> >         spin_lock(&mdsc->caps_list_lock);
> >         mdsc->caps_total_count += alloc;
> >         mdsc->caps_reserve_count += alloc;
> > @@ -276,42 +279,17 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
> >         BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
> >                                          mdsc->caps_reserve_count +
> >                                          mdsc->caps_avail_count);
> > +
> > +       if (err)
> > +               __ceph_unreserve_caps(mdsc, have + alloc);
> > +       else
> > +               ctx->count = need;
> >         spin_unlock(&mdsc->caps_list_lock);
> >
> > -       ctx->count = need;
> >         dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
> >              ctx, mdsc->caps_total_count, mdsc->caps_use_count,
> >              mdsc->caps_reserve_count, mdsc->caps_avail_count);
> > -       return 0;
> > -
> > -out_nomem:
> > -
> > -       spin_lock(&mdsc->caps_list_lock);
> > -       mdsc->caps_avail_count += have;
> > -       mdsc->caps_reserve_count -= have;
> > -
> > -       while (!list_empty(&newcaps)) {
> > -               cap = list_first_entry(&newcaps,
> > -                               struct ceph_cap, caps_item);
> > -               list_del(&cap->caps_item);
> > -
> > -               /* Keep some preallocated caps around (ceph_min_count), to
> > -                * avoid lots of free/alloc churn. */
> > -               if (mdsc->caps_avail_count >=
> > -                   mdsc->caps_reserve_count + mdsc->caps_min_count) {
> > -                       kmem_cache_free(ceph_cap_cachep, cap);
> > -               } else {
> > -                       mdsc->caps_avail_count++;
> > -                       mdsc->caps_total_count++;
> > -                       list_add(&cap->caps_item, &mdsc->caps_list);
> 
> you remove code that handles newly allocated caps. That's wrong

I slightly changed the placement of the label 'out_nomem', so regardless of failure
or success, the newly allocated caps will be spliced into &mdsc->caps_list.

I think __ceph_unreserve_caps() can handle this case correctly. Am I missing something?


Thanks,
Chengguang 
--
To unsubscribe from this list: send the line "unsubscribe ceph-devel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox series

Patch

diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 8129f6b39147..22eb70742c0b 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -198,6 +198,7 @@  int ceph_reserve_caps(struct ceph_mds_client *mdsc,
 	int have;
 	int alloc = 0;
 	int max_caps;
+	int err = 0;
 	bool trimmed = false;
 	struct ceph_mds_session *s;
 	LIST_HEAD(newcaps);
@@ -264,10 +265,12 @@  int ceph_reserve_caps(struct ceph_mds_client *mdsc,
 
 		pr_warn("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
 			ctx, need, have + alloc);
+		err = -ENOMEM;
 		goto out_nomem;
 	}
 	BUG_ON(have + alloc != need);
 
+out_nomem:
 	spin_lock(&mdsc->caps_list_lock);
 	mdsc->caps_total_count += alloc;
 	mdsc->caps_reserve_count += alloc;
@@ -276,42 +279,17 @@  int ceph_reserve_caps(struct ceph_mds_client *mdsc,
 	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
 					 mdsc->caps_reserve_count +
 					 mdsc->caps_avail_count);
+
+	if (err)
+		__ceph_unreserve_caps(mdsc, have + alloc);
+	else
+		ctx->count = need;
 	spin_unlock(&mdsc->caps_list_lock);
 
-	ctx->count = need;
 	dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
 	     ctx, mdsc->caps_total_count, mdsc->caps_use_count,
 	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
-	return 0;
-
-out_nomem:
-
-	spin_lock(&mdsc->caps_list_lock);
-	mdsc->caps_avail_count += have;
-	mdsc->caps_reserve_count -= have;
-
-	while (!list_empty(&newcaps)) {
-		cap = list_first_entry(&newcaps,
-				struct ceph_cap, caps_item);
-		list_del(&cap->caps_item);
-
-		/* Keep some preallocated caps around (ceph_min_count), to
-		 * avoid lots of free/alloc churn. */
-		if (mdsc->caps_avail_count >=
-		    mdsc->caps_reserve_count + mdsc->caps_min_count) {
-			kmem_cache_free(ceph_cap_cachep, cap);
-		} else {
-			mdsc->caps_avail_count++;
-			mdsc->caps_total_count++;
-			list_add(&cap->caps_item, &mdsc->caps_list);
-		}
-	}
-
-	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
-					 mdsc->caps_reserve_count +
-					 mdsc->caps_avail_count);
-	spin_unlock(&mdsc->caps_list_lock);
-	return -ENOMEM;
+	return err;
 }
 
 int ceph_unreserve_caps(struct ceph_mds_client *mdsc,