
cachefiles: make on-demand request distribution fairer

Message ID 20220817065200.11543-1-yinxin.x@bytedance.com (mailing list archive)
State New, archived
Series cachefiles: make on-demand request distribution fairer

Commit Message

Xin Yin Aug. 17, 2022, 6:52 a.m. UTC
Currently, enqueuing and dequeuing of on-demand requests both start
from index 0, which makes request distribution unfair: under heavy
concurrent I/O, requests stored at higher indices can starve.

Search for requests cyclically in cachefiles_ondemand_daemon_read()
to make the distribution fairer.
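
To illustrate, here is a minimal user-space model of the new search
order (a sketch only: the flag array, find_marked() and pick_request()
stand in for the xarray, xas_find_marked() and
cachefiles_ondemand_daemon_read(); they are not kernel code):

#include <stdio.h>
#include <stdbool.h>

#define NR_SLOTS 8

static bool is_new[NR_SLOTS];     /* models the CACHEFILES_REQ_NEW mark */
static unsigned long req_id_next; /* where the next scan resumes */

/* Return the first marked slot in [start, end], or -1 if none. */
static int find_marked(unsigned long start, unsigned long end)
{
	for (unsigned long i = start; i <= end && i < NR_SLOTS; i++)
		if (is_new[i])
			return i;
	return -1;
}

/* Two-pass cyclic search, mirroring the patched daemon read. */
static int pick_request(void)
{
	int idx = find_marked(req_id_next, NR_SLOTS - 1);

	if (idx < 0 && req_id_next > 0)   /* wrap to the low indices */
		idx = find_marked(0, req_id_next - 1);
	if (idx >= 0) {
		is_new[idx] = false;      /* clear the mark */
		req_id_next = idx + 1;    /* resume past this request */
	}
	return idx;
}

int main(void)
{
	is_new[1] = is_new[6] = true;
	printf("%d %d %d\n", pick_request(), pick_request(), pick_request());
	return 0;                         /* prints "1 6 -1" */
}

With the old always-from-zero scan, a steady stream of new requests at
low indices could keep a high-index request waiting indefinitely; the
cyclic scan guarantees every pending request is reached within one
sweep of the index space.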

Reported-by: Yongqing Li <liyongqing@bytedance.com>
Signed-off-by: Xin Yin <yinxin.x@bytedance.com>
---
 fs/cachefiles/internal.h |  1 +
 fs/cachefiles/ondemand.c | 12 +++++++++---
 2 files changed, 10 insertions(+), 3 deletions(-)

Comments

Gao Xiang Aug. 17, 2022, 7:14 a.m. UTC | #1
Hi Yin,

On Wed, Aug 17, 2022 at 02:52:00PM +0800, Xin Yin wrote:
> Currently, enqueuing and dequeuing of on-demand requests both start
> from index 0, which makes request distribution unfair: under heavy
> concurrent I/O, requests stored at higher indices can starve.
> 
> Search for requests cyclically in cachefiles_ondemand_daemon_read()
> to make the distribution fairer.

Yeah, thanks for the catch.  The previous approach could cause some
unfairness and starve some requests... But we don't need strict FIFO
here.

> 
> Reported-by: Yongqing Li <liyongqing@bytedance.com>
> Signed-off-by: Xin Yin <yinxin.x@bytedance.com>
> ---
>  fs/cachefiles/internal.h |  1 +
>  fs/cachefiles/ondemand.c | 12 +++++++++---
>  2 files changed, 10 insertions(+), 3 deletions(-)
> 
> diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
> index 6cba2c6de2f9..2ad58c465208 100644
> --- a/fs/cachefiles/internal.h
> +++ b/fs/cachefiles/internal.h
> @@ -111,6 +111,7 @@ struct cachefiles_cache {
>  	char				*tag;		/* cache binding tag */
>  	refcount_t			unbind_pincount;/* refcount to do daemon unbind */
>  	struct xarray			reqs;		/* xarray of pending on-demand requests */
> +	unsigned long			req_id_next;

	unsigned long			ondemand_req_id_next; ?

Otherwise it looks good to me,

Thanks,
Gao Xiang

>  	struct xarray			ondemand_ids;	/* xarray for ondemand_id allocation */
>  	u32				ondemand_id_next;
>  };
> diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
> index 1fee702d5529..247961d65369 100644
> --- a/fs/cachefiles/ondemand.c
> +++ b/fs/cachefiles/ondemand.c
> @@ -238,14 +238,19 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
>  	unsigned long id = 0;
>  	size_t n;
>  	int ret = 0;
> -	XA_STATE(xas, &cache->reqs, 0);
> +	XA_STATE(xas, &cache->reqs, cache->req_id_next);
>  
>  	/*
> -	 * Search for a request that has not ever been processed, to prevent
> -	 * requests from being processed repeatedly.
> +	 * Cyclically search for a request that has not ever been processed,
> +	 * to prevent requests from being processed repeatedly, and make
> +	 * request distribution fair.
>  	 */
>  	xa_lock(&cache->reqs);
>  	req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW);
> +	if (!req && cache->req_id_next > 0) {
> +		xas_set(&xas, 0);
> +		req = xas_find_marked(&xas, cache->req_id_next - 1, CACHEFILES_REQ_NEW);
> +	}
>  	if (!req) {
>  		xa_unlock(&cache->reqs);
>  		return 0;
> @@ -260,6 +265,7 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
>  	}
>  
>  	xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
> +	cache->req_id_next = xas.xa_index + 1;
>  	xa_unlock(&cache->reqs);
>  
>  	id = xas.xa_index;
> -- 
> 2.25.1
> 
Xin Yin Aug. 17, 2022, 9:16 a.m. UTC | #2
On Wed, Aug 17, 2022 at 3:14 PM Gao Xiang <hsiangkao@linux.alibaba.com> wrote:
>
> Hi Yin,
>
> On Wed, Aug 17, 2022 at 02:52:00PM +0800, Xin Yin wrote:
> > Currently, enqueuing and dequeuing of on-demand requests both start
> > from index 0, which makes request distribution unfair: under heavy
> > concurrent I/O, requests stored at higher indices can starve.
> >
> > Search for requests cyclically in cachefiles_ondemand_daemon_read()
> > to make the distribution fairer.
>
> Yeah, thanks for the catch.  The previous approach could cause some
> unfairness and starve some requests... But we don't need strict FIFO
> here.
>
> >
> > Reported-by: Yongqing Li <liyongqing@bytedance.com>
> > Signed-off-by: Xin Yin <yinxin.x@bytedance.com>
> > ---
> >  fs/cachefiles/internal.h |  1 +
> >  fs/cachefiles/ondemand.c | 12 +++++++++---
> >  2 files changed, 10 insertions(+), 3 deletions(-)
> >
> > diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
> > index 6cba2c6de2f9..2ad58c465208 100644
> > --- a/fs/cachefiles/internal.h
> > +++ b/fs/cachefiles/internal.h
> > @@ -111,6 +111,7 @@ struct cachefiles_cache {
> >       char                            *tag;           /* cache binding tag */
> >       refcount_t                      unbind_pincount;/* refcount to do daemon unbind */
> >       struct xarray                   reqs;           /* xarray of pending on-demand requests */
> > +     unsigned long                   req_id_next;
>
>         unsigned long                   ondemand_req_id_next; ?
Hi Xiang,

Thanks for the detailed review. Isn't "ondemand_req_id_next" a little
long? struct cachefiles_cache only holds on-demand requests, so I
think "req_id_next" will not cause ambiguity. Does this make sense?

Thanks,
Xin Yin
>
> Otherwise it looks good to me,
>
> Thanks,
> Gao Xiang
>
> >       struct xarray                   ondemand_ids;   /* xarray for ondemand_id allocation */
> >       u32                             ondemand_id_next;
> >  };
> > diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
> > index 1fee702d5529..247961d65369 100644
> > --- a/fs/cachefiles/ondemand.c
> > +++ b/fs/cachefiles/ondemand.c
> > @@ -238,14 +238,19 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
> >       unsigned long id = 0;
> >       size_t n;
> >       int ret = 0;
> > -     XA_STATE(xas, &cache->reqs, 0);
> > +     XA_STATE(xas, &cache->reqs, cache->req_id_next);
> >
> >       /*
> > -      * Search for a request that has not ever been processed, to prevent
> > -      * requests from being processed repeatedly.
> > +      * Cyclically search for a request that has not ever been processed,
> > +      * to prevent requests from being processed repeatedly, and make
> > +      * request distribution fair.
> >        */
> >       xa_lock(&cache->reqs);
> >       req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW);
> > +     if (!req && cache->req_id_next > 0) {
> > +             xas_set(&xas, 0);
> > +             req = xas_find_marked(&xas, cache->req_id_next - 1, CACHEFILES_REQ_NEW);
> > +     }
> >       if (!req) {
> >               xa_unlock(&cache->reqs);
> >               return 0;
> > @@ -260,6 +265,7 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
> >       }
> >
> >       xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
> > +     cache->req_id_next = xas.xa_index + 1;
> >       xa_unlock(&cache->reqs);
> >
> >       id = xas.xa_index;
> > --
> > 2.25.1
> >
Gao Xiang Aug. 17, 2022, 9:34 a.m. UTC | #3
On Wed, Aug 17, 2022 at 05:16:25PM +0800, Xin Yin wrote:
> On Wed, Aug 17, 2022 at 3:14 PM Gao Xiang <hsiangkao@linux.alibaba.com> wrote:
> >
> > Hi Yin,
> >
> > On Wed, Aug 17, 2022 at 02:52:00PM +0800, Xin Yin wrote:
> > > Currently, enqueuing and dequeuing of on-demand requests both start
> > > from index 0, which makes request distribution unfair: under heavy
> > > concurrent I/O, requests stored at higher indices can starve.
> > >
> > > Search for requests cyclically in cachefiles_ondemand_daemon_read()
> > > to make the distribution fairer.
> >
> > Yeah, thanks for the catch.  The previous approach could cause some
> > unfairness and starve some requests... But we don't need strict FIFO
> > here.
> >
> > >
> > > Reported-by: Yongqing Li <liyongqing@bytedance.com>
> > > Signed-off-by: Xin Yin <yinxin.x@bytedance.com>
> > > ---
> > >  fs/cachefiles/internal.h |  1 +
> > >  fs/cachefiles/ondemand.c | 12 +++++++++---
> > >  2 files changed, 10 insertions(+), 3 deletions(-)
> > >
> > > diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
> > > index 6cba2c6de2f9..2ad58c465208 100644
> > > --- a/fs/cachefiles/internal.h
> > > +++ b/fs/cachefiles/internal.h
> > > @@ -111,6 +111,7 @@ struct cachefiles_cache {
> > >       char                            *tag;           /* cache binding tag */
> > >       refcount_t                      unbind_pincount;/* refcount to do daemon unbind */
> > >       struct xarray                   reqs;           /* xarray of pending on-demand requests */
> > > +     unsigned long                   req_id_next;
> >
> >         unsigned long                   ondemand_req_id_next; ?
> Hi Xiang,
> 
> Thanks for the detailed review. Isn't "ondemand_req_id_next" a
> little long? struct cachefiles_cache only holds on-demand requests,
> so I think "req_id_next" will not cause ambiguity. Does this make
> sense?

If David is fine with "req_id_next", I'm okay with that as well.

Thanks,
Gao Xiang

> 
> Thanks,
> Xin Yin
> >
> > Otherwise it looks good to me,
> >
> > Thanks,
> > Gao Xiang
> >
> > >       struct xarray                   ondemand_ids;   /* xarray for ondemand_id allocation */
> > >       u32                             ondemand_id_next;
> > >  };
> > > diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
> > > index 1fee702d5529..247961d65369 100644
> > > --- a/fs/cachefiles/ondemand.c
> > > +++ b/fs/cachefiles/ondemand.c
> > > @@ -238,14 +238,19 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
> > >       unsigned long id = 0;
> > >       size_t n;
> > >       int ret = 0;
> > > -     XA_STATE(xas, &cache->reqs, 0);
> > > +     XA_STATE(xas, &cache->reqs, cache->req_id_next);
> > >
> > >       /*
> > > -      * Search for a request that has not ever been processed, to prevent
> > > -      * requests from being processed repeatedly.
> > > +      * Cyclically search for a request that has not ever been processed,
> > > +      * to prevent requests from being processed repeatedly, and make
> > > +      * request distribution fair.
> > >        */
> > >       xa_lock(&cache->reqs);
> > >       req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW);
> > > +     if (!req && cache->req_id_next > 0) {
> > > +             xas_set(&xas, 0);
> > > +             req = xas_find_marked(&xas, cache->req_id_next - 1, CACHEFILES_REQ_NEW);
> > > +     }
> > >       if (!req) {
> > >               xa_unlock(&cache->reqs);
> > >               return 0;
> > > @@ -260,6 +265,7 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
> > >       }
> > >
> > >       xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
> > > +     cache->req_id_next = xas.xa_index + 1;
> > >       xa_unlock(&cache->reqs);
> > >
> > >       id = xas.xa_index;
> > > --
> > > 2.25.1
> > >
Jingbo Xu Aug. 17, 2022, 12:15 p.m. UTC | #4
On 8/17/22 2:52 PM, Xin Yin wrote:
> Currently, enqueuing and dequeuing of on-demand requests both start
> from index 0, which makes request distribution unfair: under heavy
> concurrent I/O, requests stored at higher indices can starve.
> 
> Search for requests cyclically in cachefiles_ondemand_daemon_read()
> to make the distribution fairer.
> 
> Reported-by: Yongqing Li <liyongqing@bytedance.com>
> Signed-off-by: Xin Yin <yinxin.x@bytedance.com>
> ---
>  fs/cachefiles/internal.h |  1 +
>  fs/cachefiles/ondemand.c | 12 +++++++++---
>  2 files changed, 10 insertions(+), 3 deletions(-)
> 
> diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
> index 6cba2c6de2f9..2ad58c465208 100644
> --- a/fs/cachefiles/internal.h
> +++ b/fs/cachefiles/internal.h
> @@ -111,6 +111,7 @@ struct cachefiles_cache {
>  	char				*tag;		/* cache binding tag */
>  	refcount_t			unbind_pincount;/* refcount to do daemon unbind */
>  	struct xarray			reqs;		/* xarray of pending on-demand requests */
> +	unsigned long			req_id_next;
>  	struct xarray			ondemand_ids;	/* xarray for ondemand_id allocation */
>  	u32				ondemand_id_next;
>  };
> diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
> index 1fee702d5529..247961d65369 100644
> --- a/fs/cachefiles/ondemand.c
> +++ b/fs/cachefiles/ondemand.c
> @@ -238,14 +238,19 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
>  	unsigned long id = 0;
>  	size_t n;
>  	int ret = 0;
> -	XA_STATE(xas, &cache->reqs, 0);
> +	XA_STATE(xas, &cache->reqs, cache->req_id_next);
>  
>  	/*
> -	 * Search for a request that has not ever been processed, to prevent
> -	 * requests from being processed repeatedly.
> +	 * Cyclically search for a request that has not ever been processed,
> +	 * to prevent requests from being processed repeatedly, and make
> +	 * request distribution fair.
>  	 */
>  	xa_lock(&cache->reqs);
>  	req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW);
> +	if (!req && cache->req_id_next > 0) {
> +		xas_set(&xas, 0);
> +		req = xas_find_marked(&xas, cache->req_id_next - 1, CACHEFILES_REQ_NEW);


LGTM.

Reviewed-by: Jingbo Xu <jefflexu@linux.alibaba.com>

> +	}
>  	if (!req) {
>  		xa_unlock(&cache->reqs);
>  		return 0;
> @@ -260,6 +265,7 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
>  	}
>  
>  	xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
> +	cache->req_id_next = xas.xa_index + 1;
>  	xa_unlock(&cache->reqs);
>  
>  	id = xas.xa_index;
David Howells Aug. 24, 2022, 10:24 a.m. UTC | #5
Gao Xiang <hsiangkao@linux.alibaba.com> wrote:

> If David is fine with "req_id_next", I'm okay with that as well.

I can live with it.

Did you want to give me an R-b line?

David
David Howells Aug. 24, 2022, 10:25 a.m. UTC | #6
Xin Yin <yinxin.x@bytedance.com> wrote:

> Reported-by: Yongqing Li <liyongqing@bytedance.com>
> Signed-off-by: Xin Yin <yinxin.x@bytedance.com>

Can you give me a Fixes: line please?

David
Gao Xiang Aug. 24, 2022, 11 a.m. UTC | #7
Hi David,

On Wed, Aug 24, 2022 at 11:24:29AM +0100, David Howells wrote:
> Gao Xiang <hsiangkao@linux.alibaba.com> wrote:
> 
> > If David is fine with "req_id_next", I'm okay with that as well.
> 
> I can live with it.
> 
> Did you want to give me an R-b line?

Yeah, thanks, much appreciated!

Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>

As far as I know, such unfairness can cause starvation in Bytedance's
test environment, so it would be better to fix it as above.

Thanks,
Gao Xiang

> 
> David
Xin Yin Aug. 25, 2022, 1:36 a.m. UTC | #8
On Wed, Aug 24, 2022 at 6:25 PM David Howells <dhowells@redhat.com> wrote:
>
> Xin Yin <yinxin.x@bytedance.com> wrote:
>
> > Reported-by: Yongqing Li <liyongqing@bytedance.com>
> > Signed-off-by: Xin Yin <yinxin.x@bytedance.com>
>
> Can you give me a Fixes: line please?
>
Sure, I will send a v2 patch and add the Fixes line.

Thanks
Xin Yin

> David
>
David Howells Aug. 25, 2022, 2:34 p.m. UTC | #9
Xin Yin <yinxin.x@bytedance.com> wrote:

> > Can you give me a Fixes: line please?
> >
> Sure , I will send a V2 patch and add the Fixes line.

Just giving me a Fixes line would do.  I can insert it.

David
Xin Yin Aug. 26, 2022, 1:37 a.m. UTC | #10
On Thu, Aug 25, 2022 at 10:34 PM David Howells <dhowells@redhat.com> wrote:
>
> Xin Yin <yinxin.x@bytedance.com> wrote:
>
> > > Can you give me a Fixes: line please?
> > >
> > Sure , I will send a V2 patch and add the Fixes line.
>
> Just giving me a Fixes line would do.  I can insert it.
>

Yeah, many thanks. The Fixes line would be:
Fixes: c8383054506c ("cachefiles: notify the user daemon when looking up cookie")

Thanks,
Xin Yin

> David
>

Patch

diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index 6cba2c6de2f9..2ad58c465208 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -111,6 +111,7 @@  struct cachefiles_cache {
 	char				*tag;		/* cache binding tag */
 	refcount_t			unbind_pincount;/* refcount to do daemon unbind */
 	struct xarray			reqs;		/* xarray of pending on-demand requests */
+	unsigned long			req_id_next;
 	struct xarray			ondemand_ids;	/* xarray for ondemand_id allocation */
 	u32				ondemand_id_next;
 };
diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
index 1fee702d5529..247961d65369 100644
--- a/fs/cachefiles/ondemand.c
+++ b/fs/cachefiles/ondemand.c
@@ -238,14 +238,19 @@  ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
 	unsigned long id = 0;
 	size_t n;
 	int ret = 0;
-	XA_STATE(xas, &cache->reqs, 0);
+	XA_STATE(xas, &cache->reqs, cache->req_id_next);
 
 	/*
-	 * Search for a request that has not ever been processed, to prevent
-	 * requests from being processed repeatedly.
+	 * Cyclically search for a request that has not ever been processed,
+	 * to prevent requests from being processed repeatedly, and make
+	 * request distribution fair.
 	 */
 	xa_lock(&cache->reqs);
 	req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW);
+	if (!req && cache->req_id_next > 0) {
+		xas_set(&xas, 0);
+		req = xas_find_marked(&xas, cache->req_id_next - 1, CACHEFILES_REQ_NEW);
+	}
 	if (!req) {
 		xa_unlock(&cache->reqs);
 		return 0;
@@ -260,6 +265,7 @@  ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
 	}
 
 	xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
+	cache->req_id_next = xas.xa_index + 1;
 	xa_unlock(&cache->reqs);
 
 	id = xas.xa_index;
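
For context, a sketch of the daemon-side loop that this code path
serves: every read() on the cachefiles device ends up in
cachefiles_ondemand_daemon_read(), so the cyclic search above decides
which pending request each read() returns. The message layout follows
include/uapi/linux/cachefiles.h; the setup commands and the
OPEN/CLOSE/READ handlers are elided, so treat this as an outline of
the interface rather than a working daemon.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/types.h>

struct cachefiles_msg {		/* from include/uapi/linux/cachefiles.h */
	__u32 msg_id;
	__u32 opcode;		/* CACHEFILES_OP_{OPEN,CLOSE,READ} */
	__u32 len;
	__u32 object_id;
	__u8  data[];
};

int main(void)
{
	char buf[4096];
	int devfd = open("/dev/cachefiles", O_RDWR);

	if (devfd < 0)
		return 1;
	/* "dir", "tag" and "bind ondemand" writes omitted for brevity. */
	for (;;) {
		/* Each read() returns one pending request; which one is
		 * picked is exactly what the cyclic search makes fair. */
		ssize_t n = read(devfd, buf, sizeof(buf));

		if (n <= 0)
			continue;
		struct cachefiles_msg *msg = (struct cachefiles_msg *)buf;
		printf("request id=%u opcode=%u\n", msg->msg_id, msg->opcode);
		/* ... dispatch to the OPEN/CLOSE/READ handlers here ... */
	}
}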