Message ID | 1490069087-4783-2-git-send-email-jasowang@redhat.com (mailing list archive) |
---|---|
State | New, archived |
Hello!

On 3/21/2017 7:04 AM, Jason Wang wrote:

> Signed-off-by: Jason Wang <jasowang@redhat.com>
> ---
>  include/linux/ptr_ring.h | 65 ++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 65 insertions(+)
>
> diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
> index 6c70444..4771ded 100644
> --- a/include/linux/ptr_ring.h
> +++ b/include/linux/ptr_ring.h
> @@ -247,6 +247,22 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r)
>          return ptr;
>  }
>
> +static inline int __ptr_ring_consume_batched(struct ptr_ring *r,
> +                                             void **array, int n)
> +{
> +        void *ptr;
> +        int i = 0;
> +
> +        while (i < n) {

   Hm, why not *for*?

> +                ptr = __ptr_ring_consume(r);
> +                if (!ptr)
> +                        break;
> +                array[i++] = ptr;
> +        }
> +
> +        return i;
> +}
> +
>  /*
>   * Note: resize (below) nests producer lock within consumer lock, so if you
>   * call this in interrupt or BH context, you must disable interrupts/BH when
> @@ -297,6 +313,55 @@ static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
[...]

MBR, Sergei
On 2017-03-21 18:25, Sergei Shtylyov wrote:
> Hello!
>
> On 3/21/2017 7:04 AM, Jason Wang wrote:
>
>> Signed-off-by: Jason Wang <jasowang@redhat.com>
>> ---
>>  include/linux/ptr_ring.h | 65 ++++++++++++++++++++++++++++++++++++++++++++++++
>>  1 file changed, 65 insertions(+)
>>
>> diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
>> index 6c70444..4771ded 100644
>> --- a/include/linux/ptr_ring.h
>> +++ b/include/linux/ptr_ring.h
>> @@ -247,6 +247,22 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r)
>>          return ptr;
>>  }
>>
>> +static inline int __ptr_ring_consume_batched(struct ptr_ring *r,
>> +                                             void **array, int n)
>> +{
>> +        void *ptr;
>> +        int i = 0;
>> +
>> +        while (i < n) {
>
>     Hm, why not *for*?

Yes, it may be better. If there are other comments on the series, I will change it in the next version.

Thanks
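For reference, the change Sergei is asking for would presumably look like the sketch below; this is an illustration, not code from the posted series. The while loop maps one-to-one onto a for loop, with identical behavior.

```c
/* Sketch of the suggested for-loop form; behavior is unchanged:
 * consume up to n pointers, stop early once the ring is empty,
 * and return how many were actually consumed.
 */
static inline int __ptr_ring_consume_batched(struct ptr_ring *r,
                                             void **array, int n)
{
        void *ptr;
        int i;

        for (i = 0; i < n; i++) {
                ptr = __ptr_ring_consume(r);
                if (!ptr)
                        break;
                array[i] = ptr;
        }

        return i;
}
```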
On Tue, Mar 21, 2017 at 12:04:40PM +0800, Jason Wang wrote:
> Signed-off-by: Jason Wang <jasowang@redhat.com>
> ---
>  include/linux/ptr_ring.h | 65 ++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 65 insertions(+)
>
> diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
> index 6c70444..4771ded 100644
> --- a/include/linux/ptr_ring.h
> +++ b/include/linux/ptr_ring.h
> @@ -247,6 +247,22 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r)
>          return ptr;
>  }
>
> +static inline int __ptr_ring_consume_batched(struct ptr_ring *r,
> +                                             void **array, int n)
> +{
> +        void *ptr;
> +        int i = 0;
> +
> +        while (i < n) {
> +                ptr = __ptr_ring_consume(r);
> +                if (!ptr)
> +                        break;
> +                array[i++] = ptr;
> +        }
> +
> +        return i;
> +}
> +
>  /*
>   * Note: resize (below) nests producer lock within consumer lock, so if you
>   * call this in interrupt or BH context, you must disable interrupts/BH when

This ignores the comment above that function:

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax().
 */

Also - it looks like it shouldn't matter if reads are reordered, but I wonder.
Thoughts? Including some reasoning about it in the commit log would be nice.

> @@ -297,6 +313,55 @@ static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
[...]
On 2017-03-22 21:43, Michael S. Tsirkin wrote:
> On Tue, Mar 21, 2017 at 12:04:40PM +0800, Jason Wang wrote:
>> Signed-off-by: Jason Wang <jasowang@redhat.com>
>> ---
>>  include/linux/ptr_ring.h | 65 ++++++++++++++++++++++++++++++++++++++++++++++++
>>  1 file changed, 65 insertions(+)
>>
>> diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
>> index 6c70444..4771ded 100644
>> --- a/include/linux/ptr_ring.h
>> +++ b/include/linux/ptr_ring.h
>> @@ -247,6 +247,22 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r)
>>          return ptr;
>>  }
>>
>> +static inline int __ptr_ring_consume_batched(struct ptr_ring *r,
>> +                                             void **array, int n)
>> +{
>> +        void *ptr;
>> +        int i = 0;
>> +
>> +        while (i < n) {
>> +                ptr = __ptr_ring_consume(r);
>> +                if (!ptr)
>> +                        break;
>> +                array[i++] = ptr;
>> +        }
>> +
>> +        return i;
>> +}
>> +
>>  /*
>>   * Note: resize (below) nests producer lock within consumer lock, so if you
>>   * call this in interrupt or BH context, you must disable interrupts/BH when
>
> This ignores the comment above that function:
>
> /* Note: callers invoking this in a loop must use a compiler barrier,
>  * for example cpu_relax().
>  */

Yes, __ptr_ring_swap_queue() ignores this too.

> Also - it looks like it shouldn't matter if reads are reordered, but I wonder.
> Thoughts? Including some reasoning about it in the commit log would be nice.

Yes, I think it doesn't matter in this case; it only matters for batched producing.

Thanks

>> @@ -297,6 +313,55 @@ static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
[...]
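To make the quoted comment concrete, here is a sketch (assumed, not from the series) of the polling pattern it has in mind: a lock-free consumer that calls __ptr_ring_consume_batched() in a loop needs a compiler barrier such as cpu_relax() between empty polls, so the compiler cannot cache the ring reads across iterations. The function name poll_ring, the handle_one callback, and the batch size of 16 are all illustrative.

```c
/* Illustrative polling consumer. This assumes a single consumer,
 * which is why the unlocked __ptr_ring_consume_batched() is safe here.
 */
static void poll_ring(struct ptr_ring *r, void (*handle_one)(void *))
{
        void *batch[16];
        int i, n;

        for (;;) {
                n = __ptr_ring_consume_batched(r, batch, 16);
                for (i = 0; i < n; i++)
                        handle_one(batch[i]);
                if (!n)
                        cpu_relax();    /* compiler barrier between polls */
        }
}
```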
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 6c70444..4771ded 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -247,6 +247,22 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r)
         return ptr;
 }
 
+static inline int __ptr_ring_consume_batched(struct ptr_ring *r,
+                                             void **array, int n)
+{
+        void *ptr;
+        int i = 0;
+
+        while (i < n) {
+                ptr = __ptr_ring_consume(r);
+                if (!ptr)
+                        break;
+                array[i++] = ptr;
+        }
+
+        return i;
+}
+
 /*
  * Note: resize (below) nests producer lock within consumer lock, so if you
  * call this in interrupt or BH context, you must disable interrupts/BH when
@@ -297,6 +313,55 @@ static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
         return ptr;
 }
 
+static inline int ptr_ring_consume_batched(struct ptr_ring *r,
+                                           void **array, int n)
+{
+        int ret;
+
+        spin_lock(&r->consumer_lock);
+        ret = __ptr_ring_consume_batched(r, array, n);
+        spin_unlock(&r->consumer_lock);
+
+        return ret;
+}
+
+static inline int ptr_ring_consume_batched_irq(struct ptr_ring *r,
+                                               void **array, int n)
+{
+        int ret;
+
+        spin_lock_irq(&r->consumer_lock);
+        ret = __ptr_ring_consume_batched(r, array, n);
+        spin_unlock_irq(&r->consumer_lock);
+
+        return ret;
+}
+
+static inline int ptr_ring_consume_batched_any(struct ptr_ring *r,
+                                               void **array, int n)
+{
+        unsigned long flags;
+        int ret;
+
+        spin_lock_irqsave(&r->consumer_lock, flags);
+        ret = __ptr_ring_consume_batched(r, array, n);
+        spin_unlock_irqrestore(&r->consumer_lock, flags);
+
+        return ret;
+}
+
+static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
+                                              void **array, int n)
+{
+        int ret;
+
+        spin_lock_bh(&r->consumer_lock);
+        ret = __ptr_ring_consume_batched(r, array, n);
+        spin_unlock_bh(&r->consumer_lock);
+
+        return ret;
+}
+
 /* Cast to structure type and call a function without discarding from FIFO.
  * Function must return a value.
  * Callers must take consumer_lock.
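The four wrappers differ only in how they take the consumer lock (plain, irq-disabling, irqsave, and BH-disabling), mirroring the existing single-pointer consume variants. A usage sketch for the plain locked variant follows; ptr_ring_drain_all and the destroy callback are hypothetical names, and the batch size of 64 is arbitrary.

```c
/* Illustrative drain loop using the new locked helper from process
 * context; destroy() stands in for whatever the caller does per entry.
 */
static void ptr_ring_drain_all(struct ptr_ring *r, void (*destroy)(void *))
{
        void *batch[64];
        int i, n;

        do {
                n = ptr_ring_consume_batched(r, batch, 64);
                for (i = 0; i < n; i++)
                        destroy(batch[i]);
        } while (n == 64);      /* a full batch may mean more entries remain */
}
```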
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 include/linux/ptr_ring.h | 65 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 65 insertions(+)