
drm/i915/perf: Do not clear pollin for small user read buffers

Message ID 20200327231608.68108-1-ashutosh.dixit@intel.com (mailing list archive)
State New, archived
Series drm/i915/perf: Do not clear pollin for small user read buffers

Commit Message

Dixit, Ashutosh March 27, 2020, 11:16 p.m. UTC
It is wrong to block the user thread in the next poll when OA data is
already available which could not fit in the user buffer provided in
the previous read. In several cases the exact user buffer size is not
known. Blocking user space in poll can lead to data loss when the
buffer size used is smaller than the available data.

This change fixes this issue and allows user space to read all OA data
even when using a buffer size smaller than the available data using
multiple non-blocking reads rather than staying blocked in poll till
the next timer interrupt.

v2: Fix ret value for blocking reads (Umesh)
v3: Mistake during patch send (Ashutosh)
v4: Remove -EAGAIN from comment (Umesh)

Cc: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Signed-off-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
 drivers/gpu/drm/i915/i915_perf.c | 59 +++++++-------------------------
 1 file changed, 12 insertions(+), 47 deletions(-)
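
For illustration only (an editorial sketch, not part of the patch): the userspace read pattern this change is meant to support, assuming stream_fd is an i915 perf stream fd opened with O_NONBLOCK and a buffer that may be smaller than the OA data available.

#include <errno.h>
#include <poll.h>
#include <unistd.h>

static void drain_oa_data(int stream_fd)
{
	char buf[4096];	/* may be smaller than the buffered OA data */
	struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };

	/* Wait for the driver to signal that OA data is available. */
	if (poll(&pfd, 1, -1) <= 0 || !(pfd.revents & POLLIN))
		return;

	/*
	 * Drain with repeated non-blocking reads. Without this patch the
	 * driver clears pollin after a partial read, so the next poll()
	 * blocks until the next hrtimer tick even though data is still
	 * buffered, risking OA buffer overflow and data loss.
	 */
	for (;;) {
		ssize_t n = read(stream_fd, buf, sizeof(buf));

		if (n > 0)
			continue;	/* process n bytes of reports, then read again */
		if (n < 0 && errno == EAGAIN)
			break;		/* drained; wait for the next EPOLLIN */
		break;			/* 0 bytes or a real error: stop */
	}
}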

Comments

Lionel Landwerlin March 30, 2020, 8:23 a.m. UTC | #1
On 28/03/2020 01:16, Ashutosh Dixit wrote:
> It is wrong to block the user thread in the next poll when OA data is
> already available which could not fit in the user buffer provided in
> the previous read. In several cases the exact user buffer size is not
> known. Blocking user space in poll can lead to data loss when the
> buffer size used is smaller than the available data.
>
> This change fixes this issue and allows user space to read all OA data
> even when using a buffer size smaller than the available data using
> multiple non-blocking reads rather than staying blocked in poll till
> the next timer interrupt.
>
> v2: Fix ret value for blocking reads (Umesh)
> v3: Mistake during patch send (Ashutosh)
> v4: Remove -EAGAIN from comment (Umesh)
>
> Cc: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
> Signed-off-by: Ashutosh Dixit <ashutosh.dixit@intel.com>


Looks like your change makes more sense than what I suggested.

I have a few nits below.


Thanks,


-Lionel


> ---
>   drivers/gpu/drm/i915/i915_perf.c | 59 +++++++-------------------------
>   1 file changed, 12 insertions(+), 47 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
> index c74ebac50015..5f6d9bff99c8 100644
> --- a/drivers/gpu/drm/i915/i915_perf.c
> +++ b/drivers/gpu/drm/i915/i915_perf.c
> @@ -2914,49 +2914,6 @@ void i915_oa_init_reg_state(const struct intel_context *ce,
>   		gen8_update_reg_state_unlocked(ce, stream);
>   }
>   
> -/**
> - * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation
> - * @stream: An i915 perf stream
> - * @file: An i915 perf stream file
> - * @buf: destination buffer given by userspace
> - * @count: the number of bytes userspace wants to read
> - * @ppos: (inout) file seek position (unused)
> - *
> - * Besides wrapping &i915_perf_stream_ops->read this provides a common place to
> - * ensure that if we've successfully copied any data then reporting that takes
> - * precedence over any internal error status, so the data isn't lost.
> - *
> - * For example ret will be -ENOSPC whenever there is more buffered data than
> - * can be copied to userspace, but that's only interesting if we weren't able
> - * to copy some data because it implies the userspace buffer is too small to
> - * receive a single record (and we never split records).
> - *
> - * Another case with ret == -EFAULT is more of a grey area since it would seem
> - * like bad form for userspace to ask us to overrun its buffer, but the user
> - * knows best:
> - *
> - *   http://yarchive.net/comp/linux/partial_reads_writes.html
> - *
> - * Returns: The number of bytes copied or a negative error code on failure.
> - */
> -static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream,
> -				     struct file *file,
> -				     char __user *buf,
> -				     size_t count,
> -				     loff_t *ppos)
> -{
> -	/* Note we keep the offset (aka bytes read) separate from any
> -	 * error status so that the final check for whether we return
> -	 * the bytes read with a higher precedence than any error (see
> -	 * comment below) doesn't need to be handled/duplicated in
> -	 * stream->ops->read() implementations.
> -	 */
> -	size_t offset = 0;
> -	int ret = stream->ops->read(stream, buf, count, &offset);
> -
> -	return offset ?: (ret ?: -EAGAIN);
> -}
> -
>   /**
>    * i915_perf_read - handles read() FOP for i915 perf stream FDs
>    * @file: An i915 perf stream file
> @@ -2982,6 +2939,8 @@ static ssize_t i915_perf_read(struct file *file,
>   {
>   	struct i915_perf_stream *stream = file->private_data;
>   	struct i915_perf *perf = stream->perf;
> +	size_t offset = 0;
> +	int __ret;
>   	ssize_t ret;
>   
>   	/* To ensure it's handled consistently we simply treat all reads of a
> @@ -3005,16 +2964,19 @@ static ssize_t i915_perf_read(struct file *file,
>   				return ret;
>   
>   			mutex_lock(&perf->lock);
> -			ret = i915_perf_read_locked(stream, file,
> -						    buf, count, ppos);
> +			__ret = stream->ops->read(stream, buf, count, &offset);
> +			ret = offset ?: (__ret ?: -EAGAIN);
I would drop this line above and move it to the end of the function.
>   			mutex_unlock(&perf->lock);
>   		} while (ret == -EAGAIN);
>   	} else {
>   		mutex_lock(&perf->lock);
> -		ret = i915_perf_read_locked(stream, file, buf, count, ppos);
> +		__ret = stream->ops->read(stream, buf, count, &offset);
> +		ret = offset ?: (__ret ?: -EAGAIN);
I would drop this line above and move it to the end of the function.
>   		mutex_unlock(&perf->lock);
>   	}
>   
> +	/* Possible values for __ret are 0, -EFAULT, -ENOSPC, -EIO, ... */
> +
>   	/* We allow the poll checking to sometimes report false positive EPOLLIN
>   	 * events where we might actually report EAGAIN on read() if there's
>   	 * not really any data available. In this situation though we don't
> @@ -3022,8 +2984,11 @@ static ssize_t i915_perf_read(struct file *file,
>   	 * and read() returning -EAGAIN. Clearing the oa.pollin state here
>   	 * effectively ensures we back off until the next hrtimer callback
>   	 * before reporting another EPOLLIN event.
> +	 * The exception to this is if ops->read() returned -ENOSPC which means
> +	 * that more OA data is available than could fit in the user provided
> +	 * buffer. In this case we want the next poll() call to not block.
>   	 */
> -	if (ret >= 0 || ret == -EAGAIN)
> +	if ((ret > 0 || ret == -EAGAIN) && __ret != -ENOSPC)

I guess this could be simplified with

if (__ret != -ENOSPC)


As far as I can see in all other cases (failure of some kind, all data 
read), we should clear pollin.

>   		stream->pollin = false;
>   
>   	return ret;

return offset ?: (__ret ?: -EAGAIN);


You could probably just retain a single ret variable.
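
Editorial aside (not from the thread): the 'offset ?: (__ret ?: -EAGAIN)' expression relies on the GNU C "a ?: b" extension. Written out without it, the error normalisation under discussion is the following (the helper name is hypothetical):

/* Bytes copied take precedence over any error, which takes precedence over -EAGAIN. */
static ssize_t normalise_read_result(size_t offset, int ret)
{
	if (offset)		/* some data was copied: report that */
		return offset;
	if (ret)		/* no data, but ops->read() returned an error */
		return ret;
	return -EAGAIN;		/* no data and no error: nothing to read yet */
}
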
Dixit, Ashutosh March 31, 2020, 5:47 a.m. UTC | #2
On Mon, 30 Mar 2020 01:23:29 -0700, Lionel Landwerlin wrote:
>
> On 28/03/2020 01:16, Ashutosh Dixit wrote:
> > It is wrong to block the user thread in the next poll when OA data is
> > already available which could not fit in the user buffer provided in
> > the previous read. In several cases the exact user buffer size is not
> > known. Blocking user space in poll can lead to data loss when the
> > buffer size used is smaller than the available data.
> >
> > This change fixes this issue and allows user space to read all OA data
> > even when using a buffer size smaller than the available data using
> > multiple non-blocking reads rather than staying blocked in poll till
> > the next timer interrupt.
> >
> > v2: Fix ret value for blocking reads (Umesh)
> > v3: Mistake during patch send (Ashutosh)
> > v4: Remove -EAGAIN from comment (Umesh)
> >
> > Cc: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
> > Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
> > Signed-off-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
>
> Looks like your change makes more sense than what I suggested.
>
> I have a few nits below.

Thanks Lionel, most of what you suggested made sense so I have made those
changes and submitted a v5. Please take a look. More comments below.

>
> Thanks,
>
> -Lionel
>
>
> > ---
> >   drivers/gpu/drm/i915/i915_perf.c | 59 +++++++-------------------------
> >   1 file changed, 12 insertions(+), 47 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
> > index c74ebac50015..5f6d9bff99c8 100644
> > --- a/drivers/gpu/drm/i915/i915_perf.c
> > +++ b/drivers/gpu/drm/i915/i915_perf.c
> > @@ -2914,49 +2914,6 @@ void i915_oa_init_reg_state(const struct intel_context *ce,
> >		gen8_update_reg_state_unlocked(ce, stream);
> >   }
> >   -/**
> > - * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation
> > - * @stream: An i915 perf stream
> > - * @file: An i915 perf stream file
> > - * @buf: destination buffer given by userspace
> > - * @count: the number of bytes userspace wants to read
> > - * @ppos: (inout) file seek position (unused)
> > - *
> > - * Besides wrapping &i915_perf_stream_ops->read this provides a common place to
> > - * ensure that if we've successfully copied any data then reporting that takes
> > - * precedence over any internal error status, so the data isn't lost.
> > - *
> > - * For example ret will be -ENOSPC whenever there is more buffered data than
> > - * can be copied to userspace, but that's only interesting if we weren't able
> > - * to copy some data because it implies the userspace buffer is too small to
> > - * receive a single record (and we never split records).
> > - *
> > - * Another case with ret == -EFAULT is more of a grey area since it would seem
> > - * like bad form for userspace to ask us to overrun its buffer, but the user
> > - * knows best:
> > - *
> > - *   http://yarchive.net/comp/linux/partial_reads_writes.html
> > - *
> > - * Returns: The number of bytes copied or a negative error code on failure.
> > - */
> > -static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream,
> > -				     struct file *file,
> > -				     char __user *buf,
> > -				     size_t count,
> > -				     loff_t *ppos)
> > -{
> > -	/* Note we keep the offset (aka bytes read) separate from any
> > -	 * error status so that the final check for whether we return
> > -	 * the bytes read with a higher precedence than any error (see
> > -	 * comment below) doesn't need to be handled/duplicated in
> > -	 * stream->ops->read() implementations.
> > -	 */
> > -	size_t offset = 0;
> > -	int ret = stream->ops->read(stream, buf, count, &offset);
> > -
> > -	return offset ?: (ret ?: -EAGAIN);
> > -}
> > -
> >   /**
> >    * i915_perf_read - handles read() FOP for i915 perf stream FDs
> >    * @file: An i915 perf stream file
> > @@ -2982,6 +2939,8 @@ static ssize_t i915_perf_read(struct file *file,
> >   {
> >	struct i915_perf_stream *stream = file->private_data;
> >	struct i915_perf *perf = stream->perf;
> > +	size_t offset = 0;
> > +	int __ret;
> >	ssize_t ret;
> >		/* To ensure it's handled consistently we simply treat all reads of a
> > @@ -3005,16 +2964,19 @@ static ssize_t i915_perf_read(struct file *file,
> >				return ret;
> >				mutex_lock(&perf->lock);
> > -			ret = i915_perf_read_locked(stream, file,
> > -						    buf, count, ppos);
> > +			__ret = stream->ops->read(stream, buf, count, &offset);
> > +			ret = offset ?: (__ret ?: -EAGAIN);

> I would drop this line above and move it to the end of the function.

Unfortunately, as Umesh pointed out, that can't be done because ret is
used in the loop (do { } while (ret == -EAGAIN); ).

> >			mutex_unlock(&perf->lock);
> >		} while (ret == -EAGAIN);
> >	} else {
> >		mutex_lock(&perf->lock);
> > -		ret = i915_perf_read_locked(stream, file, buf, count, ppos);
> > +		__ret = stream->ops->read(stream, buf, count, &offset);
> > +		ret = offset ?: (__ret ?: -EAGAIN);

> I would drop this line above and move it to the end of the function.

Done.

> >		mutex_unlock(&perf->lock);
> >	}
> >   +	/* Possible values for __ret are 0, -EFAULT, -ENOSPC, -EIO, ... */
> > +
> >	/* We allow the poll checking to sometimes report false positive EPOLLIN
> >	 * events where we might actually report EAGAIN on read() if there's
> >	 * not really any data available. In this situation though we don't
> > @@ -3022,8 +2984,11 @@ static ssize_t i915_perf_read(struct file *file,
> >	 * and read() returning -EAGAIN. Clearing the oa.pollin state here
> >	 * effectively ensures we back off until the next hrtimer callback
> >	 * before reporting another EPOLLIN event.
> > +	 * The exception to this is if ops->read() returned -ENOSPC which means
> > +	 * that more OA data is available than could fit in the user provided
> > +	 * buffer. In this case we want the next poll() call to not block.
> >	 */
> > -	if (ret >= 0 || ret == -EAGAIN)
> > +	if ((ret > 0 || ret == -EAGAIN) && __ret != -ENOSPC)
>
> I guess this could be simplified with
>
> if (__ret != -ENOSPC)
>
> As far as I can see in all other cases (failure of some kind, all data
> read), we should clear pollin.

Done.

>
> >		stream->pollin = false;
> >		return ret;
>
> return offset ?: (__ret ?: -EAGAIN);
>
> You could probably just retain a single ret variable.

Done, mostly. There is a second ret in a restricted scope due to the reason
mentioned above (ret in the loop).
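
For orientation, a rough editorial sketch of the restructuring agreed on above: the normalised return value computed once at the end, and pollin cleared unless ops->read() returned -ENOSPC. This is not the actual v5 patch (v5 keeps a second ret in a restricted scope inside the loop, whereas this sketch loops on offset/ret directly, and other details may differ):

static ssize_t i915_perf_read(struct file *file,
			      char __user *buf,
			      size_t count,
			      loff_t *ppos)
{
	struct i915_perf_stream *stream = file->private_data;
	struct i915_perf *perf = stream->perf;
	size_t offset = 0;
	int ret;

	/* Reads of a disabled stream are treated as an error (see above). */
	if (!stream->enabled)
		return -EIO;

	if (!(file->f_flags & O_NONBLOCK)) {
		/* Blocking read: wait for data, then read under the lock. */
		do {
			ret = stream->ops->wait_unlocked(stream);
			if (ret)
				return ret;

			mutex_lock(&perf->lock);
			ret = stream->ops->read(stream, buf, count, &offset);
			mutex_unlock(&perf->lock);
		} while (!offset && !ret);
	} else {
		mutex_lock(&perf->lock);
		ret = stream->ops->read(stream, buf, count, &offset);
		mutex_unlock(&perf->lock);
	}

	/*
	 * -ENOSPC means more OA data is buffered than fit in the user
	 * buffer, so keep pollin set and let the next poll()/read() pick
	 * it up; in every other case back off until the next hrtimer
	 * callback before reporting another EPOLLIN event.
	 */
	if (ret != -ENOSPC)
		stream->pollin = false;

	return offset ?: (ret ?: -EAGAIN);
}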

Patch

diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index c74ebac50015..5f6d9bff99c8 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -2914,49 +2914,6 @@  void i915_oa_init_reg_state(const struct intel_context *ce,
 		gen8_update_reg_state_unlocked(ce, stream);
 }
 
-/**
- * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation
- * @stream: An i915 perf stream
- * @file: An i915 perf stream file
- * @buf: destination buffer given by userspace
- * @count: the number of bytes userspace wants to read
- * @ppos: (inout) file seek position (unused)
- *
- * Besides wrapping &i915_perf_stream_ops->read this provides a common place to
- * ensure that if we've successfully copied any data then reporting that takes
- * precedence over any internal error status, so the data isn't lost.
- *
- * For example ret will be -ENOSPC whenever there is more buffered data than
- * can be copied to userspace, but that's only interesting if we weren't able
- * to copy some data because it implies the userspace buffer is too small to
- * receive a single record (and we never split records).
- *
- * Another case with ret == -EFAULT is more of a grey area since it would seem
- * like bad form for userspace to ask us to overrun its buffer, but the user
- * knows best:
- *
- *   http://yarchive.net/comp/linux/partial_reads_writes.html
- *
- * Returns: The number of bytes copied or a negative error code on failure.
- */
-static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream,
-				     struct file *file,
-				     char __user *buf,
-				     size_t count,
-				     loff_t *ppos)
-{
-	/* Note we keep the offset (aka bytes read) separate from any
-	 * error status so that the final check for whether we return
-	 * the bytes read with a higher precedence than any error (see
-	 * comment below) doesn't need to be handled/duplicated in
-	 * stream->ops->read() implementations.
-	 */
-	size_t offset = 0;
-	int ret = stream->ops->read(stream, buf, count, &offset);
-
-	return offset ?: (ret ?: -EAGAIN);
-}
-
 /**
  * i915_perf_read - handles read() FOP for i915 perf stream FDs
  * @file: An i915 perf stream file
@@ -2982,6 +2939,8 @@  static ssize_t i915_perf_read(struct file *file,
 {
 	struct i915_perf_stream *stream = file->private_data;
 	struct i915_perf *perf = stream->perf;
+	size_t offset = 0;
+	int __ret;
 	ssize_t ret;
 
 	/* To ensure it's handled consistently we simply treat all reads of a
@@ -3005,16 +2964,19 @@  static ssize_t i915_perf_read(struct file *file,
 				return ret;
 
 			mutex_lock(&perf->lock);
-			ret = i915_perf_read_locked(stream, file,
-						    buf, count, ppos);
+			__ret = stream->ops->read(stream, buf, count, &offset);
+			ret = offset ?: (__ret ?: -EAGAIN);
 			mutex_unlock(&perf->lock);
 		} while (ret == -EAGAIN);
 	} else {
 		mutex_lock(&perf->lock);
-		ret = i915_perf_read_locked(stream, file, buf, count, ppos);
+		__ret = stream->ops->read(stream, buf, count, &offset);
+		ret = offset ?: (__ret ?: -EAGAIN);
 		mutex_unlock(&perf->lock);
 	}
 
+	/* Possible values for __ret are 0, -EFAULT, -ENOSPC, -EIO, ... */
+
 	/* We allow the poll checking to sometimes report false positive EPOLLIN
 	 * events where we might actually report EAGAIN on read() if there's
 	 * not really any data available. In this situation though we don't
@@ -3022,8 +2984,11 @@  static ssize_t i915_perf_read(struct file *file,
 	 * and read() returning -EAGAIN. Clearing the oa.pollin state here
 	 * effectively ensures we back off until the next hrtimer callback
 	 * before reporting another EPOLLIN event.
+	 * The exception to this is if ops->read() returned -ENOSPC which means
+	 * that more OA data is available than could fit in the user provided
+	 * buffer. In this case we want the next poll() call to not block.
 	 */
-	if (ret >= 0 || ret == -EAGAIN)
+	if ((ret > 0 || ret == -EAGAIN) && __ret != -ENOSPC)
 		stream->pollin = false;
 
 	return ret;