
[3/6] drm/gud: Don't retry a failed framebuffer flush

Message ID 20221122-gud-shadow-plane-v1-3-9de3afa3383e@tronnes.org (mailing list archive)
State New, archived
Series drm/gud: Use the shadow plane helper

Commit Message

Noralf Trønnes via B4 Submission Endpoint Nov. 22, 2022, 8:58 p.m. UTC
From: Noralf Trønnes <noralf@tronnes.org>

If a framebuffer flush fails, the driver does one retry by requeuing the
worker. Currently the worker is used even for synchronous flushing, but a
later patch will inline it, so this needs to change. Thinking about how to
solve this, I came to the conclusion that this retry mechanism was a fix
for a problem that existed only in the mind of the developer (me) and not
something that solved a real problem.

So let's remove this for now and revisit later should it become necessary.
gud_add_damage() now has only one caller, so it can be inlined.

Signed-off-by: Noralf Trønnes <noralf@tronnes.org>
---
 drivers/gpu/drm/gud/gud_pipe.c | 48 +++++++-----------------------------------
 1 file changed, 8 insertions(+), 40 deletions(-)
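
For context on the commit message's note that gud_add_damage() can be inlined: the only logic that survives the removal of gud_retry_failed_flush() is the damage-rectangle union that gud_add_damage() performed, which the patch moves into gud_fb_queue_damage(). Below is a minimal standalone sketch of that union step; the struct rect type, the damage_union() helper and the sample coordinates are illustrative stand-ins only, the driver itself operates on struct drm_rect while holding gdrm->damage_lock.

#include <limits.h>
#include <stdio.h>

/* Illustrative stand-in for struct drm_rect. */
struct rect {
	int x1, y1, x2, y2;
};

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Grow *acc so that it also covers *damage; this is the logic being inlined. */
static void damage_union(struct rect *acc, const struct rect *damage)
{
	acc->x1 = MIN(acc->x1, damage->x1);
	acc->y1 = MIN(acc->y1, damage->y1);
	acc->x2 = MAX(acc->x2, damage->x2);
	acc->y2 = MAX(acc->y2, damage->y2);
}

int main(void)
{
	/* Start "empty": x1/y1 larger than any plausible coordinate, x2/y2 at 0,
	 * roughly mirroring how gud_clear_damage() resets the accumulated rect. */
	struct rect acc = { .x1 = INT_MAX, .y1 = INT_MAX, .x2 = 0, .y2 = 0 };
	struct rect d1 = { 10, 20, 100, 120 };
	struct rect d2 = { 50, 5, 200, 80 };

	damage_union(&acc, &d1);
	damage_union(&acc, &d2);
	/* Prints: accumulated damage: (10,5)-(200,120) */
	printf("accumulated damage: (%d,%d)-(%d,%d)\n",
	       acc.x1, acc.y1, acc.x2, acc.y2);
	return 0;
}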

Comments

Javier Martinez Canillas Nov. 23, 2022, 8:38 a.m. UTC | #1
On 11/22/22 21:58, Noralf Trønnes via B4 Submission Endpoint wrote:
> From: Noralf Trønnes <noralf@tronnes.org>
> 
> If a framebuffer flush fails, the driver does one retry by requeuing the
> worker. Currently the worker is used even for synchronous flushing, but a
> later patch will inline it, so this needs to change. Thinking about how to
> solve this, I came to the conclusion that this retry mechanism was a fix
> for a problem that existed only in the mind of the developer (me) and not
> something that solved a real problem.
> 
> So let's remove this for now and revisit later should it become necessary.
> gud_add_damage() now has only one caller, so it can be inlined.
>

Makes sense.
 
> Signed-off-by: Noralf Trønnes <noralf@tronnes.org>
> ---
>  drivers/gpu/drm/gud/gud_pipe.c | 48 +++++++-----------------------------------
>  1 file changed, 8 insertions(+), 40 deletions(-)

Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
Thomas Zimmermann Nov. 23, 2022, 9:25 a.m. UTC | #2
Am 22.11.22 um 21:58 schrieb Noralf Trønnes via B4 Submission Endpoint:
> From: Noralf Trønnes <noralf@tronnes.org>
> 
> If a framebuffer flush fails, the driver does one retry by requeuing the
> worker. Currently the worker is used even for synchronous flushing, but a
> later patch will inline it, so this needs to change. Thinking about how to
> solve this, I came to the conclusion that this retry mechanism was a fix
> for a problem that existed only in the mind of the developer (me) and not
> something that solved a real problem.
> 
> So let's remove this for now and revisit later should it become necessary.
> gud_add_damage() now has only one caller, so it can be inlined.
> 
> Signed-off-by: Noralf Trønnes <noralf@tronnes.org>

Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>

> ---
>   drivers/gpu/drm/gud/gud_pipe.c | 48 +++++++-----------------------------------
>   1 file changed, 8 insertions(+), 40 deletions(-)
> 
> diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
> index 61f4abaf1811..ff1358815af5 100644
> --- a/drivers/gpu/drm/gud/gud_pipe.c
> +++ b/drivers/gpu/drm/gud/gud_pipe.c
> @@ -333,37 +333,6 @@ void gud_clear_damage(struct gud_device *gdrm)
>   	gdrm->damage.y2 = 0;
>   }
>   
> -static void gud_add_damage(struct gud_device *gdrm, struct drm_rect *damage)
> -{
> -	gdrm->damage.x1 = min(gdrm->damage.x1, damage->x1);
> -	gdrm->damage.y1 = min(gdrm->damage.y1, damage->y1);
> -	gdrm->damage.x2 = max(gdrm->damage.x2, damage->x2);
> -	gdrm->damage.y2 = max(gdrm->damage.y2, damage->y2);
> -}
> -
> -static void gud_retry_failed_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
> -				   struct drm_rect *damage)
> -{
> -	/*
> -	 * pipe_update waits for the worker when the display mode is going to change.
> -	 * This ensures that the width and height is still the same making it safe to
> -	 * add back the damage.
> -	 */
> -
> -	mutex_lock(&gdrm->damage_lock);
> -	if (!gdrm->fb) {
> -		drm_framebuffer_get(fb);
> -		gdrm->fb = fb;
> -	}
> -	gud_add_damage(gdrm, damage);
> -	mutex_unlock(&gdrm->damage_lock);
> -
> -	/* Retry only once to avoid a possible storm in case of continues errors. */
> -	if (!gdrm->prev_flush_failed)
> -		queue_work(system_long_wq, &gdrm->work);
> -	gdrm->prev_flush_failed = true;
> -}
> -
>   void gud_flush_work(struct work_struct *work)
>   {
>   	struct gud_device *gdrm = container_of(work, struct gud_device, work);
> @@ -407,14 +376,10 @@ void gud_flush_work(struct work_struct *work)
>   		ret = gud_flush_rect(gdrm, fb, format, &rect);
>   		if (ret) {
>   			if (ret != -ENODEV && ret != -ECONNRESET &&
> -			    ret != -ESHUTDOWN && ret != -EPROTO) {
> -				bool prev_flush_failed = gdrm->prev_flush_failed;
> -
> -				gud_retry_failed_flush(gdrm, fb, &damage);
> -				if (!prev_flush_failed)
> -					dev_err_ratelimited(fb->dev->dev,
> -							    "Failed to flush framebuffer: error=%d\n", ret);
> -			}
> +			    ret != -ESHUTDOWN && ret != -EPROTO)
> +				dev_err_ratelimited(fb->dev->dev,
> +						    "Failed to flush framebuffer: error=%d\n", ret);
> +			gdrm->prev_flush_failed = true;
>   			break;
>   		}
>   
> @@ -439,7 +404,10 @@ static void gud_fb_queue_damage(struct gud_device *gdrm, struct drm_framebuffer
>   		gdrm->fb = fb;
>   	}
>   
> -	gud_add_damage(gdrm, damage);
> +	gdrm->damage.x1 = min(gdrm->damage.x1, damage->x1);
> +	gdrm->damage.y1 = min(gdrm->damage.y1, damage->y1);
> +	gdrm->damage.x2 = max(gdrm->damage.x2, damage->x2);
> +	gdrm->damage.y2 = max(gdrm->damage.y2, damage->y2);
>   
>   	mutex_unlock(&gdrm->damage_lock);
>   
>

Patch

diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index 61f4abaf1811..ff1358815af5 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -333,37 +333,6 @@  void gud_clear_damage(struct gud_device *gdrm)
 	gdrm->damage.y2 = 0;
 }
 
-static void gud_add_damage(struct gud_device *gdrm, struct drm_rect *damage)
-{
-	gdrm->damage.x1 = min(gdrm->damage.x1, damage->x1);
-	gdrm->damage.y1 = min(gdrm->damage.y1, damage->y1);
-	gdrm->damage.x2 = max(gdrm->damage.x2, damage->x2);
-	gdrm->damage.y2 = max(gdrm->damage.y2, damage->y2);
-}
-
-static void gud_retry_failed_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
-				   struct drm_rect *damage)
-{
-	/*
-	 * pipe_update waits for the worker when the display mode is going to change.
-	 * This ensures that the width and height is still the same making it safe to
-	 * add back the damage.
-	 */
-
-	mutex_lock(&gdrm->damage_lock);
-	if (!gdrm->fb) {
-		drm_framebuffer_get(fb);
-		gdrm->fb = fb;
-	}
-	gud_add_damage(gdrm, damage);
-	mutex_unlock(&gdrm->damage_lock);
-
-	/* Retry only once to avoid a possible storm in case of continues errors. */
-	if (!gdrm->prev_flush_failed)
-		queue_work(system_long_wq, &gdrm->work);
-	gdrm->prev_flush_failed = true;
-}
-
 void gud_flush_work(struct work_struct *work)
 {
 	struct gud_device *gdrm = container_of(work, struct gud_device, work);
@@ -407,14 +376,10 @@  void gud_flush_work(struct work_struct *work)
 		ret = gud_flush_rect(gdrm, fb, format, &rect);
 		if (ret) {
 			if (ret != -ENODEV && ret != -ECONNRESET &&
-			    ret != -ESHUTDOWN && ret != -EPROTO) {
-				bool prev_flush_failed = gdrm->prev_flush_failed;
-
-				gud_retry_failed_flush(gdrm, fb, &damage);
-				if (!prev_flush_failed)
-					dev_err_ratelimited(fb->dev->dev,
-							    "Failed to flush framebuffer: error=%d\n", ret);
-			}
+			    ret != -ESHUTDOWN && ret != -EPROTO)
+				dev_err_ratelimited(fb->dev->dev,
+						    "Failed to flush framebuffer: error=%d\n", ret);
+			gdrm->prev_flush_failed = true;
 			break;
 		}
 
@@ -439,7 +404,10 @@  static void gud_fb_queue_damage(struct gud_device *gdrm, struct drm_framebuffer
 		gdrm->fb = fb;
 	}
 
-	gud_add_damage(gdrm, damage);
+	gdrm->damage.x1 = min(gdrm->damage.x1, damage->x1);
+	gdrm->damage.y1 = min(gdrm->damage.y1, damage->y1);
+	gdrm->damage.x2 = max(gdrm->damage.x2, damage->x2);
+	gdrm->damage.y2 = max(gdrm->damage.y2, damage->y2);
 
 	mutex_unlock(&gdrm->damage_lock);