diff mbox

[10/11] drm/i915: Use a preemption timeout to enforce interactivity

Message ID 20180326115044.2505-11-chris@chris-wilson.co.uk (mailing list archive)
State New, archived
Headers show

Commit Message

Chris Wilson March 26, 2018, 11:50 a.m. UTC
Use a liberal timeout of 20ms to ensure that the rendering for an
interactive pageflip is started in a timely fashion, and that
user interaction is not blocked by GPU, or CPU, hogs. This is at the cost
of resetting whoever was blocking the preemption, likely leading to that
context/process being banned from submitting future requests.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h      |  3 ++-
 drivers/gpu/drm/i915/i915_gem.c      | 18 ++++++++++--------
 drivers/gpu/drm/i915/intel_display.c | 18 +++++++++++++++++-
 3 files changed, 29 insertions(+), 10 deletions(-)

Comments

Tvrtko Ursulin March 27, 2018, 8:35 a.m. UTC | #1
On 26/03/2018 12:50, Chris Wilson wrote:
> Use a liberal timeout of 20ms to ensure that the rendering for an
> interactive pageflip is started in a timely fashion, and that
> user interaction is not blocked by GPU, or CPU, hogs. This is at the cost
> of resetting whoever was blocking the preemption, likely leading to that
> context/process being banned from submitting future requests.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/i915_drv.h      |  3 ++-
>   drivers/gpu/drm/i915/i915_gem.c      | 18 ++++++++++--------
>   drivers/gpu/drm/i915/intel_display.c | 18 +++++++++++++++++-
>   3 files changed, 29 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 800230ba1c3b..87388feb973d 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -3150,8 +3150,9 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
>   			 struct intel_rps_client *rps);
>   int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
>   				  unsigned int flags,
> -				  int priority);
> +				  int priority, unsigned int timeout);
>   #define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
> +#define I915_PREEMPTION_TIMEOUT_DISPLAY (20 * 1000 * 1000) /* 20 ms */
>   
>   int __must_check
>   i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 675d6bb59337..252c6b58e739 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -469,7 +469,8 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
>   	return timeout;
>   }
>   
> -static void __fence_set_priority(struct dma_fence *fence, int prio)
> +static void __fence_set_priority(struct dma_fence *fence,
> +				 int prio, unsigned int timeout)
>   {
>   	struct i915_request *rq;
>   	struct intel_engine_cs *engine;
> @@ -482,11 +483,12 @@ static void __fence_set_priority(struct dma_fence *fence, int prio)
>   
>   	rcu_read_lock();
>   	if (engine->schedule)
> -		engine->schedule(rq, prio, 0);
> +		engine->schedule(rq, prio, timeout);
>   	rcu_read_unlock();
>   }
>   
> -static void fence_set_priority(struct dma_fence *fence, int prio)
> +static void fence_set_priority(struct dma_fence *fence,
> +			       int prio, unsigned int timeout)
>   {
>   	/* Recurse once into a fence-array */
>   	if (dma_fence_is_array(fence)) {
> @@ -494,16 +496,16 @@ static void fence_set_priority(struct dma_fence *fence, int prio)
>   		int i;
>   
>   		for (i = 0; i < array->num_fences; i++)
> -			__fence_set_priority(array->fences[i], prio);
> +			__fence_set_priority(array->fences[i], prio, timeout);
>   	} else {
> -		__fence_set_priority(fence, prio);
> +		__fence_set_priority(fence, prio, timeout);
>   	}
>   }
>   
>   int
>   i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
>   			      unsigned int flags,
> -			      int prio)
> +			      int prio, unsigned int timeout)
>   {
>   	struct dma_fence *excl;
>   
> @@ -518,7 +520,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
>   			return ret;
>   
>   		for (i = 0; i < count; i++) {
> -			fence_set_priority(shared[i], prio);
> +			fence_set_priority(shared[i], prio, timeout);
>   			dma_fence_put(shared[i]);
>   		}
>   
> @@ -528,7 +530,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
>   	}
>   
>   	if (excl) {
> -		fence_set_priority(excl, prio);
> +		fence_set_priority(excl, prio, timeout);
>   		dma_fence_put(excl);
>   	}
>   	return 0;
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index 4c30c7c04f9c..830f2d4e540f 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -12786,7 +12786,23 @@ intel_prepare_plane_fb(struct drm_plane *plane,
>   
>   	ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
>   
> -	i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
> +	/*
> +	 * Reschedule our dependencies, and ensure we run within a timeout.
> +	 *
> +	 * Note that if the timeout is exceeded, then whoever was running that
> +	 * prevented us from acquiring the GPU is declared rogue and reset. An
> +	 * unresponsive process will then be banned in order to preserve
> +	 * interactivity. Since this can be seen as a bit heavy-handed, we
> +	 * select a timeout for when the dropped frames start to become a
> +	 * noticeable nuisance for the user (20 ms, i.e. preemption was blocked
> +	 * for more than a frame). Note, this is only a timeout for a delay in
> +	 * preempting the current request in order to run our dependency chain,
> +	 * our dependency chain may itself take a long time to run to completion
> +	 * before we can present the framebuffer.
> +	 */
> +	i915_gem_object_wait_priority(obj, 0,
> +				      I915_PRIORITY_DISPLAY,
> +				      I915_PREEMPTION_TIMEOUT_DISPLAY);

API signature changes to allow timeouts are fine, but I think this hunk 
should be a separate patch at the end of the series. (Since it has the 
potential to make things which used to work, albeit stutteringly (?), 
start crashing.)

Regards,

Tvrtko

>   
>   	mutex_unlock(&dev_priv->drm.struct_mutex);
>   	i915_gem_object_unpin_pages(obj);
>
Chris Wilson March 27, 2018, 8:39 a.m. UTC | #2
Quoting Tvrtko Ursulin (2018-03-27 09:35:17)
> 
> On 26/03/2018 12:50, Chris Wilson wrote:
> > Use a liberal timeout of 20ms to ensure that the rendering for an
> > interactive pageflip is started in a timely fashion, and that
> > user interaction is not blocked by GPU, or CPU, hogs. This is at the cost
> > of resetting whoever was blocking the preemption, likely leading to that
> > context/process being banned from submitting future requests.
> > 
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > ---
> >   drivers/gpu/drm/i915/i915_drv.h      |  3 ++-
> >   drivers/gpu/drm/i915/i915_gem.c      | 18 ++++++++++--------
> >   drivers/gpu/drm/i915/intel_display.c | 18 +++++++++++++++++-
> >   3 files changed, 29 insertions(+), 10 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> > index 800230ba1c3b..87388feb973d 100644
> > --- a/drivers/gpu/drm/i915/i915_drv.h
> > +++ b/drivers/gpu/drm/i915/i915_drv.h
> > @@ -3150,8 +3150,9 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
> >                        struct intel_rps_client *rps);
> >   int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
> >                                 unsigned int flags,
> > -                               int priority);
> > +                               int priority, unsigned int timeout);
> >   #define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
> > +#define I915_PREEMPTION_TIMEOUT_DISPLAY (20 * 1000 * 1000) /* 20 ms */
> >   
> >   int __must_check
> >   i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
> > diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> > index 675d6bb59337..252c6b58e739 100644
> > --- a/drivers/gpu/drm/i915/i915_gem.c
> > +++ b/drivers/gpu/drm/i915/i915_gem.c
> > @@ -469,7 +469,8 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
> >       return timeout;
> >   }
> >   
> > -static void __fence_set_priority(struct dma_fence *fence, int prio)
> > +static void __fence_set_priority(struct dma_fence *fence,
> > +                              int prio, unsigned int timeout)
> >   {
> >       struct i915_request *rq;
> >       struct intel_engine_cs *engine;
> > @@ -482,11 +483,12 @@ static void __fence_set_priority(struct dma_fence *fence, int prio)
> >   
> >       rcu_read_lock();
> >       if (engine->schedule)
> > -             engine->schedule(rq, prio, 0);
> > +             engine->schedule(rq, prio, timeout);
> >       rcu_read_unlock();
> >   }
> >   
> > -static void fence_set_priority(struct dma_fence *fence, int prio)
> > +static void fence_set_priority(struct dma_fence *fence,
> > +                            int prio, unsigned int timeout)
> >   {
> >       /* Recurse once into a fence-array */
> >       if (dma_fence_is_array(fence)) {
> > @@ -494,16 +496,16 @@ static void fence_set_priority(struct dma_fence *fence, int prio)
> >               int i;
> >   
> >               for (i = 0; i < array->num_fences; i++)
> > -                     __fence_set_priority(array->fences[i], prio);
> > +                     __fence_set_priority(array->fences[i], prio, timeout);
> >       } else {
> > -             __fence_set_priority(fence, prio);
> > +             __fence_set_priority(fence, prio, timeout);
> >       }
> >   }
> >   
> >   int
> >   i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
> >                             unsigned int flags,
> > -                           int prio)
> > +                           int prio, unsigned int timeout)
> >   {
> >       struct dma_fence *excl;
> >   
> > @@ -518,7 +520,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
> >                       return ret;
> >   
> >               for (i = 0; i < count; i++) {
> > -                     fence_set_priority(shared[i], prio);
> > +                     fence_set_priority(shared[i], prio, timeout);
> >                       dma_fence_put(shared[i]);
> >               }
> >   
> > @@ -528,7 +530,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
> >       }
> >   
> >       if (excl) {
> > -             fence_set_priority(excl, prio);
> > +             fence_set_priority(excl, prio, timeout);
> >               dma_fence_put(excl);
> >       }
> >       return 0;
> > diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> > index 4c30c7c04f9c..830f2d4e540f 100644
> > --- a/drivers/gpu/drm/i915/intel_display.c
> > +++ b/drivers/gpu/drm/i915/intel_display.c
> > @@ -12786,7 +12786,23 @@ intel_prepare_plane_fb(struct drm_plane *plane,
> >   
> >       ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
> >   
> > -     i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
> > +     /*
> > +      * Reschedule our dependencies, and ensure we run within a timeout.
> > +      *
> > +      * Note that if the timeout is exceeded, then whoever was running that
> > +      * prevented us from acquiring the GPU is declared rogue and reset. An
> > +      * unresponsive process will then be banned in order to preserve
> > +      * interactivity. Since this can be seen as a bit heavy-handed, we
> > +      * select a timeout for when the dropped frames start to become a
> > +      * noticeable nuisance for the user (20 ms, i.e. preemption was blocked
> > +      * for more than a frame). Note, this is only a timeout for a delay in
> > +      * preempting the current request in order to run our dependency chain,
> > +      * our dependency chain may itself take a long time to run to completion
> > +      * before we can present the framebuffer.
> > +      */
> > +     i915_gem_object_wait_priority(obj, 0,
> > +                                   I915_PRIORITY_DISPLAY,
> > +                                   I915_PREEMPTION_TIMEOUT_DISPLAY);
> 
> API signature changes to allow timeouts are fine, but I think this hunk 
> should be a separate patch at the end of the series. (Since it has the 
> potential to make things which used to work, albeit stutteringly (?), 
> start crashing.)

It is at the end as a separate patch. What am I missing?
(The only thing after it is exposing a param to userspace.)
-Chris
Tvrtko Ursulin March 27, 2018, 8:57 a.m. UTC | #3
On 27/03/2018 09:39, Chris Wilson wrote:
> Quoting Tvrtko Ursulin (2018-03-27 09:35:17)
>>
>> On 26/03/2018 12:50, Chris Wilson wrote:
>>> Use a liberal timeout of 20ms to ensure that the rendering for an
>>> interactive pageflip is started in a timely fashion, and that
>>> user interaction is not blocked by GPU, or CPU, hogs. This is at the cost
>>> of resetting whoever was blocking the preemption, likely leading to that
>>> context/process being banned from submitting future requests.
>>>
>>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>>> ---
>>>    drivers/gpu/drm/i915/i915_drv.h      |  3 ++-
>>>    drivers/gpu/drm/i915/i915_gem.c      | 18 ++++++++++--------
>>>    drivers/gpu/drm/i915/intel_display.c | 18 +++++++++++++++++-
>>>    3 files changed, 29 insertions(+), 10 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
>>> index 800230ba1c3b..87388feb973d 100644
>>> --- a/drivers/gpu/drm/i915/i915_drv.h
>>> +++ b/drivers/gpu/drm/i915/i915_drv.h
>>> @@ -3150,8 +3150,9 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
>>>                         struct intel_rps_client *rps);
>>>    int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
>>>                                  unsigned int flags,
>>> -                               int priority);
>>> +                               int priority, unsigned int timeout);
>>>    #define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
>>> +#define I915_PREEMPTION_TIMEOUT_DISPLAY (20 * 1000 * 1000) /* 20 ms */
>>>    
>>>    int __must_check
>>>    i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
>>> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
>>> index 675d6bb59337..252c6b58e739 100644
>>> --- a/drivers/gpu/drm/i915/i915_gem.c
>>> +++ b/drivers/gpu/drm/i915/i915_gem.c
>>> @@ -469,7 +469,8 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
>>>        return timeout;
>>>    }
>>>    
>>> -static void __fence_set_priority(struct dma_fence *fence, int prio)
>>> +static void __fence_set_priority(struct dma_fence *fence,
>>> +                              int prio, unsigned int timeout)
>>>    {
>>>        struct i915_request *rq;
>>>        struct intel_engine_cs *engine;
>>> @@ -482,11 +483,12 @@ static void __fence_set_priority(struct dma_fence *fence, int prio)
>>>    
>>>        rcu_read_lock();
>>>        if (engine->schedule)
>>> -             engine->schedule(rq, prio, 0);
>>> +             engine->schedule(rq, prio, timeout);
>>>        rcu_read_unlock();
>>>    }
>>>    
>>> -static void fence_set_priority(struct dma_fence *fence, int prio)
>>> +static void fence_set_priority(struct dma_fence *fence,
>>> +                            int prio, unsigned int timeout)
>>>    {
>>>        /* Recurse once into a fence-array */
>>>        if (dma_fence_is_array(fence)) {
>>> @@ -494,16 +496,16 @@ static void fence_set_priority(struct dma_fence *fence, int prio)
>>>                int i;
>>>    
>>>                for (i = 0; i < array->num_fences; i++)
>>> -                     __fence_set_priority(array->fences[i], prio);
>>> +                     __fence_set_priority(array->fences[i], prio, timeout);
>>>        } else {
>>> -             __fence_set_priority(fence, prio);
>>> +             __fence_set_priority(fence, prio, timeout);
>>>        }
>>>    }
>>>    
>>>    int
>>>    i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
>>>                              unsigned int flags,
>>> -                           int prio)
>>> +                           int prio, unsigned int timeout)
>>>    {
>>>        struct dma_fence *excl;
>>>    
>>> @@ -518,7 +520,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
>>>                        return ret;
>>>    
>>>                for (i = 0; i < count; i++) {
>>> -                     fence_set_priority(shared[i], prio);
>>> +                     fence_set_priority(shared[i], prio, timeout);
>>>                        dma_fence_put(shared[i]);
>>>                }
>>>    
>>> @@ -528,7 +530,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
>>>        }
>>>    
>>>        if (excl) {
>>> -             fence_set_priority(excl, prio);
>>> +             fence_set_priority(excl, prio, timeout);
>>>                dma_fence_put(excl);
>>>        }
>>>        return 0;
>>> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
>>> index 4c30c7c04f9c..830f2d4e540f 100644
>>> --- a/drivers/gpu/drm/i915/intel_display.c
>>> +++ b/drivers/gpu/drm/i915/intel_display.c
>>> @@ -12786,7 +12786,23 @@ intel_prepare_plane_fb(struct drm_plane *plane,
>>>    
>>>        ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
>>>    
>>> -     i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
>>> +     /*
>>> +      * Reschedule our dependencies, and ensure we run within a timeout.
>>> +      *
>>> +      * Note that if the timeout is exceeded, then whoever was running that
>>> +      * prevented us from acquiring the GPU is declared rogue and reset. An
>>> +      * unresponsive process will then be banned in order to preserve
>>> +      * interactivity. Since this can be seen as a bit heavy-handed, we
>>> +      * select a timeout for when the dropped frames start to become a
>>> +      * noticeable nuisance for the user (20 ms, i.e. preemption was blocked
>>> +      * for more than a frame). Note, this is only a timeout for a delay in
>>> +      * preempting the current request in order to run our dependency chain,
>>> +      * our dependency chain may itself take a long time to run to completion
>>> +      * before we can present the framebuffer.
>>> +      */
>>> +     i915_gem_object_wait_priority(obj, 0,
>>> +                                   I915_PRIORITY_DISPLAY,
>>> +                                   I915_PREEMPTION_TIMEOUT_DISPLAY);
>>
>> API signature changes to allow timeouts are fine, but I think this hunk
>> should be a separate patch at the end of the series. (Since it has the
>> potential to make things which used to work, albeit stutteringly (?),
>> start crashing.)
> 
> It is at the end as a separate patch. What am I missing?
> (The only thing after it is exposing a param to userspace.)

My bad, I assumed that last one depends on API signature changes from 
this one.

Regards,

Tvrtko
diff mbox

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 800230ba1c3b..87388feb973d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3150,8 +3150,9 @@  int i915_gem_object_wait(struct drm_i915_gem_object *obj,
 			 struct intel_rps_client *rps);
 int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 				  unsigned int flags,
-				  int priority);
+				  int priority, unsigned int timeout);
 #define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
+#define I915_PREEMPTION_TIMEOUT_DISPLAY (20 * 1000 * 1000) /* 20 ms */
 
 int __must_check
 i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 675d6bb59337..252c6b58e739 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -469,7 +469,8 @@  i915_gem_object_wait_reservation(struct reservation_object *resv,
 	return timeout;
 }
 
-static void __fence_set_priority(struct dma_fence *fence, int prio)
+static void __fence_set_priority(struct dma_fence *fence,
+				 int prio, unsigned int timeout)
 {
 	struct i915_request *rq;
 	struct intel_engine_cs *engine;
@@ -482,11 +483,12 @@  static void __fence_set_priority(struct dma_fence *fence, int prio)
 
 	rcu_read_lock();
 	if (engine->schedule)
-		engine->schedule(rq, prio, 0);
+		engine->schedule(rq, prio, timeout);
 	rcu_read_unlock();
 }
 
-static void fence_set_priority(struct dma_fence *fence, int prio)
+static void fence_set_priority(struct dma_fence *fence,
+			       int prio, unsigned int timeout)
 {
 	/* Recurse once into a fence-array */
 	if (dma_fence_is_array(fence)) {
@@ -494,16 +496,16 @@  static void fence_set_priority(struct dma_fence *fence, int prio)
 		int i;
 
 		for (i = 0; i < array->num_fences; i++)
-			__fence_set_priority(array->fences[i], prio);
+			__fence_set_priority(array->fences[i], prio, timeout);
 	} else {
-		__fence_set_priority(fence, prio);
+		__fence_set_priority(fence, prio, timeout);
 	}
 }
 
 int
 i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 			      unsigned int flags,
-			      int prio)
+			      int prio, unsigned int timeout)
 {
 	struct dma_fence *excl;
 
@@ -518,7 +520,7 @@  i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 			return ret;
 
 		for (i = 0; i < count; i++) {
-			fence_set_priority(shared[i], prio);
+			fence_set_priority(shared[i], prio, timeout);
 			dma_fence_put(shared[i]);
 		}
 
@@ -528,7 +530,7 @@  i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 	}
 
 	if (excl) {
-		fence_set_priority(excl, prio);
+		fence_set_priority(excl, prio, timeout);
 		dma_fence_put(excl);
 	}
 	return 0;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 4c30c7c04f9c..830f2d4e540f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12786,7 +12786,23 @@  intel_prepare_plane_fb(struct drm_plane *plane,
 
 	ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
 
-	i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
+	/*
+	 * Reschedule our dependencies, and ensure we run within a timeout.
+	 *
+	 * Note that if the timeout is exceeded, then whoever was running that
+	 * prevented us from acquiring the GPU is declared rogue and reset. An
+	 * unresponsive process will then be banned in order to preserve
+	 * interactivity. Since this can be seen as a bit heavy-handed, we
+	 * select a timeout for when the dropped frames start to become a
+	 * noticeable nuisance for the user (20 ms, i.e. preemption was blocked
+	 * for more than a frame). Note, this is only a timeout for a delay in
+	 * preempting the current request in order to run our dependency chain,
+	 * our dependency chain may itself take a long time to run to completion
+	 * before we can present the framebuffer.
+	 */
+	i915_gem_object_wait_priority(obj, 0,
+				      I915_PRIORITY_DISPLAY,
+				      I915_PREEMPTION_TIMEOUT_DISPLAY);
 
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	i915_gem_object_unpin_pages(obj);