[07/18] drm/i915: Restore nonblocking awaits for modesetting

Message ID 20160914065250.15482-8-chris@chris-wilson.co.uk (mailing list archive)
State New, archived

Commit Message

Chris Wilson Sept. 14, 2016, 6:52 a.m. UTC
After combining the dma-buf reservation object and the GEM reservation
object, we lost the ability to do a nonblocking wait on the i915 request
(as we blocked upon the reservation object during prepare_fb). We can
instead convert the reservation object into a fence upon which we can
asynchronously wait (including a forced timeout in case the DMA fence is
never signaled).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/intel_display.c | 78 ++++++++++++++++++++++--------------
 drivers/gpu/drm/i915/intel_drv.h     |  2 +
 2 files changed, 51 insertions(+), 29 deletions(-)
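
For orientation, the pieces below can be read as one sequence; this is a
condensed sketch of the flow the patch sets up, using only calls and fields
that appear in the hunks, not buildable code (error paths and the
modeset-only await on the old object's reservation are folded away):

	/* intel_atomic_commit(): set up the software fence before preparing
	 * the commit, taking an extra reference on the state that is dropped
	 * from the FENCE_FREE notification. */
	drm_atomic_state_get(state);
	i915_sw_fence_init(&intel_state->commit_ready,
			   intel_atomic_commit_ready);

	/* intel_prepare_plane_fb(): instead of blocking on the object, have
	 * commit_ready await the fences tracked by its reservation object.
	 * The wait on the new framebuffer is capped at 10 seconds in case
	 * the dma-buf fence is never signaled. */
	ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
					      obj->resv, NULL, false, 10 * HZ,
					      GFP_KERNEL);

	/* Back in intel_atomic_commit(): seal the fence.  A nonblocking
	 * commit runs intel_atomic_commit_tail() from commit_work once all
	 * awaited fences have signaled (see intel_atomic_commit_ready());
	 * a blocking commit waits for the fence and runs the tail inline. */
	INIT_WORK(&state->commit_work,
		  nonblock ? intel_atomic_commit_work : NULL);
	i915_sw_fence_commit(&intel_state->commit_ready);
	if (!nonblock) {
		i915_sw_fence_wait(&intel_state->commit_ready);
		intel_atomic_commit_tail(state);
	}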

Comments

Joonas Lahtinen Sept. 19, 2016, 4:01 p.m. UTC | #1
Maarten, could you give this a look?

On Wed, 2016-09-14 at 07:52 +0100, Chris Wilson wrote:
> After combining the dma-buf reservation object and the GEM reservation
> object, we lost the ability to do a nonblocking wait on the i915 request
> (as we blocked upon the reservation object during prepare_fb). We can
> instead convert the reservation object into a fence upon which we can
> asynchronously wait (including a forced timeout in case the DMA fence is
> never signaled).
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

Patch

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5c0ea08dddb3..294df122f7c6 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -14383,12 +14383,33 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 
 static void intel_atomic_commit_work(struct work_struct *work)
 {
-	struct drm_atomic_state *state = container_of(work,
-						      struct drm_atomic_state,
-						      commit_work);
+	struct drm_atomic_state *state =
+		container_of(work, struct drm_atomic_state, commit_work);
+
 	intel_atomic_commit_tail(state);
 }
 
+static int __i915_sw_fence_call
+intel_atomic_commit_ready(struct i915_sw_fence *fence,
+			  enum i915_sw_fence_notify notify)
+{
+	struct intel_atomic_state *state =
+		container_of(fence, struct intel_atomic_state, commit_ready);
+
+	switch (notify) {
+	case FENCE_COMPLETE:
+		if (state->base.commit_work.func)
+			queue_work(system_unbound_wq, &state->base.commit_work);
+		break;
+
+	case FENCE_FREE:
+		drm_atomic_state_put(&state->base);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
 static void intel_atomic_track_fbs(struct drm_atomic_state *state)
 {
 	struct drm_plane_state *old_plane_state;
@@ -14434,11 +14455,14 @@ static int intel_atomic_commit(struct drm_device *dev,
 	if (ret)
 		return ret;
 
-	INIT_WORK(&state->commit_work, intel_atomic_commit_work);
+	drm_atomic_state_get(state);
+	i915_sw_fence_init(&intel_state->commit_ready,
+			   intel_atomic_commit_ready);
 
 	ret = intel_atomic_prepare_commit(dev, state);
 	if (ret) {
 		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
+		i915_sw_fence_commit(&intel_state->commit_ready);
 		return ret;
 	}
 
@@ -14449,10 +14473,14 @@ static int intel_atomic_commit(struct drm_device *dev,
 	intel_atomic_track_fbs(state);
 
 	drm_atomic_state_get(state);
-	if (nonblock)
-		queue_work(system_unbound_wq, &state->commit_work);
-	else
+	INIT_WORK(&state->commit_work,
+		  nonblock ? intel_atomic_commit_work : NULL);
+
+	i915_sw_fence_commit(&intel_state->commit_ready);
+	if (!nonblock) {
+		i915_sw_fence_wait(&intel_state->commit_ready);
 		intel_atomic_commit_tail(state);
+	}
 
 	return 0;
 }
@@ -14568,8 +14596,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 	struct drm_framebuffer *fb = new_state->fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
-	long lret;
-	int ret = 0;
+	int ret;
 
 	if (!obj && !old_obj)
 		return 0;
@@ -14577,7 +14604,6 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 	if (old_obj) {
 		struct drm_crtc_state *crtc_state =
 			drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
-		long timeout = 0;
 
 		/* Big Hammer, we also need to ensure that any pending
 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
@@ -14590,31 +14616,25 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 		 * This should only fail upon a hung GPU, in which case we
 		 * can safely continue.
 		 */
-		if (needs_modeset(crtc_state))
-			timeout = i915_gem_object_wait(old_obj,
-						       I915_WAIT_INTERRUPTIBLE |
-						       I915_WAIT_LOCKED,
-						       MAX_SCHEDULE_TIMEOUT,
-						       NULL);
-		if (timeout < 0) {
-			/* GPU hangs should have been swallowed by the wait */
-			WARN_ON(timeout == -EIO);
-			return timeout;
+		if (needs_modeset(crtc_state)) {
+			ret = i915_sw_fence_await_reservation(&to_intel_atomic_state(new_state->state)->commit_ready,
+							      old_obj->resv, NULL,
+							      false, 0,
+							      GFP_KERNEL);
+			if (ret < 0)
+				return ret;
 		}
 	}
 
 	if (!obj)
 		return 0;
 
-	/* For framebuffer backed by dmabuf, wait for fence */
-	lret = i915_gem_object_wait(obj,
-				    I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
-				    MAX_SCHEDULE_TIMEOUT,
-				    NULL);
-	if (lret == -ERESTARTSYS)
-		return lret;
-
-	WARN(lret < 0, "waiting returns %li\n", lret);
+	ret = i915_sw_fence_await_reservation(&to_intel_atomic_state(new_state->state)->commit_ready,
+					      obj->resv, NULL,
+					      false, 10*HZ,
+					      GFP_KERNEL);
+	if (ret < 0)
+		return ret;
 
 	if (plane->type == DRM_PLANE_TYPE_CURSOR &&
 	    INTEL_INFO(dev)->cursor_needs_physical) {
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 15e3050edeb9..cb99a2540863 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -357,6 +357,8 @@ struct intel_atomic_state {
 
 	/* Gen9+ only */
 	struct skl_wm_values wm_results;
+
+	struct i915_sw_fence commit_ready;
 };
 
 struct intel_plane_state {