Message ID | 1443007632-5573-4-git-send-email-maarten.lankhorst@linux.intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
On Wed, 2015-09-23 at 13:27 +0200, Maarten Lankhorst wrote: > Move it from intel_crtc_atomic_commit to prepare_plane_fb. > Waiting is done before committing, otherwise it's too late > to undo the changes. > > Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> > --- > drivers/gpu/drm/i915/intel_atomic.c | 2 - > drivers/gpu/drm/i915/intel_display.c | 107 ++++++++++++++++++++-------------- > - > drivers/gpu/drm/i915/intel_drv.h | 2 - > 3 files changed, 62 insertions(+), 49 deletions(-) > > diff --git a/drivers/gpu/drm/i915/intel_atomic.c > b/drivers/gpu/drm/i915/intel_atomic.c > index f1975f267710..25a891aa3824 100644 > --- a/drivers/gpu/drm/i915/intel_atomic.c > +++ b/drivers/gpu/drm/i915/intel_atomic.c > @@ -205,8 +205,6 @@ int intel_atomic_setup_scalers(struct drm_device *dev, > * but since this plane is unchanged just do > the > * minimum required validation. > */ > - if (plane->type == DRM_PLANE_TYPE_PRIMARY) > - intel_crtc->atomic.wait_for_flips = > true; > crtc_state->base.planes_changed = true; > } > > diff --git a/drivers/gpu/drm/i915/intel_display.c > b/drivers/gpu/drm/i915/intel_display.c > index 25e1eac260fd..cd651ff6c15b 100644 > --- a/drivers/gpu/drm/i915/intel_display.c > +++ b/drivers/gpu/drm/i915/intel_display.c > @@ -3221,32 +3221,6 @@ void intel_finish_reset(struct drm_device *dev) > drm_modeset_unlock_all(dev); > } > > -static void > -intel_finish_fb(struct drm_framebuffer *old_fb) > -{ > - struct drm_i915_gem_object *obj = intel_fb_obj(old_fb); > - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); > - bool was_interruptible = dev_priv->mm.interruptible; > - int ret; > - > - /* Big Hammer, we also need to ensure that any pending > - * MI_WAIT_FOR_EVENT inside a user batch buffer on the > - * current scanout is retired before unpinning the old > - * framebuffer. Note that we rely on userspace rendering > - * into the buffer attached to the pipe they are waiting > - * on. 
If not, userspace generates a GPU hang with IPEHR > - * point to the MI_WAIT_FOR_EVENT. > - * > - * This should only fail upon a hung GPU, in which case we > - * can safely continue. > - */ > - dev_priv->mm.interruptible = false; > - ret = i915_gem_object_wait_rendering(obj, true); > - dev_priv->mm.interruptible = was_interruptible; > - > - WARN_ON(ret); > -} > - > static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) > { > struct drm_device *dev = crtc->dev; > @@ -3867,15 +3841,23 @@ static void page_flip_completed(struct intel_crtc > *intel_crtc) > work->pending_flip_obj); > } > > -void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) > +static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) > { > struct drm_device *dev = crtc->dev; > struct drm_i915_private *dev_priv = dev->dev_private; > + long ret; > > WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue)); > - if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue, > - !intel_crtc_has_pending_flip(crtc), > - 60*HZ) == 0)) { > + > + ret = wait_event_interruptible_timeout( > + dev_priv->pending_flip_queue, > + !intel_crtc_has_pending_flip(crtc), > + 60*HZ); > + > + if (ret < 0) > + return ret; > + > + if (ret == 0) { > struct intel_crtc *intel_crtc = to_intel_crtc(crtc); > > spin_lock_irq(&dev->event_lock); > @@ -3886,11 +3868,7 @@ void intel_crtc_wait_for_pending_flips(struct drm_crtc > *crtc) > spin_unlock_irq(&dev->event_lock); > } > > - if (crtc->primary->fb) { > - mutex_lock(&dev->struct_mutex); > - intel_finish_fb(crtc->primary->fb); > - mutex_unlock(&dev->struct_mutex); > - } There is another caller of intel_crtc_wait_for_pending_flips() besides the one touched in this patch: intel_crtc_disable_noatomic(). In your previous series you dropped that call based on the fact that there shouldn't be any pending flips at that point, but that patch has been dropped. 
Wouldn't it be better to add a WARN_ON as Chris suggested then instead of keeping the wait for flips but without the work around? > + return 0; > } > > /* Program iCLKIP clock to the desired frequency */ > @@ -4750,9 +4728,6 @@ static void intel_pre_plane_update(struct intel_crtc > *crtc) > struct drm_i915_private *dev_priv = dev->dev_private; > struct intel_crtc_atomic_commit *atomic = &crtc->atomic; > > - if (atomic->wait_for_flips) > - intel_crtc_wait_for_pending_flips(&crtc->base); > - > if (atomic->disable_fbc) > intel_fbc_disable_crtc(crtc); > > @@ -11596,7 +11571,6 @@ int intel_plane_atomic_calc_changes(struct > drm_crtc_state *crtc_state, > > switch (plane->type) { > case DRM_PLANE_TYPE_PRIMARY: > - intel_crtc->atomic.wait_for_flips = true; > intel_crtc->atomic.pre_disable_primary = turn_off; > intel_crtc->atomic.post_enable_primary = turn_on; > > @@ -13015,6 +12989,30 @@ static int intel_atomic_check(struct drm_device *dev, > return drm_atomic_helper_check_planes(state->dev, state); > } > > +static int intel_atomic_prepare_commit(struct drm_device *dev, > + struct drm_atomic_state *state, > + bool async) > +{ > + struct drm_crtc_state *crtc_state; > + struct drm_crtc *crtc; > + int i, ret; > + > + if (async) { > + DRM_DEBUG_KMS("i915 does not yet support async commit\n"); > + return -EINVAL; > + } > + > + for_each_crtc_in_state(state, crtc, crtc_state, i) { > + ret = intel_crtc_wait_for_pending_flips(crtc); > + if (ret) > + return ret; > + } > + > + ret = drm_atomic_helper_prepare_planes(dev, state); > + > + return ret; > +} > + > /** > * intel_atomic_commit - commit validated state object > * @dev: DRM device > @@ -13042,12 +13040,7 @@ static int intel_atomic_commit(struct drm_device > *dev, > int i; > bool any_ms = false; > > - if (async) { > - DRM_DEBUG_KMS("i915 does not yet support async commit\n"); > - return -EINVAL; > - } > - > - ret = drm_atomic_helper_prepare_planes(dev, state); > + ret = intel_atomic_prepare_commit(dev, state, async); > if (ret) 
> return ret; > > @@ -13306,6 +13299,29 @@ intel_prepare_plane_fb(struct drm_plane *plane, > if (ret) > return ret; > > + if (old_obj) { > + struct drm_crtc_state *crtc_state = > + drm_atomic_get_existing_crtc_state(new_state->state, > plane->state->crtc); > + > + /* Big Hammer, we also need to ensure that any pending > + * MI_WAIT_FOR_EVENT inside a user batch buffer on the > + * current scanout is retired before unpinning the old > + * framebuffer. Note that we rely on userspace rendering > + * into the buffer attached to the pipe they are waiting > + * on. If not, userspace generates a GPU hang with IPEHR > + * point to the MI_WAIT_FOR_EVENT. > + * > + * This should only fail upon a hung GPU, in which case we > + * can safely continue. > + */ > + if (needs_modeset(crtc_state)) > + ret = i915_gem_object_wait_rendering(old_obj, true); > + > + /* Swallow -EIO errors to allow updates during hw lockup. */ > + if (ret && ret != -EIO) > + goto out; Doesn't this change the behavior of a modeset after a GPU hang? Since mm.interruptible is true, i915_gem_check_wedge() might return -EAGAIN instead of -EIO. Previously the modeset would continue in that scenario, but now, somewhat contrary to the comment above, we don't continue and instead pass the -EAGAIN to user space. 
Ander > + } > + > if (!obj) { > ret = 0; > } else if (plane->type == DRM_PLANE_TYPE_CURSOR && > @@ -13321,6 +13337,7 @@ intel_prepare_plane_fb(struct drm_plane *plane, > if (ret == 0) > i915_gem_track_fb(old_obj, obj, intel_plane > ->frontbuffer_bit); > > +out: > mutex_unlock(&dev->struct_mutex); > > return ret; > diff --git a/drivers/gpu/drm/i915/intel_drv.h > b/drivers/gpu/drm/i915/intel_drv.h > index a2f0b981e26c..cfdb0f2714cd 100644 > --- a/drivers/gpu/drm/i915/intel_drv.h > +++ b/drivers/gpu/drm/i915/intel_drv.h > @@ -499,7 +499,6 @@ struct skl_pipe_wm { > */ > struct intel_crtc_atomic_commit { > /* Sleepable operations to perform before commit */ > - bool wait_for_flips; > bool disable_fbc; > bool disable_ips; > bool disable_cxsr; > @@ -1158,7 +1157,6 @@ enum intel_display_power_domain > intel_display_port_power_domain(struct intel_encoder *intel_encoder); > void intel_mode_from_pipe_config(struct drm_display_mode *mode, > struct intel_crtc_state *pipe_config); > -void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc); > void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file); > > int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
On Mon, Oct 19, 2015 at 04:16:53PM +0300, Ander Conselvan De Oliveira wrote: > On Wed, 2015-09-23 at 13:27 +0200, Maarten Lankhorst wrote: > > @@ -13306,6 +13299,29 @@ intel_prepare_plane_fb(struct drm_plane *plane, > > if (ret) > > return ret; > > > > + if (old_obj) { > > + struct drm_crtc_state *crtc_state = > > + drm_atomic_get_existing_crtc_state(new_state->state, > > plane->state->crtc); > > + > > + /* Big Hammer, we also need to ensure that any pending > > + * MI_WAIT_FOR_EVENT inside a user batch buffer on the > > + * current scanout is retired before unpinning the old > > + * framebuffer. Note that we rely on userspace rendering > > + * into the buffer attached to the pipe they are waiting > > + * on. If not, userspace generates a GPU hang with IPEHR > > + * point to the MI_WAIT_FOR_EVENT. > > + * > > + * This should only fail upon a hung GPU, in which case we > > + * can safely continue. > > + */ > > + if (needs_modeset(crtc_state)) > > + ret = i915_gem_object_wait_rendering(old_obj, true); > > + > > + /* Swallow -EIO errors to allow updates during hw lockup. */ > > + if (ret && ret != -EIO) > > + goto out; > > Doesn't this change the behavior of a modeset after a GPU hang? Since > mm.interruptible is true, i915_gem_check_wedge() might return -EAGAIN instead of > -EIO. Previously the modeset would continue in that scenario, but now, somewhat > contrary to the comment above, we don't continue and instead pass the -EAGAIN to > user space. It's "while the gpu hang is pending" not "after", but this change is the hole point of making pinning interruptible. With current modeset code the only thing we could hope for is that the reset would go through, and otherwise we'd have to fail the modeset. Now we can correctly retry the operation if it has run into a concurrent gpu hang/reset. Note that we still should eat any -EIO, since modesets must continue even if the render side is completely dead. -Daniel
Op 19-10-15 om 15:16 schreef Ander Conselvan De Oliveira: > On Wed, 2015-09-23 at 13:27 +0200, Maarten Lankhorst wrote: >> Move it from intel_crtc_atomic_commit to prepare_plane_fb. >> Waiting is done before committing, otherwise it's too late >> to undo the changes. >> >> Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> >> --- >> drivers/gpu/drm/i915/intel_atomic.c | 2 - >> drivers/gpu/drm/i915/intel_display.c | 107 ++++++++++++++++++++-------------- >> - >> drivers/gpu/drm/i915/intel_drv.h | 2 - >> 3 files changed, 62 insertions(+), 49 deletions(-) >> >> diff --git a/drivers/gpu/drm/i915/intel_atomic.c >> b/drivers/gpu/drm/i915/intel_atomic.c >> index f1975f267710..25a891aa3824 100644 >> --- a/drivers/gpu/drm/i915/intel_atomic.c >> +++ b/drivers/gpu/drm/i915/intel_atomic.c >> @@ -205,8 +205,6 @@ int intel_atomic_setup_scalers(struct drm_device *dev, >> * but since this plane is unchanged just do >> the >> * minimum required validation. >> */ >> - if (plane->type == DRM_PLANE_TYPE_PRIMARY) >> - intel_crtc->atomic.wait_for_flips = >> true; >> crtc_state->base.planes_changed = true; >> } >> >> diff --git a/drivers/gpu/drm/i915/intel_display.c >> b/drivers/gpu/drm/i915/intel_display.c >> index 25e1eac260fd..cd651ff6c15b 100644 >> --- a/drivers/gpu/drm/i915/intel_display.c >> +++ b/drivers/gpu/drm/i915/intel_display.c >> @@ -3221,32 +3221,6 @@ void intel_finish_reset(struct drm_device *dev) >> drm_modeset_unlock_all(dev); >> } >> >> -static void >> -intel_finish_fb(struct drm_framebuffer *old_fb) >> -{ >> - struct drm_i915_gem_object *obj = intel_fb_obj(old_fb); >> - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); >> - bool was_interruptible = dev_priv->mm.interruptible; >> - int ret; >> - >> - /* Big Hammer, we also need to ensure that any pending >> - * MI_WAIT_FOR_EVENT inside a user batch buffer on the >> - * current scanout is retired before unpinning the old >> - * framebuffer. 
Note that we rely on userspace rendering >> - * into the buffer attached to the pipe they are waiting >> - * on. If not, userspace generates a GPU hang with IPEHR >> - * point to the MI_WAIT_FOR_EVENT. >> - * >> - * This should only fail upon a hung GPU, in which case we >> - * can safely continue. >> - */ >> - dev_priv->mm.interruptible = false; >> - ret = i915_gem_object_wait_rendering(obj, true); >> - dev_priv->mm.interruptible = was_interruptible; >> - >> - WARN_ON(ret); >> -} >> - >> static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) >> { >> struct drm_device *dev = crtc->dev; >> @@ -3867,15 +3841,23 @@ static void page_flip_completed(struct intel_crtc >> *intel_crtc) >> work->pending_flip_obj); >> } >> >> -void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) >> +static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) >> { >> struct drm_device *dev = crtc->dev; >> struct drm_i915_private *dev_priv = dev->dev_private; >> + long ret; >> >> WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue)); >> - if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue, >> - !intel_crtc_has_pending_flip(crtc), >> - 60*HZ) == 0)) { >> + >> + ret = wait_event_interruptible_timeout( >> + dev_priv->pending_flip_queue, >> + !intel_crtc_has_pending_flip(crtc), >> + 60*HZ); >> + >> + if (ret < 0) >> + return ret; >> + >> + if (ret == 0) { >> struct intel_crtc *intel_crtc = to_intel_crtc(crtc); >> >> spin_lock_irq(&dev->event_lock); >> @@ -3886,11 +3868,7 @@ void intel_crtc_wait_for_pending_flips(struct drm_crtc >> *crtc) >> spin_unlock_irq(&dev->event_lock); >> } >> >> - if (crtc->primary->fb) { >> - mutex_lock(&dev->struct_mutex); >> - intel_finish_fb(crtc->primary->fb); >> - mutex_unlock(&dev->struct_mutex); >> - } > There is another caller of intel_crtc_wait_for_pending_flips() besides the one > touched in this patch: intel_crtc_disable_noatomic(). 
In your previous series > you dropped that call based on the fact that there shouldn't be any pending > flips at that point, but that patch has been dropped. > > Wouldn't it be better to add a WARN_ON as Chris suggested then instead of > keeping the wait for flips but without the work around? > Yeah that would be a good idea. I'll fix up this patch.
On Mon, 2015-10-19 at 15:30 +0200, Daniel Vetter wrote: > On Mon, Oct 19, 2015 at 04:16:53PM +0300, Ander Conselvan De Oliveira wrote: > > On Wed, 2015-09-23 at 13:27 +0200, Maarten Lankhorst wrote: > > > @@ -13306,6 +13299,29 @@ intel_prepare_plane_fb(struct drm_plane *plane, > > > if (ret) > > > return ret; > > > > > > + if (old_obj) { > > > + struct drm_crtc_state *crtc_state = > > > + drm_atomic_get_existing_crtc_state(new_state > > > ->state, > > > plane->state->crtc); > > > + > > > + /* Big Hammer, we also need to ensure that any pending > > > + * MI_WAIT_FOR_EVENT inside a user batch buffer on the > > > + * current scanout is retired before unpinning the old > > > + * framebuffer. Note that we rely on userspace rendering > > > + * into the buffer attached to the pipe they are waiting > > > + * on. If not, userspace generates a GPU hang with IPEHR > > > + * point to the MI_WAIT_FOR_EVENT. > > > + * > > > + * This should only fail upon a hung GPU, in which case > > > we > > > + * can safely continue. > > > + */ > > > + if (needs_modeset(crtc_state)) > > > + ret = i915_gem_object_wait_rendering(old_obj, > > > true); > > > + > > > + /* Swallow -EIO errors to allow updates during hw lockup. > > > */ > > > + if (ret && ret != -EIO) > > > + goto out; > > > > Doesn't this change the behavior of a modeset after a GPU hang? Since > > mm.interruptible is true, i915_gem_check_wedge() might return -EAGAIN > > instead of > > -EIO. Previously the modeset would continue in that scenario, but now, > > somewhat > > contrary to the comment above, we don't continue and instead pass the > > -EAGAIN to > > user space. > > It's "while the gpu hang is pending" not "after", but this change is the > hole point of making pinning interruptible. With current modeset code the > only thing we could hope for is that the reset would go through, and > otherwise we'd have to fail the modeset. Now we can correctly retry the > operation if it has run into a concurrent gpu hang/reset. 
So in that case should user space retry the modeset? I don't think it does that at the moment, at least weston and xf86-video-intel don't. I'm not sure how big of a deal that is, though, since it is an unlikely corner case. Ander > Note that we still should eat any -EIO, since modesets must continue even > if the render side is completely dead. > -Daniel
On Tue, Oct 20, 2015 at 10:38:41AM +0300, Ander Conselvan De Oliveira wrote: > On Mon, 2015-10-19 at 15:30 +0200, Daniel Vetter wrote: > > On Mon, Oct 19, 2015 at 04:16:53PM +0300, Ander Conselvan De Oliveira wrote: > > > On Wed, 2015-09-23 at 13:27 +0200, Maarten Lankhorst wrote: > > > > @@ -13306,6 +13299,29 @@ intel_prepare_plane_fb(struct drm_plane *plane, > > > > if (ret) > > > > return ret; > > > > > > > > + if (old_obj) { > > > > + struct drm_crtc_state *crtc_state = > > > > + drm_atomic_get_existing_crtc_state(new_state > > > > ->state, > > > > plane->state->crtc); > > > > + > > > > + /* Big Hammer, we also need to ensure that any pending > > > > + * MI_WAIT_FOR_EVENT inside a user batch buffer on the > > > > + * current scanout is retired before unpinning the old > > > > + * framebuffer. Note that we rely on userspace rendering > > > > + * into the buffer attached to the pipe they are waiting > > > > + * on. If not, userspace generates a GPU hang with IPEHR > > > > + * point to the MI_WAIT_FOR_EVENT. > > > > + * > > > > + * This should only fail upon a hung GPU, in which case > > > > we > > > > + * can safely continue. > > > > + */ > > > > + if (needs_modeset(crtc_state)) > > > > + ret = i915_gem_object_wait_rendering(old_obj, > > > > true); > > > > + > > > > + /* Swallow -EIO errors to allow updates during hw lockup. > > > > */ > > > > + if (ret && ret != -EIO) > > > > + goto out; > > > > > > Doesn't this change the behavior of a modeset after a GPU hang? Since > > > mm.interruptible is true, i915_gem_check_wedge() might return -EAGAIN > > > instead of > > > -EIO. Previously the modeset would continue in that scenario, but now, > > > somewhat > > > contrary to the comment above, we don't continue and instead pass the > > > -EAGAIN to > > > user space. > > > > It's "while the gpu hang is pending" not "after", but this change is the > > hole point of making pinning interruptible. 
With current modeset code the > > only thing we could hope for is that the reset would go through, and > > otherwise we'd have to fail the modeset. Now we can correctly retry the > > operation if it has run into a concurrent gpu hang/reset. > > So in that case should user space retry the modeset? I don't think it does that > at moment, at least weston and xf86-video-intel don't. I'm not sure how big of a > deal that is, though, since it is an unlikely corner case. Every drm ioctl goes through drmIoctl in libdrm, which does this restarting. If not then that's a userspace bug, since this is a core bit of the drm ABI design. -Daniel
On Tue, 2015-10-20 at 10:10 +0200, Daniel Vetter wrote: > On Tue, Oct 20, 2015 at 10:38:41AM +0300, Ander Conselvan De Oliveira wrote: > > On Mon, 2015-10-19 at 15:30 +0200, Daniel Vetter wrote: > > > On Mon, Oct 19, 2015 at 04:16:53PM +0300, Ander Conselvan De Oliveira > > > wrote: > > > > On Wed, 2015-09-23 at 13:27 +0200, Maarten Lankhorst wrote: > > > > > @@ -13306,6 +13299,29 @@ intel_prepare_plane_fb(struct drm_plane > > > > > *plane, > > > > > if (ret) > > > > > return ret; > > > > > > > > > > + if (old_obj) { > > > > > + struct drm_crtc_state *crtc_state = > > > > > + drm_atomic_get_existing_crtc_state(new_state > > > > > ->state, > > > > > plane->state->crtc); > > > > > + > > > > > + /* Big Hammer, we also need to ensure that any > > > > > pending > > > > > + * MI_WAIT_FOR_EVENT inside a user batch buffer on > > > > > the > > > > > + * current scanout is retired before unpinning the > > > > > old > > > > > + * framebuffer. Note that we rely on userspace > > > > > rendering > > > > > + * into the buffer attached to the pipe they are > > > > > waiting > > > > > + * on. If not, userspace generates a GPU hang with > > > > > IPEHR > > > > > + * point to the MI_WAIT_FOR_EVENT. > > > > > + * > > > > > + * This should only fail upon a hung GPU, in which > > > > > case > > > > > we > > > > > + * can safely continue. > > > > > + */ > > > > > + if (needs_modeset(crtc_state)) > > > > > + ret = i915_gem_object_wait_rendering(old_obj, > > > > > true); > > > > > + > > > > > + /* Swallow -EIO errors to allow updates during hw > > > > > lockup. > > > > > */ > > > > > + if (ret && ret != -EIO) > > > > > + goto out; > > > > > > > > Doesn't this change the behavior of a modeset after a GPU hang? Since > > > > mm.interruptible is true, i915_gem_check_wedge() might return -EAGAIN > > > > instead of > > > > -EIO. 
Previously the modeset would continue in that scenario, but now, > > > > somewhat > > > > contrary to the comment above, we don't continue and instead pass the > > > > -EAGAIN to > > > > user space. > > > > > > It's "while the gpu hang is pending" not "after", but this change is the > > > hole point of making pinning interruptible. With current modeset code the > > > only thing we could hope for is that the reset would go through, and > > > otherwise we'd have to fail the modeset. Now we can correctly retry the > > > operation if it has run into a concurrent gpu hang/reset. > > > > So in that case should user space retry the modeset? I don't think it does > > that > > at moment, at least weston and xf86-video-intel don't. I'm not sure how big > > of a > > deal that is, though, since it is an unlikely corner case. > > Every drm ioctl goes through drmIoctl in libdrm, which does this > restarting. If not then that's a userspace bug, since this is a core bit > of the drm ABI design. Ah, true, completely forgot about that. With the new 2.9 patch, I don't see any other issues with this. I'm happy to give my r-b, but I think someone else that knows this stuff a bit better should at least ack the patch. Ander
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index f1975f267710..25a891aa3824 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c @@ -205,8 +205,6 @@ int intel_atomic_setup_scalers(struct drm_device *dev, * but since this plane is unchanged just do the * minimum required validation. */ - if (plane->type == DRM_PLANE_TYPE_PRIMARY) - intel_crtc->atomic.wait_for_flips = true; crtc_state->base.planes_changed = true; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 25e1eac260fd..cd651ff6c15b 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3221,32 +3221,6 @@ void intel_finish_reset(struct drm_device *dev) drm_modeset_unlock_all(dev); } -static void -intel_finish_fb(struct drm_framebuffer *old_fb) -{ - struct drm_i915_gem_object *obj = intel_fb_obj(old_fb); - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - bool was_interruptible = dev_priv->mm.interruptible; - int ret; - - /* Big Hammer, we also need to ensure that any pending - * MI_WAIT_FOR_EVENT inside a user batch buffer on the - * current scanout is retired before unpinning the old - * framebuffer. Note that we rely on userspace rendering - * into the buffer attached to the pipe they are waiting - * on. If not, userspace generates a GPU hang with IPEHR - * point to the MI_WAIT_FOR_EVENT. - * - * This should only fail upon a hung GPU, in which case we - * can safely continue. 
- */ - dev_priv->mm.interruptible = false; - ret = i915_gem_object_wait_rendering(obj, true); - dev_priv->mm.interruptible = was_interruptible; - - WARN_ON(ret); -} - static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -3867,15 +3841,23 @@ static void page_flip_completed(struct intel_crtc *intel_crtc) work->pending_flip_obj); } -void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) +static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; + long ret; WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue)); - if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue, - !intel_crtc_has_pending_flip(crtc), - 60*HZ) == 0)) { + + ret = wait_event_interruptible_timeout( + dev_priv->pending_flip_queue, + !intel_crtc_has_pending_flip(crtc), + 60*HZ); + + if (ret < 0) + return ret; + + if (ret == 0) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); spin_lock_irq(&dev->event_lock); @@ -3886,11 +3868,7 @@ void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) spin_unlock_irq(&dev->event_lock); } - if (crtc->primary->fb) { - mutex_lock(&dev->struct_mutex); - intel_finish_fb(crtc->primary->fb); - mutex_unlock(&dev->struct_mutex); - } + return 0; } /* Program iCLKIP clock to the desired frequency */ @@ -4750,9 +4728,6 @@ static void intel_pre_plane_update(struct intel_crtc *crtc) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc_atomic_commit *atomic = &crtc->atomic; - if (atomic->wait_for_flips) - intel_crtc_wait_for_pending_flips(&crtc->base); - if (atomic->disable_fbc) intel_fbc_disable_crtc(crtc); @@ -11596,7 +11571,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, switch (plane->type) { case DRM_PLANE_TYPE_PRIMARY: - intel_crtc->atomic.wait_for_flips = true; intel_crtc->atomic.pre_disable_primary = turn_off; intel_crtc->atomic.post_enable_primary = 
turn_on; @@ -13015,6 +12989,30 @@ static int intel_atomic_check(struct drm_device *dev, return drm_atomic_helper_check_planes(state->dev, state); } +static int intel_atomic_prepare_commit(struct drm_device *dev, + struct drm_atomic_state *state, + bool async) +{ + struct drm_crtc_state *crtc_state; + struct drm_crtc *crtc; + int i, ret; + + if (async) { + DRM_DEBUG_KMS("i915 does not yet support async commit\n"); + return -EINVAL; + } + + for_each_crtc_in_state(state, crtc, crtc_state, i) { + ret = intel_crtc_wait_for_pending_flips(crtc); + if (ret) + return ret; + } + + ret = drm_atomic_helper_prepare_planes(dev, state); + + return ret; +} + /** * intel_atomic_commit - commit validated state object * @dev: DRM device @@ -13042,12 +13040,7 @@ static int intel_atomic_commit(struct drm_device *dev, int i; bool any_ms = false; - if (async) { - DRM_DEBUG_KMS("i915 does not yet support async commit\n"); - return -EINVAL; - } - - ret = drm_atomic_helper_prepare_planes(dev, state); + ret = intel_atomic_prepare_commit(dev, state, async); if (ret) return ret; @@ -13306,6 +13299,29 @@ intel_prepare_plane_fb(struct drm_plane *plane, if (ret) return ret; + if (old_obj) { + struct drm_crtc_state *crtc_state = + drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc); + + /* Big Hammer, we also need to ensure that any pending + * MI_WAIT_FOR_EVENT inside a user batch buffer on the + * current scanout is retired before unpinning the old + * framebuffer. Note that we rely on userspace rendering + * into the buffer attached to the pipe they are waiting + * on. If not, userspace generates a GPU hang with IPEHR + * point to the MI_WAIT_FOR_EVENT. + * + * This should only fail upon a hung GPU, in which case we + * can safely continue. + */ + if (needs_modeset(crtc_state)) + ret = i915_gem_object_wait_rendering(old_obj, true); + + /* Swallow -EIO errors to allow updates during hw lockup. 
*/ + if (ret && ret != -EIO) + goto out; + } + if (!obj) { ret = 0; } else if (plane->type == DRM_PLANE_TYPE_CURSOR && @@ -13321,6 +13337,7 @@ intel_prepare_plane_fb(struct drm_plane *plane, if (ret == 0) i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit); +out: mutex_unlock(&dev->struct_mutex); return ret; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index a2f0b981e26c..cfdb0f2714cd 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -499,7 +499,6 @@ struct skl_pipe_wm { */ struct intel_crtc_atomic_commit { /* Sleepable operations to perform before commit */ - bool wait_for_flips; bool disable_fbc; bool disable_ips; bool disable_cxsr; @@ -1158,7 +1157,6 @@ enum intel_display_power_domain intel_display_port_power_domain(struct intel_encoder *intel_encoder); void intel_mode_from_pipe_config(struct drm_display_mode *mode, struct intel_crtc_state *pipe_config); -void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc); void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file); int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
Move it from intel_crtc_atomic_commit to prepare_plane_fb. Waiting is done before committing, otherwise it's too late to undo the changes. Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> --- drivers/gpu/drm/i915/intel_atomic.c | 2 - drivers/gpu/drm/i915/intel_display.c | 107 ++++++++++++++++++++--------------- drivers/gpu/drm/i915/intel_drv.h | 2 - 3 files changed, 62 insertions(+), 49 deletions(-)