| Field | Value |
|---|---|
| Message ID | 1448786893-2522-5-git-send-email-chris@chris-wilson.co.uk (mailing list archive) |
| State | New, archived |
| Headers | show |
On Sun, Nov 29, 2015 at 08:48:02AM +0000, Chris Wilson wrote: > Instead of querying the reset counter before every access to the ring, > query it the first time we touch the ring, and do a final compare when > submitting the request. For correctness, we need to then sanitize how > the reset_counter is incremented to prevent broken submission and > waiting across resets, in the process fixing the persistent -EIO we > still see today on failed waits. > > v2: Rebase > v3: Now with added testcase > v4: Rebase > > Testcase: igt/gem_eio > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Imo you really can't hide an ABI change in a refactor/optimize patch like this ... I really think moving check_wedge out of wait_request must be it's own little patch. -Daniel > --- > drivers/gpu/drm/i915/i915_debugfs.c | 2 +- > drivers/gpu/drm/i915/i915_drv.c | 32 +++++++----- > drivers/gpu/drm/i915/i915_drv.h | 39 ++++++++++---- > drivers/gpu/drm/i915/i915_gem.c | 90 +++++++++++++++------------------ > drivers/gpu/drm/i915/i915_irq.c | 21 +------- > drivers/gpu/drm/i915/intel_display.c | 13 ++--- > drivers/gpu/drm/i915/intel_lrc.c | 13 +++-- > drivers/gpu/drm/i915/intel_ringbuffer.c | 14 ++--- > 8 files changed, 111 insertions(+), 113 deletions(-) > > diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c > index a728ff11e389..8458447ddc17 100644 > --- a/drivers/gpu/drm/i915/i915_debugfs.c > +++ b/drivers/gpu/drm/i915/i915_debugfs.c > @@ -4660,7 +4660,7 @@ i915_wedged_get(void *data, u64 *val) > struct drm_device *dev = data; > struct drm_i915_private *dev_priv = dev->dev_private; > > - *val = atomic_read(&dev_priv->gpu_error.reset_counter); > + *val = i915_terminally_wedged(&dev_priv->gpu_error); > > return 0; > } > diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c > index 90faa8e03fca..eb893a5e00b1 100644 > --- a/drivers/gpu/drm/i915/i915_drv.c > +++ b/drivers/gpu/drm/i915/i915_drv.c > @@ -923,23 +923,31 @@ int 
i915_resume_switcheroo(struct drm_device *dev) > int i915_reset(struct drm_device *dev) > { > struct drm_i915_private *dev_priv = dev->dev_private; > + struct i915_gpu_error *error = &dev_priv->gpu_error; > + unsigned reset_counter; > bool simulated; > int ret; > > intel_reset_gt_powersave(dev); > > mutex_lock(&dev->struct_mutex); > + atomic_andnot(I915_WEDGED, &error->reset_counter); > + reset_counter = atomic_inc_return(&error->reset_counter); > + if (WARN_ON(__i915_reset_in_progress(reset_counter))) { > + ret = -EIO; > + goto error; > + } > > i915_gem_reset(dev); > > - simulated = dev_priv->gpu_error.stop_rings != 0; > + simulated = error->stop_rings != 0; > > ret = intel_gpu_reset(dev); > > /* Also reset the gpu hangman. */ > if (simulated) { > DRM_INFO("Simulated gpu hang, resetting stop_rings\n"); > - dev_priv->gpu_error.stop_rings = 0; > + error->stop_rings = 0; > if (ret == -ENODEV) { > DRM_INFO("Reset not implemented, but ignoring " > "error for simulated gpu hangs\n"); > @@ -952,8 +960,7 @@ int i915_reset(struct drm_device *dev) > > if (ret) { > DRM_ERROR("Failed to reset chip: %i\n", ret); > - mutex_unlock(&dev->struct_mutex); > - return ret; > + goto error; > } > > intel_overlay_reset(dev_priv); > @@ -972,20 +979,14 @@ int i915_reset(struct drm_device *dev) > * was running at the time of the reset (i.e. we weren't VT > * switched away). > */ > - > - /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */ > - dev_priv->gpu_error.reload_in_reset = true; > - > ret = i915_gem_init_hw(dev); > - > - dev_priv->gpu_error.reload_in_reset = false; > - > - mutex_unlock(&dev->struct_mutex); > if (ret) { > DRM_ERROR("Failed hw init on reset %d\n", ret); > - return ret; > + goto error; > } > > + mutex_unlock(&dev->struct_mutex); > + > /* > * rps/rc6 re-init is necessary to restore state lost after the > * reset and the re-install of gt irqs. 
Skip for ironlake per > @@ -996,6 +997,11 @@ int i915_reset(struct drm_device *dev) > intel_enable_gt_powersave(dev); > > return 0; > + > +error: > + atomic_or(I915_WEDGED, &error->reset_counter); > + mutex_unlock(&dev->struct_mutex); > + return ret; > } > > static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) > diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h > index d07041c1729d..c24c23d8a0c0 100644 > --- a/drivers/gpu/drm/i915/i915_drv.h > +++ b/drivers/gpu/drm/i915/i915_drv.h > @@ -1384,9 +1384,6 @@ struct i915_gpu_error { > > /* For missed irq/seqno simulation. */ > unsigned int test_irq_rings; > - > - /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */ > - bool reload_in_reset; > }; > > enum modeset_restore { > @@ -2181,6 +2178,7 @@ struct drm_i915_gem_request { > /** On Which ring this request was generated */ > struct drm_i915_private *i915; > struct intel_engine_cs *ring; > + unsigned reset_counter; > > /** GEM sequence number associated with the previous request, > * when the HWS breadcrumb is equal to this the GPU is processing > @@ -2976,23 +2974,47 @@ i915_gem_find_active_request(struct intel_engine_cs *ring); > > bool i915_gem_retire_requests(struct drm_device *dev); > void i915_gem_retire_requests_ring(struct intel_engine_cs *ring); > -int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, > +int __must_check i915_gem_check_wedge(unsigned reset_counter, > bool interruptible); > > +static inline u32 i915_reset_counter(struct i915_gpu_error *error) > +{ > + return atomic_read(&error->reset_counter); > +} > + > +static inline bool __i915_reset_in_progress(u32 reset) > +{ > + return unlikely(reset & I915_RESET_IN_PROGRESS_FLAG); > +} > + > +static inline bool __i915_reset_in_progress_or_wedged(u32 reset) > +{ > + return unlikely(reset & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED)); > +} > + > +static inline bool __i915_terminally_wedged(u32 reset) > +{ > + return 
unlikely(reset & I915_WEDGED); > +} > + > static inline bool i915_reset_in_progress(struct i915_gpu_error *error) > { > - return unlikely(atomic_read(&error->reset_counter) > - & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED)); > + return __i915_reset_in_progress(i915_reset_counter(error)); > +} > + > +static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error) > +{ > + return __i915_reset_in_progress_or_wedged(i915_reset_counter(error)); > } > > static inline bool i915_terminally_wedged(struct i915_gpu_error *error) > { > - return atomic_read(&error->reset_counter) & I915_WEDGED; > + return __i915_terminally_wedged(i915_reset_counter(error)); > } > > static inline u32 i915_reset_count(struct i915_gpu_error *error) > { > - return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2; > + return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2; > } > > static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv) > @@ -3025,7 +3047,6 @@ void __i915_add_request(struct drm_i915_gem_request *req, > #define i915_add_request_no_flush(req) \ > __i915_add_request(req, NULL, false) > int __i915_wait_request(struct drm_i915_gem_request *req, > - unsigned reset_counter, > bool interruptible, > s64 *timeout, > struct intel_rps_client *rps); > diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c > index ada461e02718..646d189e23a1 100644 > --- a/drivers/gpu/drm/i915/i915_gem.c > +++ b/drivers/gpu/drm/i915/i915_gem.c > @@ -85,9 +85,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error) > { > int ret; > > -#define EXIT_COND (!i915_reset_in_progress(error) || \ > - i915_terminally_wedged(error)) > - if (EXIT_COND) > + if (!i915_reset_in_progress(error)) > return 0; > > /* > @@ -96,17 +94,16 @@ i915_gem_wait_for_error(struct i915_gpu_error *error) > * we should simply try to bail out and fail as gracefully as possible. 
> */ > ret = wait_event_interruptible_timeout(error->reset_queue, > - EXIT_COND, > + !i915_reset_in_progress(error), > 10*HZ); > if (ret == 0) { > DRM_ERROR("Timed out waiting for the gpu reset to complete\n"); > return -EIO; > } else if (ret < 0) { > return ret; > + } else { > + return 0; > } > -#undef EXIT_COND > - > - return 0; > } > > int i915_mutex_lock_interruptible(struct drm_device *dev) > @@ -1110,26 +1107,20 @@ put_rpm: > } > > int > -i915_gem_check_wedge(struct i915_gpu_error *error, > +i915_gem_check_wedge(unsigned reset_counter, > bool interruptible) > { > - if (i915_reset_in_progress(error)) { > + if (__i915_reset_in_progress_or_wedged(reset_counter)) { > /* Non-interruptible callers can't handle -EAGAIN, hence return > * -EIO unconditionally for these. */ > if (!interruptible) > return -EIO; > > /* Recovery complete, but the reset failed ... */ > - if (i915_terminally_wedged(error)) > + if (__i915_terminally_wedged(reset_counter)) > return -EIO; > > - /* > - * Check if GPU Reset is in progress - we need intel_ring_begin > - * to work properly to reinit the hw state while the gpu is > - * still marked as reset-in-progress. Handle this with a flag. > - */ > - if (!error->reload_in_reset) > - return -EAGAIN; > + return -EAGAIN; > } > > return 0; > @@ -1223,7 +1214,6 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state) > /** > * __i915_wait_request - wait until execution of request has finished > * @req: duh! > - * @reset_counter: reset sequence associated with the given request > * @interruptible: do an interruptible wait (normally yes) > * @timeout: in - how long to wait (NULL forever); out - how much time remaining > * > @@ -1238,7 +1228,6 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state) > * errno with remaining time filled in timeout argument. 
> */ > int __i915_wait_request(struct drm_i915_gem_request *req, > - unsigned reset_counter, > bool interruptible, > s64 *timeout, > struct intel_rps_client *rps) > @@ -1289,12 +1278,12 @@ int __i915_wait_request(struct drm_i915_gem_request *req, > > /* We need to check whether any gpu reset happened in between > * the caller grabbing the seqno and now ... */ > - if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) { > - /* ... but upgrade the -EAGAIN to an -EIO if the gpu > - * is truely gone. */ > - ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible); > - if (ret == 0) > - ret = -EAGAIN; > + if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) { > + /* As we do not requeue the request over a GPU reset, > + * if one does occur we know that the request is > + * effectively complete. > + */ > + ret = 0; > break; > } > > @@ -1462,13 +1451,7 @@ i915_wait_request(struct drm_i915_gem_request *req) > > BUG_ON(!mutex_is_locked(&dev->struct_mutex)); > > - ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible); > - if (ret) > - return ret; > - > - ret = __i915_wait_request(req, > - atomic_read(&dev_priv->gpu_error.reset_counter), > - interruptible, NULL, NULL); > + ret = __i915_wait_request(req, interruptible, NULL, NULL); > if (ret) > return ret; > > @@ -1543,7 +1526,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, > struct drm_device *dev = obj->base.dev; > struct drm_i915_private *dev_priv = dev->dev_private; > struct drm_i915_gem_request *requests[I915_NUM_RINGS]; > - unsigned reset_counter; > int ret, i, n = 0; > > BUG_ON(!mutex_is_locked(&dev->struct_mutex)); > @@ -1552,12 +1534,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, > if (!obj->active) > return 0; > > - ret = i915_gem_check_wedge(&dev_priv->gpu_error, true); > - if (ret) > - return ret; > - > - reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); > - > if (readonly) { > struct 
drm_i915_gem_request *req; > > @@ -1579,9 +1555,9 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, > } > > mutex_unlock(&dev->struct_mutex); > + ret = 0; > for (i = 0; ret == 0 && i < n; i++) > - ret = __i915_wait_request(requests[i], reset_counter, true, > - NULL, rps); > + ret = __i915_wait_request(requests[i], true, NULL, rps); > mutex_lock(&dev->struct_mutex); > > for (i = 0; i < n; i++) { > @@ -2685,6 +2661,7 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring, > struct drm_i915_gem_request **req_out) > { > struct drm_i915_private *dev_priv = to_i915(ring->dev); > + unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error); > struct drm_i915_gem_request *req; > int ret; > > @@ -2693,6 +2670,10 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring, > > *req_out = NULL; > > + ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible); > + if (ret) > + return ret; > + > req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL); > if (req == NULL) > return -ENOMEM; > @@ -2704,6 +2685,7 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring, > kref_init(&req->ref); > req->i915 = dev_priv; > req->ring = ring; > + req->reset_counter = reset_counter; > req->ctx = ctx; > i915_gem_context_reference(req->ctx); > > @@ -3064,11 +3046,9 @@ retire: > int > i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) > { > - struct drm_i915_private *dev_priv = dev->dev_private; > struct drm_i915_gem_wait *args = data; > struct drm_i915_gem_object *obj; > struct drm_i915_gem_request *req[I915_NUM_RINGS]; > - unsigned reset_counter; > int i, n = 0; > int ret; > > @@ -3102,7 +3082,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) > } > > drm_gem_object_unreference(&obj->base); > - reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); > > for (i = 0; i < I915_NUM_RINGS; i++) { > if (obj->last_read_req[i] == NULL) > @@ -3115,11 +3094,23 @@ 
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) > > for (i = 0; i < n; i++) { > if (ret == 0) > - ret = __i915_wait_request(req[i], reset_counter, true, > + ret = __i915_wait_request(req[i], true, > args->timeout_ns > 0 ? &args->timeout_ns : NULL, > file->driver_priv); > + > + /* If the GPU hung before this, report it. Ideally we only > + * report if this request cannot be completed. Currently > + * when we don't mark the guilty party and abort all > + * requests on reset, so just mark all as EIO. > + */ > + if (ret == 0 && > + req[i]->reset_counter != i915_reset_counter(&req[i]->i915->gpu_error)) > + ret = -EIO; > + > i915_gem_request_unreference__unlocked(req[i]); > } > + > + > return ret; > > out: > @@ -3147,7 +3138,6 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj, > if (!i915_semaphore_is_enabled(obj->base.dev)) { > struct drm_i915_private *i915 = to_i915(obj->base.dev); > ret = __i915_wait_request(from_req, > - atomic_read(&i915->gpu_error.reset_counter), > i915->mm.interruptible, > NULL, > &i915->rps.semaphores); > @@ -4076,14 +4066,15 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) > struct drm_i915_file_private *file_priv = file->driver_priv; > unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES; > struct drm_i915_gem_request *request, *target = NULL; > - unsigned reset_counter; > int ret; > > ret = i915_gem_wait_for_error(&dev_priv->gpu_error); > if (ret) > return ret; > > - ret = i915_gem_check_wedge(&dev_priv->gpu_error, false); > + /* ABI: return -EIO if wedged */ > + ret = i915_gem_check_wedge(i915_reset_counter(&dev_priv->gpu_error), > + false); > if (ret) > return ret; > > @@ -4101,7 +4092,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) > > target = request; > } > - reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); > if (target) > i915_gem_request_reference(target); > spin_unlock(&file_priv->mm.lock); > @@ -4109,7 +4099,7 @@ 
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) > if (target == NULL) > return 0; > > - ret = __i915_wait_request(target, reset_counter, true, NULL, NULL); > + ret = __i915_wait_request(target, true, NULL, NULL); > if (ret == 0) > queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); > > diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c > index e88d692583a5..3b2a8fbe0392 100644 > --- a/drivers/gpu/drm/i915/i915_irq.c > +++ b/drivers/gpu/drm/i915/i915_irq.c > @@ -2434,7 +2434,6 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv, > static void i915_reset_and_wakeup(struct drm_device *dev) > { > struct drm_i915_private *dev_priv = to_i915(dev); > - struct i915_gpu_error *error = &dev_priv->gpu_error; > char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; > char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; > char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; > @@ -2452,7 +2451,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev) > * the reset in-progress bit is only ever set by code outside of this > * work we don't need to worry about any other races. > */ > - if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { > + if (i915_reset_in_progress(&dev_priv->gpu_error)) { > DRM_DEBUG_DRIVER("resetting chip\n"); > kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, > reset_event); > @@ -2480,25 +2479,9 @@ static void i915_reset_and_wakeup(struct drm_device *dev) > > intel_runtime_pm_put(dev_priv); > > - if (ret == 0) { > - /* > - * After all the gem state is reset, increment the reset > - * counter and wake up everyone waiting for the reset to > - * complete. > - * > - * Since unlock operations are a one-sided barrier only, > - * we need to insert a barrier here to order any seqno > - * updates before > - * the counter increment. 
> - */ > - smp_mb__before_atomic(); > - atomic_inc(&dev_priv->gpu_error.reset_counter); > - > + if (ret == 0) > kobject_uevent_env(&dev->primary->kdev->kobj, > KOBJ_CHANGE, reset_done_event); > - } else { > - atomic_or(I915_WEDGED, &error->reset_counter); > - } > > /* > * Note: The wake_up also serves as a memory barrier so that > diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c > index 0743337ac9e8..d77a17bb94d0 100644 > --- a/drivers/gpu/drm/i915/intel_display.c > +++ b/drivers/gpu/drm/i915/intel_display.c > @@ -3289,8 +3289,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) > struct intel_crtc *intel_crtc = to_intel_crtc(crtc); > bool pending; > > - if (i915_reset_in_progress(&dev_priv->gpu_error) || > - intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) > + if (intel_crtc->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) > return false; > > spin_lock_irq(&dev->event_lock); > @@ -10879,8 +10878,7 @@ static bool page_flip_finished(struct intel_crtc *crtc) > struct drm_device *dev = crtc->base.dev; > struct drm_i915_private *dev_priv = dev->dev_private; > > - if (i915_reset_in_progress(&dev_priv->gpu_error) || > - crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) > + if (crtc->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) > return true; > > /* > @@ -11322,7 +11320,6 @@ static void intel_mmio_flip_work_func(struct work_struct *work) > > if (mmio_flip->req) { > WARN_ON(__i915_wait_request(mmio_flip->req, > - mmio_flip->crtc->reset_counter, > false, NULL, > &mmio_flip->i915->rps.mmioflips)); > i915_gem_request_unreference__unlocked(mmio_flip->req); > @@ -13312,9 +13309,6 @@ static int intel_atomic_prepare_commit(struct drm_device *dev, > > ret = drm_atomic_helper_prepare_planes(dev, state); > if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) { > - u32 reset_counter; > - > - reset_counter = 
atomic_read(&dev_priv->gpu_error.reset_counter); > mutex_unlock(&dev->struct_mutex); > > for_each_plane_in_state(state, plane, plane_state, i) { > @@ -13325,8 +13319,7 @@ static int intel_atomic_prepare_commit(struct drm_device *dev, > continue; > > ret = __i915_wait_request(intel_plane_state->wait_req, > - reset_counter, true, > - NULL, NULL); > + true, NULL, NULL); > > /* Swallow -EIO errors to allow updates during hw lockup. */ > if (ret == -EIO) > diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c > index 4ebafab53f30..b40ffc3607e7 100644 > --- a/drivers/gpu/drm/i915/intel_lrc.c > +++ b/drivers/gpu/drm/i915/intel_lrc.c > @@ -711,6 +711,14 @@ static int logical_ring_wait_for_space(struct drm_i915_gem_request *req, > if (ret) > return ret; > > + /* If the request was completed due to a GPU hang, we want to > + * error out before we continue to emit more commands to the GPU. > + */ > + ret = i915_gem_check_wedge(i915_reset_counter(&req->i915->gpu_error), > + req->i915->mm.interruptible); > + if (ret) > + return ret; > + > ringbuf->space = space; > return 0; > } > @@ -825,11 +833,6 @@ int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords) > WARN_ON(req == NULL); > dev_priv = req->ring->dev->dev_private; > > - ret = i915_gem_check_wedge(&dev_priv->gpu_error, > - dev_priv->mm.interruptible); > - if (ret) > - return ret; > - > ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t)); > if (ret) > return ret; > diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c > index 57d78f264b53..511efe556d73 100644 > --- a/drivers/gpu/drm/i915/intel_ringbuffer.c > +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c > @@ -2254,6 +2254,14 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n) > if (ret) > return ret; > > + /* If the request was completed due to a GPU hang, we want to > + * error out before we continue to emit more commands to the GPU. 
> + */ > + ret = i915_gem_check_wedge(i915_reset_counter(&to_i915(ring->dev)->gpu_error), > + to_i915(ring->dev)->mm.interruptible); > + if (ret) > + return ret; > + > ringbuf->space = space; > return 0; > } > @@ -2286,7 +2294,6 @@ int intel_ring_idle(struct intel_engine_cs *ring) > > /* Make sure we do not trigger any retires */ > return __i915_wait_request(req, > - atomic_read(&to_i915(ring->dev)->gpu_error.reset_counter), > to_i915(ring->dev)->mm.interruptible, > NULL, NULL); > } > @@ -2417,11 +2424,6 @@ int intel_ring_begin(struct drm_i915_gem_request *req, > ring = req->ring; > dev_priv = ring->dev->dev_private; > > - ret = i915_gem_check_wedge(&dev_priv->gpu_error, > - dev_priv->mm.interruptible); > - if (ret) > - return ret; > - > ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t)); > if (ret) > return ret; > -- > 2.6.2 > > _______________________________________________ > Intel-gfx mailing list > Intel-gfx@lists.freedesktop.org > http://lists.freedesktop.org/mailman/listinfo/intel-gfx
On Tue, Dec 01, 2015 at 09:31:31AM +0100, Daniel Vetter wrote: > On Sun, Nov 29, 2015 at 08:48:02AM +0000, Chris Wilson wrote: > > Instead of querying the reset counter before every access to the ring, > > query it the first time we touch the ring, and do a final compare when > > submitting the request. For correctness, we need to then sanitize how > > the reset_counter is incremented to prevent broken submission and > > waiting across resets, in the process fixing the persistent -EIO we > > still see today on failed waits. > > > > v2: Rebase > > v3: Now with added testcase > > v4: Rebase > > > > Testcase: igt/gem_eio > > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> > > Imo you really can't hide an ABI change in a refactor/optimize patch like > this ... I really think moving check_wedge out of wait_request must be > it's own little patch. Urm, that is the essential part of the patch in this series. -Chris
On Tue, Dec 01, 2015 at 08:47:44AM +0000, Chris Wilson wrote: > On Tue, Dec 01, 2015 at 09:31:31AM +0100, Daniel Vetter wrote: > > On Sun, Nov 29, 2015 at 08:48:02AM +0000, Chris Wilson wrote: > > > Instead of querying the reset counter before every access to the ring, > > > query it the first time we touch the ring, and do a final compare when > > > submitting the request. For correctness, we need to then sanitize how > > > the reset_counter is incremented to prevent broken submission and > > > waiting across resets, in the process fixing the persistent -EIO we > > > still see today on failed waits. > > > > > > v2: Rebase > > > v3: Now with added testcase > > > v4: Rebase > > > > > > Testcase: igt/gem_eio > > > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> > > > > Imo you really can't hide an ABI change in a refactor/optimize patch like > > this ... I really think moving check_wedge out of wait_request must be > > it's own little patch. > > Urm, that is the essential part of the patch in this series. Ok, no it's not. The key is having the request->reset_counter. I was thinking that the wakeup logic had to be the same in both - which is true, but that doesn't restrict how the wakeup is then propagated from i915_wait_request. Getting the wakeup robust though is the meat of the patch. -Chris
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index a728ff11e389..8458447ddc17 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -4660,7 +4660,7 @@ i915_wedged_get(void *data, u64 *val) struct drm_device *dev = data; struct drm_i915_private *dev_priv = dev->dev_private; - *val = atomic_read(&dev_priv->gpu_error.reset_counter); + *val = i915_terminally_wedged(&dev_priv->gpu_error); return 0; } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 90faa8e03fca..eb893a5e00b1 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -923,23 +923,31 @@ int i915_resume_switcheroo(struct drm_device *dev) int i915_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_gpu_error *error = &dev_priv->gpu_error; + unsigned reset_counter; bool simulated; int ret; intel_reset_gt_powersave(dev); mutex_lock(&dev->struct_mutex); + atomic_andnot(I915_WEDGED, &error->reset_counter); + reset_counter = atomic_inc_return(&error->reset_counter); + if (WARN_ON(__i915_reset_in_progress(reset_counter))) { + ret = -EIO; + goto error; + } i915_gem_reset(dev); - simulated = dev_priv->gpu_error.stop_rings != 0; + simulated = error->stop_rings != 0; ret = intel_gpu_reset(dev); /* Also reset the gpu hangman. */ if (simulated) { DRM_INFO("Simulated gpu hang, resetting stop_rings\n"); - dev_priv->gpu_error.stop_rings = 0; + error->stop_rings = 0; if (ret == -ENODEV) { DRM_INFO("Reset not implemented, but ignoring " "error for simulated gpu hangs\n"); @@ -952,8 +960,7 @@ int i915_reset(struct drm_device *dev) if (ret) { DRM_ERROR("Failed to reset chip: %i\n", ret); - mutex_unlock(&dev->struct_mutex); - return ret; + goto error; } intel_overlay_reset(dev_priv); @@ -972,20 +979,14 @@ int i915_reset(struct drm_device *dev) * was running at the time of the reset (i.e. we weren't VT * switched away). 
*/ - - /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */ - dev_priv->gpu_error.reload_in_reset = true; - ret = i915_gem_init_hw(dev); - - dev_priv->gpu_error.reload_in_reset = false; - - mutex_unlock(&dev->struct_mutex); if (ret) { DRM_ERROR("Failed hw init on reset %d\n", ret); - return ret; + goto error; } + mutex_unlock(&dev->struct_mutex); + /* * rps/rc6 re-init is necessary to restore state lost after the * reset and the re-install of gt irqs. Skip for ironlake per @@ -996,6 +997,11 @@ int i915_reset(struct drm_device *dev) intel_enable_gt_powersave(dev); return 0; + +error: + atomic_or(I915_WEDGED, &error->reset_counter); + mutex_unlock(&dev->struct_mutex); + return ret; } static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d07041c1729d..c24c23d8a0c0 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1384,9 +1384,6 @@ struct i915_gpu_error { /* For missed irq/seqno simulation. 
*/ unsigned int test_irq_rings; - - /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */ - bool reload_in_reset; }; enum modeset_restore { @@ -2181,6 +2178,7 @@ struct drm_i915_gem_request { /** On Which ring this request was generated */ struct drm_i915_private *i915; struct intel_engine_cs *ring; + unsigned reset_counter; /** GEM sequence number associated with the previous request, * when the HWS breadcrumb is equal to this the GPU is processing @@ -2976,23 +2974,47 @@ i915_gem_find_active_request(struct intel_engine_cs *ring); bool i915_gem_retire_requests(struct drm_device *dev); void i915_gem_retire_requests_ring(struct intel_engine_cs *ring); -int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, +int __must_check i915_gem_check_wedge(unsigned reset_counter, bool interruptible); +static inline u32 i915_reset_counter(struct i915_gpu_error *error) +{ + return atomic_read(&error->reset_counter); +} + +static inline bool __i915_reset_in_progress(u32 reset) +{ + return unlikely(reset & I915_RESET_IN_PROGRESS_FLAG); +} + +static inline bool __i915_reset_in_progress_or_wedged(u32 reset) +{ + return unlikely(reset & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED)); +} + +static inline bool __i915_terminally_wedged(u32 reset) +{ + return unlikely(reset & I915_WEDGED); +} + static inline bool i915_reset_in_progress(struct i915_gpu_error *error) { - return unlikely(atomic_read(&error->reset_counter) - & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED)); + return __i915_reset_in_progress(i915_reset_counter(error)); +} + +static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error) +{ + return __i915_reset_in_progress_or_wedged(i915_reset_counter(error)); } static inline bool i915_terminally_wedged(struct i915_gpu_error *error) { - return atomic_read(&error->reset_counter) & I915_WEDGED; + return __i915_terminally_wedged(i915_reset_counter(error)); } static inline u32 i915_reset_count(struct i915_gpu_error *error) { - return 
((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2; + return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2; } static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv) @@ -3025,7 +3047,6 @@ void __i915_add_request(struct drm_i915_gem_request *req, #define i915_add_request_no_flush(req) \ __i915_add_request(req, NULL, false) int __i915_wait_request(struct drm_i915_gem_request *req, - unsigned reset_counter, bool interruptible, s64 *timeout, struct intel_rps_client *rps); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ada461e02718..646d189e23a1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -85,9 +85,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error) { int ret; -#define EXIT_COND (!i915_reset_in_progress(error) || \ - i915_terminally_wedged(error)) - if (EXIT_COND) + if (!i915_reset_in_progress(error)) return 0; /* @@ -96,17 +94,16 @@ i915_gem_wait_for_error(struct i915_gpu_error *error) * we should simply try to bail out and fail as gracefully as possible. */ ret = wait_event_interruptible_timeout(error->reset_queue, - EXIT_COND, + !i915_reset_in_progress(error), 10*HZ); if (ret == 0) { DRM_ERROR("Timed out waiting for the gpu reset to complete\n"); return -EIO; } else if (ret < 0) { return ret; + } else { + return 0; } -#undef EXIT_COND - - return 0; } int i915_mutex_lock_interruptible(struct drm_device *dev) @@ -1110,26 +1107,20 @@ put_rpm: } int -i915_gem_check_wedge(struct i915_gpu_error *error, +i915_gem_check_wedge(unsigned reset_counter, bool interruptible) { - if (i915_reset_in_progress(error)) { + if (__i915_reset_in_progress_or_wedged(reset_counter)) { /* Non-interruptible callers can't handle -EAGAIN, hence return * -EIO unconditionally for these. */ if (!interruptible) return -EIO; /* Recovery complete, but the reset failed ... 
*/ - if (i915_terminally_wedged(error)) + if (__i915_terminally_wedged(reset_counter)) return -EIO; - /* - * Check if GPU Reset is in progress - we need intel_ring_begin - * to work properly to reinit the hw state while the gpu is - * still marked as reset-in-progress. Handle this with a flag. - */ - if (!error->reload_in_reset) - return -EAGAIN; + return -EAGAIN; } return 0; @@ -1223,7 +1214,6 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state) /** * __i915_wait_request - wait until execution of request has finished * @req: duh! - * @reset_counter: reset sequence associated with the given request * @interruptible: do an interruptible wait (normally yes) * @timeout: in - how long to wait (NULL forever); out - how much time remaining * @@ -1238,7 +1228,6 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state) * errno with remaining time filled in timeout argument. */ int __i915_wait_request(struct drm_i915_gem_request *req, - unsigned reset_counter, bool interruptible, s64 *timeout, struct intel_rps_client *rps) @@ -1289,12 +1278,12 @@ int __i915_wait_request(struct drm_i915_gem_request *req, /* We need to check whether any gpu reset happened in between * the caller grabbing the seqno and now ... */ - if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) { - /* ... but upgrade the -EAGAIN to an -EIO if the gpu - * is truely gone. */ - ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible); - if (ret == 0) - ret = -EAGAIN; + if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) { + /* As we do not requeue the request over a GPU reset, + * if one does occur we know that the request is + * effectively complete. 
+ */ + ret = 0; break; } @@ -1462,13 +1451,7 @@ i915_wait_request(struct drm_i915_gem_request *req) BUG_ON(!mutex_is_locked(&dev->struct_mutex)); - ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible); - if (ret) - return ret; - - ret = __i915_wait_request(req, - atomic_read(&dev_priv->gpu_error.reset_counter), - interruptible, NULL, NULL); + ret = __i915_wait_request(req, interruptible, NULL, NULL); if (ret) return ret; @@ -1543,7 +1526,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_request *requests[I915_NUM_RINGS]; - unsigned reset_counter; int ret, i, n = 0; BUG_ON(!mutex_is_locked(&dev->struct_mutex)); @@ -1552,12 +1534,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, if (!obj->active) return 0; - ret = i915_gem_check_wedge(&dev_priv->gpu_error, true); - if (ret) - return ret; - - reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); - if (readonly) { struct drm_i915_gem_request *req; @@ -1579,9 +1555,9 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, } mutex_unlock(&dev->struct_mutex); + ret = 0; for (i = 0; ret == 0 && i < n; i++) - ret = __i915_wait_request(requests[i], reset_counter, true, - NULL, rps); + ret = __i915_wait_request(requests[i], true, NULL, rps); mutex_lock(&dev->struct_mutex); for (i = 0; i < n; i++) { @@ -2685,6 +2661,7 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring, struct drm_i915_gem_request **req_out) { struct drm_i915_private *dev_priv = to_i915(ring->dev); + unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error); struct drm_i915_gem_request *req; int ret; @@ -2693,6 +2670,10 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring, *req_out = NULL; + ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible); + if (ret) + return ret; + req = 
kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL); if (req == NULL) return -ENOMEM; @@ -2704,6 +2685,7 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring, kref_init(&req->ref); req->i915 = dev_priv; req->ring = ring; + req->reset_counter = reset_counter; req->ctx = ctx; i915_gem_context_reference(req->ctx); @@ -3064,11 +3046,9 @@ retire: int i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_wait *args = data; struct drm_i915_gem_object *obj; struct drm_i915_gem_request *req[I915_NUM_RINGS]; - unsigned reset_counter; int i, n = 0; int ret; @@ -3102,7 +3082,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) } drm_gem_object_unreference(&obj->base); - reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); for (i = 0; i < I915_NUM_RINGS; i++) { if (obj->last_read_req[i] == NULL) @@ -3115,11 +3094,23 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) for (i = 0; i < n; i++) { if (ret == 0) - ret = __i915_wait_request(req[i], reset_counter, true, + ret = __i915_wait_request(req[i], true, args->timeout_ns > 0 ? &args->timeout_ns : NULL, file->driver_priv); + + /* If the GPU hung before this, report it. Ideally we only + * report if this request cannot be completed. Currently + * when we don't mark the guilty party and abort all + * requests on reset, so just mark all as EIO. 
+ */ + if (ret == 0 && + req[i]->reset_counter != i915_reset_counter(&req[i]->i915->gpu_error)) + ret = -EIO; + i915_gem_request_unreference__unlocked(req[i]); } + + return ret; out: @@ -3147,7 +3138,6 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj, if (!i915_semaphore_is_enabled(obj->base.dev)) { struct drm_i915_private *i915 = to_i915(obj->base.dev); ret = __i915_wait_request(from_req, - atomic_read(&i915->gpu_error.reset_counter), i915->mm.interruptible, NULL, &i915->rps.semaphores); @@ -4076,14 +4066,15 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) struct drm_i915_file_private *file_priv = file->driver_priv; unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES; struct drm_i915_gem_request *request, *target = NULL; - unsigned reset_counter; int ret; ret = i915_gem_wait_for_error(&dev_priv->gpu_error); if (ret) return ret; - ret = i915_gem_check_wedge(&dev_priv->gpu_error, false); + /* ABI: return -EIO if wedged */ + ret = i915_gem_check_wedge(i915_reset_counter(&dev_priv->gpu_error), + false); if (ret) return ret; @@ -4101,7 +4092,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) target = request; } - reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); if (target) i915_gem_request_reference(target); spin_unlock(&file_priv->mm.lock); @@ -4109,7 +4099,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) if (target == NULL) return 0; - ret = __i915_wait_request(target, reset_counter, true, NULL, NULL); + ret = __i915_wait_request(target, true, NULL, NULL); if (ret == 0) queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index e88d692583a5..3b2a8fbe0392 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2434,7 +2434,6 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv, static void i915_reset_and_wakeup(struct 
drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_gpu_error *error = &dev_priv->gpu_error; char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; @@ -2452,7 +2451,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev) * the reset in-progress bit is only ever set by code outside of this * work we don't need to worry about any other races. */ - if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { + if (i915_reset_in_progress(&dev_priv->gpu_error)) { DRM_DEBUG_DRIVER("resetting chip\n"); kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, reset_event); @@ -2480,25 +2479,9 @@ static void i915_reset_and_wakeup(struct drm_device *dev) intel_runtime_pm_put(dev_priv); - if (ret == 0) { - /* - * After all the gem state is reset, increment the reset - * counter and wake up everyone waiting for the reset to - * complete. - * - * Since unlock operations are a one-sided barrier only, - * we need to insert a barrier here to order any seqno - * updates before - * the counter increment. 
- */ - smp_mb__before_atomic(); - atomic_inc(&dev_priv->gpu_error.reset_counter); - + if (ret == 0) kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, reset_done_event); - } else { - atomic_or(I915_WEDGED, &error->reset_counter); - } /* * Note: The wake_up also serves as a memory barrier so that diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 0743337ac9e8..d77a17bb94d0 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3289,8 +3289,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); bool pending; - if (i915_reset_in_progress(&dev_priv->gpu_error) || - intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) + if (intel_crtc->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) return false; spin_lock_irq(&dev->event_lock); @@ -10879,8 +10878,7 @@ static bool page_flip_finished(struct intel_crtc *crtc) struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - if (i915_reset_in_progress(&dev_priv->gpu_error) || - crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) + if (crtc->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) return true; /* @@ -11322,7 +11320,6 @@ static void intel_mmio_flip_work_func(struct work_struct *work) if (mmio_flip->req) { WARN_ON(__i915_wait_request(mmio_flip->req, - mmio_flip->crtc->reset_counter, false, NULL, &mmio_flip->i915->rps.mmioflips)); i915_gem_request_unreference__unlocked(mmio_flip->req); @@ -13312,9 +13309,6 @@ static int intel_atomic_prepare_commit(struct drm_device *dev, ret = drm_atomic_helper_prepare_planes(dev, state); if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) { - u32 reset_counter; - - reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); mutex_unlock(&dev->struct_mutex); for_each_plane_in_state(state, plane, plane_state, i) 
{ @@ -13325,8 +13319,7 @@ static int intel_atomic_prepare_commit(struct drm_device *dev, continue; ret = __i915_wait_request(intel_plane_state->wait_req, - reset_counter, true, - NULL, NULL); + true, NULL, NULL); /* Swallow -EIO errors to allow updates during hw lockup. */ if (ret == -EIO) diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 4ebafab53f30..b40ffc3607e7 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -711,6 +711,14 @@ static int logical_ring_wait_for_space(struct drm_i915_gem_request *req, if (ret) return ret; + /* If the request was completed due to a GPU hang, we want to + * error out before we continue to emit more commands to the GPU. + */ + ret = i915_gem_check_wedge(i915_reset_counter(&req->i915->gpu_error), + req->i915->mm.interruptible); + if (ret) + return ret; + ringbuf->space = space; return 0; } @@ -825,11 +833,6 @@ int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords) WARN_ON(req == NULL); dev_priv = req->ring->dev->dev_private; - ret = i915_gem_check_wedge(&dev_priv->gpu_error, - dev_priv->mm.interruptible); - if (ret) - return ret; - ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t)); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 57d78f264b53..511efe556d73 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -2254,6 +2254,14 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n) if (ret) return ret; + /* If the request was completed due to a GPU hang, we want to + * error out before we continue to emit more commands to the GPU. 
+ */ + ret = i915_gem_check_wedge(i915_reset_counter(&to_i915(ring->dev)->gpu_error), + to_i915(ring->dev)->mm.interruptible); + if (ret) + return ret; + ringbuf->space = space; return 0; } @@ -2286,7 +2294,6 @@ int intel_ring_idle(struct intel_engine_cs *ring) /* Make sure we do not trigger any retires */ return __i915_wait_request(req, - atomic_read(&to_i915(ring->dev)->gpu_error.reset_counter), to_i915(ring->dev)->mm.interruptible, NULL, NULL); } @@ -2417,11 +2424,6 @@ int intel_ring_begin(struct drm_i915_gem_request *req, ring = req->ring; dev_priv = ring->dev->dev_private; - ret = i915_gem_check_wedge(&dev_priv->gpu_error, - dev_priv->mm.interruptible); - if (ret) - return ret; - ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t)); if (ret) return ret;
Instead of querying the reset counter before every access to the ring, query it the first time we touch the ring, and do a final compare when submitting the request. For correctness, we then need to sanitize how the reset_counter is incremented to prevent broken submission and waiting across resets, in the process fixing the persistent -EIO we still see today on failed waits. v2: Rebase v3: Now with added testcase v4: Rebase Testcase: igt/gem_eio Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> --- drivers/gpu/drm/i915/i915_debugfs.c | 2 +- drivers/gpu/drm/i915/i915_drv.c | 32 +++++++----- drivers/gpu/drm/i915/i915_drv.h | 39 ++++++++++---- drivers/gpu/drm/i915/i915_gem.c | 90 +++++++++++++++------------------ drivers/gpu/drm/i915/i915_irq.c | 21 +------- drivers/gpu/drm/i915/intel_display.c | 13 ++--- drivers/gpu/drm/i915/intel_lrc.c | 13 +++-- drivers/gpu/drm/i915/intel_ringbuffer.c | 14 ++--- 8 files changed, 111 insertions(+), 113 deletions(-)