--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -449,8 +449,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
cleanup_gem:
mutex_lock(&dev->struct_mutex);
- i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
+ i915_gem_cleanup_engines(dev);
mutex_unlock(&dev->struct_mutex);
cleanup_irq:
intel_guc_ucode_fini(dev);
@@ -1188,8 +1188,8 @@ int i915_driver_unload(struct drm_device *dev)
intel_guc_ucode_fini(dev);
mutex_lock(&dev->struct_mutex);
- i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
+ i915_gem_cleanup_engines(dev);
mutex_unlock(&dev->struct_mutex);
intel_fbc_cleanup_cfb(dev_priv);
i915_gem_cleanup_stolen(dev);
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3018,7 +3018,7 @@ int i915_gem_init_rings(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
-void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+void i915_gem_cleanup_engines(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
void __i915_add_request(struct drm_i915_gem_request *req,
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4823,7 +4823,7 @@ i915_gem_init_hw(struct drm_device *dev)
ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret) {
- i915_gem_cleanup_ringbuffer(dev);
+ i915_gem_cleanup_engines(dev);
goto out;
}
@@ -4836,7 +4836,7 @@ i915_gem_init_hw(struct drm_device *dev)
if (ret && ret != -EIO) {
DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
i915_gem_request_cancel(req);
- i915_gem_cleanup_ringbuffer(dev);
+ i915_gem_cleanup_engines(dev);
goto out;
}
@@ -4844,7 +4844,7 @@ i915_gem_init_hw(struct drm_device *dev)
if (ret && ret != -EIO) {
DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
i915_gem_request_cancel(req);
- i915_gem_cleanup_ringbuffer(dev);
+ i915_gem_cleanup_engines(dev);
goto out;
}
@@ -4922,7 +4922,7 @@ out_unlock:
}
void
-i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+i915_gem_cleanup_engines(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
@@ -4931,13 +4931,14 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
for_each_ring(ring, dev_priv, i)
dev_priv->gt.cleanup_ring(ring);
- if (i915.enable_execlists)
- /*
- * Neither the BIOS, ourselves or any other kernel
- * expects the system to be in execlists mode on startup,
- * so we need to reset the GPU back to legacy mode.
- */
- intel_gpu_reset(dev);
+ if (i915.enable_execlists) {
+ /*
+ * Neither the BIOS, ourselves or any other kernel
+ * expects the system to be in execlists mode on startup,
+ * so we need to reset the GPU back to legacy mode.
+ */
+ intel_gpu_reset(dev);
+ }
}
static void
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -226,9 +226,8 @@ enum {
#define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
static int intel_lr_context_pin(struct drm_i915_gem_request *rq);
-static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
- struct drm_i915_gem_object *default_ctx_obj);
-
+static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring);
+static void lrc_teardown_hardware_status_page(struct intel_engine_cs *ring);
/**
* intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
@@ -1473,8 +1472,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
u8 next_context_status_buffer_hw;
- lrc_setup_hardware_status_page(ring,
- ring->default_context->engine[ring->id].state);
+ lrc_setup_hardware_status_page(ring);
I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
@@ -1899,10 +1897,9 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
i915_cmd_parser_fini_ring(ring);
i915_gem_batch_pool_fini(&ring->batch_pool);
- if (ring->status_page.obj) {
- kunmap(sg_page(ring->status_page.obj->pages->sgl));
- ring->status_page.obj = NULL;
- }
+ /* Status page should have gone already */
+ WARN_ON(ring->status_page.page_addr);
+ WARN_ON(ring->status_page.obj);
lrc_destroy_wa_ctx_obj(ring);
ring->dev = NULL;
@@ -2372,6 +2369,11 @@ void intel_lr_context_free(struct intel_context *ctx)
continue;
if (ctx->is_global_default) {
+ /*
+ * The HWSP is part of the default context
+ * object in LRC mode.
+ */
+ lrc_teardown_hardware_status_page(ringbuf->ring);
intel_unpin_ringbuffer_obj(ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
}
@@ -2406,24 +2408,45 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
return ret;
}
-static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
- struct drm_i915_gem_object *default_ctx_obj)
+static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct intel_context *dctx = ring->default_context;
+ struct drm_i915_gem_object *dctx_obj = dctx->engine[ring->id].state;
+ u64 dctx_addr = i915_gem_obj_ggtt_offset(dctx_obj);
struct page *page;
- /* The HWSP is part of the default context object in LRC mode. */
- ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj)
- + LRC_PPHWSP_PN * PAGE_SIZE;
- page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN);
+ /*
+ * The HWSP is part of the default context object in LRC mode.
+ * Note that it doesn't contribute to the refcount!
+ */
+ page = i915_gem_object_get_page(dctx_obj, LRC_PPHWSP_PN);
ring->status_page.page_addr = kmap(page);
- ring->status_page.obj = default_ctx_obj;
+ ring->status_page.gfx_addr = dctx_addr + LRC_PPHWSP_PN * PAGE_SIZE;
+ ring->status_page.obj = dctx_obj;
I915_WRITE(RING_HWS_PGA(ring->mmio_base),
(u32)ring->status_page.gfx_addr);
POSTING_READ(RING_HWS_PGA(ring->mmio_base));
}
+/* This should be called before the default context is destroyed */
+static void lrc_teardown_hardware_status_page(struct intel_engine_cs *ring)
+{
+ struct drm_i915_gem_object *dctx_obj = ring->status_page.obj;
+ struct page *page;
+
+ WARN_ON(!dctx_obj);
+
+ if (ring->status_page.page_addr) {
+ page = i915_gem_object_get_page(dctx_obj, LRC_PPHWSP_PN);
+ kunmap(page);
+ ring->status_page.page_addr = NULL;
+ }
+
+ ring->status_page.obj = NULL;
+}
+
/**
* intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
* @ctx: LR context to create.
Swap the order of context & engine cleanup, so that it is now contexts,
then engines. This allows the context cleanup code to do things like
confirm that ring->dev->struct_mutex is locked without a NULL pointer
dereference. This came about as a result of the 'intel_ring_initialized()
must be simple and inline' patch now using ring->dev as an initialised
flag.

Rename the cleanup function to reflect what it actually does. Also clean
up some very annoying whitespace issues at the same time.

The previous code did a kunmap() on the wrong page, and didn't account
for the fact that the HWSP and the default context are at different
offsets within the same object.

v2: Also make the fix in i915_load_modeset_init, not just in
i915_driver_unload (Chris Wilson)

v3: Folded in Dave Gordon's fix for the HWSP kunmap issues.

v4: Rebased over Dave Gordon's various cleanups.

Signed-off-by: Nick Hoath <nicholas.hoath@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Gordon <david.s.gordon@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_dma.c  |  4 +--
 drivers/gpu/drm/i915/i915_drv.h  |  2 +-
 drivers/gpu/drm/i915/i915_gem.c  | 23 +++++++++--------
 drivers/gpu/drm/i915/intel_lrc.c | 55 ++++++++++++++++++++++++++++------------
 4 files changed, 54 insertions(+), 30 deletions(-)
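
For reviewers less familiar with the kmap()/kunmap() pairing rule, below
is a minimal userspace sketch of the bug class the v3 fix addresses.
Everything in it (fake_object, map_page(), unmap_page(), NUM_PAGES,
HWSP_PAGE_INDEX) is a hypothetical stand-in, not kernel API; the
invariant it demonstrates is that teardown must resolve the same page
that setup mapped. That is why lrc_teardown_hardware_status_page() calls
i915_gem_object_get_page(dctx_obj, LRC_PPHWSP_PN) again, rather than
reusing sg_page(obj->pages->sgl), which yields the object's first page,
not the HWSP page.

#include <assert.h>
#include <stdlib.h>

#define NUM_PAGES       8
#define HWSP_PAGE_INDEX 1               /* stand-in for LRC_PPHWSP_PN */

struct fake_object {
        void *pages[NUM_PAGES];         /* backing pages of a multi-page object */
        int mapped[NUM_PAGES];          /* tracks kmap()/kunmap()-style pairing */
};

static void *map_page(struct fake_object *obj, int n)  /* ~kmap() */
{
        obj->mapped[n]++;
        return obj->pages[n];
}

static void unmap_page(struct fake_object *obj, int n) /* ~kunmap() */
{
        assert(obj->mapped[n] > 0);     /* catches unmapping the wrong page */
        obj->mapped[n]--;
}

int main(void)
{
        struct fake_object obj = { { NULL }, { 0 } };
        int i;

        for (i = 0; i < NUM_PAGES; i++)
                obj.pages[i] = malloc(4096);

        /* Setup: the status page is NOT the object's first page. */
        map_page(&obj, HWSP_PAGE_INDEX);

        /*
         * Buggy teardown, analogous to the old cleanup path, would be
         * unmap_page(&obj, 0) and would trip the assert above.
         */

        /* Correct teardown: same index as setup. */
        unmap_page(&obj, HWSP_PAGE_INDEX);

        for (i = 0; i < NUM_PAGES; i++)
                free(obj.pages[i]);
        return 0;
}

Built with any C compiler, the buggy variant trips the assert while the
correct pairing runs clean; the same symmetry is what the new
setup/teardown pair enforces for the HWSP.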