@@ -1721,10 +1721,16 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
continue;
if (ctx_obj) {
- struct page *page = i915_gem_object_get_page(ctx_obj, 1);
- uint32_t *reg_state = kmap_atomic(page);
+ struct page *page;
+ uint32_t *reg_state;
int j;
+ if (i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0))
+ continue;
+
+ page = i915_gem_object_get_page(ctx_obj, 1);
+ reg_state = kmap_atomic(page);
+
seq_printf(m, "CONTEXT: %s %u\n", ring->name,
intel_execlists_ctx_id(ctx_obj));
@@ -1736,6 +1742,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
}
kunmap_atomic(reg_state);
+ i915_gem_object_ggtt_unpin(ctx_obj);
+
seq_putc(m, '\n');
}
}
@@ -628,6 +628,7 @@ struct intel_context {
struct {
struct drm_i915_gem_object *state;
struct intel_ringbuffer *ringbuf;
+ atomic_t unpin_count;
} engine[I915_NUM_RINGS];
struct list_head link;
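
The new per-engine unpin_count is what makes on-demand pinning safe: only the 0 -> 1 transition may pin the context backing object and only the 1 -> 0 transition may unpin it, with the global default context excluded because it has to stay pinned for the lifetime of the driver. The call sites below open-code this rule; a minimal sketch of the same contract centralized in a helper pair (hypothetical names, not part of this patch) could look like:

/* Hypothetical helpers; the patch open-codes this logic at each site. */
static int intel_lr_context_pin(struct intel_engine_cs *ring,
				struct intel_context *ctx)
{
	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
	int ret;

	/* Only the first user pins; the default context was pinned at
	 * creation and is never unpinned, so skip it here as well. */
	if (atomic_inc_return(&ctx->engine[ring->id].unpin_count) == 1 &&
	    ctx != ring->default_context) {
		ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
		if (ret)
			atomic_dec(&ctx->engine[ring->id].unpin_count);
		return ret;
	}

	return 0;
}

static void intel_lr_context_unpin(struct intel_engine_cs *ring,
				   struct intel_context *ctx)
{
	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;

	/* Only the last user unpins, and never the default context. */
	if (ctx_obj &&
	    atomic_dec_return(&ctx->engine[ring->id].unpin_count) == 0 &&
	    ctx != ring->default_context)
		i915_gem_object_ggtt_unpin(ctx_obj);
}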
@@ -2491,12 +2491,23 @@ static void i915_set_reset_status(struct drm_i915_private *dev_priv,
static void i915_gem_free_request(struct drm_i915_gem_request *request)
{
+ struct intel_context *ctx = request->ctx;
+
list_del(&request->list);
i915_gem_request_remove_from_client(request);
- if (request->ctx)
- i915_gem_context_unreference(request->ctx);
+ if (ctx) {
+ struct intel_engine_cs *ring = request->ring;
+ struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+ atomic_t *unpin_count = &ctx->engine[ring->id].unpin_count;
+ if (ctx_obj) {
+ if (atomic_dec_return(unpin_count) == 0 &&
+ ctx != ring->default_context)
+ i915_gem_object_ggtt_unpin(ctx_obj);
+ }
+ i915_gem_context_unreference(ctx);
+ }
kfree(request);
}
@@ -2551,6 +2562,23 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
}
/*
+ * Clear the execlists queue before freeing the requests, as those
+ * are the ones that keep the context and ringbuffer backing objects
+ * pinned in place.
+ */
+ while (!list_empty(&ring->execlist_queue)) {
+ struct intel_ctx_submit_request *submit_req;
+
+ submit_req = list_first_entry(&ring->execlist_queue,
+ struct intel_ctx_submit_request,
+ execlist_link);
+ list_del(&submit_req->execlist_link);
+ intel_runtime_pm_put(dev_priv);
+ i915_gem_context_unreference(submit_req->ctx);
+ kfree(submit_req);
+ }
+
+ /*
* We must free the requests after all the corresponding objects have
* been moved off active lists. Which is the same order as the normal
* retire_requests function does. This is important if object hold
@@ -2567,18 +2595,6 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
i915_gem_free_request(request);
}
- while (!list_empty(&ring->execlist_queue)) {
- struct intel_ctx_submit_request *submit_req;
-
- submit_req = list_first_entry(&ring->execlist_queue,
- struct intel_ctx_submit_request,
- execlist_link);
- list_del(&submit_req->execlist_link);
- intel_runtime_pm_put(dev_priv);
- i915_gem_context_unreference(submit_req->ctx);
- kfree(submit_req);
- }
-
/* These may not have been flushed before the reset, do so now */
kfree(ring->preallocated_lazy_request);
ring->preallocated_lazy_request = NULL;
@@ -139,8 +139,6 @@
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
-#define GEN8_LR_CONTEXT_ALIGN 4096
-
#define RING_EXECLIST_QFULL (1 << 0x2)
#define RING_EXECLIST1_VALID (1 << 0x3)
#define RING_EXECLIST0_VALID (1 << 0x4)
@@ -767,16 +765,31 @@ void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
+ int ret;
+
if (ring->outstanding_lazy_seqno)
return 0;
if (ring->preallocated_lazy_request == NULL) {
struct drm_i915_gem_request *request;
+ struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+ atomic_t *unpin_count = &ctx->engine[ring->id].unpin_count;
request = kmalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL)
return -ENOMEM;
+ if (atomic_inc_return(unpin_count) == 1 &&
+ ctx != ring->default_context) {
+ ret = i915_gem_obj_ggtt_pin(ctx_obj,
+ GEN8_LR_CONTEXT_ALIGN, 0);
+ if (ret) {
+ atomic_dec(unpin_count);
+ kfree(request);
+ return ret;
+ }
+ }
+
/* Hold a reference to the context this request belongs to
* (we will need it when the time comes to emit/retire the
* request).
@@ -1610,12 +1623,15 @@ void intel_lr_context_free(struct intel_context *ctx)
for (i = 0; i < I915_NUM_RINGS; i++) {
struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
- struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
if (ctx_obj) {
+ struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
+ struct intel_engine_cs *ring = ringbuf->ring;
+
intel_destroy_ringbuffer_obj(ringbuf);
kfree(ringbuf);
- i915_gem_object_ggtt_unpin(ctx_obj);
+ if (ctx == ring->default_context)
+ i915_gem_object_ggtt_unpin(ctx_obj);
drm_gem_object_unreference(&ctx_obj->base);
}
}
@@ -1658,6 +1674,7 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
int intel_lr_context_deferred_create(struct intel_context *ctx,
struct intel_engine_cs *ring)
{
+ const bool is_global_default_ctx = (ctx == ring->default_context);
struct drm_device *dev = ring->dev;
struct drm_i915_gem_object *ctx_obj;
uint32_t context_size;
@@ -1677,18 +1694,21 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
return ret;
}
- ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
- if (ret) {
- DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret);
- drm_gem_object_unreference(&ctx_obj->base);
- return ret;
+ if (is_global_default_ctx) {
+ ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret);
+ drm_gem_object_unreference(&ctx_obj->base);
+ return ret;
+ }
}
ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
if (!ringbuf) {
DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
ring->name);
- i915_gem_object_ggtt_unpin(ctx_obj);
+ if (is_global_default_ctx)
+ i915_gem_object_ggtt_unpin(ctx_obj);
drm_gem_object_unreference(&ctx_obj->base);
ret = -ENOMEM;
return ret;
@@ -1744,7 +1764,8 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
error:
kfree(ringbuf);
- i915_gem_object_ggtt_unpin(ctx_obj);
+ if (is_global_default_ctx)
+ i915_gem_object_ggtt_unpin(ctx_obj);
drm_gem_object_unreference(&ctx_obj->base);
return ret;
}
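
Taken together, the lifecycle of a non-default context under this scheme is roughly the following (illustrative sequence only; locking and error handling elided):

intel_lr_context_deferred_create(ctx, ring); /* allocate state; no GGTT pin */
logical_ring_alloc_seqno(ring, ctx);         /* first request: count 0 -> 1, pin */
/* ... further requests only bump unpin_count ... */
i915_gem_free_request(request);              /* last request: count 1 -> 0, unpin */
intel_lr_context_free(ctx);                  /* free; the explicit unpin happens
                                              * only for the global default context */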
@@ -24,6 +24,8 @@
#ifndef _INTEL_LRC_H_
#define _INTEL_LRC_H_
+#define GEN8_LR_CONTEXT_ALIGN 4096
+
/* Execlists regs */
#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
#define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234)