@@ -2161,6 +2161,11 @@ struct drm_i915_gem_object {
struct work_struct *work;
} userptr;
};
+
+ /** Support for automatic CPU-side mapping of the object */
+ int (*mmap)(struct drm_i915_gem_object *obj, bool unmap);
+ /** Private data for the mmap hook (the owning ringbuffer in this patch) */
+ void *mappable;
};
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
@@ -3222,6 +3222,10 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
if (vma->pin_count)
return -EBUSY;
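+ /* Tear down any automatic CPU mapping before the object is unbound */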
+ if (obj->mmap)
+ obj->mmap(obj, true);
+
BUG_ON(obj->pages == NULL);
if (wait) {
@@ -3517,6 +3517,15 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
vma->bound |= bind_flags;
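+ /* Create any automatic CPU mapping now that the vma is bound */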
+ if (vma->obj->mmap) {
+ ret = vma->obj->mmap(vma->obj, false);
+ if (ret) {
+ i915_vma_unbind(vma);
+ return ret;
+ }
+ }
+
return 0;
}
@@ -719,6 +719,18 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
intel_logical_ring_advance(request->ringbuf);
+ /* Push the hw context onto the active list */
+ i915_vma_move_to_active(
+ i915_gem_obj_to_ggtt(
+ request->ctx->engine[ring->id].state),
+ request);
+
+ /* Push the ringbuf onto the active list */
+ i915_vma_move_to_active(
+ i915_gem_obj_to_ggtt(
+ request->ctx->engine[ring->id].ringbuf->obj),
+ request);
+
request->tail = request->ringbuf->tail;
if (intel_ring_stopped(ring))
@@ -987,10 +999,15 @@ static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
if (ret)
return ret;
- ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
+ ret = i915_gem_obj_ggtt_pin(ringbuf->obj, PAGE_SIZE,
+ PIN_MAPPABLE);
if (ret)
goto unpin_ctx_obj;
+ ret = i915_gem_object_set_to_gtt_domain(ringbuf->obj, true);
+ if (ret)
+ goto unpin_rb_obj;
+
ctx_obj->dirty = true;
/* Invalidate GuC TLB. */
@@ -999,6 +1016,8 @@ static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
return ret;
+unpin_rb_obj:
+ i915_gem_object_ggtt_unpin(ringbuf->obj);
unpin_ctx_obj:
i915_gem_object_ggtt_unpin(ctx_obj);
@@ -1033,7 +1052,7 @@ void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
if (ctx_obj) {
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
if (--rq->ctx->engine[ring->id].pin_count == 0) {
- intel_unpin_ringbuffer_obj(ringbuf);
+ i915_gem_object_ggtt_unpin(ringbuf->obj);
i915_gem_object_ggtt_unpin(ctx_obj);
}
}
@@ -2351,7 +2370,7 @@ void intel_lr_context_free(struct intel_context *ctx)
struct intel_engine_cs *ring = ringbuf->ring;
if (ctx == ring->default_context) {
- intel_unpin_ringbuffer_obj(ringbuf);
+ i915_gem_object_ggtt_unpin(ringbuf->obj);
i915_gem_object_ggtt_unpin(ctx_obj);
}
WARN_ON(ctx->engine[ring->id].pin_count);
@@ -2518,5 +2537,8 @@ void intel_lr_context_reset(struct drm_device *dev,
ringbuf->head = 0;
ringbuf->tail = 0;
+
+ i915_gem_object_ggtt_unpin(
+ ctx->engine[ring->id].state);
}
}
@@ -2002,38 +2002,37 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
return 0;
}
-void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
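+/*
+ * Map (unmap == false) or unmap (unmap == true) the CPU view of a
+ * ringbuffer object, driven from the obj->mmap hook in vma bind/unbind.
+ */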
+static int intel_mmap_ringbuffer_obj(struct drm_i915_gem_object *obj,
+ bool unmap)
{
- iounmap(ringbuf->virtual_start);
- ringbuf->virtual_start = NULL;
- i915_gem_object_ggtt_unpin(ringbuf->obj);
-}
-
-int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
- struct intel_ringbuffer *ringbuf)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_gem_object *obj = ringbuf->obj;
- int ret;
-
- ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
- if (ret)
- return ret;
-
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret) {
- i915_gem_object_ggtt_unpin(obj);
- return ret;
- }
-
- ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
- i915_gem_obj_ggtt_offset(obj), ringbuf->size);
- if (ringbuf->virtual_start == NULL) {
- i915_gem_object_ggtt_unpin(obj);
- return -EINVAL;
+ struct intel_ringbuffer *ringbuf = obj->mappable;
+
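+ /* Bind path: warn if a mapping already exists, otherwise create one */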
+ if (!unmap) {
+ struct drm_device *dev = ringbuf->ring->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ WARN_ON(ringbuf->virtual_start != NULL);
+ if (ringbuf->virtual_start == NULL) {
+ ringbuf->virtual_start = ioremap_wc(
+ dev_priv->gtt.mappable_base +
+ i915_gem_obj_ggtt_offset(obj),
+ ringbuf->size);
+ if (ringbuf->virtual_start == NULL)
+ return -EINVAL;
+ }
+ } else {
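+ /* Unbind path: drop the mapping only once the last pin is released */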
+ if (!i915_gem_obj_is_pinned(obj)) {
+ iounmap(ringbuf->virtual_start);
+ ringbuf->virtual_start = NULL;
+ }
}
-
- return 0;
+ return 0;
}
static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
@@ -2060,6 +2057,9 @@ static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
ringbuf->obj = obj;
+ obj->mmap = intel_mmap_ringbuffer_obj;
+ obj->mappable = ringbuf;
+
return 0;
}
@@ -2138,7 +2138,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto error;
}
- ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+ ret = i915_gem_obj_ggtt_pin(ringbuf->obj, PAGE_SIZE, PIN_MAPPABLE);
if (ret) {
DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
ring->name, ret);
@@ -2146,12 +2146,18 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto error;
}
+ ret = i915_gem_object_set_to_gtt_domain(ringbuf->obj, true);
+ if (ret)
+ goto error_unpin;
+
ret = i915_cmd_parser_init_ring(ring);
if (ret)
- goto error;
+ goto error_unpin;
return 0;
+error_unpin:
+ i915_gem_object_ggtt_unpin(ringbuf->obj);
error:
intel_ringbuffer_free(ringbuf);
ring->buffer = NULL;
@@ -2170,7 +2177,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
intel_stop_ring_buffer(ring);
WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
- intel_unpin_ringbuffer_obj(ring->buffer);
+ i915_gem_object_ggtt_unpin(ring->buffer->obj);
intel_ringbuffer_free(ring->buffer);
ring->buffer = NULL;
@@ -421,9 +421,6 @@ intel_write_status_page(struct intel_engine_cs *ring,
struct intel_ringbuffer *
intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
-int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
- struct intel_ringbuffer *ringbuf);
-void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
void intel_ringbuffer_free(struct intel_ringbuffer *ring);
void intel_stop_ring_buffer(struct intel_engine_cs *ring);
Shovel all context-related objects through the active queue and obj
management.

- Added callback in vma_(un)bind to add CPU (un)mapping at the same
  time if desired
- Inserted LRC hw context & ringbuf into the vma active list

Issue: VIZ-4277
Signed-off-by: Nick Hoath <nicholas.hoath@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h         |  4 ++
 drivers/gpu/drm/i915/i915_gem.c         |  3 ++
 drivers/gpu/drm/i915/i915_gem_gtt.c     |  8 ++++
 drivers/gpu/drm/i915/intel_lrc.c        | 28 +++++++++++--
 drivers/gpu/drm/i915/intel_ringbuffer.c | 71 ++++++++++++++++++---------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |  3 --
 6 files changed, 79 insertions(+), 38 deletions(-)
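
For reviewers: a minimal sketch of the contract the new obj->mmap hook
establishes, using only the fields added to struct drm_i915_gem_object
above. The example object (my_thing) and its map/unmap helpers are
hypothetical, purely to show how another user would opt in; the only
real user in this patch is the ringbuffer, via intel_mmap_ringbuffer_obj().

struct my_thing {
	struct drm_i915_gem_object *obj;
};

static int my_thing_map(struct my_thing *thing);	/* hypothetical helper */
static void my_thing_unmap(struct my_thing *thing);	/* hypothetical helper */

static int my_thing_mmap(struct drm_i915_gem_object *obj, bool unmap)
{
	struct my_thing *thing = obj->mappable;	/* private data, set below */

	if (!unmap) {
		/* Called from i915_vma_bind() after the vma is bound;
		 * a failure here fails the bind itself. */
		return my_thing_map(thing);
	}

	/* Called from __i915_vma_unbind(); keep the mapping while any
	 * pin on the object remains. */
	if (!i915_gem_obj_is_pinned(obj))
		my_thing_unmap(thing);

	return 0;
}

static void my_thing_register(struct my_thing *thing)
{
	thing->obj->mmap = my_thing_mmap;
	thing->obj->mappable = thing;
}

Note that the unmap side is invoked unconditionally from
__i915_vma_unbind(), so a hook must tolerate being called while other
pins on the object remain; hence the i915_gem_obj_is_pinned() check,
mirroring the ringbuffer implementation.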