@@ -2676,7 +2676,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
ret = __i915_wait_request(req[i], true,
args->timeout_ns > 0 ? &args->timeout_ns : NULL,
to_rps_client(file));
- i915_gem_request_unreference__unlocked(req[i]);
+ i915_gem_request_unreference(req[i]);
}
return ret;
@@ -3688,7 +3688,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
- i915_gem_request_unreference__unlocked(target);
+ i915_gem_request_unreference(target);
return ret;
}
@@ -73,6 +73,7 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring,
goto err;
kref_init(&req->ref);
+ INIT_LIST_HEAD(&req->list);
req->i915 = dev_priv;
req->ring = ring;
req->reset_counter = reset_counter;
@@ -117,13 +118,6 @@ err:
return ret;
}
-void i915_gem_request_cancel(struct drm_i915_gem_request *req)
-{
- intel_ring_reserved_space_cancel(req->ringbuf);
-
- i915_gem_request_unreference(req);
-}
-
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
struct drm_file *file)
{
@@ -168,6 +162,27 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
request->pid = NULL;
}
+static void __i915_gem_request_release(struct drm_i915_gem_request *request)
+{
+ list_del_init(&request->list);
+
+ i915_gem_request_remove_from_client(request);
+
+ if (i915.enable_execlists) {
+ if (request->ctx != request->ring->default_context)
+ intel_lr_context_unpin(request);
+ }
+ i915_gem_context_unreference(request->ctx);
+
+ i915_gem_request_unreference(request);
+}
+
+void i915_gem_request_cancel(struct drm_i915_gem_request *req)
+{
+ intel_ring_reserved_space_cancel(req->ringbuf);
+ __i915_gem_request_release(req);
+}
+
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
trace_i915_gem_request_retire(request);
@@ -181,11 +196,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
* completion order.
*/
request->ringbuf->last_retired_head = request->postfix;
-
- list_del_init(&request->list);
- i915_gem_request_remove_from_client(request);
-
- i915_gem_request_unreference(request);
+ __i915_gem_request_release(request);
}
void
@@ -492,22 +503,7 @@ i915_wait_request(struct drm_i915_gem_request *req)
void i915_gem_request_free(struct kref *req_ref)
{
- struct drm_i915_gem_request *req = container_of(req_ref,
- typeof(*req), ref);
- struct intel_context *ctx = req->ctx;
-
- if (req->file_priv)
- i915_gem_request_remove_from_client(req);
-
- if (ctx) {
- if (i915.enable_execlists) {
- if (ctx != req->ring->default_context)
- intel_lr_context_unpin(req);
- }
-
- i915_gem_context_unreference(ctx);
- }
-
+ struct drm_i915_gem_request *req =
+ container_of(req_ref, typeof(*req), ref);
kmem_cache_free(req->i915->requests, req);
}
-
@@ -155,23 +155,9 @@ i915_gem_request_reference(struct drm_i915_gem_request *req)
static inline void
i915_gem_request_unreference(struct drm_i915_gem_request *req)
{
- WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
kref_put(&req->ref, i915_gem_request_free);
}
-static inline void
-i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
-{
- struct drm_device *dev;
-
- if (!req)
- return;
-
- dev = req->ring->dev;
- if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
- mutex_unlock(&dev->struct_mutex);
-}
-
static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
struct drm_i915_gem_request *src)
{
@@ -324,7 +324,7 @@ static int intel_breadcrumbs_signaller(void *arg)
if (signal_complete(signal)) {
intel_engine_remove_wait(engine, &signal->wait);
- i915_gem_request_unreference__unlocked(signal->request);
+ i915_gem_request_unreference(signal->request);
spin_lock(&engine->breadcrumbs.lock);
rb_erase(&signal->node, &engine->breadcrumbs.signals);
@@ -11367,7 +11367,7 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
WARN_ON(__i915_wait_request(mmio_flip->req,
false, NULL,
&mmio_flip->i915->rps.mmioflips));
- i915_gem_request_unreference__unlocked(mmio_flip->req);
+ i915_gem_request_unreference(mmio_flip->req);
}
/* For framebuffer backed by dmabuf, wait for fence */
@@ -7196,7 +7196,7 @@ static void __intel_rps_boost_work(struct work_struct *work)
gen6_rps_boost(to_i915(req->ring->dev), NULL,
req->emitted_jiffies);
- i915_gem_request_unreference__unlocked(req);
+ i915_gem_request_unreference(req);
kfree(boost);
}
If we move the release of the GEM request (i.e. decoupling it from the
various lists used for client and context tracking) after it is complete
(either by the GPU retiring the request, or by the caller cancelling the
request), we can remove the requirement that the final unreference of
the GEM request need to be under the struct_mutex.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c          |  4 +--
 drivers/gpu/drm/i915/i915_gem_request.c  | 54 +++++++++++++++-----------------
 drivers/gpu/drm/i915/i915_gem_request.h  | 14 ---------
 drivers/gpu/drm/i915/intel_breadcrumbs.c |  2 +-
 drivers/gpu/drm/i915/intel_display.c     |  2 +-
 drivers/gpu/drm/i915/intel_pm.c          |  2 +-
 6 files changed, 30 insertions(+), 48 deletions(-)