@@ -472,25 +472,21 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
sizeof(desc) * client->ctx_index);
}
-/* Get valid workqueue item and return it back to offset */
-static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
+int i915_guc_wq_reserve_space(struct i915_guc_client *gc)
{
struct guc_process_desc *desc;
void *base;
u32 size = sizeof(struct guc_wq_item);
int ret = -ETIMEDOUT, timeout_counter = 200;
+ if (!gc)
+ return 0;
+
base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
desc = base + gc->proc_desc_offset;
while (timeout_counter-- > 0) {
if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
- *offset = gc->wq_tail;
-
- /* advance the tail for next workqueue item */
- gc->wq_tail += size;
- gc->wq_tail &= gc->wq_size - 1;
-
/* this will break the loop */
timeout_counter = 0;
ret = 0;
@@ -512,11 +508,12 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
struct guc_wq_item *wqi;
void *base;
u32 tail, wq_len, wq_off = 0;
- int ret;
- ret = guc_get_workqueue_space(gc, &wq_off);
- if (ret)
- return ret;
+ wq_off = gc->wq_tail;
+
+ /* advance the tail for next workqueue item */
+ gc->wq_tail += sizeof(struct guc_wq_item);
+ gc->wq_tail &= gc->wq_size - 1;
/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
* should not have the case where structure wqi is across page, neither
@@ -123,5 +123,6 @@ int i915_guc_submit(struct i915_guc_client *client,
struct drm_i915_gem_request *rq);
void i915_guc_submission_disable(struct drm_device *dev);
void i915_guc_submission_fini(struct drm_device *dev);
+int i915_guc_wq_reserve_space(struct i915_guc_client *client);
#endif
@@ -844,6 +844,8 @@ int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
{
+ int ret;
+
/*
* The first call merely notes the reserve request and is common for
* all back ends. The subsequent localised _begin() call actually
@@ -854,7 +856,13 @@ int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
*/
intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
- return intel_logical_ring_begin(request, 0);
+ ret = intel_logical_ring_begin(request, 0);
+ if (ret)
+ return ret;
+
+ ret = i915_guc_wq_reserve_space(request->i915->guc.execbuf_client);
+
+ return ret;
}
/**
From: Alex Dai <yu.dai@intel.com> Split GuC work queue space reservation from submission, and move the reservation to where ring space is reserved. The reason is that a failure inside intel_logical_ring_advance_and_submit cannot be handled there. If a timeout occurs, the driver can now return early in order to handle the error. --- drivers/gpu/drm/i915/i915_guc_submission.c | 21 +++++++++------------ drivers/gpu/drm/i915/intel_guc.h | 1 + drivers/gpu/drm/i915/intel_lrc.c | 10 +++++++++- 3 files changed, 19 insertions(+), 13 deletions(-)