From patchwork Fri Nov 20 12:43:44 2015
From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Date: Fri, 20 Nov 2015 12:43:44 +0000
Message-Id: <1448023432-10726-4-git-send-email-chris@chris-wilson.co.uk>
X-Mailer: git-send-email 2.6.2
In-Reply-To: <1448023432-10726-1-git-send-email-chris@chris-wilson.co.uk>
References: <1448023432-10726-1-git-send-email-chris@chris-wilson.co.uk>
Subject: [Intel-gfx] [PATCH 04/12] drm/i915: Unify intel_ring_begin()

Combine the near-identical implementations of intel_logical_ring_begin()
and intel_ring_begin() - the only difference is that the logical wait
has to check for a matching ring (which is assumed by legacy).
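
To illustrate the result, a minimal sketch of the caller pattern that both
legacy and execlists submission share once the entry points are unified.
emit_two_dwords() is a hypothetical caller invented for this example;
intel_ring_emit()/intel_ring_advance() are the ringbuffer-based helpers
this series converges on:

static int emit_two_dwords(struct drm_i915_gem_request *req, u32 x, u32 y)
{
	struct intel_ringbuffer *ringbuf = req->ringbuf;
	int ret;

	/* Reserve space for two dwords, waiting/wrapping as required. */
	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ringbuf, x);
	intel_ring_emit(ringbuf, y);
	intel_ring_advance(ringbuf);
	return 0;
}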
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/gpu/drm/i915/intel_lrc.c        | 149 ++------------------------------
 drivers/gpu/drm/i915/intel_lrc.h        |   1 -
 drivers/gpu/drm/i915/intel_mocs.c       |  12 +--
 drivers/gpu/drm/i915/intel_ringbuffer.c | 115 ++++++++++++------------
 4 files changed, 71 insertions(+), 206 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 0db23c474045..02f798e4c726 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -646,48 +646,6 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 	return 0;
 }

-static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
-				       int bytes)
-{
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
-	struct intel_engine_cs *ring = req->ring;
-	struct drm_i915_gem_request *target;
-	unsigned space;
-	int ret;
-
-	if (intel_ring_space(ringbuf) >= bytes)
-		return 0;
-
-	/* The whole point of reserving space is to not wait! */
-	WARN_ON(ringbuf->reserved_in_use);
-
-	list_for_each_entry(target, &ring->request_list, list) {
-		/*
-		 * The request queue is per-engine, so can contain requests
-		 * from multiple ringbuffers. Here, we must ignore any that
-		 * aren't from the ringbuffer we're considering.
-		 */
-		if (target->ringbuf != ringbuf)
-			continue;
-
-		/* Would completion of this request free enough space? */
-		space = __intel_ring_space(target->postfix, ringbuf->tail,
-					   ringbuf->size);
-		if (space >= bytes)
-			break;
-	}
-
-	if (WARN_ON(&target->list == &ring->request_list))
-		return -ENOSPC;
-
-	ret = i915_wait_request(target);
-	if (ret)
-		return ret;
-
-	ringbuf->space = space;
-	return 0;
-}
-
 /*
  * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
  * @request: Request to advance the logical ringbuffer of.
@@ -708,97 +666,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 	execlists_context_queue(request);
 }

-static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
-{
-	int rem = ringbuf->size - ringbuf->tail;
-	memset(ringbuf->virtual_start + ringbuf->tail, 0, rem);
-
-	ringbuf->tail = 0;
-	intel_ring_update_space(ringbuf);
-}
-
-static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
-{
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
-	int remain_usable = ringbuf->effective_size - ringbuf->tail;
-	int remain_actual = ringbuf->size - ringbuf->tail;
-	int ret, total_bytes, wait_bytes = 0;
-	bool need_wrap = false;
-
-	if (ringbuf->reserved_in_use)
-		total_bytes = bytes;
-	else
-		total_bytes = bytes + ringbuf->reserved_size;
-
-	if (unlikely(bytes > remain_usable)) {
-		/*
-		 * Not enough space for the basic request. So need to flush
-		 * out the remainder and then wait for base + reserved.
-		 */
-		wait_bytes = remain_actual + total_bytes;
-		need_wrap = true;
-	} else {
-		if (unlikely(total_bytes > remain_usable)) {
-			/*
-			 * The base request will fit but the reserved space
-			 * falls off the end. So only need to wait for the
-			 * reserved size after flushing out the remainder.
-			 */
-			wait_bytes = remain_actual + ringbuf->reserved_size;
-			need_wrap = true;
-		} else if (total_bytes > ringbuf->space) {
-			/* No wrapping required, just waiting. */
-			wait_bytes = total_bytes;
-		}
-	}
-
-	if (wait_bytes) {
-		ret = logical_ring_wait_for_space(req, wait_bytes);
-		if (unlikely(ret))
-			return ret;
-
-		if (need_wrap)
-			__wrap_ring_buffer(ringbuf);
-	}
-
-	return 0;
-}
-
-/**
- * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
- *
- * @request: The request to start some new work for
- * @ctx: Logical ring context whose ringbuffer is being prepared.
- * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
- *
- * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
- * be wrapped, or wait a bit for the tail to be updated). This function takes care of that
- * and also preallocates a request (every workload submission is still mediated through
- * requests, same as it did with legacy ringbuffer submission).
- *
- * Return: non-zero if the ringbuffer is not ready to be written to.
- */
-int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
-{
-	struct drm_i915_private *dev_priv;
-	int ret;
-
-	WARN_ON(req == NULL);
-	dev_priv = req->i915;
-
-	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
-				   dev_priv->mm.interruptible);
-	if (ret)
-		return ret;
-
-	ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
-	if (ret)
-		return ret;
-
-	req->ringbuf->space -= num_dwords * sizeof(uint32_t);
-	return 0;
-}
-
 int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
 {
 	/*
@@ -811,7 +678,7 @@ int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
 	 */
 	intel_ring_reserved_space_reserve(request->ringbuf,
 					  MIN_SPACE_FOR_ADD_REQUEST);
-	return intel_logical_ring_begin(request, 0);
+	return intel_ring_begin(request, 0);
 }

 /**
@@ -881,7 +748,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,

 	if (ring == &dev_priv->ring[RCS] &&
 	    instp_mode != dev_priv->relative_constants_mode) {
-		ret = intel_logical_ring_begin(params->request, 4);
+		ret = intel_ring_begin(params->request, 4);
 		if (ret)
 			return ret;

@@ -1035,7 +902,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;

-	ret = intel_logical_ring_begin(req, w->count * 2 + 2);
+	ret = intel_ring_begin(req, w->count * 2 + 2);
 	if (ret)
 		return ret;

@@ -1472,7 +1339,7 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
 	int i, ret;

-	ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2);
+	ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
 	if (ret)
 		return ret;

@@ -1516,7 +1383,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 		req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
 	}

-	ret = intel_logical_ring_begin(req, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;

@@ -1577,7 +1444,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
 	uint32_t cmd;
 	int ret;

-	ret = intel_logical_ring_begin(request, 4);
+	ret = intel_ring_begin(request, 4);
 	if (ret)
 		return ret;

@@ -1644,7 +1511,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 	vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 &&
 		      flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;

-	ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
+	ret = intel_ring_begin(request, vf_flush_wa ? 12 : 6);
 	if (ret)
 		return ret;

@@ -1690,7 +1557,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
 	 * used as a workaround for not being allowed to do lite
 	 * restore with HEAD==TAIL (WaIdleLiteRestore).
 	 */
-	ret = intel_logical_ring_begin(request, 8);
+	ret = intel_ring_begin(request, 8);
 	if (ret)
 		return ret;

diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 861668919e5a..5402eca78a07 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -42,7 +42,6 @@ int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
 void intel_logical_ring_stop(struct intel_engine_cs *ring);
 void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
 int intel_logical_rings_init(struct drm_device *dev);
-int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);

 int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);

diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 399a131a94b6..ac0a982bbf55 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -181,11 +181,9 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 	if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
 		return -ENODEV;

-	ret = intel_logical_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
-	if (ret) {
-		DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+	ret = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+	if (ret)
 		return ret;
-	}

 	intel_ring_emit(ringbuf,
 			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));

@@ -238,11 +236,9 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 	if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
 		return -ENODEV;

-	ret = intel_logical_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
-	if (ret) {
-		DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+	ret = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+	if (ret)
 		return ret;
-	}

 	intel_ring_emit(ringbuf,
 			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b3de8b1fde2f..fbee790ddaf0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2143,46 +2143,6 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 	ring->buffer = NULL;
 }

-static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
-{
-	struct intel_ringbuffer *ringbuf = ring->buffer;
-	struct drm_i915_gem_request *request;
-	unsigned space;
-	int ret;
-
-	if (intel_ring_space(ringbuf) >= n)
-		return 0;
-
-	/* The whole point of reserving space is to not wait! */
-	WARN_ON(ringbuf->reserved_in_use);
-
-	list_for_each_entry(request, &ring->request_list, list) {
-		space = __intel_ring_space(request->postfix, ringbuf->tail,
-					   ringbuf->size);
-		if (space >= n)
-			break;
-	}
-
-	if (WARN_ON(&request->list == &ring->request_list))
-		return -ENOSPC;
-
-	ret = i915_wait_request(request);
-	if (ret)
-		return ret;
-
-	ringbuf->space = space;
-	return 0;
-}
-
-static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
-{
-	int rem = ringbuf->size - ringbuf->tail;
-	memset(ringbuf->virtual_start + ringbuf->tail, 0, rem);
-
-	ringbuf->tail = 0;
-	intel_ring_update_space(ringbuf);
-}
-
 int intel_ring_idle(struct intel_engine_cs *ring)
 {
 	struct drm_i915_gem_request *req;

@@ -2270,9 +2230,59 @@ void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
 	ringbuf->reserved_in_use = false;
 }

-static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
+static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 {
-	struct intel_ringbuffer *ringbuf = ring->buffer;
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_engine_cs *ring = req->ring;
+	struct drm_i915_gem_request *target;
+	unsigned space;
+	int ret;
+
+	if (intel_ring_space(ringbuf) >= bytes)
+		return 0;
+
+	/* The whole point of reserving space is to not wait! */
+	WARN_ON(ringbuf->reserved_in_use);
+
+	list_for_each_entry(target, &ring->request_list, list) {
+		/*
+		 * The request queue is per-engine, so can contain requests
+		 * from multiple ringbuffers. Here, we must ignore any that
+		 * aren't from the ringbuffer we're considering.
+		 */
+		if (target->ringbuf != ringbuf)
+			continue;
+
+		/* Would completion of this request free enough space? */
+		space = __intel_ring_space(target->postfix, ringbuf->tail,
+					   ringbuf->size);
+		if (space >= bytes)
+			break;
+	}
+
+	if (WARN_ON(&target->list == &ring->request_list))
+		return -ENOSPC;
+
+	ret = i915_wait_request(target);
+	if (ret)
+		return ret;
+
+	ringbuf->space = space;
+	return 0;
+}
+
+static void ring_wrap(struct intel_ringbuffer *ringbuf)
+{
+	int rem = ringbuf->size - ringbuf->tail;
+	memset(ringbuf->virtual_start + ringbuf->tail, 0, rem);
+
+	ringbuf->tail = 0;
+	intel_ring_update_space(ringbuf);
+}
+
+static int ring_prepare(struct drm_i915_gem_request *req, int bytes)
+{
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
 	int remain_usable = ringbuf->effective_size - ringbuf->tail;
 	int remain_actual = ringbuf->size - ringbuf->tail;
 	int ret, total_bytes, wait_bytes = 0;

@@ -2306,38 +2316,31 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
 	}

 	if (wait_bytes) {
-		ret = ring_wait_for_space(ring, wait_bytes);
+		ret = wait_for_space(req, wait_bytes);
 		if (unlikely(ret))
 			return ret;

 		if (need_wrap)
-			__wrap_ring_buffer(ringbuf);
+			ring_wrap(ringbuf);
 	}

 	return 0;
 }

-int intel_ring_begin(struct drm_i915_gem_request *req,
-		     int num_dwords)
+int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
-	struct intel_engine_cs *ring;
-	struct drm_i915_private *dev_priv;
 	int ret;

-	WARN_ON(req == NULL);
-	ring = req->ring;
-	dev_priv = req->i915;
-
-	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
-				   dev_priv->mm.interruptible);
+	ret = i915_gem_check_wedge(&req->i915->gpu_error,
+				   req->i915->mm.interruptible);
 	if (ret)
 		return ret;

-	ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
+	ret = ring_prepare(req, num_dwords * sizeof(uint32_t));
 	if (ret)
 		return ret;

-	ring->buffer->space -= num_dwords * sizeof(uint32_t);
+	req->ringbuf->space -= num_dwords * sizeof(uint32_t);
 	return 0;
 }
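
As an aside for reviewers tracing the space accounting: the arithmetic that
__intel_ring_space() performs for wait_for_space() above can be modelled
stand-alone. The sketch below is illustrative only; RING_FREE_SPACE mirrors
the driver's practice of keeping a little headroom so that head == tail
always means an empty ring rather than a full one, and the sizes used in
main() are made up for the example.

#include <assert.h>

#define RING_FREE_SPACE 64	/* slack kept so head == tail means empty */

static int ring_space(int head, int tail, int size)
{
	int space = head - tail;	/* bytes from tail up to head */
	if (space <= 0)
		space += size;		/* head is behind tail: wrap around */
	return space - RING_FREE_SPACE;
}

int main(void)
{
	/* Empty ring: everything but the reserved slack is writable. */
	assert(ring_space(0, 0, 4096) == 4096 - RING_FREE_SPACE);
	/* Tail has advanced past head: free space wraps past the end. */
	assert(ring_space(256, 1024, 4096) == 4096 - 768 - RING_FREE_SPACE);
	return 0;
}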