From patchwork Fri Nov 20 15:31:23 2015
X-Patchwork-Submitter: Chris Wilson
X-Patchwork-Id: 7669191
From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Date: Fri, 20 Nov 2015 15:31:23 +0000
Message-Id: <1448033483-3884-1-git-send-email-chris@chris-wilson.co.uk>
In-Reply-To: <20151120144153.GL4437@intel.com>
References: <20151120144153.GL4437@intel.com>
Subject: [Intel-gfx] [PATCH v3] drm/i915: Eliminate vmap overhead for cmd parser

With a little complexity to handle cmds straddling page boundaries, we
can completely avoid having to vmap the batch and the shadow batch
objects whilst running the command parser.

On ivb i7-3720MQ:

  x11perf -dot: before 54.3M, after 53.2M (max 203M)
  glxgears:     before 7110 fps, after 7300 fps (max 7860 fps)

Before:
  Time to blt 16384 bytes x    1: 12.400µs, 1.2GiB/s
  Time to blt 16384 bytes x 4096:  3.055µs, 5.0GiB/s
After:
  Time to blt 16384 bytes x    1:  8.600µs, 1.8GiB/s
  Time to blt 16384 bytes x 4096:  2.456µs, 6.2GiB/s

Removing the vmap is mostly a win; the exception is batches larger than
a page, where the extra complexity costs us a little (we lose the single
cache-efficient large copy and have to handle commands crossing page
boundaries).
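Schematically, the new strategy stages the batch through a small bounce
buffer one page at a time, carrying any command that straddles a page
boundary over to the next pass. The following is only an illustrative
userspace sketch, not the driver code: cmd_len(), validate(), PAGE_SZ
and MAX_CMD are toy stand-ins for find_cmd()/check_cmd(), PAGE_SIZE and
MAX_PARTIAL.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define PAGE_SZ 4096
#define MAX_CMD  256    /* byte budget for a straddling command */

static size_t cmd_len(const uint32_t *cmd)
{
        return 4 * ((*cmd & 0xff) + 2); /* toy: low byte + bias of 2 */
}

static int validate(const uint32_t *cmd, size_t len)
{
        (void)cmd; (void)len;
        return 1;                       /* accept everything in the toy */
}

int parse(const uint8_t *batch, size_t batch_len)
{
        uint8_t tmp[PAGE_SZ + MAX_CMD]; /* bounce buffer, cf. kmalloc'ed tmp */
        size_t consumed = 0, partial = 0;

        if (batch_len & 3)
                return -1;              /* commands form a dword stream */

        while (consumed < batch_len) {
                size_t this = batch_len - consumed;
                uint8_t *end, *cmd = tmp;

                if (this > PAGE_SZ)
                        this = PAGE_SZ;

                /* stage one page after any carried-over partial command */
                memcpy(tmp + partial, batch + consumed, this);
                end = tmp + partial + this;
                partial = 0;

                while (cmd < end) {
                        size_t len = cmd_len((const uint32_t *)cmd);

                        if (len > MAX_CMD)
                                return -1;      /* cf. the MAX_PARTIAL check */
                        if (cmd + len > end) {  /* straddles the page */
                                partial = end - cmd;
                                break;
                        }
                        if (!validate((const uint32_t *)cmd, len))
                                return -1;
                        cmd += len;
                }

                /* ...here the driver copies tmp[0..cmd-tmp) to the shadow... */

                if (partial)    /* rotate the unvalidated tail to the front */
                        memmove(tmp, cmd, partial);
                consumed += this;
        }
        return partial ? -1 : 0;        /* truncated trailing command? */
}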
v2: Reorder so that we still check for OACONTROL being left set at the
end of the batch.
v3: Stage the copy through a temporary page so that we only copy into
dst commands that have been verified.
v4: Move the batch offset/length validation to execbuf.

Signed-off-by: Chris Wilson
Cc: Ville Syrjälä
---
 drivers/gpu/drm/i915/i915_cmd_parser.c | 260 ++++++++++++++++-----------------
 1 file changed, 129 insertions(+), 131 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 814d894ed925..86910e17505a 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -860,100 +860,6 @@ find_reg(const struct drm_i915_reg_descriptor *table,
 	return NULL;
 }
 
-static u32 *vmap_batch(struct drm_i915_gem_object *obj,
-		       unsigned start, unsigned len)
-{
-	int i;
-	void *addr = NULL;
-	struct sg_page_iter sg_iter;
-	int first_page = start >> PAGE_SHIFT;
-	int last_page = (len + start + 4095) >> PAGE_SHIFT;
-	int npages = last_page - first_page;
-	struct page **pages;
-
-	pages = drm_malloc_ab(npages, sizeof(*pages));
-	if (pages == NULL) {
-		DRM_DEBUG_DRIVER("Failed to get space for pages\n");
-		goto finish;
-	}
-
-	i = 0;
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first_page) {
-		pages[i++] = sg_page_iter_page(&sg_iter);
-		if (i == npages)
-			break;
-	}
-
-	addr = vmap(pages, i, 0, PAGE_KERNEL);
-	if (addr == NULL) {
-		DRM_DEBUG_DRIVER("Failed to vmap pages\n");
-		goto finish;
-	}
-
-finish:
-	if (pages)
-		drm_free_large(pages);
-	return (u32*)addr;
-}
-
-/* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
-static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
-		       struct drm_i915_gem_object *src_obj,
-		       u32 batch_start_offset,
-		       u32 batch_len)
-{
-	int needs_clflush = 0;
-	void *src_base, *src;
-	void *dst = NULL;
-	int ret;
-
-	if (batch_len > dest_obj->base.size ||
-	    batch_len + batch_start_offset > src_obj->base.size)
-		return ERR_PTR(-E2BIG);
-
-	if (WARN_ON(dest_obj->pages_pin_count == 0))
-		return ERR_PTR(-ENODEV);
-
-	ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
-	if (ret) {
-		DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n");
-		return ERR_PTR(ret);
-	}
-
-	src_base = vmap_batch(src_obj, batch_start_offset, batch_len);
-	if (!src_base) {
-		DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
-		ret = -ENOMEM;
-		goto unpin_src;
-	}
-
-	ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
-	if (ret) {
-		DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n");
-		goto unmap_src;
-	}
-
-	dst = vmap_batch(dest_obj, 0, batch_len);
-	if (!dst) {
-		DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
-		ret = -ENOMEM;
-		goto unmap_src;
-	}
-
-	src = src_base + offset_in_page(batch_start_offset);
-	if (needs_clflush)
-		drm_clflush_virt_range(src, batch_len);
-
-	memcpy(dst, src, batch_len);
-
-unmap_src:
-	vunmap(src_base);
-unpin_src:
-	i915_gem_object_unpin_pages(src_obj);
-
-	return ret ? ERR_PTR(ret) : dst;
-}
-
 /**
  * i915_needs_cmd_parser() - should a given ring use software command parsing?
  * @ring: the ring in question
@@ -1097,6 +1003,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
 }
 
 #define LENGTH_BIAS 2
+#define MAX_PARTIAL 256
 
 /**
  * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
@@ -1120,39 +1027,93 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
 		    u32 batch_len,
 		    bool is_master)
 {
-	u32 *cmd, *batch_base, *batch_end;
+	const struct drm_i915_cmd_descriptor *desc;
+	unsigned dst_iter, src_iter;
+	int needs_clflush = 0;
+	struct get_page rewind;
+	void *src, *dst, *tmp;
+	u32 partial, length;
+	unsigned in, out;
 	struct drm_i915_cmd_descriptor default_desc = { 0 };
 	bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
-	int ret = 0;
+	int ret;
 
-	batch_base = copy_batch(shadow_batch_obj, batch_obj,
-				batch_start_offset, batch_len);
-	if (IS_ERR(batch_base)) {
-		DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
-		return PTR_ERR(batch_base);
+	if (WARN_ON(shadow_batch_obj->pages_pin_count == 0))
+		return -ENODEV;
+
+	ret = i915_gem_obj_prepare_shmem_read(batch_obj, &needs_clflush);
+	if (ret) {
+		DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n");
+		return ret;
 	}
 
-	/*
-	 * We use the batch length as size because the shadow object is as
-	 * large or larger and copy_batch() will write MI_NOPs to the extra
-	 * space. Parsing should be faster in some cases this way.
+	ret = i915_gem_object_set_to_cpu_domain(shadow_batch_obj, true);
+	if (ret) {
+		DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n");
+		goto unpin;
+	}
+
+	tmp = kmalloc(PAGE_SIZE + MAX_PARTIAL, GFP_TEMPORARY);
+	if (tmp == NULL) {
+		ret = -ENOMEM;
+		goto unpin;
+	}
+
+	/* Keep the original cached iterator around as that is likely
+	 * to be more useful in future ioctls.
 	 */
-	batch_end = batch_base + (batch_len / sizeof(*batch_end));
+	rewind = batch_obj->get_page;
 
-	cmd = batch_base;
-	while (cmd < batch_end) {
-		const struct drm_i915_cmd_descriptor *desc;
-		u32 length;
+	ret = -EINVAL;
+
+	dst_iter = 0;
+	dst = kmap_atomic(i915_gem_object_get_page(shadow_batch_obj, dst_iter));
+	out = 0;
+
+	in = offset_in_page(batch_start_offset);
+	partial = 0;
+	for (src_iter = batch_start_offset >> PAGE_SHIFT;
+	     src_iter < batch_obj->base.size >> PAGE_SHIFT;
+	     src_iter++) {
+		u32 *cmd, *page_end, *batch_end;
+		u32 this;
+
+		this = batch_len;
+		if (this > PAGE_SIZE - in)
+			this = PAGE_SIZE - in;
 
-		if (*cmd == MI_BATCH_BUFFER_END)
+		src = kmap_atomic(i915_gem_object_get_page(batch_obj, src_iter));
+		if (needs_clflush)
+			drm_clflush_virt_range(src + in, this);
+
+		if (this == PAGE_SIZE && partial == 0)
+			copy_page(tmp, src);
+		else
+			memcpy(tmp+partial, src+in, this);
+
+		cmd = tmp;
+		page_end = tmp + this + partial;
+		batch_end = tmp + batch_len + partial;
+		partial = 0;
+
+		do {
+			if (*cmd == MI_BATCH_BUFFER_END) {
+				if (oacontrol_set) {
+					DRM_DEBUG_DRIVER("CMD: batch set OACONTROL but did not clear it\n");
+					goto unmap;
+				}
+
+				cmd++; /* copy this cmd to dst */
+				batch_len = this; /* no more src */
+				ret = 0;
 				break;
+			}
 
 			desc = find_cmd(ring, *cmd, &default_desc);
 			if (!desc) {
 				DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
 						 *cmd);
-				ret = -EINVAL;
-				break;
+				goto unmap;
 			}
 
 			/*
@@ -1162,7 +1123,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
 			 */
 			if (desc->cmd.value == MI_BATCH_BUFFER_START) {
 				ret = -EACCES;
-				break;
+				goto unmap;
 			}
 
 			if (desc->flags & CMD_DESC_FIXED)
@@ -1170,36 +1131,73 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
 			else
 				length = ((*cmd & desc->length.mask) + LENGTH_BIAS);
 
-		if ((batch_end - cmd) < length) {
+			if (unlikely(cmd + length > page_end)) {
+				if (unlikely(cmd + length > batch_end)) {
 					DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
-					 *cmd,
-					 length,
-					 batch_end - cmd);
-			ret = -EINVAL;
-			break;
+							 *cmd, length, batch_end - cmd);
+					goto unmap;
 				}
 
-		if (!check_cmd(ring, desc, cmd, length, is_master,
-			       &oacontrol_set)) {
-			ret = -EINVAL;
+				if (WARN_ON(4*length > MAX_PARTIAL)) {
+					ret = -ENODEV;
+					goto unmap;
+				}
+
+				partial = 4*(page_end - cmd);
 				break;
 			}
 
+			if (!check_cmd(ring, desc, cmd, length, is_master,
+				       &oacontrol_set))
+				goto unmap;
+
 			cmd += length;
-	}
+		} while (cmd < page_end);
 
-	if (oacontrol_set) {
-		DRM_DEBUG_DRIVER("CMD: batch set OACONTROL but did not clear it\n");
-		ret = -EINVAL;
-	}
+		/* Copy the validated page to the GPU batch */
+		{
+			/* exclude partial cmd, we copy it next time */
+			unsigned dst_length = (void *)cmd - tmp;
+			in = 0;
+			do {
+				int len;
 
-	if (cmd >= batch_end) {
-		DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
-		ret = -EINVAL;
-	}
+				if (out == PAGE_SIZE) {
+					kunmap_atomic(dst);
+					dst = kmap_atomic(i915_gem_object_get_page(shadow_batch_obj, ++dst_iter));
+					out = 0;
+				}
 
-	vunmap(batch_base);
+				len = dst_length;
+				if (len + out > PAGE_SIZE)
+					len = PAGE_SIZE - out;
+				if (len == PAGE_SIZE)
+					copy_page(dst, tmp + in);
+				else
+					memcpy(dst + out, tmp + in, len);
+				in += len;
+				out += len;
+				dst_length -= len;
+			} while (dst_length);
+		}
 
+		batch_len -= this;
+		if (batch_len == 0)
+			break;
+
+		if (partial)
+			memmove(tmp, cmd, partial);
+
+		kunmap_atomic(src);
+		in = 0;
+	}
+unmap:
+	kunmap_atomic(src);
+	kunmap_atomic(dst);
+	batch_obj->get_page = rewind;
+	kfree(tmp);
+unpin:
+	i915_gem_object_unpin_pages(batch_obj);
 	return ret;
 }
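For reference, the "Copy the validated page to the GPU batch" block
above boils down to streaming bytes into a page-granular destination.
A minimal userspace sketch of that loop, under invented names: shadow[]
and get_dst_page() stand in for the shadow batch pages and
kmap_atomic(), and *out is the intra-page write cursor carried across
calls, exactly like out in the patch.

#include <stddef.h>
#include <string.h>

#define PAGE_SZ 4096

static unsigned char shadow[16 * PAGE_SZ];      /* toy shadow batch */

static void *get_dst_page(unsigned idx)
{
        return shadow + (size_t)idx * PAGE_SZ;
}

void copy_to_pages(const void *src, size_t len,
                   unsigned *dst_iter, size_t *out)
{
        size_t in = 0;

        while (len) {
                size_t n;

                if (*out == PAGE_SZ) {          /* current dst page is full */
                        ++*dst_iter;            /* cf. kunmap/kmap of next page */
                        *out = 0;
                }

                n = len;
                if (n > PAGE_SZ - *out)         /* clamp to space left in page */
                        n = PAGE_SZ - *out;

                memcpy((unsigned char *)get_dst_page(*dst_iter) + *out,
                       (const unsigned char *)src + in, n);
                in += n;
                *out += n;
                len -= n;
        }
}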