From patchwork Mon Mar 6 23:54:14 2017
From: Matthew Auld <matthew.auld@intel.com>
To: intel-gfx@lists.freedesktop.org
Date: Mon, 6 Mar 2017 23:54:14 +0000
Message-Id: <20170306235414.23407-16-matthew.auld@intel.com>
X-Mailer: git-send-email 2.9.3
In-Reply-To: <20170306235414.23407-1-matthew.auld@intel.com>
References: <20170306235414.23407-1-matthew.auld@intel.com>
Subject: [Intel-gfx] [PATCH 15/15] drm/i915/selftests: modify the gtt tests to also exercise huge pages
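
Teach fake_dma_object() to take an explicit GTT page size, rounding the
object size up to that granularity, and thread a page_size parameter
through the hole exercisers (lowlevel_hole, fill_hole, walk_hole,
pot_hole, drunk_hole and shrink_hole). exercise_ppgtt() then repeats
each test for every page size in {4K, 64K, 2M, 1G} that the platform
reports in its page_size_mask, exercise_mock() runs all four sizes
against the mock ppgtt, and exercise_ggtt() keeps using 4K pages. The
mock ppgtt also gains the page colouring callback so that huge-page
colour adjustment can be exercised without real hardware.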

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
---
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 123 ++++++++++++++++++--------
 drivers/gpu/drm/i915/selftests/mock_gtt.c     |   3 +
 2 files changed, 89 insertions(+), 37 deletions(-)

diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 9b2a7228a78f..4625f93a3890 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -92,12 +92,14 @@ static const struct drm_i915_gem_object_ops fake_ops = {
 };
 
 static struct drm_i915_gem_object *
-fake_dma_object(struct drm_i915_private *i915, u64 size)
+fake_dma_object(struct drm_i915_private *i915, u64 size, unsigned long page_size)
 {
 	struct drm_i915_gem_object *obj;
 
 	GEM_BUG_ON(!size);
-	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+	GEM_BUG_ON(page_size & ~I915_GTT_PAGE_SIZE_MASK);
+
+	size = roundup(size, page_size);
 
 	if (overflows_type(size, obj->base.size))
 		return ERR_PTR(-E2BIG);
@@ -107,8 +109,13 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
 		goto err;
 
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
+
 	i915_gem_object_init(obj, &fake_ops);
 
+	obj->page_size = page_size;
+
+	GEM_BUG_ON(!IS_ALIGNED(obj->base.size, obj->page_size));
+
 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
 	obj->cache_level = I915_CACHE_NONE;
@@ -194,13 +201,14 @@ static int igt_ppgtt_alloc(void *arg)
 static int lowlevel_hole(struct drm_i915_private *i915,
 			 struct i915_address_space *vm,
 			 u64 hole_start, u64 hole_end,
+			 unsigned long page_size,
 			 unsigned long end_time)
 {
 	I915_RND_STATE(seed_prng);
 	unsigned int size;
 
 	/* Keep creating larger objects until one cannot fit into the hole */
-	for (size = 12; (hole_end - hole_start) >> size; size++) {
+	for (size = ilog2(page_size); (hole_end - hole_start) >> size; size++) {
 		I915_RND_SUBSTATE(prng, seed_prng);
 		struct drm_i915_gem_object *obj;
 		unsigned int *order, count, n;
@@ -226,7 +234,7 @@ static int lowlevel_hole(struct drm_i915_private *i915,
 		 * memory. We expect to hit -ENOMEM.
 		 */
 
-		obj = fake_dma_object(i915, BIT_ULL(size));
+		obj = fake_dma_object(i915, BIT_ULL(size), page_size);
 		if (IS_ERR(obj)) {
 			kfree(order);
 			break;
@@ -303,18 +311,25 @@ static void close_object_list(struct list_head *objects,
 static int fill_hole(struct drm_i915_private *i915,
 		     struct i915_address_space *vm,
 		     u64 hole_start, u64 hole_end,
+		     unsigned long page_size,
 		     unsigned long end_time)
 {
 	const u64 hole_size = hole_end - hole_start;
 	struct drm_i915_gem_object *obj;
-	const unsigned long max_pages =
-		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
-	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
-	unsigned long npages, prime, flags;
+	const unsigned page_shift = ilog2(page_size);
+	unsigned long max_pages, max_step, npages, prime, flags;
 	struct i915_vma *vma;
 	LIST_HEAD(objects);
 	int err;
 
+	hole_start = roundup(hole_start, page_size);
+	hole_end = rounddown(hole_end, page_size);
+
+	GEM_BUG_ON(hole_start >= hole_end);
+
+	max_pages = min_t(u64, ULONG_MAX - 1, hole_size/2 >> page_shift);
+	max_step = max(int_sqrt(max_pages), 2UL);
+
 	/* Try binding many VMA working inwards from either edge */
 
 	flags = PIN_OFFSET_FIXED | PIN_USER;
@@ -323,7 +338,7 @@ static int fill_hole(struct drm_i915_private *i915,
 
 	for_each_prime_number_from(prime, 2, max_step) {
 		for (npages = 1; npages <= max_pages; npages *= prime) {
-			const u64 full_size = npages << PAGE_SHIFT;
+			const u64 full_size = npages << page_shift;
 			const struct {
 				const char *name;
 				u64 offset;
@@ -334,7 +349,7 @@ static int fill_hole(struct drm_i915_private *i915,
 				{ }
 			}, *p;
 
-			obj = fake_dma_object(i915, full_size);
+			obj = fake_dma_object(i915, full_size, page_size);
 			if (IS_ERR(obj))
 				break;
@@ -359,7 +374,7 @@ static int fill_hole(struct drm_i915_private *i915,
 					offset -= obj->base.size;
 				}
 
-				err = i915_vma_pin(vma, 0, 0, offset | flags);
+				err = i915_vma_pin(vma, 0, page_size, offset | flags);
 				if (err) {
 					pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
 					       __func__, p->name, err, npages, prime, offset);
@@ -367,7 +382,7 @@ static int fill_hole(struct drm_i915_private *i915,
 				}
 
 				if (!drm_mm_node_allocated(&vma->node) ||
-				    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
+				    i915_vma_misplaced(vma, 0, page_size, offset | flags)) {
 					pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
 					       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
 					       offset);
@@ -397,7 +412,7 @@ static int fill_hole(struct drm_i915_private *i915,
 				}
 
 				if (!drm_mm_node_allocated(&vma->node) ||
-				    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
+				    i915_vma_misplaced(vma, 0, page_size, offset | flags)) {
 					pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
 					       __func__, p->name, vma->node.start, vma->node.size,
 					       offset);
@@ -432,7 +447,7 @@ static int fill_hole(struct drm_i915_private *i915,
 					offset -= obj->base.size;
 				}
 
-				err = i915_vma_pin(vma, 0, 0, offset | flags);
+				err = i915_vma_pin(vma, 0, page_size, offset | flags);
 				if (err) {
 					pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
 					       __func__, p->name, err, npages, prime, offset);
@@ -440,7 +455,7 @@ static int fill_hole(struct drm_i915_private *i915,
 				}
 
 				if (!drm_mm_node_allocated(&vma->node) ||
-				    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
+				    i915_vma_misplaced(vma, 0, page_size, offset | flags)) {
 					pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
 					       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
 					       offset);
@@ -470,7 +485,7 @@ static int fill_hole(struct drm_i915_private *i915,
 				}
 
 				if (!drm_mm_node_allocated(&vma->node) ||
-				    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
+				    i915_vma_misplaced(vma, 0, page_size, offset | flags)) {
 					pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
 					       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
 					       offset);
@@ -514,11 +529,13 @@ static int fill_hole(struct drm_i915_private *i915,
 static int walk_hole(struct drm_i915_private *i915,
 		     struct i915_address_space *vm,
 		     u64 hole_start, u64 hole_end,
+		     unsigned long page_size,
 		     unsigned long end_time)
 {
 	const u64 hole_size = hole_end - hole_start;
+	const unsigned page_shift = ilog2(page_size);
 	const unsigned long max_pages =
-		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
+		min_t(u64, ULONG_MAX - 1, hole_size >> page_shift);
 	unsigned long flags;
 	u64 size;
@@ -534,7 +551,7 @@ static int walk_hole(struct drm_i915_private *i915,
 		u64 addr;
 		int err = 0;
 
-		obj = fake_dma_object(i915, size << PAGE_SHIFT);
+		obj = fake_dma_object(i915, size << page_shift, page_size);
 		if (IS_ERR(obj))
 			break;
@@ -547,7 +564,7 @@ static int walk_hole(struct drm_i915_private *i915,
 		for (addr = hole_start;
 		     addr + obj->base.size < hole_end;
 		     addr += obj->base.size) {
-			err = i915_vma_pin(vma, 0, 0, addr | flags);
+			err = i915_vma_pin(vma, 0, page_size, addr | flags);
 			if (err) {
 				pr_err("%s bind failed at %llx + %llx [hole %llx- %llx] with err=%d\n",
 				       __func__, addr, vma->size,
@@ -557,7 +574,7 @@ static int walk_hole(struct drm_i915_private *i915,
 			i915_vma_unpin(vma);
 
 			if (!drm_mm_node_allocated(&vma->node) ||
-			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
+			    i915_vma_misplaced(vma, 0, page_size, addr | flags)) {
 				pr_err("%s incorrect at %llx + %llx\n",
 				       __func__, addr, vma->size);
 				err = -EINVAL;
@@ -595,6 +612,7 @@ static int walk_hole(struct drm_i915_private *i915,
 static int pot_hole(struct drm_i915_private *i915,
 		    struct i915_address_space *vm,
 		    u64 hole_start, u64 hole_end,
+		    unsigned long page_size,
 		    unsigned long end_time)
 {
 	struct drm_i915_gem_object *obj;
@@ -607,7 +625,7 @@ static int pot_hole(struct drm_i915_private *i915,
 	if (i915_is_ggtt(vm))
 		flags |= PIN_GLOBAL;
 
-	obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
+	obj = fake_dma_object(i915, 2 * page_size, page_size);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
@@ -619,15 +637,15 @@ static int pot_hole(struct drm_i915_private *i915,
 
 	/* Insert a pair of pages across every pot boundary within the hole */
 	for (pot = fls64(hole_end - 1) - 1;
-	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
+	     pot > ilog2(2 * page_size);
 	     pot--) {
 		u64 step = BIT_ULL(pot);
 		u64 addr;
 
-		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
-		     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
+		for (addr = round_up(hole_start + page_size, step) - page_size;
+		     addr <= round_down(hole_end - 2*page_size, step) - page_size;
 		     addr += step) {
-			err = i915_vma_pin(vma, 0, 0, addr | flags);
+			err = i915_vma_pin(vma, 0, page_size, addr | flags);
 			if (err) {
 				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
 				       __func__,
@@ -671,6 +689,7 @@ static int pot_hole(struct drm_i915_private *i915,
 static int drunk_hole(struct drm_i915_private *i915,
 		      struct i915_address_space *vm,
 		      u64 hole_start, u64 hole_end,
+		      unsigned long page_size,
 		      unsigned long end_time)
 {
 	I915_RND_STATE(prng);
@@ -682,7 +701,7 @@ static int drunk_hole(struct drm_i915_private *i915,
 		flags |= PIN_GLOBAL;
 
 	/* Keep creating larger objects until one cannot fit into the hole */
-	for (size = 12; (hole_end - hole_start) >> size; size++) {
+	for (size = ilog2(page_size); (hole_end - hole_start) >> size; size++) {
 		struct drm_i915_gem_object *obj;
 		unsigned int *order, count, n;
 		struct i915_vma *vma;
@@ -706,7 +725,7 @@ static int drunk_hole(struct drm_i915_private *i915,
 		 * memory. We expect to hit -ENOMEM.
 		 */
 
-		obj = fake_dma_object(i915, BIT_ULL(size));
+		obj = fake_dma_object(i915, BIT_ULL(size), page_size);
 		if (IS_ERR(obj)) {
 			kfree(order);
 			break;
@@ -723,6 +742,8 @@ static int drunk_hole(struct drm_i915_private *i915,
 		for (n = 0; n < count; n++) {
 			u64 addr = hole_start + order[n] * BIT_ULL(size);
 
+			GEM_BUG_ON(!IS_ALIGNED(addr, page_size));
+
 			err = i915_vma_pin(vma, 0, 0, addr | flags);
 			if (err) {
 				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
@@ -734,7 +755,7 @@ static int drunk_hole(struct drm_i915_private *i915,
 			}
 
 			if (!drm_mm_node_allocated(&vma->node) ||
-			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
+			    i915_vma_misplaced(vma, 0, page_size, addr | flags)) {
 				pr_err("%s incorrect at %llx + %llx\n",
 				       __func__, addr, BIT_ULL(size));
 				i915_vma_unpin(vma);
@@ -771,11 +792,12 @@ static int drunk_hole(struct drm_i915_private *i915,
 static int __shrink_hole(struct drm_i915_private *i915,
 			 struct i915_address_space *vm,
 			 u64 hole_start, u64 hole_end,
+			 unsigned long page_size,
 			 unsigned long end_time)
 {
 	struct drm_i915_gem_object *obj;
 	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
-	unsigned int order = 12;
+	unsigned int order = ilog2(page_size);
 	LIST_HEAD(objects);
 	int err = 0;
 	u64 addr;
@@ -786,7 +808,7 @@ static int __shrink_hole(struct drm_i915_private *i915,
 		u64 size = BIT_ULL(order++);
 
 		size = min(size, hole_end - addr);
-		obj = fake_dma_object(i915, size);
+		obj = fake_dma_object(i915, size, page_size);
 		if (IS_ERR(obj)) {
 			err = PTR_ERR(obj);
 			break;
@@ -802,7 +824,7 @@ static int __shrink_hole(struct drm_i915_private *i915,
 
 		GEM_BUG_ON(vma->size != size);
 
-		err = i915_vma_pin(vma, 0, 0, addr | flags);
+		err = i915_vma_pin(vma, 0, page_size, addr | flags);
 		if (err) {
 			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
 			       __func__, addr, size, hole_start, hole_end, err);
@@ -810,7 +832,7 @@ static int __shrink_hole(struct drm_i915_private *i915,
 		}
 
 		if (!drm_mm_node_allocated(&vma->node) ||
-		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
+		    i915_vma_misplaced(vma, 0, page_size, addr | flags)) {
 			pr_err("%s incorrect at %llx + %llx\n",
 			       __func__, addr, size);
 			i915_vma_unpin(vma);
@@ -837,6 +859,7 @@ static int __shrink_hole(struct drm_i915_private *i915,
 static int shrink_hole(struct drm_i915_private *i915,
 		       struct i915_address_space *vm,
 		       u64 hole_start, u64 hole_end,
+		       unsigned long page_size,
 		       unsigned long end_time)
 {
 	unsigned long prime;
@@ -847,7 +870,8 @@ static int shrink_hole(struct drm_i915_private *i915,
 
 	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
 		vm->fault_attr.interval = prime;
-		err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
+		err = __shrink_hole(i915, vm, hole_start, hole_end, page_size,
+				    end_time);
 		if (err)
 			break;
 	}
@@ -861,12 +885,20 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
 			  int (*func)(struct drm_i915_private *i915,
 				      struct i915_address_space *vm,
 				      u64 hole_start, u64 hole_end,
+				      unsigned long page_size,
 				      unsigned long end_time))
 {
 	struct drm_file *file;
 	struct i915_hw_ppgtt *ppgtt;
 	IGT_TIMEOUT(end_time);
-	int err;
+	unsigned long page_sizes[] = {
+		I915_GTT_PAGE_SIZE,
+		I915_GTT_PAGE_SIZE_64K,
+		I915_GTT_PAGE_SIZE_2M,
+		I915_GTT_PAGE_SIZE_1G,
+	};
+	int err = 0;
+	int i;
 
 	if (!USES_FULL_PPGTT(dev_priv))
 		return 0;
@@ -884,7 +916,11 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
 	GEM_BUG_ON(offset_in_page(ppgtt->base.total));
 	GEM_BUG_ON(ppgtt->base.closed);
 
-	err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time);
+	for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
+		if (INTEL_INFO(dev_priv)->page_size_mask & page_sizes[i])
+			err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total,
+				   page_sizes[i], end_time);
+	}
 
 	i915_ppgtt_close(&ppgtt->base);
 	i915_ppgtt_put(ppgtt);
@@ -940,6 +976,7 @@ static int exercise_ggtt(struct drm_i915_private *i915,
 			 int (*func)(struct drm_i915_private *i915,
 				     struct i915_address_space *vm,
 				     u64 hole_start, u64 hole_end,
+				     unsigned long page_size,
 				     unsigned long end_time))
 {
 	struct i915_ggtt *ggtt = &i915->ggtt;
@@ -961,7 +998,8 @@ static int exercise_ggtt(struct drm_i915_private *i915,
 		if (hole_start >= hole_end)
 			continue;
 
-		err = func(i915, &ggtt->base, hole_start, hole_end, end_time);
+		err = func(i915, &ggtt->base, hole_start, hole_end,
+			   I915_GTT_PAGE_SIZE, end_time);
 		if (err)
 			break;
@@ -1104,12 +1142,20 @@ static int exercise_mock(struct drm_i915_private *i915,
 			 int (*func)(struct drm_i915_private *i915,
 				     struct i915_address_space *vm,
 				     u64 hole_start, u64 hole_end,
+				     unsigned long page_size,
 				     unsigned long end_time))
 {
 	struct i915_gem_context *ctx;
 	struct i915_hw_ppgtt *ppgtt;
 	IGT_TIMEOUT(end_time);
+	unsigned long page_sizes[] = {
+		I915_GTT_PAGE_SIZE,
+		I915_GTT_PAGE_SIZE_64K,
+		I915_GTT_PAGE_SIZE_2M,
+		I915_GTT_PAGE_SIZE_1G,
+	};
 	int err;
+	int i;
 
 	ctx = mock_context(i915, "mock");
 	if (!ctx)
@@ -1118,7 +1164,10 @@ static int exercise_mock(struct drm_i915_private *i915,
 	ppgtt = ctx->ppgtt;
 	GEM_BUG_ON(!ppgtt);
 
-	err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time);
+	for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
+		err = func(i915, &ppgtt->base, 0, ppgtt->base.total,
+			   page_sizes[i], end_time);
+	}
 
 	mock_context_close(ctx);
 	return err;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index a2137100d2f5..9428ea09d05d 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -88,6 +88,9 @@ mock_ppgtt(struct drm_i915_private *i915,
 	ppgtt->base.unbind_vma = mock_unbind_ppgtt;
 	ppgtt->base.cleanup = mock_cleanup;
 
+	/* For mock testing huge-page support */
+	ppgtt->base.mm.color_adjust = i915_page_color_adjust;
+
 	return ppgtt;
 }