From: Matthew Auld <matthew.auld@intel.com>
To: intel-gfx@lists.freedesktop.org
Date: Fri, 18 Oct 2019 17:55:58 +0100
Message-Id: <20191018165558.18518-6-matthew.auld@intel.com>
In-Reply-To: <20191018165558.18518-1-matthew.auld@intel.com>
References: <20191018165558.18518-1-matthew.auld@intel.com>
Subject: [Intel-gfx] [PATCH 6/6] drm/i915/selftests: prefer random sizes for the huge-GTT-page smoke tests

Ditch the dubious static list of sizes to enumerate, in favour of
choosing a random size within the limits of each backing store. With
repeated CI runs this should give us a wider range of object sizes, and
in turn more page-size combinations, while using less machine time.
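As background on the helper added below: igt_random_size() draws a
pseudo-random value and masks it down to the bits between PAGE_SHIFT and
ilog2(max_page_size); if the result lands below min_page_size, that bound
is ORed in, so the returned size is always page-aligned and at least
min_page_size. A minimal userspace sketch of the same idea -- not the
kernel implementation, which uses prandom_u32_state() and GENMASK_ULL();
here rand() and __builtin_clz() stand in, and PAGE_SHIFT is assumed to
be 12:

#include <stdint.h>
#include <stdlib.h>

#define PAGE_SHIFT 12

static uint32_t random_size(uint32_t min_page_size, uint32_t max_page_size)
{
	/* index of the highest set bit, i.e. ilog2(max_page_size) */
	uint32_t hi = 31 - __builtin_clz(max_page_size);
	/* keep bits PAGE_SHIFT..hi, mirroring GENMASK_ULL(hi, PAGE_SHIFT) */
	uint32_t mask = (uint32_t)((1ull << (hi + 1)) - 1) &
			~((1u << PAGE_SHIFT) - 1);
	uint32_t size = (uint32_t)rand() & mask;

	/* ORing in the lower bound guarantees size >= min_page_size */
	if (size < min_page_size)
		size |= min_page_size;

	return size;
}

Since the mask includes bit ilog2(max_page_size) itself, the result can
exceed max_page_size by up to nearly max_page_size; the bounds act as
page-size limits rather than a strict ceiling on the object size.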
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 .../gpu/drm/i915/gem/selftests/huge_pages.c   | 198 +++++++++---------
 1 file changed, 94 insertions(+), 104 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index d4892769b739..3f7ac4473f33 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -1314,20 +1314,33 @@ static int igt_ppgtt_exhaust_huge(void *arg)
 	return err;
 }
 
+static u32 igt_random_size(struct rnd_state *prng,
+			   u32 min_page_size,
+			   u32 max_page_size)
+{
+	u32 size;
+	u32 mask;
+
+	GEM_BUG_ON(!is_power_of_2(min_page_size));
+	GEM_BUG_ON(!is_power_of_2(max_page_size));
+	GEM_BUG_ON(min_page_size < PAGE_SIZE);
+	GEM_BUG_ON(min_page_size > max_page_size);
+
+	mask = GENMASK_ULL(ilog2(max_page_size), PAGE_SHIFT);
+	size = prandom_u32_state(prng) & mask;
+	if (size < min_page_size)
+		size |= min_page_size;
+
+	return size;
+}
+
 static int igt_ppgtt_internal_huge(void *arg)
 {
 	struct i915_gem_context *ctx = arg;
 	struct drm_i915_private *i915 = ctx->i915;
 	struct drm_i915_gem_object *obj;
-	static const unsigned int sizes[] = {
-		SZ_64K,
-		SZ_128K,
-		SZ_256K,
-		SZ_512K,
-		SZ_1M,
-		SZ_2M,
-	};
-	int i;
+	I915_RND_STATE(prng);
+	u32 size;
 	int err;
 
 	/*
@@ -1335,42 +1348,36 @@ static int igt_ppgtt_internal_huge(void *arg)
 	 * -- ensure that our writes land in the right place.
 	 */
 
-	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
-		unsigned int size = sizes[i];
-
-		obj = i915_gem_object_create_internal(i915, size);
-		if (IS_ERR(obj))
-			return PTR_ERR(obj);
+	size = igt_random_size(&prng, SZ_64K, SZ_2M);
 
-		err = i915_gem_object_pin_pages(obj);
-		if (err)
-			goto out_put;
-
-		if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
-			pr_info("internal unable to allocate huge-page(s) with size=%u\n",
-				size);
-			goto out_unpin;
-		}
+	obj = i915_gem_object_create_internal(i915, size);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
 
-		err = igt_write_huge(ctx, obj);
-		if (err) {
-			pr_err("internal write-huge failed with size=%u\n",
-				size);
-			goto out_unpin;
-		}
+	err = i915_gem_object_pin_pages(obj);
+	if (err)
+		goto out_put;
 
-		i915_gem_object_unpin_pages(obj);
-		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
-		i915_gem_object_put(obj);
+	if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
+		pr_info("%s unable to allocate huge-page(s) with size=%u\n",
+			__func__, size);
+		err = -ENOMEM;
+		goto out_unpin;
 	}
 
-	return 0;
+	err = igt_write_huge(ctx, obj);
+	if (err)
+		pr_err("%s write-huge failed with size=%u\n", __func__, size);
 
 out_unpin:
 	i915_gem_object_unpin_pages(obj);
+	__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
 out_put:
 	i915_gem_object_put(obj);
 
+	if (err == -ENOMEM)
+		err = 0;
+
 	return err;
 }
 
@@ -1384,14 +1391,8 @@ static int igt_ppgtt_gemfs_huge(void *arg)
 	struct i915_gem_context *ctx = arg;
 	struct drm_i915_private *i915 = ctx->i915;
 	struct drm_i915_gem_object *obj;
-	static const unsigned int sizes[] = {
-		SZ_2M,
-		SZ_4M,
-		SZ_8M,
-		SZ_16M,
-		SZ_32M,
-	};
-	int i;
+	I915_RND_STATE(prng);
+	u32 size;
 	int err;
 
 	/*
@@ -1400,46 +1401,40 @@ static int igt_ppgtt_gemfs_huge(void *arg)
 	 */
 
 	if (!igt_can_allocate_thp(i915)) {
-		pr_info("missing THP support, skipping\n");
+		pr_info("%s missing THP support, skipping\n", __func__);
 		return 0;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
-		unsigned int size = sizes[i];
-
-		obj = i915_gem_object_create_shmem(i915, size);
-		if (IS_ERR(obj))
-			return PTR_ERR(obj);
-
-		err = i915_gem_object_pin_pages(obj);
-		if (err)
-			goto out_put;
+	size = igt_random_size(&prng, SZ_2M, SZ_32M);
 
-		if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
-			pr_info("finishing test early, gemfs unable to allocate huge-page(s) with size=%u\n",
-				size);
-			goto out_unpin;
-		}
+	obj = i915_gem_object_create_shmem(i915, size);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
 
-		err = igt_write_huge(ctx, obj);
-		if (err) {
-			pr_err("gemfs write-huge failed with size=%u\n",
-			       size);
-			goto out_unpin;
-		}
+	err = i915_gem_object_pin_pages(obj);
+	if (err)
+		goto out_put;
 
-		i915_gem_object_unpin_pages(obj);
-		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
-		i915_gem_object_put(obj);
+	if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
+		pr_info("%s finishing test early, gemfs unable to allocate huge-page(s) with size=%u\n",
+			__func__, size);
+		err = -ENOMEM;
+		goto out_unpin;
 	}
 
-	return 0;
+	err = igt_write_huge(ctx, obj);
+	if (err)
+		pr_err("%s write-huge failed with size=%u\n", __func__, size);
 
 out_unpin:
 	i915_gem_object_unpin_pages(obj);
+	__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
 out_put:
 	i915_gem_object_put(obj);
 
+	if (err == -ENOMEM)
+		err = 0;
+
 	return err;
 }
 
@@ -1448,13 +1443,8 @@ static int igt_ppgtt_lmem_huge(void *arg)
 	struct i915_gem_context *ctx = arg;
 	struct drm_i915_private *i915 = ctx->i915;
 	struct drm_i915_gem_object *obj;
-	static const unsigned int sizes[] = {
-		SZ_64K,
-		SZ_512K,
-		SZ_1M,
-		SZ_2M,
-	};
-	int i;
+	I915_RND_STATE(prng);
+	u32 size;
 	int err;
 
 	if (!HAS_LMEM(i915)) {
@@ -1467,49 +1457,49 @@ static int igt_ppgtt_lmem_huge(void *arg)
 	 * -- ensure that our writes land in the right place.
 	 */
 
-	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
-		unsigned int size = sizes[i];
-
-		obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS);
-		if (IS_ERR(obj)) {
-			err = PTR_ERR(obj);
-			if (err == -E2BIG) {
-				pr_info("object too big for region!\n");
-				return 0;
-			}
+	size = SZ_1G;
+try_again:
+	size = igt_random_size(&prng, SZ_64K, rounddown_pow_of_two(size));
 
-			return err;
+	obj = i915_gem_object_create_lmem(i915, size, 0);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		if (err == -E2BIG) {
+			size >>= 1;
+			goto try_again;
 		}
 
-		err = i915_gem_object_pin_pages(obj);
-		if (err)
-			goto out_put;
-
-		if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
-			pr_info("LMEM unable to allocate huge-page(s) with size=%u\n",
-				size);
-			goto out_unpin;
-		}
+		return err;
+	}
 
-		err = igt_write_huge(ctx, obj);
-		if (err) {
-			pr_err("LMEM write-huge failed with size=%u\n", size);
-			goto out_unpin;
+	err = i915_gem_object_pin_pages(obj);
+	if (err) {
+		if (err == -ENXIO) {
+			i915_gem_object_put(obj);
+			size >>= 1;
+			goto try_again;
 		}
+		goto out_put;
+	}
 
-		i915_gem_object_unpin_pages(obj);
-		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
-		i915_gem_object_put(obj);
+	if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
+		pr_info("%s unable to allocate huge-page(s) with size=%u\n",
+			__func__, size);
+		err = -ENOMEM;
+		goto out_unpin;
 	}
 
-	return 0;
+	err = igt_write_huge(ctx, obj);
+	if (err)
+		pr_err("%s write-huge failed with size=%u\n", __func__, size);
 
 out_unpin:
 	i915_gem_object_unpin_pages(obj);
+	__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
 out_put:
 	i915_gem_object_put(obj);
 
-	if (err == -ENOMEM)
+	if (err == -ENOMEM || err == -ENXIO)
 		err = 0;
 
 	return err;
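Worth spelling out the lmem back-off above: rather than giving up with
"object too big for region!", the test now starts from a SZ_1G upper
bound and halves the size whenever object creation returns -E2BIG or
pinning returns -ENXIO, so the smoke test still runs on parts with small
local-memory regions. A self-contained userspace sketch of that
halve-and-retry loop -- alloc_from_region() is a hypothetical stand-in
for i915_gem_object_create_lmem() plus i915_gem_object_pin_pages(), and
the 8M region size is invented for illustration:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT	12
#define SZ_64K		(64u << 10)
#define SZ_1G		(1u << 30)

#define FAKE_REGION_SIZE (8u << 20)	/* pretend lmem is only 8M */

static uint32_t rounddown_pow_of_two32(uint32_t v)
{
	return 1u << (31 - __builtin_clz(v));
}

/* same masked-random helper as the earlier sketch */
static uint32_t random_size(uint32_t min_page_size, uint32_t max_page_size)
{
	uint32_t hi = 31 - __builtin_clz(max_page_size);
	uint32_t mask = (uint32_t)((1ull << (hi + 1)) - 1) &
			~((1u << PAGE_SHIFT) - 1);
	uint32_t size = (uint32_t)rand() & mask;

	return size < min_page_size ? size | min_page_size : size;
}

/* fail anything larger than the fake region, like -E2BIG from lmem */
static int alloc_from_region(uint32_t size)
{
	return size > FAKE_REGION_SIZE ? -E2BIG : 0;
}

int main(void)
{
	uint32_t size = SZ_1G;
	int err;

	for (;;) {
		size = random_size(SZ_64K, rounddown_pow_of_two32(size));
		err = alloc_from_region(size);
		if (err != -E2BIG)
			break;
		size >>= 1;	/* region too small: halve and retry */
	}

	printf("settled on size=%u (err=%d)\n", size, err);
	return err;
}

As in the kernel version, each retry re-randomises with a maximum of
rounddown_pow_of_two() of the halved size, so the candidate range keeps
shrinking until the region can satisfy the request.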