From patchwork Fri Jun 28 10:29:44 2019
X-Patchwork-Submitter: Chris Wilson
X-Patchwork-Id: 11021913
From: Chris Wilson
To: intel-gfx@lists.freedesktop.org
Cc: matthew.auld@intel.com
Date: Fri, 28 Jun 2019 11:29:44 +0100
Message-Id: <20190628102944.2855-1-chris@chris-wilson.co.uk>
X-Mailer: git-send-email 2.20.1
Subject: [Intel-gfx] [PATCH] buddy

A puzzle for you...
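
For background, the first two hunks below swap list_del_init() for list_del().
A minimal sketch of the generic <linux/list.h> semantics involved (the demo
function name is made up for illustration, it is not part of the patch):

	#include <linux/list.h>

	static void list_del_vs_list_del_init_demo(void)
	{
		LIST_HEAD(head);
		struct list_head entry;

		list_add(&entry, &head);
		list_del(&entry);	/* unlinked; entry.next/prev are poisoned,
					 * so entry must be re-initialised before
					 * it can be linked or tested again */

		list_add(&entry, &head);
		list_del_init(&entry);	/* unlinked and re-initialised;
					 * list_empty(&entry) is now true */
	}
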
---
 drivers/gpu/drm/i915/i915_buddy.c           |  17 +-
 drivers/gpu/drm/i915/selftests/i915_buddy.c | 230 +++++++++++++++++++-
 2 files changed, 234 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c
index c0ac68d71d94..e5667540282a 100644
--- a/drivers/gpu/drm/i915/i915_buddy.c
+++ b/drivers/gpu/drm/i915/i915_buddy.c
@@ -16,7 +16,7 @@ static void mark_allocated(struct i915_buddy_block *block)
 	block->header &= ~I915_BUDDY_HEADER_STATE;
 	block->header |= I915_BUDDY_ALLOCATED;
 
-	list_del_init(&block->link);
+	list_del(&block->link);
 }
 
 static void mark_free(struct i915_buddy_mm *mm,
@@ -34,7 +34,7 @@ static void mark_split(struct i915_buddy_block *block)
 	block->header &= ~I915_BUDDY_HEADER_STATE;
 	block->header |= I915_BUDDY_SPLIT;
 
-	list_del_init(&block->link);
+	list_del(&block->link);
 }
 
 int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 min_size)
@@ -170,9 +170,6 @@ static int split_block(struct i915_buddy_mm *mm,
 
 	block->left->parent = block;
 
-	INIT_LIST_HEAD(&block->left->link);
-	INIT_LIST_HEAD(&block->left->tmp_link);
-
 	block->right = kmem_cache_zalloc(mm->blocks, GFP_KERNEL);
 	if (!block->right) {
 		kmem_cache_free(mm->blocks, block->left);
@@ -184,9 +181,6 @@ static int split_block(struct i915_buddy_mm *mm,
 
 	block->right->parent = block;
 
-	INIT_LIST_HEAD(&block->right->link);
-	INIT_LIST_HEAD(&block->right->tmp_link);
-
 	mark_free(mm, block->left);
 	mark_free(mm, block->right);
 
@@ -213,9 +207,9 @@ get_buddy(struct i915_buddy_block *block)
 static void __i915_buddy_free(struct i915_buddy_mm *mm,
 			      struct i915_buddy_block *block)
 {
-	list_del_init(&block->link); /* We have ownership now */
+	struct i915_buddy_block *parent;
 
-	while (block->parent) {
+	while ((parent = block->parent)) {
 		struct i915_buddy_block *buddy;
 
 		buddy = get_buddy(block);
@@ -228,7 +222,7 @@ static void __i915_buddy_free(struct i915_buddy_mm *mm,
 		kmem_cache_free(mm->blocks, block);
 		kmem_cache_free(mm->blocks, buddy);
 
-		block = block->parent;
+		block = parent;
 	}
 
 	mark_free(mm, block);
@@ -248,6 +242,7 @@ void i915_buddy_free_list(struct i915_buddy_mm *mm,
 
 	list_for_each_entry_safe(block, on, objects, link)
 		i915_buddy_free(mm, block);
+	INIT_LIST_HEAD(objects);
 }
 
 /*
diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c
index 2159aa9f4867..943a9d08d2f5 100644
--- a/drivers/gpu/drm/i915/selftests/i915_buddy.c
+++ b/drivers/gpu/drm/i915/selftests/i915_buddy.c
@@ -295,7 +295,7 @@ static void igt_mm_config(u64 *size, u64 *min_size)
 	*size = s;
 }
 
-static int igt_buddy_alloc(void *arg)
+static int igt_buddy_alloc_smoke(void *arg)
 {
 	struct i915_buddy_mm mm;
 	int max_order;
@@ -385,6 +385,229 @@ static int igt_buddy_alloc(void *arg)
 	return err;
 }
 
+static int igt_buddy_alloc_pessimistic(void *arg)
+{
+	const unsigned int max_order = 16;
+	struct i915_buddy_block *block, *bn;
+	struct i915_buddy_mm mm;
+	unsigned int order;
+	LIST_HEAD(blocks);
+	int err;
+
+	/*
+	 * Create a pot-sized mm, then allocate one of each possible
+	 * order within. This should leave the mm with exactly one
+	 * page left.
+	 */
+
+	err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
+	if (err) {
+		pr_err("buddy_init failed(%d)\n", err);
+		return err;
+	}
+	GEM_BUG_ON(mm.max_order != max_order);
+
+	for (order = 0; order < max_order; order++) {
+		block = i915_buddy_alloc(&mm, order);
+		if (IS_ERR(block)) {
+			pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
+				order);
+			err = PTR_ERR(block);
+			goto err;
+		}
+
+		list_add_tail(&block->link, &blocks);
+	}
+
+	/* And now the last remaining block available */
+	block = i915_buddy_alloc(&mm, 0);
+	if (IS_ERR(block)) {
+		pr_info("buddy_alloc hit -ENOMEM on final alloc\n");
+		err = PTR_ERR(block);
+		goto err;
+	}
+	list_add_tail(&block->link, &blocks);
+
+	/* Should be completely full! */
+	for (order = max_order; order--; ) {
+		block = i915_buddy_alloc(&mm, order);
+		if (!IS_ERR(block)) {
+			pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
+				order);
+			list_add_tail(&block->link, &blocks);
+			err = -EINVAL;
+			goto err;
+		}
+	}
+
+	block = list_last_entry(&blocks, typeof(*block), link);
+	list_del(&block->link);
+	i915_buddy_free(&mm, block);
+
+	/* As we free in increasing size, we make available larger blocks */
+	order = 1;
+	list_for_each_entry_safe(block, bn, &blocks, link) {
+		list_del(&block->link);
+		i915_buddy_free(&mm, block);
+
+		block = i915_buddy_alloc(&mm, order);
+		if (IS_ERR(block)) {
+			pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
+				order);
+			err = PTR_ERR(block);
+			goto err;
+		}
+		i915_buddy_free(&mm, block);
+		order++;
+	}
+
+	/* To confirm, now the whole mm should be available */
+	block = i915_buddy_alloc(&mm, max_order);
+	if (IS_ERR(block)) {
+		pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
+			max_order);
+		err = PTR_ERR(block);
+		goto err;
+	}
+	i915_buddy_free(&mm, block);
+
+err:
+	i915_buddy_free_list(&mm, &blocks);
+	i915_buddy_fini(&mm);
+	return err;
+}
+
+static int igt_buddy_alloc_optimistic(void *arg)
+{
+	const int max_order = 16;
+	struct i915_buddy_block *block;
+	struct i915_buddy_mm mm;
+	LIST_HEAD(blocks);
+	int order;
+	int err;
+
+	/*
+	 * Create a mm with one block of each order available, and
+	 * try to allocate them all.
+	 */
+
+	err = i915_buddy_init(&mm,
+			      PAGE_SIZE * ((1 << (max_order + 1)) - 1),
+			      PAGE_SIZE);
+	if (err) {
+		pr_err("buddy_init failed(%d)\n", err);
+		return err;
+	}
+	GEM_BUG_ON(mm.max_order != max_order);
+
+	for (order = 0; order <= max_order; order++) {
+		block = i915_buddy_alloc(&mm, order);
+		if (IS_ERR(block)) {
+			pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
+				order);
+			err = PTR_ERR(block);
+			goto err;
+		}
+
+		list_add_tail(&block->link, &blocks);
+	}
+
+	/* Should be completely full! */
+	block = i915_buddy_alloc(&mm, 0);
+	if (!IS_ERR(block)) {
+		pr_info("buddy_alloc unexpectedly succeeded, it should be full!");
+		list_add_tail(&block->link, &blocks);
+		err = -EINVAL;
+		goto err;
+	}
+
+err:
+	i915_buddy_free_list(&mm, &blocks);
+	i915_buddy_fini(&mm);
+	return err;
+}
+
+static int igt_buddy_alloc_pathological(void *arg)
+{
+	const int max_order = 16;
+	struct i915_buddy_block *block;
+	struct i915_buddy_mm mm;
+	LIST_HEAD(blocks);
+	LIST_HEAD(buddies);
+	int order, top;
+	int err;
+
+	/*
+	 * Create a pot-sized mm, then allocate one of each possible
+	 * order within. This should leave the mm with exactly one
+	 * page left. Free the largest block, then whittle down again.
+	 * Eventually we will have a fully 50% fragmented mm.
+	 */
+
+	err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
+	if (err) {
+		pr_err("buddy_init failed(%d)\n", err);
+		return err;
+	}
+	GEM_BUG_ON(mm.max_order != max_order);
+
+	for (top = max_order; top; top--) {
+		/* Make room by freeing the largest allocated block */
+		block = list_first_entry_or_null(&blocks, typeof(*block), link);
+		if (block) {
+			list_del(&block->link);
+			i915_buddy_free(&mm, block);
+		}
+
+		for (order = top; order--; ) {
+			block = i915_buddy_alloc(&mm, order);
+			if (IS_ERR(block)) {
+				pr_info("buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
+					order, top);
+				err = PTR_ERR(block);
+				goto err;
+			}
+			list_add_tail(&block->link, &blocks);
+		}
+
+		block = i915_buddy_alloc(&mm, top);
+		if (!IS_ERR(block)) {
+			pr_info("buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
+				top, max_order);
+			list_add_tail(&block->link, &blocks);
+			err = -EINVAL;
+			goto err;
+		}
+	}
+
+	/* Split the final block */
+	block = i915_buddy_alloc(&mm, 0);
+	if (IS_ERR(block)) {
+		pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
+			0);
+		err = PTR_ERR(block);
+		goto err;
+	}
+	list_add_tail(&block->link, &blocks);
+
+	/* Nothing larger than blocks of min_size now available */
+	for (order = 1; order <= max_order; order++) {
+		block = i915_buddy_alloc(&mm, order);
+		if (!IS_ERR(block)) {
+			pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
+				order);
+			list_add_tail(&block->link, &blocks);
+			err = -EINVAL;
+			goto err;
+		}
+	}
+
+err:
+	i915_buddy_free_list(&mm, &blocks);
+	i915_buddy_fini(&mm);
+	return err;
+}
+
 static int igt_buddy_alloc_range(void *arg)
 {
 	struct i915_buddy_mm mm;
@@ -483,7 +706,10 @@ static int igt_buddy_alloc_range(void *arg)
 int i915_buddy_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
-		SUBTEST(igt_buddy_alloc),
+		SUBTEST(igt_buddy_alloc_pessimistic),
+		SUBTEST(igt_buddy_alloc_optimistic),
+		SUBTEST(igt_buddy_alloc_pathological),
+		SUBTEST(igt_buddy_alloc_smoke),
 		SUBTEST(igt_buddy_alloc_range),
 	};
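
As a sanity check on the sizing used by igt_buddy_alloc_optimistic above (an
aside, not part of the diff): PAGE_SIZE * ((1 << (max_order + 1)) - 1) is the
geometric sum PAGE_SIZE * (2^0 + 2^1 + ... + 2^max_order), so the mm splits
into exactly one block of each order from 0 to max_order, and the follow-up
order-0 allocation is expected to fail once each of those has been taken.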