From patchwork Thu Jun 27 20:56:29 2019
From: Matthew Auld
To: intel-gfx@lists.freedesktop.org
Date: Thu, 27 Jun 2019 21:56:29 +0100
Message-Id: <20190627205633.1143-34-matthew.auld@intel.com>
In-Reply-To: <20190627205633.1143-1-matthew.auld@intel.com>
References: <20190627205633.1143-1-matthew.auld@intel.com>
Subject: [Intel-gfx] [PATCH v2 33/37] drm/i915: support basic object migration

We are going to want to be able to move objects between different
regions, like system memory and local memory. In the future everything
should be just another region.
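As a rough sketch of the intended calling pattern (illustrative only,
distilled from the selftests below: the example function name is made
up, error handling is trimmed, and the caller is assumed to hold
struct_mutex):

    /*
     * Hypothetical example, not part of the patch: move an object out
     * of its current region and into system memory. The object must
     * not already be in the target region, and must be
     * I915_MADV_WILLNEED.
     */
    static int example_move_to_smem(struct drm_i915_private *i915,
                                    struct drm_i915_gem_object *obj,
                                    struct intel_context *ce)
    {
            int err;

            lockdep_assert_held(&i915->drm.struct_mutex);

            /* Refuses busy/pinned objects; drops mmaps and unbinds */
            err = i915_gem_object_prepare_move(obj);
            if (err)
                    return err;

            /* Blits the backing pages and swaps the backing region */
            return i915_gem_object_migrate(obj, ce, INTEL_MEMORY_SMEM);
    }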
Signed-off-by: Matthew Auld
Signed-off-by: Abdiel Janulgue
Cc: Joonas Lahtinen
Cc: Abdiel Janulgue
---
 drivers/gpu/drm/i915/gem/i915_gem_object.c    | 129 ++++++++++++++++++
 drivers/gpu/drm/i915/gem/i915_gem_object.h    |   8 ++
 drivers/gpu/drm/i915/gem/i915_gem_pages.c     |   2 +-
 .../drm/i915/selftests/intel_memory_region.c  | 129 ++++++++++++++++++
 4 files changed, 267 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 343162bc8181..691af388e4e7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -28,6 +28,7 @@
 #include "i915_gem_clflush.h"
 #include "i915_gem_context.h"
 #include "i915_gem_object.h"
+#include "i915_gem_object_blt.h"
 #include "i915_globals.h"
 
 static struct i915_global_object {
@@ -171,6 +172,134 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 	}
 }
 
+int i915_gem_object_prepare_move(struct drm_i915_gem_object *obj)
+{
+	int err;
+
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+	if (obj->mm.madv != I915_MADV_WILLNEED)
+		return -EINVAL;
+
+	if (i915_gem_object_needs_bit17_swizzle(obj))
+		return -EINVAL;
+
+	if (atomic_read(&obj->mm.pages_pin_count) >
+	    atomic_read(&obj->bind_count))
+		return -EBUSY;
+
+	if (obj->pin_global)
+		return -EBUSY;
+
+	i915_gem_object_release_mmap(obj);
+
+	GEM_BUG_ON(obj->mm.mapping);
+	GEM_BUG_ON(obj->base.filp && mapping_mapped(obj->base.filp->f_mapping));
+
+	err = i915_gem_object_wait(obj,
+				   I915_WAIT_INTERRUPTIBLE |
+				   I915_WAIT_LOCKED |
+				   I915_WAIT_ALL,
+				   MAX_SCHEDULE_TIMEOUT);
+	if (err)
+		return err;
+
+	return i915_gem_object_unbind(obj);
+}
+
+int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
+			    struct intel_context *ce,
+			    enum intel_region_id id)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct drm_i915_gem_object *donor;
+	struct intel_memory_region *mem;
+	int err = 0;
+
+	lockdep_assert_held(&i915->drm.struct_mutex);
+
+	GEM_BUG_ON(id >= INTEL_MEMORY_UKNOWN);
+	GEM_BUG_ON(obj->memory_region->id == id);
+	GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);
+
+	mem = i915->regions[id];
+
+	donor = i915_gem_object_create_region(mem, obj->base.size, 0);
+	if (IS_ERR(donor))
+		return PTR_ERR(donor);
+
+	/* Copy backing-pages if we have to */
+	if (i915_gem_object_has_pages(obj)) {
+		struct sg_table *pages;
+
+		err = i915_gem_object_pin_pages(obj);
+		if (err)
+			goto err_put_donor;
+
+		err = i915_gem_object_copy_blt(obj, donor, ce);
+		if (err)
+			goto err_put_donor;
+
+		i915_gem_object_lock(donor);
+		err = i915_gem_object_set_to_cpu_domain(donor, false);
+		i915_gem_object_unlock(donor);
+		if (err)
+			goto err_put_donor;
+
+		i915_retire_requests(i915);
+
+		i915_gem_object_unbind(donor);
+		err = i915_gem_object_unbind(obj);
+		if (err)
+			goto err_put_donor;
+
+		mutex_lock(&obj->mm.lock);
+
+		pages = fetch_and_zero(&obj->mm.pages);
+		obj->ops->put_pages(obj, pages);
+
+		memcpy(&obj->mm.page_sizes, &donor->mm.page_sizes,
+		       sizeof(struct i915_page_sizes));
+		obj->mm.pages = __i915_gem_object_unset_pages(donor);
+
+		obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
+		obj->mm.get_page.sg_idx = 0;
+		__i915_gem_object_reset_page_iter(obj);
+
+		mutex_unlock(&obj->mm.lock);
+	}
+
+	if (obj->ops->release)
+		obj->ops->release(obj);
+
+	/* We still need a little special casing for shmem */
+	if (obj->base.filp)
+		fput(fetch_and_zero(&obj->base.filp));
+	else
+		obj->base.filp = fetch_and_zero(&donor->base.filp);
+
+	obj->base.size = donor->base.size;
+	obj->memory_region = mem;
+	obj->flags = donor->flags;
+	obj->ops = donor->ops;
+
+	list_replace_init(&donor->blocks, &obj->blocks);
+
+	mutex_lock(&mem->obj_lock);
+	list_add(&obj->region_link, &mem->objects);
+	mutex_unlock(&mem->obj_lock);
+
+	GEM_BUG_ON(i915_gem_object_has_pages(donor));
+	GEM_BUG_ON(i915_gem_object_has_pinned_pages(donor));
+
+err_put_donor:
+	i915_gem_object_put(donor);
+	if (i915_gem_object_has_pinned_pages(obj))
+		i915_gem_object_unpin_pages(obj);
+
+	return err;
+}
+
 static void __i915_gem_free_objects(struct drm_i915_private *i915,
 				    struct llist_node *freed)
 {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index a7bfe79015ee..11afb4dea215 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -40,8 +40,16 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
 void i915_gem_free_object(struct drm_gem_object *obj);
 
+enum intel_region_id;
+int i915_gem_object_prepare_move(struct drm_i915_gem_object *obj);
+int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
+			    struct intel_context *ce,
+			    enum intel_region_id id);
+
 void i915_gem_flush_free_objects(struct drm_i915_private *i915);
 
+void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj);
+
 struct sg_table *
 __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
 
 void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 15eaaedffc46..c1bc047d5fc4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -139,7 +139,7 @@ void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
 		obj->ops->writeback(obj);
 }
 
-static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
+void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
 {
 	struct radix_tree_iter iter;
 	void __rcu **slot;
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 23c466a1b800..ccfdc4cbd174 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -491,6 +491,59 @@ static int igt_lmem_create(void *arg)
 	return err;
 }
 
+static int igt_smem_create_migrate(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_context *ce = i915->engine[BCS0]->kernel_context;
+	struct drm_i915_gem_object *obj;
+	int err;
+
+	/* Switch object backing-store on create */
+	obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	err = i915_gem_object_migrate(obj, ce, INTEL_MEMORY_SMEM);
+	if (err)
+		goto out_put;
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err)
+		goto out_put;
+
+	i915_gem_object_unpin_pages(obj);
+out_put:
+	i915_gem_object_put(obj);
+
+	return err;
+}
+
+static int igt_lmem_create_migrate(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_context *ce = i915->engine[BCS0]->kernel_context;
+	struct drm_i915_gem_object *obj;
+	int err;
+
+	/* Switch object backing-store on create */
+	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	err = i915_gem_object_migrate(obj, ce, INTEL_MEMORY_LMEM);
+	if (err)
+		goto out_put;
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err)
+		goto out_put;
+
+	i915_gem_object_unpin_pages(obj);
+out_put:
+	i915_gem_object_put(obj);
+
+	return err;
+}
+
 static int igt_lmem_write_gpu(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -601,6 +654,79 @@ static int igt_lmem_write_cpu(void *arg)
 	return err;
 }
 
+static int igt_lmem_pages_migrate(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_context *ce = i915->engine[BCS0]->kernel_context;
+	struct drm_i915_gem_object *obj;
+	IGT_TIMEOUT(end_time);
+	I915_RND_STATE(prng);
+	u32 sz;
+	int err;
+
+	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
+
+	obj = i915_gem_object_create_lmem(i915, sz, 0);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	err = i915_gem_object_fill_blt(obj, ce, 0);
+	if (err)
+		goto out_put;
+
+	do {
+		err = i915_gem_object_prepare_move(obj);
+		if (err)
+			goto out_put;
+
+		if (i915_gem_object_is_lmem(obj)) {
+			err = i915_gem_object_migrate(obj, ce, INTEL_MEMORY_SMEM);
+			if (err)
+				goto out_put;
+
+			if (i915_gem_object_is_lmem(obj)) {
+				pr_err("object still backed by lmem\n");
+				err = -EINVAL;
+			}
+
+			if (!list_empty(&obj->blocks)) {
+				pr_err("object leaking memory region\n");
+				err = -EINVAL;
+			}
+
+			if (!i915_gem_object_has_struct_page(obj)) {
+				pr_err("object not backed by struct page\n");
+				err = -EINVAL;
+			}
+
+		} else {
+			err = i915_gem_object_migrate(obj, ce, INTEL_MEMORY_LMEM);
+			if (err)
+				goto out_put;
+
+			if (i915_gem_object_has_struct_page(obj)) {
+				pr_err("object still backed by struct page\n");
+				err = -EINVAL;
+			}
+
+			if (!i915_gem_object_is_lmem(obj)) {
+				pr_err("object not backed by lmem\n");
+				err = -EINVAL;
+			}
+		}
+
+		if (!err)
+			err = i915_gem_object_fill_blt(obj, ce, 0xdeadbeaf);
+		if (err)
+			break;
+	} while (!__igt_timeout(end_time, NULL));
+
+out_put:
+	i915_gem_object_put(obj);
+
+	return err;
+}
+
 int intel_memory_region_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
@@ -644,6 +770,9 @@ int intel_memory_region_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_lmem_create),
 		SUBTEST(igt_lmem_write_cpu),
 		SUBTEST(igt_lmem_write_gpu),
+		SUBTEST(igt_smem_create_migrate),
+		SUBTEST(igt_lmem_create_migrate),
+		SUBTEST(igt_lmem_pages_migrate),
 	};
 	int err;