From patchwork Fri Aug 22 03:12:22 2014
X-Patchwork-Submitter: Ben Widawsky
X-Patchwork-Id: 4761061
From: Ben Widawsky
To: Intel GFX
Date: Thu, 21 Aug 2014 20:12:22 -0700
Message-Id: <1408677155-1840-60-git-send-email-benjamin.widawsky@intel.com>
X-Mailer: git-send-email 2.0.4
In-Reply-To: <1408677155-1840-1-git-send-email-benjamin.widawsky@intel.com>
References: <1408677155-1840-1-git-send-email-benjamin.widawsky@intel.com>
Cc: Ben Widawsky, Ben Widawsky
Subject: [Intel-gfx] [PATCH 59/68] drm/i915/bdw: implement alloc/teardown for 4lvl

The code for 4lvl works just as one would expect, and nicely it is able
to call into the existing 3lvl page table code to handle all of the
lower levels.

PML4 has no special attributes. We do not track its zombie status
because there will always be a PML4, so simply initialize it at
creation and destroy it at teardown. (A similar argument can be made
for PDPs when not using sparse addresses.)

Almost none of the fanciness here will be exercised, since the switch
isn't flipped until later.
Signed-off-by: Ben Widawsky
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 246 +++++++++++++++++++++++++++++++-----
 drivers/gpu/drm/i915/i915_gem_gtt.h |  14 +-
 2 files changed, 229 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 43df3ee..9b3358f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -405,9 +405,12 @@ static void __pdp_fini(struct i915_pagedirpo *pdp)
 static void free_pdp_single(struct i915_pagedirpo *pdp,
                             struct drm_device *dev)
 {
-        __pdp_fini(pdp);
-        if (HAS_48B_PPGTT(dev))
+        if (HAS_48B_PPGTT(dev)) {
+                __pdp_fini(pdp);
+                i915_dma_unmap_single(pdp, dev);
+                __free_page(pdp->page);
                 kfree(pdp);
+        }
 }
 
 static int __pdp_init(struct i915_pagedirpo *pdp,
@@ -433,6 +436,60 @@ static int __pdp_init(struct i915_pagedirpo *pdp,
         return 0;
 }
 
+static struct i915_pagedirpo *alloc_pdp_single(struct i915_hw_ppgtt *ppgtt,
+                                               struct i915_pml4 *pml4)
+{
+        struct drm_device *dev = ppgtt->base.dev;
+        struct i915_pagedirpo *pdp;
+        int ret;
+
+        BUG_ON(!HAS_48B_PPGTT(dev));
+
+        pdp = kmalloc(sizeof(*pdp), GFP_KERNEL);
+        if (!pdp)
+                return ERR_PTR(-ENOMEM);
+
+        pdp->page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+        if (!pdp->page) {
+                kfree(pdp);
+                return ERR_PTR(-ENOMEM);
+        }
+
+        ret = __pdp_init(pdp, dev);
+        if (ret) {
+                __free_page(pdp->page);
+                kfree(pdp);
+                return ERR_PTR(ret);
+        }
+
+        i915_dma_map_px_single(pdp, dev);
+
+        return pdp;
+}
+
+static void pml4_fini(struct i915_pml4 *pml4)
+{
+        struct i915_hw_ppgtt *ppgtt =
+                container_of(pml4, struct i915_hw_ppgtt, pml4);
+        i915_dma_unmap_single(pml4, ppgtt->base.dev);
+        __free_page(pml4->page);
+        /* HACK */
+        pml4->page = NULL;
+}
+
+static int pml4_init(struct i915_hw_ppgtt *ppgtt)
+{
+        struct i915_pml4 *pml4 = &ppgtt->pml4;
+
+        pml4->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+        if (!pml4->page)
+                return -ENOMEM;
+
+        i915_dma_map_px_single(pml4, ppgtt->base.dev);
+
+        return 0;
+}
+
 /* Broadwell Page Directory Pointer Descriptors */
 static int gen8_write_pdp(struct intel_engine_cs *ring,
                           unsigned entry,
@@ -611,7 +668,7 @@ static void gen8_map_pagetable_range(struct i915_address_space *vm,
         kunmap_atomic(pagedir);
 }
 
-static void gen8_teardown_va_range_3lvl(struct i915_address_space *vm,
+static bool gen8_teardown_va_range_3lvl(struct i915_address_space *vm,
                                         struct i915_pagedirpo *pdp,
                                         uint64_t start, uint64_t length,
                                         bool dead)
@@ -620,14 +677,23 @@ static void gen8_teardown_va_range_3lvl(struct i915_address_space *vm,
         struct i915_pagedir *pd;
         struct i915_pagetab *pt;
         uint64_t temp;
-        uint32_t pdpe, pde;
+        uint32_t pdpe, pde, orig_start = start;
 
         BUG_ON(!pdp);
+
+        if (pdp->zombie) {
+                free_pdp_single(pdp, dev);
+                trace_i915_pagedirpo_destroy(vm, 0,
+                                orig_start & GENMASK_ULL(64, GEN8_PML4E_SHIFT),
+                                GEN8_PML4E_SHIFT);
+                return true;
+        }
+
         if (!pdp->pagedirs) {
                 WARN(!bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev)),
                      "Page directory leak detected\n");
                 /* If pagedirs are already free, there is nothing to do.*/
-                return;
+                return false;
         }
 
         gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
@@ -716,8 +782,18 @@ static void gen8_teardown_va_range_3lvl(struct i915_address_space *vm,
                 }
         }
 
-        if (dead && bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev)))
-                free_pdp_single(pdp, dev);
+        if (bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev))) {
+                if (!dead) {
+                        pdp->zombie = 1;
+                } else {
+                        free_pdp_single(pdp, dev);
+                        trace_i915_pagedirpo_destroy(vm, 0,
+                                        orig_start & GENMASK_ULL(64, GEN8_PML4E_SHIFT),
+                                        GEN8_PML4E_SHIFT);
+                }
+                return true;
+        }
+        return false;
 }
 
 static void gen8_teardown_va_range_4lvl(struct i915_address_space *vm,
@@ -725,19 +801,49 @@
                                         uint64_t start, uint64_t length,
                                         bool dead)
 {
-        BUG();
+        struct i915_pagedirpo *pdp;
+        uint64_t temp, pml4e;
+
+        gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
+                if (!pdp)
+                        continue;
+
+                if (gen8_teardown_va_range_3lvl(vm, pdp, start, length, dead)) {
+                        clear_bit(pml4e, pml4->used_pml4es);
+                        pml4->pdps[pml4e] = NULL;
+                }
+
+                WARN_ON(!test_bit(pml4e, pml4->used_pml4es) && !pdp->zombie);
+                WARN_ON(test_bit(pml4e, pml4->used_pml4es) && pdp->zombie);
+        }
 }
 
-static void gen8_teardown_va_range(struct i915_address_space *vm,
-                                   uint64_t start, uint64_t length)
+static void __gen8_teardown_va_range(struct i915_address_space *vm,
+                                     uint64_t start, uint64_t length,
+                                     bool dead)
 {
         struct i915_hw_ppgtt *ppgtt =
                         container_of(vm, struct i915_hw_ppgtt, base);
 
-        if (!HAS_48B_PPGTT(vm->dev))
-                gen8_teardown_va_range_3lvl(vm, &ppgtt->pdp, start, length, false);
-        else
-                gen8_teardown_va_range_4lvl(vm, &ppgtt->pml4, start, length, false);
+        if (!HAS_48B_PPGTT(vm->dev)) {
+                gen8_teardown_va_range_3lvl(vm, &ppgtt->pdp, start, length, dead);
+                if (dead) {
+                        WARN_ON(!bitmap_empty(ppgtt->pdp.used_pdpes, I915_PDPES_PER_PDP(vm->dev)));
+                        __pdp_fini(&ppgtt->pdp);
+                }
+        } else {
+                gen8_teardown_va_range_4lvl(vm, &ppgtt->pml4, start, length, dead);
+                if (dead) {
+                        WARN_ON(!bitmap_empty(ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4));
+                        pml4_fini(&ppgtt->pml4);
+                }
+        }
+}
+
+static void gen8_teardown_va_range(struct i915_address_space *vm,
+                                   uint64_t start, uint64_t length)
+{
+        __gen8_teardown_va_range(vm, start, length, false);
 }
 
 static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
@@ -745,10 +851,12 @@
         trace_i915_va_teardown(&ppgtt->base,
                                ppgtt->base.start, ppgtt->base.total,
                                VM_TO_TRACE_NAME(&ppgtt->base));
-        gen8_teardown_va_range_3lvl(&ppgtt->base, &ppgtt->pdp,
-                                    ppgtt->base.start, ppgtt->base.total,
-                                    true);
-        BUG_ON(ppgtt->pdp.pagedirs); /* FIXME: 48b */
+        __gen8_teardown_va_range(&ppgtt->base,
+                                 ppgtt->base.start, ppgtt->base.total, true);
+        if (!HAS_48B_PPGTT(ppgtt->base.dev))
+                BUG_ON(ppgtt->pdp.pagedirs);
+        else
+                BUG_ON(ppgtt->pml4.page);
 }
 
 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
@@ -1029,12 +1137,81 @@ err_out:
         return ret;
 }
 
-static int __noreturn gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
-                                               struct i915_pml4 *pml4,
-                                               uint64_t start,
-                                               uint64_t length)
+static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
+                                    struct i915_pml4 *pml4,
+                                    uint64_t start,
+                                    uint64_t length)
 {
-        BUG();
+        DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
+        struct i915_hw_ppgtt *ppgtt =
+                        container_of(vm, struct i915_hw_ppgtt, base);
+        struct i915_pagedirpo *pdp;
+        const uint64_t orig_start = start;
+        const uint64_t orig_length = length;
+        uint64_t temp, pml4e;
+
+        /* Do the pml4 allocations first, so we don't need to track the newly
+         * allocated tables below the pdp */
+        bitmap_zero(new_pdps, GEN8_PML4ES_PER_PML4);
+
+        /* The pagedirectory and pagetable allocations are done in the shared 3
+         * and 4 level code. Just allocate the pdps.
+         */
+        gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
+                if (!pdp) {
+                        WARN_ON(test_bit(pml4e, pml4->used_pml4es));
+                        pdp = alloc_pdp_single(ppgtt, pml4);
+                        if (IS_ERR(pdp))
+                                goto err_alloc;
+
+                        pml4->pdps[pml4e] = pdp;
+                        set_bit(pml4e, new_pdps);
+                        trace_i915_pagedirpo_alloc(&ppgtt->base, pml4e,
+                                                   pml4e << GEN8_PML4E_SHIFT,
+                                                   GEN8_PML4E_SHIFT);
+
+                } else {
+                        WARN(!pdp->zombie &&
+                             !test_bit(pml4e, pml4->used_pml4es), "%lld %p", pml4e, vm);
+                }
+        }
+
+        WARN(bitmap_weight(new_pdps, GEN8_PML4ES_PER_PML4) > 2,
+             "The allocation has spanned more than 512GB. "
+             "It is highly likely this is incorrect.");
+
+        start = orig_start;
+        length = orig_length;
+
+        gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
+                int ret;
+
+                BUG_ON(!pdp);
+
+                ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
+                if (ret)
+                        goto err_out;
+        }
+
+        bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es,
+                  GEN8_PML4ES_PER_PML4);
+
+        for_each_set_bit(pml4e, pml4->used_pml4es, GEN8_PML4ES_PER_PML4)
+                pml4->pdps[pml4e]->zombie = 0;
+
+        return 0;
+
+err_out:
+        /* This will teardown more than we allocated. It should be fine, and
+         * makes code simpler. */
+        start = orig_start;
+        length = orig_length;
+        gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e)
+                gen8_teardown_va_range_3lvl(vm, pdp, start, length, false);
+
+err_alloc:
+        for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
+                free_pdp_single(pdp, vm->dev);
 }
 
 static int gen8_alloc_va_range(struct i915_address_space *vm,
@@ -1043,16 +1220,19 @@
         struct i915_hw_ppgtt *ppgtt =
                         container_of(vm, struct i915_hw_ppgtt, base);
 
-        if (!HAS_48B_PPGTT(vm->dev))
-                return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
-        else
+        if (HAS_48B_PPGTT(vm->dev))
                 return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
+        else
+                return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
 }
 
 static void gen8_ppgtt_fini_common(struct i915_hw_ppgtt *ppgtt)
 {
         free_pt_scratch(ppgtt->scratch_pd, ppgtt->base.dev);
-        free_pdp_single(&ppgtt->pdp, ppgtt->base.dev);
+        if (HAS_48B_PPGTT(ppgtt->base.dev))
+                pml4_fini(&ppgtt->pml4);
+        else
+                free_pdp_single(&ppgtt->pdp, ppgtt->base.dev);
 }
 
 /**
@@ -1076,7 +1256,13 @@ static int gen8_ppgtt_init_common(struct i915_hw_ppgtt *ppgtt, uint64_t size)
         ppgtt->enable = gen8_ppgtt_enable;
         ppgtt->switch_mm = gen8_mm_switch;
 
-        if (!HAS_48B_PPGTT(ppgtt->base.dev)) {
+        if (HAS_48B_PPGTT(ppgtt->base.dev)) {
+                int ret = pml4_init(ppgtt);
+                if (ret) {
+                        free_pt_scratch(ppgtt->scratch_pd, ppgtt->base.dev);
+                        return ret;
+                }
+        } else {
                 int ret = __pdp_init(&ppgtt->pdp, false);
                 if (ret) {
                         free_pt_scratch(ppgtt->scratch_pd, ppgtt->base.dev);
@@ -1084,8 +1270,8 @@ static int gen8_ppgtt_init_common(struct i915_hw_ppgtt *ppgtt, uint64_t size)
                 }
 
                 ppgtt->switch_mm = gen8_mm_switch;
-        } else
-                BUG(); /* Not yet implemented */
+                trace_i915_pagedirpo_alloc(&ppgtt->base, 0, 0, GEN8_PML4E_SHIFT);
+        }
 
         return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 9d90995..ba103bd 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -103,6 +103,7 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
 #endif
 #define GEN8_PML4ES_PER_PML4            512
 #define GEN8_PML4E_SHIFT                39
+#define GEN8_PML4E_MASK                 (GEN8_PML4ES_PER_PML4 - 1)
 #define GEN8_PDPE_SHIFT                 30
 /* NB: GEN8_PDPE_MASK is untrue for 32b platforms, but it has no impact on 32b page
  * tables */
@@ -226,6 +227,7 @@ struct i915_pagedirpo {
         dma_addr_t daddr;
         unsigned long *used_pdpes;
         struct i915_pagedir **pagedirs;
+        unsigned zombie:1;
 };
 
 struct i915_pml4 {
@@ -233,6 +235,7 @@
         dma_addr_t daddr;
         DECLARE_BITMAP(used_pml4es, GEN8_PML4ES_PER_PML4);
         struct i915_pagedirpo *pdps[GEN8_PML4ES_PER_PML4];
+        /* Don't bother tracking zombie. Just always leave it around */
 };
 
 struct i915_address_space {
@@ -455,9 +458,18 @@ static inline size_t gen6_pde_count(uint32_t addr, uint32_t length)
                 temp = min(temp, length), \
                 start += temp, length -= temp)
 
+#define gen8_for_each_pml4e(pdp, pml4, start, length, temp, iter)      \
+        for (iter = gen8_pml4e_index(start), pdp = (pml4)->pdps[iter]; \
+             length > 0 && iter < GEN8_PML4ES_PER_PML4;                \
+             pdp = (pml4)->pdps[++iter],                               \
+             temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT) - start,  \
+             temp = min(temp, length),                                 \
+             start += temp, length -= temp)
+
 #define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter) \
         gen8_for_each_pdpe_e(pd, pdp, start, length, temp, iter, I915_PDPES_PER_PDP(dev))
 
+
 /* Clamp length to the next pagetab boundary */
 static inline uint64_t gen8_clamp_pt(uint64_t start, uint64_t length)
 {
@@ -495,7 +507,7 @@ static inline uint32_t gen8_pdpe_index(uint64_t address)
 
 static inline uint32_t gen8_pml4e_index(uint64_t address)
 {
-        BUG();
+        return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK;
 }
 
 static inline size_t gen8_pte_count(uint64_t addr, uint64_t length)
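
For reference, and not part of the patch above: a minimal, self-contained sketch of
how a 48b canonical GPU virtual address decomposes into the four levels walked by
this code. The PML4E/PDPE shifts match the i915_gem_gtt.h definitions in the patch;
the PDE/PTE shifts and the 512-entry fan-out per level are assumed from the usual
gen8 4KiB-page layout.

#include <stdint.h>
#include <stdio.h>

/* One 9-bit index per level plus 12 bits of page offset: 9+9+9+9+12 = 48. */
#define PML4E_SHIFT     39
#define PDPE_SHIFT      30
#define PDE_SHIFT       21      /* assumed, standard gen8 layout */
#define PTE_SHIFT       12      /* assumed, 4KiB pages */
#define ENTRY_MASK      0x1ff   /* 512 entries per level */

int main(void)
{
        /* Arbitrary example address inside the 48b range. */
        uint64_t addr = 0x0000123456789abcULL;

        printf("pml4e=%llu pdpe=%llu pde=%llu pte=%llu offset=0x%llx\n",
               (unsigned long long)((addr >> PML4E_SHIFT) & ENTRY_MASK),
               (unsigned long long)((addr >> PDPE_SHIFT) & ENTRY_MASK),
               (unsigned long long)((addr >> PDE_SHIFT) & ENTRY_MASK),
               (unsigned long long)((addr >> PTE_SHIFT) & ENTRY_MASK),
               (unsigned long long)(addr & 0xfff));
        return 0;
}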