From patchwork Sat May 10 03:59:31 2014
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Ben Widawsky
X-Patchwork-Id: 4146341
From: Ben Widawsky
To: Intel GFX
Cc: Ben Widawsky, Ben Widawsky
Date: Fri, 9 May 2014 20:59:31 -0700
Message-Id: <1399694391-3935-37-git-send-email-benjamin.widawsky@intel.com>
X-Mailer: git-send-email 1.9.2
In-Reply-To: <1399694391-3935-1-git-send-email-benjamin.widawsky@intel.com>
References: <1399694391-3935-1-git-send-email-benjamin.widawsky@intel.com>
Subject: [Intel-gfx] [PATCH 36/56] drm/i915/bdw: Split out mappings

When we do dynamic page table allocations for gen8, we'll need to have
more control over how and when we map page tables, similar to gen6.
This patch adds that functionality and calls it at init time, so there
should be no functional change.

The PDPEs are still a special case for now; we'll need a similar
function for them in the future as well.
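For readers following along: the diff below relies on the
gen8_for_each_pde/gen8_for_each_pdpe iterators introduced earlier in the
series. Here is a rough, standalone C sketch of the walk such an iterator
performs, assuming the gen8 legacy layout (512 PDEs per page directory, 2MB
of VA per PDE); the names and expansion are illustrative, not the series'
actual macros:

	#include <stdint.h>

	#define GEN8_PDE_SHIFT   21        /* one PDE spans 2MB of VA */
	#define I915_PDES_PER_PD 512       /* 512 PDEs per page directory */

	/* Illustrative index helper: which PDE slot covers this address. */
	static inline uint32_t pde_index_sketch(uint64_t addr)
	{
		return (addr >> GEN8_PDE_SHIFT) & (I915_PDES_PER_PD - 1);
	}

	/* Hypothetical expansion of "for each PDE touched by
	 * [start, start+length)": clamp each step to the remainder of the
	 * current 2MB span, invoke the body, then advance. The new
	 * gen8_map_pagetable_range() below walks PDEs in this fashion. */
	static void for_each_pde_sketch(uint64_t start, uint64_t length,
					void (*body)(uint32_t pde,
						     uint64_t start,
						     uint64_t len))
	{
		while (length) {
			uint64_t span = 1ULL << GEN8_PDE_SHIFT;
			uint64_t len = span - (start & (span - 1));

			if (len > length)
				len = length;

			body(pde_index_sketch(start), start, len);

			start += len;
			length -= len;
		}
	}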
Signed-off-by: Ben Widawsky
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 94 ++++++++++++++++++++----------------
 1 file changed, 52 insertions(+), 42 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index eded6a1..e2bc274 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -533,6 +533,36 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 	}
 }
 
+static void __gen8_do_map_pt(gen8_ppgtt_pde_t *pde,
+			     struct i915_pagetab *pt,
+			     struct drm_device *dev)
+{
+	gen8_ppgtt_pde_t entry =
+		gen8_pde_encode(dev, pt->daddr, I915_CACHE_LLC);
+	*pde = entry;
+}
+
+/* It's likely we'll map more than one pagetable at a time. This function will
+ * save us unnecessary kmap calls, but does no more functionally than multiple
+ * calls to map_pt. */
+static void gen8_map_pagetable_range(struct i915_pagedir *pd,
+				     uint64_t start,
+				     uint64_t length,
+				     struct drm_device *dev)
+{
+	gen8_ppgtt_pde_t *pagedir = kmap_atomic(pd->page);
+	struct i915_pagetab *pt;
+	uint64_t temp, pde;
+
+	gen8_for_each_pde(pt, pd, start, length, temp, pde)
+		__gen8_do_map_pt(pagedir + pde, pt, dev);
+
+	if (!HAS_LLC(dev))
+		drm_clflush_virt_range(pagedir, PAGE_SIZE);
+
+	kunmap_atomic(pagedir);
+}
+
 static void gen8_teardown_va_range(struct i915_address_space *vm,
 				   uint64_t start, uint64_t length)
 {
@@ -627,11 +657,14 @@ unwind_out:
 	return -ENOMEM;
 }
 
-static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
-			    uint64_t start,
-			    uint64_t length)
+static int gen8_alloc_va_range(struct i915_address_space *vm,
+			       uint64_t start,
+			       uint64_t length)
 {
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
 	struct i915_pagedir *pd;
+	const uint64_t orig_start = start;
 	uint64_t temp;
 	uint32_t pdpe;
 	int ret;
@@ -650,9 +683,8 @@ static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
 
 	return 0;
 
-	/* TODO: Check this for all cases */
 err_out:
-	gen8_ppgtt_free(ppgtt);
+	gen8_teardown_va_range(vm, orig_start, start);
 	return ret;
 }
 
@@ -662,60 +694,38 @@ err_out:
  * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
  * space.
  *
- * FIXME: split allocation into smaller pieces. For now we only ever do this
- * once, but with full PPGTT, the multiple contiguous allocations will be bad.
- * TODO: Do something with the size parameter
  */
 static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 {
-	const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
-	int i, j, ret;
-
-	if (size % (1<<30))
-		DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
+	struct i915_pagedir *pd;
+	uint64_t temp, start = 0;
+	const uint64_t orig_length = size;
+	uint32_t pdpe;
+	int ret;
 
 	ppgtt->base.start = 0;
 	ppgtt->base.total = size;
+	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
+	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
+	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
+	ppgtt->enable = gen8_ppgtt_enable;
+	ppgtt->switch_mm = gen8_mm_switch;
 
 	ppgtt->scratch_pd = alloc_pt_scratch(ppgtt->base.dev);
 	if (IS_ERR(ppgtt->scratch_pd))
 		return PTR_ERR(ppgtt->scratch_pd);
 
-	/* 1. Do all our allocations for page directories and page tables. */
-	ret = gen8_ppgtt_alloc(ppgtt, ppgtt->base.start, ppgtt->base.total);
+	ret = gen8_alloc_va_range(&ppgtt->base, start, size);
 	if (ret) {
 		free_pt_scratch(ppgtt->scratch_pd, ppgtt->base.dev);
 		return ret;
 	}
 
-	/*
-	 * 2. Map all the page directory entires to point to the page tables
-	 * we've allocated.
-	 *
-	 * For now, the PPGTT helper functions all require that the PDEs are
-	 * plugged in correctly. So we do that now/here. For aliasing PPGTT, we
-	 * will never need to touch the PDEs again.
-	 */
-	for (i = 0; i < max_pdp; i++) {
-		struct i915_pagedir *pd = ppgtt->pdp.pagedirs[i];
-		gen8_ppgtt_pde_t *pd_vaddr;
-		pd_vaddr = kmap_atomic(ppgtt->pdp.pagedirs[i]->page);
-		for (j = 0; j < I915_PDES_PER_PD; j++) {
-			struct i915_pagetab *pt = pd->page_tables[j];
-			dma_addr_t addr = pt->daddr;
-			pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
-						      I915_CACHE_LLC);
-		}
-		if (!HAS_LLC(ppgtt->base.dev))
-			drm_clflush_virt_range(pd_vaddr, PAGE_SIZE);
-		kunmap_atomic(pd_vaddr);
-	}
+	start = 0;
+	size = orig_length;
 
-	ppgtt->enable = gen8_ppgtt_enable;
-	ppgtt->switch_mm = gen8_mm_switch;
-	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
-	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
-	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
+	gen8_for_each_pdpe(pd, &ppgtt->pdp, start, size, temp, pdpe)
+		gen8_map_pagetable_range(pd, start, size, ppgtt->base.dev);
 
 	return 0;
 }
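As a worked example of the "4 * 512 * 512 * 4096 = 4GB" comment kept above: a
legacy 32b gen8 PPGTT address splits into a 2-bit PDPE index, a 9-bit PDE
index, and a 9-bit PTE index. A minimal sketch, with shift constants assumed
from that layout rather than quoted from the series' headers:

	#include <stdint.h>
	#include <stdio.h>

	#define PDPE_SHIFT 30   /* each PDPE covers 1GB */
	#define PDE_SHIFT  21   /* each PDE covers 2MB */
	#define PTE_SHIFT  12   /* each PTE covers 4KB */

	int main(void)
	{
		uint64_t addr = 0x12345678;                  /* arbitrary example VA */
		uint32_t pdpe = (addr >> PDPE_SHIFT) & 0x3;  /* 4 PDPEs */
		uint32_t pde  = (addr >> PDE_SHIFT) & 0x1ff; /* 512 PDEs per PD */
		uint32_t pte  = (addr >> PTE_SHIFT) & 0x1ff; /* 512 PTEs per PT */

		/* For 0x12345678 this prints pdpe=0 pde=145 pte=325. */
		printf("pdpe=%u pde=%u pte=%u\n", pdpe, pde, pte);
		return 0;
	}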