From patchwork Fri Aug 22 03:12:00 2014
X-Patchwork-Submitter: Ben Widawsky
X-Patchwork-Id: 4760761
From: Ben Widawsky
To: Intel GFX <intel-gfx@lists.freedesktop.org>
Date: Thu, 21 Aug 2014 20:12:00 -0700
Message-Id: <1408677155-1840-38-git-send-email-benjamin.widawsky@intel.com>
X-Mailer: git-send-email 2.0.4
In-Reply-To: <1408677155-1840-1-git-send-email-benjamin.widawsky@intel.com>
References: <1408677155-1840-1-git-send-email-benjamin.widawsky@intel.com>
Cc: Ben Widawsky, Ben Widawsky
Subject: [Intel-gfx] [PATCH 37/68] drm/i915: Clean up pagetable DMA map & unmap

Mapping and unmapping pagetables are common operations across all
generations. With a pair of simple helpers we get a nice net code
reduction as well as reduced complexity.

There is some room for optimization here; for instance, the multiple
page mappings could be done in a single pci_map operation. In that
case, however, the most entries we would ever map at once is 512, so I
believe the simpler per-page code is a worthwhile trade-off.

Also, the range mapping functions are placeholders to help transition
the code. Eventually, mapping will only occur during page allocation,
which will always be a discrete operation.
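To make the error handling concrete: dma_map_pt_range() maps entries
one at a time and, on the first failure, unwinds only the entries it
has already mapped before propagating the error. Below is a minimal
userspace sketch of that idiom; map_one()/unmap_one() are hypothetical
stand-ins for pci_map_page()/pci_unmap_page(), and the injected
failure exists only to exercise the unwind path.

#include <stdio.h>
#include <stdlib.h>

#define NUM_ENTRIES 8	/* stands in for the 512 of I915_PDES_PER_PD */

/* Hypothetical per-entry map; fails on entry 5 to trigger the unwind. */
static int map_one(unsigned int idx)
{
	if (idx == 5)
		return -1;
	printf("mapped   %u\n", idx);
	return 0;
}

/* Hypothetical per-entry unmap. */
static void unmap_one(unsigned int idx)
{
	printf("unmapped %u\n", idx);
}

/* Mirrors dma_unmap_pt_range(): unmap n entries starting at first. */
static void unmap_range(unsigned int first, unsigned int n)
{
	unsigned int i;

	for (i = first; i < first + n; i++)
		unmap_one(i);
}

/* Mirrors dma_map_pt_range(): map [first, first + n); on failure,
 * unmap exactly the entries mapped so far and return the error. */
static int map_range(unsigned int first, unsigned int n)
{
	unsigned int i;

	for (i = first; i < first + n; i++) {
		int ret = map_one(i);

		if (ret) {
			unmap_range(first, i - first);
			return ret;
		}
	}

	return 0;
}

int main(void)
{
	return map_range(0, NUM_ENTRIES) ? EXIT_FAILURE : EXIT_SUCCESS;
}

At the upper bound this is at most 512 individual mappings of 4096
bytes each, i.e. 2MB of pagetable memory per page directory, which is
part of why the simpler per-page loop should stay cheap in practice.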
Signed-off-by: Ben Widawsky
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 147 +++++++++++++++++++++---------------
 1 file changed, 85 insertions(+), 62 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 8df3b15..4bd1e07 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -203,6 +203,76 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
 	return pte;
 }
 
+#define dma_unmap_pt_single(pt, dev) do { \
+	pci_unmap_page((dev)->pdev, (pt)->daddr, 4096, PCI_DMA_BIDIRECTIONAL); \
+} while (0)
+
+
+static void dma_unmap_pt_range(struct i915_pagedir *pd,
+			       unsigned pde, size_t n,
+			       struct drm_device *dev)
+{
+	if (WARN_ON(pde + n > I915_PDES_PER_PD))
+		n = I915_PDES_PER_PD - pde;
+
+	n += pde;
+
+	for (; pde < n; pde++)
+		dma_unmap_pt_single(pd->page_tables[pde], dev);
+}
+
+/**
+ * dma_map_pt_single() - Create a dma mapping for a page table
+ * @pt: Page table to get a DMA map for
+ * @dev: drm device
+ *
+ * Page table allocations are unified across all gens. They always require a
+ * single 4k allocation, as well as a DMA mapping.
+ *
+ * Return: 0 on success.
+ */
+static int dma_map_pt_single(struct i915_pagetab *pt, struct drm_device *dev)
+{
+	struct page *page;
+	dma_addr_t pt_addr;
+	int ret;
+
+	page = pt->page;
+	pt_addr = pci_map_page(dev->pdev, page, 0, 4096,
+			       PCI_DMA_BIDIRECTIONAL);
+
+	ret = pci_dma_mapping_error(dev->pdev, pt_addr);
+	if (ret)
+		return ret;
+
+	pt->daddr = pt_addr;
+
+	return 0;
+}
+
+static int dma_map_pt_range(struct i915_pagedir *pd,
+			    unsigned pde, size_t n,
+			    struct drm_device *dev)
+{
+	const int first = pde;
+
+	if (WARN_ON(pde + n > I915_PDES_PER_PD))
+		n = I915_PDES_PER_PD - pde;
+
+	n += pde;
+
+	for (; pde < n; pde++) {
+		int ret;
+		ret = dma_map_pt_single(pd->page_tables[pde], dev);
+		if (ret) {
+			dma_unmap_pt_range(pd, first, pde - first, dev);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
 static void free_pt_single(struct i915_pagetab *pt)
 {
 	if (WARN_ON(!pt->page))
@@ -211,7 +281,7 @@ static void free_pt_single(struct i915_pagetab *pt)
 	kfree(pt);
 }
 
-static struct i915_pagetab *alloc_pt_single(void)
+static struct i915_pagetab *alloc_pt_single(struct drm_device *dev)
 {
 	struct i915_pagetab *pt;
 
@@ -234,6 +304,7 @@
  * available to point to the allocated page tables.
  * @pde: First page directory entry for which we are allocating.
  * @count: Number of pages to allocate.
+ * @dev: DRM device used for DMA mapping.
  *
  * Allocates multiple page table pages and sets the appropriate entries in the
  * page table structure within the page directory. Function cleans up after
@@ -241,7 +312,8 @@
  *
  * Return: 0 if allocation succeeded.
  */
-static int alloc_pt_range(struct i915_pagedir *pd, uint16_t pde, size_t count)
+static int alloc_pt_range(struct i915_pagedir *pd, uint16_t pde, size_t count,
+			  struct drm_device *dev)
 {
 	int i, ret;
 
@@ -251,7 +323,7 @@ static int alloc_pt_range(struct i915_pagedir *pd, uint16_t pde, size_t count)
 	BUG_ON(pde + count > I915_PDES_PER_PD);
 
 	for (i = pde; i < pde + count; i++) {
-		struct i915_pagetab *pt = alloc_pt_single();
+		struct i915_pagetab *pt = alloc_pt_single(dev);
 		if (IS_ERR(pt)) {
 			ret = PTR_ERR(pt);
 			goto err_out;
@@ -507,7 +579,7 @@ static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
 	for (i = 0; i < ppgtt->num_pd_pages; i++) {
 		ret = alloc_pt_range(ppgtt->pdp.pagedir[i],
-				     0, I915_PDES_PER_PD);
+				     0, I915_PDES_PER_PD, ppgtt->base.dev);
 		if (ret)
 			goto unwind_out;
 	}
 
@@ -586,27 +658,6 @@ static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
 	return 0;
 }
 
-static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
-					const int pdpe,
-					const int pde)
-{
-	dma_addr_t pt_addr;
-	struct i915_pagedir *pd = ppgtt->pdp.pagedir[pdpe];
-	struct i915_pagetab *pt = pd->page_tables[pde];
-	struct page *p = pt->page;
-	int ret;
-
-	pt_addr = pci_map_page(ppgtt->base.dev->pdev,
-			       p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
-	if (ret)
-		return ret;
-
-	pt->daddr = pt_addr;
-
-	return 0;
-}
-
 /**
  * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
  * with a net effect resembling a 2-level page table in normal x86 terms. Each
@@ -635,12 +686,15 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 	 * 2. Create DMA mappings for the page directories and page tables.
 	 */
 	for (i = 0; i < max_pdp; i++) {
+		struct i915_pagedir *pd;
 		ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
 		if (ret)
 			goto bail;
 
+		pd = ppgtt->pdp.pagedir[i];
+
 		for (j = 0; j < I915_PDES_PER_PD; j++) {
-			ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
+			ret = dma_map_pt_single(pd->page_tables[j], ppgtt->base.dev);
 			if (ret)
 				goto bail;
 		}
@@ -1058,16 +1112,6 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 	kunmap_atomic(pt_vaddr);
 }
 
-static void gen6_ppgtt_dma_unmap_pages(struct i915_hw_ppgtt *ppgtt)
-{
-	int i;
-
-	for (i = 0; i < ppgtt->num_pd_entries; i++)
-		pci_unmap_page(ppgtt->base.dev->pdev,
-			       ppgtt->pd.page_tables[i]->daddr,
-			       4096, PCI_DMA_BIDIRECTIONAL);
-}
-
 static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 {
 	int i;
@@ -1087,7 +1131,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
 	drm_mm_takedown(&vm->mm);
 	drm_mm_remove_node(&ppgtt->node);
 
-	gen6_ppgtt_dma_unmap_pages(ppgtt);
+	dma_unmap_pt_range(&ppgtt->pd, 0, ppgtt->num_pd_entries, vm->dev);
 	gen6_ppgtt_free(ppgtt);
 }
 
@@ -1140,7 +1184,8 @@ static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
 	if (ret)
 		return ret;
 
-	ret = alloc_pt_range(&ppgtt->pd, 0, ppgtt->num_pd_entries);
+	ret = alloc_pt_range(&ppgtt->pd, 0, ppgtt->num_pd_entries,
+			     ppgtt->base.dev);
 	if (ret) {
 		drm_mm_remove_node(&ppgtt->node);
 		return ret;
@@ -1149,29 +1194,6 @@ static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
 	return 0;
 }
 
-static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
-{
-	struct drm_device *dev = ppgtt->base.dev;
-	int i;
-
-	for (i = 0; i < ppgtt->num_pd_entries; i++) {
-		struct page *page;
-		dma_addr_t pt_addr;
-
-		page = ppgtt->pd.page_tables[i]->page;
-		pt_addr = pci_map_page(dev->pdev, page, 0, 4096,
-				       PCI_DMA_BIDIRECTIONAL);
-
-		if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
-			gen6_ppgtt_dma_unmap_pages(ppgtt);
-			return -EIO;
-		}
-
-		ppgtt->pd.page_tables[i]->daddr = pt_addr;
-	}
-
-	return 0;
-}
 
 static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 {
@@ -1196,7 +1218,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	if (ret)
 		return ret;
 
-	ret = gen6_ppgtt_setup_page_tables(ppgtt);
+	ret = dma_map_pt_range(&ppgtt->pd, 0, ppgtt->num_pd_entries,
+			       ppgtt->base.dev);
 	if (ret) {
 		gen6_ppgtt_free(ppgtt);
 		return ret;
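Aside, on the dma_unmap_pt_single() wrapper above: the do { ... }
while (0) form is the usual way to make a multi-statement macro behave
as a single statement, and it only works if the macro definition
itself carries no trailing semicolon; otherwise an unbraced if/else
around a call site stops compiling. A standalone toy example (the
release() macro and the surrounding program are made up purely to show
the expansion behaviour):

#include <stdio.h>

/* Statement-like macro: the do { ... } while (0) wrapper (with no
 * trailing ';') expands to exactly one statement, so the caller's own
 * ';' terminates it and an unbraced if/else still parses correctly. */
#define release(name) do { \
	printf("releasing %s\n", (name)); \
} while (0)

int main(void)
{
	int busy = 0;

	if (!busy)
		release("page table");	/* one statement; the else still binds */
	else
		printf("still busy\n");

	return 0;
}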