From patchwork Thu Feb 20 06:05:43 2014
From: Ben Widawsky
To: Intel GFX
Cc: Ben Widawsky, Ben Widawsky
Date: Wed, 19 Feb 2014 22:05:43 -0800
Message-Id: <1392876349-24684-4-git-send-email-benjamin.widawsky@intel.com>
X-Mailer: git-send-email 1.9.0
In-Reply-To: <1392876349-24684-1-git-send-email-benjamin.widawsky@intel.com>
References: <1392876349-24684-1-git-send-email-benjamin.widawsky@intel.com>
In-Reply-To: <1392244132-6806-1-git-send-email-benjamin.widawsky@intel.com>
References: <1392244132-6806-1-git-send-email-benjamin.widawsky@intel.com>
Subject: [Intel-gfx] [PATCH 3/9] drm/i915/bdw: Split ppgtt initialization up

Like the cleanup in an earlier patch, the code becomes much more readable,
and easier to extend, if we extract helper functions for the various stages
of init. Note that with this patch it becomes really simple, and tempting,
to begin using the 'goto out' idiom with explicit free/fini semantics.
I've kept the error path as similar as possible to the cleanup() function
to make sure cleanup is as robust as possible.

v2: Remove comment "NB:From here on, ppgtt->base.cleanup() should function
properly"
Update commit message to reflect above

v3: Rebased on top of bugfixes found in the previous patch by Imre
Moved number of pd pages assertion to the proper place (Imre)

v4: Allocate dma address space for num_pd_pages, not num_pd_entries (Ben)
Don't use gen8_pt_dma_addr after free on error path (Imre)
With new fix from v4 of the previous patch.

Signed-off-by: Ben Widawsky
Reviewed-by: Imre Deak
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 164 +++++++++++++++++++++++++-----------
 1 file changed, 116 insertions(+), 48 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7956659..0af3587 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -366,6 +366,113 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 	gen8_ppgtt_free(ppgtt);
 }
 
+static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
+					   const int max_pdp)
+{
+	struct page *pt_pages;
+	const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
+
+	pt_pages = alloc_pages(GFP_KERNEL, get_order(num_pt_pages << PAGE_SHIFT));
+	if (!pt_pages)
+		return -ENOMEM;
+
+	ppgtt->gen8_pt_pages = pt_pages;
+	ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT);
+
+	return 0;
+}
+
+static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
+{
+	int i;
+
+	for (i = 0; i < ppgtt->num_pd_pages; i++) {
+		ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
+						     sizeof(dma_addr_t),
+						     GFP_KERNEL);
+		if (!ppgtt->gen8_pt_dma_addr[i])
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
+						const int max_pdp)
+{
+	ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
+	if (!ppgtt->pd_pages)
+		return -ENOMEM;
+
+	ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
+	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
+
+	return 0;
+}
+
+static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
+			    const int max_pdp)
+{
+	int ret;
+
+	ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp);
+	if (ret)
+		return ret;
+
+	ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
+	if (ret) {
+		__free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
+		return ret;
+	}
+
+	ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
+
+	ret = gen8_ppgtt_allocate_dma(ppgtt);
+	if (ret)
+		gen8_ppgtt_free(ppgtt);
+
+	return ret;
+}
+
+static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
+					     const int pd)
+{
+	dma_addr_t pd_addr;
+	int ret;
+
+	pd_addr = pci_map_page(ppgtt->base.dev->pdev,
+			       &ppgtt->pd_pages[pd], 0,
+			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+
+	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
+	if (ret)
+		return ret;
+
+	ppgtt->pd_dma_addr[pd] = pd_addr;
+
+	return 0;
+}
+
+static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
+					const int pd,
+					const int pt)
+{
+	dma_addr_t pt_addr;
+	struct page *p;
+	int ret;
+
+	p = &ppgtt->gen8_pt_pages[pd * GEN8_PDES_PER_PAGE + pt];
+	pt_addr = pci_map_page(ppgtt->base.dev->pdev,
+			       p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
+	if (ret)
+		return ret;
+
+	ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr;
+
+	return 0;
+}
+
 /**
  * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
  * with a net effect resembling a 2-level page table in normal x86 terms. Each
@@ -378,69 +485,30 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
  */
 static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 {
-	struct page *pt_pages;
 	const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
-	const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
-	struct pci_dev *hwdev = ppgtt->base.dev->pdev;
+	const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
 	int i, j, ret;
 
 	if (size % (1<<30))
 		DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n",
 			 size);
 
-	/* 1. Do all our allocations for page directories and page tables */
-	ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
-	if (!ppgtt->pd_pages)
-		return -ENOMEM;
-
-	pt_pages = alloc_pages(GFP_KERNEL, get_order(num_pt_pages << PAGE_SHIFT));
-	if (!pt_pages) {
-		__free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
-		return -ENOMEM;
-	}
-
-	ppgtt->gen8_pt_pages = pt_pages;
-	ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
-	ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT);
-	ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
-	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
-
-	for (i = 0; i < max_pdp; i++) {
-		ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
-						     sizeof(dma_addr_t),
-						     GFP_KERNEL);
-		if (!ppgtt->gen8_pt_dma_addr[i]) {
-			ret = -ENOMEM;
-			goto bail;
-		}
-	}
+	/* 1. Do all our allocations for page directories and page tables. */
+	ret = gen8_ppgtt_alloc(ppgtt, max_pdp);
+	if (ret)
+		return ret;
 
 	/*
-	 * 2. Create all the DMA mappings for the page directories and page
-	 * tables
+	 * 2. Create DMA mappings for the page directories and page tables.
 	 */
 	for (i = 0; i < max_pdp; i++) {
-		dma_addr_t pd_addr, pt_addr;
-
-		/* Get the page directory mappings */
-		pd_addr = pci_map_page(hwdev, &ppgtt->pd_pages[i], 0,
-				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-		ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
+		ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
 		if (ret)
 			goto bail;
 
-		ppgtt->pd_dma_addr[i] = pd_addr;
-
-		/* And the page table mappings per page directory */
 		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
-			struct page *p = &pt_pages[i * GEN8_PDES_PER_PAGE + j];
-
-			pt_addr = pci_map_page(hwdev, p, 0, PAGE_SIZE,
-					       PCI_DMA_BIDIRECTIONAL);
-			ret = pci_dma_mapping_error(hwdev, pt_addr);
+			ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
 			if (ret)
 				goto bail;
-
-			ppgtt->gen8_pt_dma_addr[i][j] = pt_addr;
 		}
 	}
 
@@ -479,7 +547,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 		 ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
 	DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
 			 ppgtt->num_pt_pages,
-			 (ppgtt->num_pt_pages - num_pt_pages) +
+			 (ppgtt->num_pt_pages - min_pt_pages) +
 			 size % (1<<30));
 
 	return 0;
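
A note for readers on the 'goto out' idiom the commit message mentions: the
pattern unwinds partially completed initialization through cascading labels,
calling each free/fini helper in reverse order of setup. Below is a minimal,
self-contained sketch of that idiom; init_a(), init_b(), fini_a() and
setup_everything() are hypothetical stubs for illustration, not functions
from this patch.

/* Hypothetical stubs standing in for real init/fini stages. */
static int init_a(void) { return 0; }
static int init_b(void) { return 0; }
static void fini_a(void) { }

static int setup_everything(void)
{
	int ret;

	ret = init_a();
	if (ret)
		goto out;

	ret = init_b();
	if (ret)
		goto out_fini_a;	/* unwind only what succeeded so far */

	return 0;

	/* Error labels run in reverse order of initialization. */
out_fini_a:
	fini_a();
out:
	return ret;
}

With explicit free/fini helpers like the ones this patch extracts, each error
label maps one-to-one onto a teardown stage, which is what makes the idiom
tempting here.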