From patchwork Tue Jun 11 17:27:27 2019
X-Patchwork-Submitter: Mika Kuoppala <mika.kuoppala@linux.intel.com>
X-Patchwork-Id: 10987935
From: Mika Kuoppala <mika.kuoppala@linux.intel.com>
To: intel-gfx@lists.freedesktop.org
Date: Tue, 11 Jun 2019 20:27:27 +0300
Message-Id: <20190611172731.19174-5-mika.kuoppala@linux.intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20190611172731.19174-1-mika.kuoppala@linux.intel.com>
References: <20190611172731.19174-1-mika.kuoppala@linux.intel.com>
Subject: [Intel-gfx] [PATCH 5/9] drm/i915/gtt: Generalize alloc_pd

Allocate all page directory variants with alloc_pd. As the lvl3 and
lvl4 variants differ in how they are manipulated, we need to check for
the existence of a backing physical page before accessing it.
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 100 ++++++++++++----------
 1 file changed, 42 insertions(+), 58 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 73aaf9481dab..bbcf3be28e19 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -717,10 +717,17 @@ static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
 	return pd;
 }
 
+static inline bool pd_has_phys_page(const struct i915_page_directory * const pd)
+{
+	return pd->base.page;
+}
+
 static void free_pd(struct i915_address_space *vm,
 		    struct i915_page_directory *pd)
 {
-	cleanup_px(vm, pd);
+	if (likely(pd_has_phys_page(pd)))
+		cleanup_px(vm, pd);
+
 	kfree(pd);
 }
 
@@ -732,37 +739,12 @@ static void init_pd_with_page(struct i915_address_space *vm,
 	memset_p(pd->entry, pt, 512);
 }
 
-static struct i915_page_directory *alloc_pdp(struct i915_address_space *vm)
-{
-	struct i915_page_directory *pdp;
-
-	pdp = __alloc_pd(i915_pdpes_per_pdp(vm));
-	if (!pdp)
-		return ERR_PTR(-ENOMEM);
-
-	if (i915_vm_is_4lvl(vm)) {
-		if (unlikely(setup_px(vm, pdp))) {
-			kfree(pdp);
-			return ERR_PTR(-ENOMEM);
-		}
-	}
-
-	return pdp;
-}
-
-static void free_pdp(struct i915_address_space *vm,
-		     struct i915_page_directory *pdp)
-{
-	if (i915_vm_is_4lvl(vm))
-		cleanup_px(vm, pdp);
-
-	kfree(pdp);
-}
-
 static void init_pd(struct i915_address_space *vm,
 		    struct i915_page_directory * const pd,
 		    struct i915_page_directory * const to)
 {
+	GEM_DEBUG_BUG_ON(!pd_has_phys_page(pd));
+
 	fill_px(vm, pd, gen8_pdpe_encode(px_dma(to), I915_CACHE_LLC));
 	memset_p(pd->entry, to, 512);
 }
@@ -840,14 +822,13 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
 	return !atomic_read(&pd->used);
 }
 
-static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
-				struct i915_page_directory *pdp,
+static void gen8_ppgtt_set_pdpe(struct i915_page_directory *pdp,
 				struct i915_page_directory *pd,
 				unsigned int pdpe)
 {
 	gen8_ppgtt_pdpe_t *vaddr;
 
-	if (!i915_vm_is_4lvl(vm))
+	if (!pd_has_phys_page(pdp))
 		return;
 
 	vaddr = kmap_atomic_px(pdp);
@@ -875,7 +856,7 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
 
 	spin_lock(&pdp->lock);
 	if (!atomic_read(&pd->used)) {
-		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
+		gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
 		pdp->entry[pdpe] = vm->scratch_pd;
 
 		GEM_BUG_ON(!atomic_read(&pdp->used));
@@ -936,7 +917,7 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
 		}
 		spin_unlock(&pml4->lock);
 
 		if (free)
-			free_pdp(vm, pdp);
+			free_pd(vm, pdp);
 	}
 }
 
@@ -1240,7 +1221,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 	}
 
 	if (i915_vm_is_4lvl(vm)) {
-		vm->scratch_pdp = alloc_pdp(vm);
+		vm->scratch_pdp = alloc_pd(vm);
 		if (IS_ERR(vm->scratch_pdp)) {
 			ret = PTR_ERR(vm->scratch_pdp);
 			goto free_pd;
@@ -1302,7 +1283,7 @@ static void gen8_free_scratch(struct i915_address_space *vm)
 		return;
 
 	if (i915_vm_is_4lvl(vm))
-		free_pdp(vm, vm->scratch_pdp);
+		free_pd(vm, vm->scratch_pdp);
 	free_pd(vm, vm->scratch_pd);
 	free_pt(vm, vm->scratch_pt);
 	cleanup_scratch_page(vm);
@@ -1322,7 +1303,7 @@ static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
 		free_pd(vm, pdp->entry[i]);
 	}
 
-	free_pdp(vm, pdp);
+	free_pd(vm, pdp);
 }
 
 static void gen8_ppgtt_cleanup_4lvl(struct i915_ppgtt *ppgtt)
@@ -1429,7 +1410,7 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
 
 		old = cmpxchg(&pdp->entry[pdpe], vm->scratch_pd, pd);
 		if (old == vm->scratch_pd) {
-			gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
+			gen8_ppgtt_set_pdpe(pdp, pd, pdpe);
 			atomic_inc(&pdp->used);
 		} else {
 			free_pd(vm, pd);
@@ -1455,7 +1436,7 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
 unwind_pd:
 	spin_lock(&pdp->lock);
 	if (atomic_dec_and_test(&pd->used)) {
-		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
+		gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
 		GEM_BUG_ON(!atomic_read(&pdp->used));
 		atomic_dec(&pdp->used);
 		free_pd(vm, pd);
@@ -1485,13 +1466,12 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
 
 	spin_lock(&pml4->lock);
 	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
-
 		if (pdp == vm->scratch_pdp) {
 			struct i915_page_directory *old;
 
 			spin_unlock(&pml4->lock);
 
-			pdp = alloc_pdp(vm);
+			pdp = alloc_pd(vm);
 			if (IS_ERR(pdp))
 				goto unwind;
 
@@ -1501,7 +1481,7 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
 
 			if (old == vm->scratch_pdp) {
 				gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
 			} else {
-				free_pdp(vm, pdp);
+				free_pd(vm, pdp);
 				pdp = old;
 			}
@@ -1525,7 +1505,7 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
 	spin_lock(&pml4->lock);
 	if (atomic_dec_and_test(&pdp->used)) {
 		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
-		free_pdp(vm, pdp);
+		free_pd(vm, pdp);
 	}
 	spin_unlock(&pml4->lock);
 unwind:
@@ -1548,7 +1528,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
 			goto unwind;
 
 		init_pd_with_page(vm, pd, vm->scratch_pt);
-		gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
+		gen8_ppgtt_set_pdpe(pdp, pd, pdpe);
 		atomic_inc(&pdp->used);
 	}
 
@@ -1560,7 +1540,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
 unwind:
 	start -= from;
 	gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
-		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
+		gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
 		free_pd(vm, pd);
 	}
 	atomic_set(&pdp->used, 0);
@@ -1592,7 +1572,7 @@ static void ppgtt_init(struct drm_i915_private *i915,
 static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 {
 	struct i915_ppgtt *ppgtt;
-	int err;
+	int ret;
 
 	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
 	if (!ppgtt)
@@ -1614,17 +1594,21 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 	if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915))
 		ppgtt->vm.pt_kmap_wc = true;
 
-	err = gen8_init_scratch(&ppgtt->vm);
-	if (err)
+	ret = gen8_init_scratch(&ppgtt->vm);
+	if (ret)
 		goto err_free;
 
-	ppgtt->pd = alloc_pdp(&ppgtt->vm);
-	if (IS_ERR(ppgtt->pd)) {
-		err = PTR_ERR(ppgtt->pd);
-		goto err_scratch;
+	ppgtt->pd = __alloc_pd(i915_pdpes_per_pdp(&ppgtt->vm));
+	if (!ppgtt->pd) {
+		ret = -ENOMEM;
+		goto err_free_scratch;
 	}
 
 	if (i915_vm_is_4lvl(&ppgtt->vm)) {
+		ret = setup_px(&ppgtt->vm, ppgtt->pd);
+		if (ret)
+			goto err_free_pdp;
+
 		init_pd(&ppgtt->vm, ppgtt->pd, ppgtt->vm.scratch_pdp);
 
 		ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
@@ -1639,9 +1623,9 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 					   GEN8_3LVL_PDPES);
 
 		if (intel_vgpu_active(i915)) {
-			err = gen8_preallocate_top_level_pdp(ppgtt);
-			if (err)
-				goto err_pdp;
+			ret = gen8_preallocate_top_level_pdp(ppgtt);
+			if (ret)
+				goto err_free_pdp;
 		}
 
 		ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
@@ -1656,13 +1640,13 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 
 	return ppgtt;
 
-err_pdp:
-	free_pdp(&ppgtt->vm, ppgtt->pd);
-err_scratch:
+err_free_pdp:
+	free_pd(&ppgtt->vm, ppgtt->pd);
+err_free_scratch:
 	gen8_free_scratch(&ppgtt->vm);
 err_free:
 	kfree(ppgtt);
-	return ERR_PTR(err);
+	return ERR_PTR(ret);
 }
 
 /* Write pde (index) from the page directory @pd to the page table @pt */
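
[Editor's note] For readers skimming the series, a minimal userspace sketch of the pattern this patch converges on may help: one allocator/free pair serves every page directory level, and the free path keys off the presence of a backing physical page instead of re-deriving the level from the address space. The names alloc_pd/free_pd/pd_has_phys_page mirror the patch, but the struct layout, the alloc_pd(bool) signature, and the calloc'd 4 KiB stand-in page are illustrative assumptions, not the driver code; in i915 the backing page is set up by setup_px() and torn down by cleanup_px().

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct page_directory {
	void *base_page;	/* NULL when no backing phys page was set up */
	void *entry[512];	/* child pointers, unused in this sketch */
};

static bool pd_has_phys_page(const struct page_directory *pd)
{
	return pd->base_page != NULL;
}

/* One allocator for all levels; the caller says whether this level
 * needs a backing page (stand-in for setup_px() in the driver). */
static struct page_directory *alloc_pd(bool needs_phys_page)
{
	struct page_directory *pd = calloc(1, sizeof(*pd));

	if (!pd)
		return NULL;

	if (needs_phys_page) {
		pd->base_page = calloc(1, 4096);
		if (!pd->base_page) {
			free(pd);
			return NULL;
		}
	}
	return pd;
}

/* Mirrors the patched free_pd(): tear down the phys page only if it
 * exists, then free the directory itself. */
static void free_pd(struct page_directory *pd)
{
	if (pd_has_phys_page(pd))
		free(pd->base_page);	/* stand-in for cleanup_px() */
	free(pd);
}

int main(void)
{
	/* A 4lvl pdp carries a phys page; a 3lvl top-level pdp does not,
	 * yet both go through the same alloc/free pair. */
	struct page_directory *pdp_4lvl = alloc_pd(true);
	struct page_directory *pdp_3lvl = alloc_pd(false);

	if (!pdp_4lvl || !pdp_3lvl)
		return 1;

	printf("4lvl pdp has phys page: %d\n", pd_has_phys_page(pdp_4lvl));
	printf("3lvl pdp has phys page: %d\n", pd_has_phys_page(pdp_3lvl));

	free_pd(pdp_4lvl);
	free_pd(pdp_3lvl);
	return 0;
}

The design point the sketch illustrates is the one named in the commit message: once the lvl3 and lvl4 variants share one type and one free path, the "is there a backing page?" question must be answered per object (pd_has_phys_page) rather than per address space (i915_vm_is_4lvl), which is what lets alloc_pdp/free_pdp be deleted.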