Message ID | 1434044935-22615-1-git-send-email-mika.kuoppala@intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
On 6/11/2015 6:48 PM, Mika Kuoppala wrote: > All the paging structures are now similar and mapped for > dma. The unmapping is taken care of by common accessors, so > don't overload the reader with such details. > > v2: Be consistent with goto labels (Michel) Reviewed-by: Michel Thierry <michel.thierry@intel.com> > > Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com> > --- > drivers/gpu/drm/i915/i915_gem_gtt.c | 40 ++++++++++++++++++------------------- > 1 file changed, 19 insertions(+), 21 deletions(-) > > diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c > index 65ee92f..048c701 100644 > --- a/drivers/gpu/drm/i915/i915_gem_gtt.c > +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c > @@ -330,8 +330,7 @@ static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p) > memset(p, 0, sizeof(*p)); > } > > -static void unmap_and_free_pt(struct i915_page_table *pt, > - struct drm_device *dev) > +static void free_pt(struct drm_device *dev, struct i915_page_table *pt) > { > cleanup_page_dma(dev, &pt->base); > kfree(pt->used_ptes); > @@ -387,8 +386,7 @@ fail_bitmap: > return ERR_PTR(ret); > } > > -static void unmap_and_free_pd(struct i915_page_directory *pd, > - struct drm_device *dev) > +static void free_pd(struct drm_device *dev, struct i915_page_directory *pd) > { > if (pd->base.page) { > cleanup_page_dma(dev, &pd->base); > @@ -409,17 +407,17 @@ static struct i915_page_directory *alloc_pd(struct drm_device *dev) > pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES), > sizeof(*pd->used_pdes), GFP_KERNEL); > if (!pd->used_pdes) > - goto free_pd; > + goto fail_bitmap; > > ret = setup_page_dma(dev, &pd->base); > if (ret) > - goto free_bitmap; > + goto fail_page_m; > > return pd; > > -free_bitmap: > +fail_page_m: > kfree(pd->used_pdes); > -free_pd: > +fail_bitmap: > kfree(pd); > > return ERR_PTR(ret); > @@ -614,7 +612,7 @@ static void gen8_free_page_tables(struct i915_page_directory *pd, struct drm_dev > if 
(WARN_ON(!pd->page_table[i])) > continue; > > - unmap_and_free_pt(pd->page_table[i], dev); > + free_pt(dev, pd->page_table[i]); > pd->page_table[i] = NULL; > } > } > @@ -630,11 +628,11 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm) > continue; > > gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev); > - unmap_and_free_pd(ppgtt->pdp.page_directory[i], ppgtt->base.dev); > + free_pd(ppgtt->base.dev, ppgtt->pdp.page_directory[i]); > } > > - unmap_and_free_pd(ppgtt->scratch_pd, ppgtt->base.dev); > - unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev); > + free_pd(ppgtt->base.dev, ppgtt->scratch_pd); > + free_pt(ppgtt->base.dev, ppgtt->scratch_pt); > } > > /** > @@ -687,7 +685,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt, > > unwind_out: > for_each_set_bit(pde, new_pts, I915_PDES) > - unmap_and_free_pt(pd->page_table[pde], dev); > + free_pt(dev, pd->page_table[pde]); > > return -ENOMEM; > } > @@ -745,7 +743,7 @@ static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt, > > unwind_out: > for_each_set_bit(pdpe, new_pds, GEN8_LEGACY_PDPES) > - unmap_and_free_pd(pdp->page_directory[pdpe], dev); > + free_pd(dev, pdp->page_directory[pdpe]); > > return -ENOMEM; > } > @@ -903,11 +901,11 @@ static int gen8_alloc_va_range(struct i915_address_space *vm, > err_out: > while (pdpe--) { > for_each_set_bit(temp, new_page_tables[pdpe], I915_PDES) > - unmap_and_free_pt(ppgtt->pdp.page_directory[pdpe]->page_table[temp], vm->dev); > + free_pt(vm->dev, ppgtt->pdp.page_directory[pdpe]->page_table[temp]); > } > > for_each_set_bit(pdpe, new_page_dirs, GEN8_LEGACY_PDPES) > - unmap_and_free_pd(ppgtt->pdp.page_directory[pdpe], vm->dev); > + free_pd(vm->dev, ppgtt->pdp.page_directory[pdpe]); > > free_gen8_temp_bitmaps(new_page_dirs, new_page_tables); > mark_tlbs_dirty(ppgtt); > @@ -1353,7 +1351,7 @@ unwind_out: > struct i915_page_table *pt = ppgtt->pd.page_table[pde]; > > ppgtt->pd.page_table[pde] = ppgtt->scratch_pt; > - 
unmap_and_free_pt(pt, vm->dev); > + free_pt(vm->dev, pt); > } > > mark_tlbs_dirty(ppgtt); > @@ -1372,11 +1370,11 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm) > > gen6_for_all_pdes(pt, ppgtt, pde) { > if (pt != ppgtt->scratch_pt) > - unmap_and_free_pt(pt, ppgtt->base.dev); > + free_pt(ppgtt->base.dev, pt); > } > > - unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev); > - unmap_and_free_pd(&ppgtt->pd, ppgtt->base.dev); > + free_pt(ppgtt->base.dev, ppgtt->scratch_pt); > + free_pd(ppgtt->base.dev, &ppgtt->pd); > } > > static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt) > @@ -1426,7 +1424,7 @@ alloc: > return 0; > > err_out: > - unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev); > + free_pt(ppgtt->base.dev, ppgtt->scratch_pt); > return ret; > } > >
On Mon, Jun 22, 2015 at 03:09:27PM +0100, Michel Thierry wrote: > On 6/11/2015 6:48 PM, Mika Kuoppala wrote: > >All the paging structures are now similar and mapped for > >dma. The unmapping is taken care of by common accessors, so > >don't overload the reader with such details. > > > >v2: Be consistent with goto labels (Michel) > > Reviewed-by: Michel Thierry <michel.thierry@intel.com> Just to make sure we don't have a merge failure going on here: some of the earlier patches don't have an r-b yet, which means I can't pick up the later ones either. -Daniel
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 65ee92f..048c701 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -330,8 +330,7 @@ static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p) memset(p, 0, sizeof(*p)); } -static void unmap_and_free_pt(struct i915_page_table *pt, - struct drm_device *dev) +static void free_pt(struct drm_device *dev, struct i915_page_table *pt) { cleanup_page_dma(dev, &pt->base); kfree(pt->used_ptes); @@ -387,8 +386,7 @@ fail_bitmap: return ERR_PTR(ret); } -static void unmap_and_free_pd(struct i915_page_directory *pd, - struct drm_device *dev) +static void free_pd(struct drm_device *dev, struct i915_page_directory *pd) { if (pd->base.page) { cleanup_page_dma(dev, &pd->base); @@ -409,17 +407,17 @@ static struct i915_page_directory *alloc_pd(struct drm_device *dev) pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES), sizeof(*pd->used_pdes), GFP_KERNEL); if (!pd->used_pdes) - goto free_pd; + goto fail_bitmap; ret = setup_page_dma(dev, &pd->base); if (ret) - goto free_bitmap; + goto fail_page_m; return pd; -free_bitmap: +fail_page_m: kfree(pd->used_pdes); -free_pd: +fail_bitmap: kfree(pd); return ERR_PTR(ret); @@ -614,7 +612,7 @@ static void gen8_free_page_tables(struct i915_page_directory *pd, struct drm_dev if (WARN_ON(!pd->page_table[i])) continue; - unmap_and_free_pt(pd->page_table[i], dev); + free_pt(dev, pd->page_table[i]); pd->page_table[i] = NULL; } } @@ -630,11 +628,11 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm) continue; gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev); - unmap_and_free_pd(ppgtt->pdp.page_directory[i], ppgtt->base.dev); + free_pd(ppgtt->base.dev, ppgtt->pdp.page_directory[i]); } - unmap_and_free_pd(ppgtt->scratch_pd, ppgtt->base.dev); - unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev); + free_pd(ppgtt->base.dev, ppgtt->scratch_pd); + free_pt(ppgtt->base.dev, 
ppgtt->scratch_pt); } /** @@ -687,7 +685,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt, unwind_out: for_each_set_bit(pde, new_pts, I915_PDES) - unmap_and_free_pt(pd->page_table[pde], dev); + free_pt(dev, pd->page_table[pde]); return -ENOMEM; } @@ -745,7 +743,7 @@ static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt, unwind_out: for_each_set_bit(pdpe, new_pds, GEN8_LEGACY_PDPES) - unmap_and_free_pd(pdp->page_directory[pdpe], dev); + free_pd(dev, pdp->page_directory[pdpe]); return -ENOMEM; } @@ -903,11 +901,11 @@ static int gen8_alloc_va_range(struct i915_address_space *vm, err_out: while (pdpe--) { for_each_set_bit(temp, new_page_tables[pdpe], I915_PDES) - unmap_and_free_pt(ppgtt->pdp.page_directory[pdpe]->page_table[temp], vm->dev); + free_pt(vm->dev, ppgtt->pdp.page_directory[pdpe]->page_table[temp]); } for_each_set_bit(pdpe, new_page_dirs, GEN8_LEGACY_PDPES) - unmap_and_free_pd(ppgtt->pdp.page_directory[pdpe], vm->dev); + free_pd(vm->dev, ppgtt->pdp.page_directory[pdpe]); free_gen8_temp_bitmaps(new_page_dirs, new_page_tables); mark_tlbs_dirty(ppgtt); @@ -1353,7 +1351,7 @@ unwind_out: struct i915_page_table *pt = ppgtt->pd.page_table[pde]; ppgtt->pd.page_table[pde] = ppgtt->scratch_pt; - unmap_and_free_pt(pt, vm->dev); + free_pt(vm->dev, pt); } mark_tlbs_dirty(ppgtt); @@ -1372,11 +1370,11 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm) gen6_for_all_pdes(pt, ppgtt, pde) { if (pt != ppgtt->scratch_pt) - unmap_and_free_pt(pt, ppgtt->base.dev); + free_pt(ppgtt->base.dev, pt); } - unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev); - unmap_and_free_pd(&ppgtt->pd, ppgtt->base.dev); + free_pt(ppgtt->base.dev, ppgtt->scratch_pt); + free_pd(ppgtt->base.dev, &ppgtt->pd); } static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt) @@ -1426,7 +1424,7 @@ alloc: return 0; err_out: - unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev); + free_pt(ppgtt->base.dev, ppgtt->scratch_pt); return ret; }
All the paging structures are now similar and mapped for dma. The unmapping is taken care of by common accessors, so don't overload the reader with such details. v2: Be consistent with goto labels (Michel) Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com> --- drivers/gpu/drm/i915/i915_gem_gtt.c | 40 ++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 21 deletions(-)