Message ID | 20190627205633.1143-16-matthew.auld@intel.com (mailing list archive)
---|---
State | New, archived
Series | Introduce memory region concept (including device local memory)
Quoting Matthew Auld (2019-06-27 21:56:11)
> @@ -1020,16 +1022,23 @@ static void reloc_cache_reset(struct reloc_cache *cache)
>  		i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
>  	} else {
>  		wmb();
> -		io_mapping_unmap_atomic((void __iomem *)vaddr);
> -		if (cache->node.allocated) {
> -			struct i915_ggtt *ggtt = cache_to_ggtt(cache);
> -
> -			ggtt->vm.clear_range(&ggtt->vm,
> -					     cache->node.start,
> -					     cache->node.size);
> -			drm_mm_remove_node(&cache->node);
> +
> +		if (cache->is_lmem) {
> +			io_mapping_unmap_atomic((void __iomem *)vaddr);
> +			i915_gem_object_unpin_pages((struct drm_i915_gem_object *)cache->node.mm);
> +			cache->is_lmem = false;
>  		} else {
> -			i915_vma_unpin((struct i915_vma *)cache->node.mm);
> +			io_mapping_unmap_atomic((void __iomem *)vaddr);

The first step of each branch is the same. What am I missing?

> +			if (cache->node.allocated) {
> +				struct i915_ggtt *ggtt = cache_to_ggtt(cache);
> +
> +				ggtt->vm.clear_range(&ggtt->vm,
> +						     cache->node.start,
> +						     cache->node.size);
> +				drm_mm_remove_node(&cache->node);
> +			} else {
> +				i915_vma_unpin((struct i915_vma *)cache->node.mm);
> +			}
>  		}
>  	}
>
> @@ -1069,6 +1078,40 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
>  	return vaddr;
>  }
>
> +static void *reloc_lmem(struct drm_i915_gem_object *obj,
> +			struct reloc_cache *cache,
> +			unsigned long page)
> +{
> +	void *vaddr;
> +	int err;
> +
> +	GEM_BUG_ON(use_cpu_reloc(cache, obj));
> +
> +	if (cache->vaddr) {
> +		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
> +	} else {
> +		i915_gem_object_lock(obj);
> +		err = i915_gem_object_set_to_wc_domain(obj, true);
> +		i915_gem_object_unlock(obj);
> +		if (err)
> +			return ERR_PTR(err);
> +
> +		err = i915_gem_object_pin_pages(obj);
> +		if (err)
> +			return ERR_PTR(err);
> +
> +		cache->node.mm = (void *)obj;
> +		cache->is_lmem = true;
> +	}
> +
> +	vaddr = i915_gem_object_lmem_io_map_page(obj, page);

Secret atomic. Notice the asymmetric release.

> +
> +	cache->vaddr = (unsigned long)vaddr;
> +	cache->page = page;
> +
> +	return vaddr;
> +}
> +
>  static void *reloc_iomap(struct drm_i915_gem_object *obj,
>  			 struct reloc_cache *cache,
>  			 unsigned long page)
> @@ -1145,8 +1188,12 @@ static void *reloc_vaddr(struct drm_i915_gem_object *obj,
>  		vaddr = unmask_page(cache->vaddr);
>  	} else {
>  		vaddr = NULL;
> -		if ((cache->vaddr & KMAP) == 0)
> -			vaddr = reloc_iomap(obj, cache, page);
> +		if ((cache->vaddr & KMAP) == 0) {
> +			if (i915_gem_object_is_lmem(obj))
> +				vaddr = reloc_lmem(obj, cache, page);
> +			else
> +				vaddr = reloc_iomap(obj, cache, page);
> +		}
>  		if (!vaddr)
>  			vaddr = reloc_kmap(obj, cache, page);
>  	}
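For reference, the duplication the reviewer points at could be removed by hoisting the shared io_mapping_unmap_atomic() call in front of the is_lmem check. A minimal sketch of how that branch of reloc_cache_reset() might then look (a rework of the code quoted above, not part of the posted patch):

```c
	} else {
		wmb();
		/* Both the lmem and the iomap paths tear down an atomic io mapping. */
		io_mapping_unmap_atomic((void __iomem *)vaddr);

		if (cache->is_lmem) {
			/* lmem path: drop the page pin taken in reloc_lmem(). */
			i915_gem_object_unpin_pages((struct drm_i915_gem_object *)cache->node.mm);
			cache->is_lmem = false;
		} else if (cache->node.allocated) {
			/* iomap path via a manually inserted GGTT node: clear and release it. */
			struct i915_ggtt *ggtt = cache_to_ggtt(cache);

			ggtt->vm.clear_range(&ggtt->vm,
					     cache->node.start,
					     cache->node.size);
			drm_mm_remove_node(&cache->node);
		} else {
			/* iomap path via a pinned vma: unpin it. */
			i915_vma_unpin((struct i915_vma *)cache->node.mm);
		}
	}
```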
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 1c5dfbfad71b..b724143e88d2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -251,6 +251,7 @@ struct i915_execbuffer {
 		bool has_llc : 1;
 		bool has_fence : 1;
 		bool needs_unfenced : 1;
+		bool is_lmem : 1;

 		struct i915_request *rq;
 		u32 *rq_cmd;
@@ -963,6 +964,7 @@ static void reloc_cache_init(struct reloc_cache *cache,
 	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
 	cache->has_fence = cache->gen < 4;
 	cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
+	cache->is_lmem = false;
 	cache->node.allocated = false;
 	cache->rq = NULL;
 	cache->rq_size = 0;
@@ -1020,16 +1022,23 @@ static void reloc_cache_reset(struct reloc_cache *cache)
 		i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
 	} else {
 		wmb();
-		io_mapping_unmap_atomic((void __iomem *)vaddr);
-		if (cache->node.allocated) {
-			struct i915_ggtt *ggtt = cache_to_ggtt(cache);
-
-			ggtt->vm.clear_range(&ggtt->vm,
-					     cache->node.start,
-					     cache->node.size);
-			drm_mm_remove_node(&cache->node);
+
+		if (cache->is_lmem) {
+			io_mapping_unmap_atomic((void __iomem *)vaddr);
+			i915_gem_object_unpin_pages((struct drm_i915_gem_object *)cache->node.mm);
+			cache->is_lmem = false;
 		} else {
-			i915_vma_unpin((struct i915_vma *)cache->node.mm);
+			io_mapping_unmap_atomic((void __iomem *)vaddr);
+			if (cache->node.allocated) {
+				struct i915_ggtt *ggtt = cache_to_ggtt(cache);
+
+				ggtt->vm.clear_range(&ggtt->vm,
+						     cache->node.start,
+						     cache->node.size);
+				drm_mm_remove_node(&cache->node);
+			} else {
+				i915_vma_unpin((struct i915_vma *)cache->node.mm);
+			}
 		}
 	}

@@ -1069,6 +1078,40 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
 	return vaddr;
 }

+static void *reloc_lmem(struct drm_i915_gem_object *obj,
+			struct reloc_cache *cache,
+			unsigned long page)
+{
+	void *vaddr;
+	int err;
+
+	GEM_BUG_ON(use_cpu_reloc(cache, obj));
+
+	if (cache->vaddr) {
+		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
+	} else {
+		i915_gem_object_lock(obj);
+		err = i915_gem_object_set_to_wc_domain(obj, true);
+		i915_gem_object_unlock(obj);
+		if (err)
+			return ERR_PTR(err);
+
+		err = i915_gem_object_pin_pages(obj);
+		if (err)
+			return ERR_PTR(err);
+
+		cache->node.mm = (void *)obj;
+		cache->is_lmem = true;
+	}
+
+	vaddr = i915_gem_object_lmem_io_map_page(obj, page);
+
+	cache->vaddr = (unsigned long)vaddr;
+	cache->page = page;
+
+	return vaddr;
+}
+
 static void *reloc_iomap(struct drm_i915_gem_object *obj,
 			 struct reloc_cache *cache,
 			 unsigned long page)
@@ -1145,8 +1188,12 @@ static void *reloc_vaddr(struct drm_i915_gem_object *obj,
 		vaddr = unmask_page(cache->vaddr);
 	} else {
 		vaddr = NULL;
-		if ((cache->vaddr & KMAP) == 0)
-			vaddr = reloc_iomap(obj, cache, page);
+		if ((cache->vaddr & KMAP) == 0) {
+			if (i915_gem_object_is_lmem(obj))
+				vaddr = reloc_lmem(obj, cache, page);
+			else
+				vaddr = reloc_iomap(obj, cache, page);
+		}
 		if (!vaddr)
 			vaddr = reloc_kmap(obj, cache, page);
 	}
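On the "secret atomic" remark: i915_gem_object_lmem_io_map_page() presumably takes an atomic (non-sleeping, per-CPU) mapping internally, since both reloc_lmem() and reloc_cache_reset() release it with io_mapping_unmap_atomic(), yet nothing at the call site says so. A hypothetical sketch of a more symmetric arrangement follows, with the atomic nature spelled out in the helper's name; the body assumes the object's memory region exposes an io_mapping over LMEMBAR and that i915_gem_object_get_dma_address() yields an address that can be turned into an offset within it — the field names and offset handling are assumptions, not taken from this patch.

```c
/*
 * Hypothetical sketch only: an explicitly named atomic mapping helper so
 * the io_mapping_map_atomic_wc()/io_mapping_unmap_atomic() pair is visible
 * at one level. obj->mm.region and its ->iomap/->region fields are
 * assumptions about the memory-region series, not part of this patch.
 */
static void __iomem *
reloc_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
			      unsigned long page)
{
	resource_size_t offset = i915_gem_object_get_dma_address(obj, page);

	/* Translate the device address into an offset within LMEMBAR. */
	offset -= obj->mm.region->region.start;

	return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset);
}
```

reloc_lmem() would then call such a helper in place of i915_gem_object_lmem_io_map_page(), so the map and unmap read as a matched atomic pair.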
Add LMEM support for the CPU reloc path. When doing relocations we have
both a GPU and CPU reloc path, as well as some debugging options to force
a particular path. The GPU reloc path is preferred when the object is not
currently idle, otherwise we use the CPU reloc path. Since we can't kmap
the object, and the mappable aperture might not be available, add support
for mapping it through LMEMBAR.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
---
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c | 69 ++++++++++++++++---
 1 file changed, 58 insertions(+), 11 deletions(-)