Message ID | 20241021211835.1675640-3-matthew.brost@intel.com (mailing list archive) |
---|---|
State | New |
Headers | show |
Series | Fix non-contiguous VRAM BO access in Xe | expand |
Matthew Brost <matthew.brost@intel.com> writes: > Non-contiguous VRAM cannot easily be mapped in TTM nor can non-visible > VRAM easily be accessed. Add ttm_bo_access, which is similar to > ttm_bo_vm_access, to access such memory. > > v4: > - Fix checkpatch warnings (CI) > v5: > - Fix checkpatch warnings (CI) > > Reported-by: Christoph Manszewski <christoph.manszewski@intel.com> > Suggested-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> > Signed-off-by: Matthew Brost <matthew.brost@intel.com> With the igt/xe_eudebug* coverage, Tested-by: Mika Kuoppala <mika.kuoppala@linux.intel.com> > --- > drivers/gpu/drm/ttm/ttm_bo_util.c | 86 +++++++++++++++++++++++++++++++ > drivers/gpu/drm/ttm/ttm_bo_vm.c | 65 +---------------------- > include/drm/ttm/ttm_bo.h | 2 + > 3 files changed, 89 insertions(+), 64 deletions(-) > > diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c > index d939925efa81..41bb5a7477d3 100644 > --- a/drivers/gpu/drm/ttm/ttm_bo_util.c > +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c > @@ -919,3 +919,89 @@ s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev, > > return progress; > } > + > +static int ttm_bo_access_kmap(struct ttm_buffer_object *bo, > + unsigned long offset, > + void *buf, int len, int write) > +{ > + unsigned long page = offset >> PAGE_SHIFT; > + unsigned long bytes_left = len; > + int ret; > + > + /* Copy a page at a time, that way no extra virtual address > + * mapping is needed > + */ > + offset -= page << PAGE_SHIFT; > + do { > + unsigned long bytes = min(bytes_left, PAGE_SIZE - offset); > + struct ttm_bo_kmap_obj map; > + void *ptr; > + bool is_iomem; > + > + ret = ttm_bo_kmap(bo, page, 1, &map); > + if (ret) > + return ret; > + > + ptr = (void *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset; > + WARN_ON_ONCE(is_iomem); > + if (write) > + memcpy(ptr, buf, bytes); > + else > + memcpy(buf, ptr, bytes); > + ttm_bo_kunmap(&map); > + > + page++; > + buf += bytes; > + bytes_left -= 
bytes; > + offset = 0; > + } while (bytes_left); > + > + return len; > +} > + > +/** > + * ttm_bo_access - Helper to access a buffer object > + * > + * @bo: ttm buffer object > + * @offset: access offset into buffer object > + * @buf: pointer to caller memory to read into or write from > + * @len: length of access > + * @write: write access > + * > + * Utility function to access a buffer object. Useful when buffer object cannot > + * be easily mapped (non-contiguous, non-visible, etc...). > + * > + * Returns: > + * 0 if successful, negative error code on failure. > + */ > +int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset, > + void *buf, int len, int write) > +{ > + int ret; > + > + if (len < 1 || (offset + len) > bo->base.size) > + return -EIO; > + > + ret = ttm_bo_reserve(bo, true, false, NULL); > + if (ret) > + return ret; > + > + switch (bo->resource->mem_type) { > + case TTM_PL_SYSTEM: > + fallthrough; > + case TTM_PL_TT: > + ret = ttm_bo_access_kmap(bo, offset, buf, len, write); > + break; > + default: > + if (bo->bdev->funcs->access_memory) > + ret = bo->bdev->funcs->access_memory > + (bo, offset, buf, len, write); > + else > + ret = -EIO; > + } > + > + ttm_bo_unreserve(bo); > + > + return ret; > +} > +EXPORT_SYMBOL(ttm_bo_access); > diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c > index 2c699ed1963a..20b1e5f78684 100644 > --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c > +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c > @@ -366,45 +366,6 @@ void ttm_bo_vm_close(struct vm_area_struct *vma) > } > EXPORT_SYMBOL(ttm_bo_vm_close); > > -static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo, > - unsigned long offset, > - uint8_t *buf, int len, int write) > -{ > - unsigned long page = offset >> PAGE_SHIFT; > - unsigned long bytes_left = len; > - int ret; > - > - /* Copy a page at a time, that way no extra virtual address > - * mapping is needed > - */ > - offset -= page << PAGE_SHIFT; > - do { > - unsigned long bytes = 
min(bytes_left, PAGE_SIZE - offset); > - struct ttm_bo_kmap_obj map; > - void *ptr; > - bool is_iomem; > - > - ret = ttm_bo_kmap(bo, page, 1, &map); > - if (ret) > - return ret; > - > - ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset; > - WARN_ON_ONCE(is_iomem); > - if (write) > - memcpy(ptr, buf, bytes); > - else > - memcpy(buf, ptr, bytes); > - ttm_bo_kunmap(&map); > - > - page++; > - buf += bytes; > - bytes_left -= bytes; > - offset = 0; > - } while (bytes_left); > - > - return len; > -} > - > int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, > void *buf, int len, int write) > { > @@ -412,32 +373,8 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, > unsigned long offset = (addr) - vma->vm_start + > ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node)) > << PAGE_SHIFT); > - int ret; > - > - if (len < 1 || (offset + len) > bo->base.size) > - return -EIO; > > - ret = ttm_bo_reserve(bo, true, false, NULL); > - if (ret) > - return ret; > - > - switch (bo->resource->mem_type) { > - case TTM_PL_SYSTEM: > - fallthrough; > - case TTM_PL_TT: > - ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write); > - break; > - default: > - if (bo->bdev->funcs->access_memory) > - ret = bo->bdev->funcs->access_memory( > - bo, offset, buf, len, write); > - else > - ret = -EIO; > - } > - > - ttm_bo_unreserve(bo); > - > - return ret; > + return ttm_bo_access(bo, offset, buf, len, write); > } > EXPORT_SYMBOL(ttm_bo_vm_access); > > diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h > index 5804408815be..8ea11cd8df39 100644 > --- a/include/drm/ttm/ttm_bo.h > +++ b/include/drm/ttm/ttm_bo.h > @@ -421,6 +421,8 @@ void ttm_bo_unpin(struct ttm_buffer_object *bo); > int ttm_bo_evict_first(struct ttm_device *bdev, > struct ttm_resource_manager *man, > struct ttm_operation_ctx *ctx); > +int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset, > + void *buf, int len, int write); > vm_fault_t 
ttm_bo_vm_reserve(struct ttm_buffer_object *bo, > struct vm_fault *vmf); > vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, > -- > 2.34.1
On Tue, Oct 22, 2024 at 12:18:52PM +0300, Mika Kuoppala wrote: > Matthew Brost <matthew.brost@intel.com> writes: > > > Non-contiguous VRAM cannot easily be mapped in TTM nor can non-visible > > VRAM easily be accessed. Add ttm_bo_access, which is similar to > > ttm_bo_vm_access, to access such memory. > > > > v4: > > - Fix checkpatch warnings (CI) > > v5: > > - Fix checkpatch warnings (CI) > > > > Reported-by: Christoph Manszewski <christoph.manszewski@intel.com> > > Suggested-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> > > Signed-off-by: Matthew Brost <matthew.brost@intel.com> > > With the igt/xe_eudebug* coverage, > > Tested-by: Mika Kuoppala <mika.kuoppala@linux.intel.com> Thanks! Is this for the series or just this patch? Matt > > > --- > > drivers/gpu/drm/ttm/ttm_bo_util.c | 86 +++++++++++++++++++++++++++++++ > > drivers/gpu/drm/ttm/ttm_bo_vm.c | 65 +---------------------- > > include/drm/ttm/ttm_bo.h | 2 + > > 3 files changed, 89 insertions(+), 64 deletions(-) > > > > diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c > > index d939925efa81..41bb5a7477d3 100644 > > --- a/drivers/gpu/drm/ttm/ttm_bo_util.c > > +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c > > @@ -919,3 +919,89 @@ s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev, > > > > return progress; > > } > > + > > +static int ttm_bo_access_kmap(struct ttm_buffer_object *bo, > > + unsigned long offset, > > + void *buf, int len, int write) > > +{ > > + unsigned long page = offset >> PAGE_SHIFT; > > + unsigned long bytes_left = len; > > + int ret; > > + > > + /* Copy a page at a time, that way no extra virtual address > > + * mapping is needed > > + */ > > + offset -= page << PAGE_SHIFT; > > + do { > > + unsigned long bytes = min(bytes_left, PAGE_SIZE - offset); > > + struct ttm_bo_kmap_obj map; > > + void *ptr; > > + bool is_iomem; > > + > > + ret = ttm_bo_kmap(bo, page, 1, &map); > > + if (ret) > > + return ret; > > + > > + ptr = (void 
*)ttm_kmap_obj_virtual(&map, &is_iomem) + offset; > > + WARN_ON_ONCE(is_iomem); > > + if (write) > > + memcpy(ptr, buf, bytes); > > + else > > + memcpy(buf, ptr, bytes); > > + ttm_bo_kunmap(&map); > > + > > + page++; > > + buf += bytes; > > + bytes_left -= bytes; > > + offset = 0; > > + } while (bytes_left); > > + > > + return len; > > +} > > + > > +/** > > + * ttm_bo_access - Helper to access a buffer object > > + * > > + * @bo: ttm buffer object > > + * @offset: access offset into buffer object > > + * @buf: pointer to caller memory to read into or write from > > + * @len: length of access > > + * @write: write access > > + * > > + * Utility function to access a buffer object. Useful when buffer object cannot > > + * be easily mapped (non-contiguous, non-visible, etc...). > > + * > > + * Returns: > > + * 0 if successful, negative error code on failure. > > + */ > > +int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset, > > + void *buf, int len, int write) > > +{ > > + int ret; > > + > > + if (len < 1 || (offset + len) > bo->base.size) > > + return -EIO; > > + > > + ret = ttm_bo_reserve(bo, true, false, NULL); > > + if (ret) > > + return ret; > > + > > + switch (bo->resource->mem_type) { > > + case TTM_PL_SYSTEM: > > + fallthrough; > > + case TTM_PL_TT: > > + ret = ttm_bo_access_kmap(bo, offset, buf, len, write); > > + break; > > + default: > > + if (bo->bdev->funcs->access_memory) > > + ret = bo->bdev->funcs->access_memory > > + (bo, offset, buf, len, write); > > + else > > + ret = -EIO; > > + } > > + > > + ttm_bo_unreserve(bo); > > + > > + return ret; > > +} > > +EXPORT_SYMBOL(ttm_bo_access); > > diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c > > index 2c699ed1963a..20b1e5f78684 100644 > > --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c > > +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c > > @@ -366,45 +366,6 @@ void ttm_bo_vm_close(struct vm_area_struct *vma) > > } > > EXPORT_SYMBOL(ttm_bo_vm_close); > > > > -static int 
ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo, > > - unsigned long offset, > > - uint8_t *buf, int len, int write) > > -{ > > - unsigned long page = offset >> PAGE_SHIFT; > > - unsigned long bytes_left = len; > > - int ret; > > - > > - /* Copy a page at a time, that way no extra virtual address > > - * mapping is needed > > - */ > > - offset -= page << PAGE_SHIFT; > > - do { > > - unsigned long bytes = min(bytes_left, PAGE_SIZE - offset); > > - struct ttm_bo_kmap_obj map; > > - void *ptr; > > - bool is_iomem; > > - > > - ret = ttm_bo_kmap(bo, page, 1, &map); > > - if (ret) > > - return ret; > > - > > - ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset; > > - WARN_ON_ONCE(is_iomem); > > - if (write) > > - memcpy(ptr, buf, bytes); > > - else > > - memcpy(buf, ptr, bytes); > > - ttm_bo_kunmap(&map); > > - > > - page++; > > - buf += bytes; > > - bytes_left -= bytes; > > - offset = 0; > > - } while (bytes_left); > > - > > - return len; > > -} > > - > > int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, > > void *buf, int len, int write) > > { > > @@ -412,32 +373,8 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, > > unsigned long offset = (addr) - vma->vm_start + > > ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node)) > > << PAGE_SHIFT); > > - int ret; > > - > > - if (len < 1 || (offset + len) > bo->base.size) > > - return -EIO; > > > > - ret = ttm_bo_reserve(bo, true, false, NULL); > > - if (ret) > > - return ret; > > - > > - switch (bo->resource->mem_type) { > > - case TTM_PL_SYSTEM: > > - fallthrough; > > - case TTM_PL_TT: > > - ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write); > > - break; > > - default: > > - if (bo->bdev->funcs->access_memory) > > - ret = bo->bdev->funcs->access_memory( > > - bo, offset, buf, len, write); > > - else > > - ret = -EIO; > > - } > > - > > - ttm_bo_unreserve(bo); > > - > > - return ret; > > + return ttm_bo_access(bo, offset, buf, len, write); > > } > > 
EXPORT_SYMBOL(ttm_bo_vm_access); > > > > diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h > > index 5804408815be..8ea11cd8df39 100644 > > --- a/include/drm/ttm/ttm_bo.h > > +++ b/include/drm/ttm/ttm_bo.h > > @@ -421,6 +421,8 @@ void ttm_bo_unpin(struct ttm_buffer_object *bo); > > int ttm_bo_evict_first(struct ttm_device *bdev, > > struct ttm_resource_manager *man, > > struct ttm_operation_ctx *ctx); > > +int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset, > > + void *buf, int len, int write); > > vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, > > struct vm_fault *vmf); > > vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, > > -- > > 2.34.1
Matthew Brost <matthew.brost@intel.com> writes: > On Tue, Oct 22, 2024 at 12:18:52PM +0300, Mika Kuoppala wrote: >> Matthew Brost <matthew.brost@intel.com> writes: >> >> > Non-contiguous VRAM cannot easily be mapped in TTM nor can non-visible >> > VRAM easily be accessed. Add ttm_bo_access, which is similar to >> > ttm_bo_vm_access, to access such memory. >> > >> > v4: >> > - Fix checkpatch warnings (CI) >> > v5: >> > - Fix checkpatch warnings (CI) >> > >> > Reported-by: Christoph Manszewski <christoph.manszewski@intel.com> >> > Suggested-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> >> > Signed-off-by: Matthew Brost <matthew.brost@intel.com> >> >> With the igt/xe_eudebug* coverage, >> >> Tested-by: Mika Kuoppala <mika.kuoppala@linux.intel.com> > > Thanks! Is this for the series or just this patch? > Whole series applied but the coverage tested only ttm_bo_access(). -Mika > Matt > >> >> > --- >> > drivers/gpu/drm/ttm/ttm_bo_util.c | 86 +++++++++++++++++++++++++++++++ >> > drivers/gpu/drm/ttm/ttm_bo_vm.c | 65 +---------------------- >> > include/drm/ttm/ttm_bo.h | 2 + >> > 3 files changed, 89 insertions(+), 64 deletions(-) >> > >> > diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c >> > index d939925efa81..41bb5a7477d3 100644 >> > --- a/drivers/gpu/drm/ttm/ttm_bo_util.c >> > +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c >> > @@ -919,3 +919,89 @@ s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev, >> > >> > return progress; >> > } >> > + >> > +static int ttm_bo_access_kmap(struct ttm_buffer_object *bo, >> > + unsigned long offset, >> > + void *buf, int len, int write) >> > +{ >> > + unsigned long page = offset >> PAGE_SHIFT; >> > + unsigned long bytes_left = len; >> > + int ret; >> > + >> > + /* Copy a page at a time, that way no extra virtual address >> > + * mapping is needed >> > + */ >> > + offset -= page << PAGE_SHIFT; >> > + do { >> > + unsigned long bytes = min(bytes_left, PAGE_SIZE - offset); >> 
> + struct ttm_bo_kmap_obj map; >> > + void *ptr; >> > + bool is_iomem; >> > + >> > + ret = ttm_bo_kmap(bo, page, 1, &map); >> > + if (ret) >> > + return ret; >> > + >> > + ptr = (void *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset; >> > + WARN_ON_ONCE(is_iomem); >> > + if (write) >> > + memcpy(ptr, buf, bytes); >> > + else >> > + memcpy(buf, ptr, bytes); >> > + ttm_bo_kunmap(&map); >> > + >> > + page++; >> > + buf += bytes; >> > + bytes_left -= bytes; >> > + offset = 0; >> > + } while (bytes_left); >> > + >> > + return len; >> > +} >> > + >> > +/** >> > + * ttm_bo_access - Helper to access a buffer object >> > + * >> > + * @bo: ttm buffer object >> > + * @offset: access offset into buffer object >> > + * @buf: pointer to caller memory to read into or write from >> > + * @len: length of access >> > + * @write: write access >> > + * >> > + * Utility function to access a buffer object. Useful when buffer object cannot >> > + * be easily mapped (non-contiguous, non-visible, etc...). >> > + * >> > + * Returns: >> > + * 0 if successful, negative error code on failure. 
>> > + */ >> > +int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset, >> > + void *buf, int len, int write) >> > +{ >> > + int ret; >> > + >> > + if (len < 1 || (offset + len) > bo->base.size) >> > + return -EIO; >> > + >> > + ret = ttm_bo_reserve(bo, true, false, NULL); >> > + if (ret) >> > + return ret; >> > + >> > + switch (bo->resource->mem_type) { >> > + case TTM_PL_SYSTEM: >> > + fallthrough; >> > + case TTM_PL_TT: >> > + ret = ttm_bo_access_kmap(bo, offset, buf, len, write); >> > + break; >> > + default: >> > + if (bo->bdev->funcs->access_memory) >> > + ret = bo->bdev->funcs->access_memory >> > + (bo, offset, buf, len, write); >> > + else >> > + ret = -EIO; >> > + } >> > + >> > + ttm_bo_unreserve(bo); >> > + >> > + return ret; >> > +} >> > +EXPORT_SYMBOL(ttm_bo_access); >> > diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c >> > index 2c699ed1963a..20b1e5f78684 100644 >> > --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c >> > +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c >> > @@ -366,45 +366,6 @@ void ttm_bo_vm_close(struct vm_area_struct *vma) >> > } >> > EXPORT_SYMBOL(ttm_bo_vm_close); >> > >> > -static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo, >> > - unsigned long offset, >> > - uint8_t *buf, int len, int write) >> > -{ >> > - unsigned long page = offset >> PAGE_SHIFT; >> > - unsigned long bytes_left = len; >> > - int ret; >> > - >> > - /* Copy a page at a time, that way no extra virtual address >> > - * mapping is needed >> > - */ >> > - offset -= page << PAGE_SHIFT; >> > - do { >> > - unsigned long bytes = min(bytes_left, PAGE_SIZE - offset); >> > - struct ttm_bo_kmap_obj map; >> > - void *ptr; >> > - bool is_iomem; >> > - >> > - ret = ttm_bo_kmap(bo, page, 1, &map); >> > - if (ret) >> > - return ret; >> > - >> > - ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset; >> > - WARN_ON_ONCE(is_iomem); >> > - if (write) >> > - memcpy(ptr, buf, bytes); >> > - else >> > - memcpy(buf, ptr, bytes); >> > - 
ttm_bo_kunmap(&map); >> > - >> > - page++; >> > - buf += bytes; >> > - bytes_left -= bytes; >> > - offset = 0; >> > - } while (bytes_left); >> > - >> > - return len; >> > -} >> > - >> > int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, >> > void *buf, int len, int write) >> > { >> > @@ -412,32 +373,8 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, >> > unsigned long offset = (addr) - vma->vm_start + >> > ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node)) >> > << PAGE_SHIFT); >> > - int ret; >> > - >> > - if (len < 1 || (offset + len) > bo->base.size) >> > - return -EIO; >> > >> > - ret = ttm_bo_reserve(bo, true, false, NULL); >> > - if (ret) >> > - return ret; >> > - >> > - switch (bo->resource->mem_type) { >> > - case TTM_PL_SYSTEM: >> > - fallthrough; >> > - case TTM_PL_TT: >> > - ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write); >> > - break; >> > - default: >> > - if (bo->bdev->funcs->access_memory) >> > - ret = bo->bdev->funcs->access_memory( >> > - bo, offset, buf, len, write); >> > - else >> > - ret = -EIO; >> > - } >> > - >> > - ttm_bo_unreserve(bo); >> > - >> > - return ret; >> > + return ttm_bo_access(bo, offset, buf, len, write); >> > } >> > EXPORT_SYMBOL(ttm_bo_vm_access); >> > >> > diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h >> > index 5804408815be..8ea11cd8df39 100644 >> > --- a/include/drm/ttm/ttm_bo.h >> > +++ b/include/drm/ttm/ttm_bo.h >> > @@ -421,6 +421,8 @@ void ttm_bo_unpin(struct ttm_buffer_object *bo); >> > int ttm_bo_evict_first(struct ttm_device *bdev, >> > struct ttm_resource_manager *man, >> > struct ttm_operation_ctx *ctx); >> > +int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset, >> > + void *buf, int len, int write); >> > vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, >> > struct vm_fault *vmf); >> > vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, >> > -- >> > 2.34.1
On 21/10/2024 22:18, Matthew Brost wrote: > Non-contiguous VRAM cannot easily be mapped in TTM nor can non-visible > VRAM easily be accessed. Add ttm_bo_access, which is similar to > ttm_bo_vm_access, to access such memory. > > v4: > - Fix checkpatch warnings (CI) > v5: > - Fix checkpatch warnings (CI) > > Reported-by: Christoph Manszewski <christoph.manszewski@intel.com> > Suggested-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> > Signed-off-by: Matthew Brost <matthew.brost@intel.com> > --- > drivers/gpu/drm/ttm/ttm_bo_util.c | 86 +++++++++++++++++++++++++++++++ > drivers/gpu/drm/ttm/ttm_bo_vm.c | 65 +---------------------- > include/drm/ttm/ttm_bo.h | 2 + > 3 files changed, 89 insertions(+), 64 deletions(-) > > diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c > index d939925efa81..41bb5a7477d3 100644 > --- a/drivers/gpu/drm/ttm/ttm_bo_util.c > +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c > @@ -919,3 +919,89 @@ s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev, > > return progress; > } > + > +static int ttm_bo_access_kmap(struct ttm_buffer_object *bo, > + unsigned long offset, > + void *buf, int len, int write) > +{ > + unsigned long page = offset >> PAGE_SHIFT; > + unsigned long bytes_left = len; > + int ret; > + > + /* Copy a page at a time, that way no extra virtual address > + * mapping is needed > + */ > + offset -= page << PAGE_SHIFT; > + do { > + unsigned long bytes = min(bytes_left, PAGE_SIZE - offset); > + struct ttm_bo_kmap_obj map; > + void *ptr; > + bool is_iomem; > + > + ret = ttm_bo_kmap(bo, page, 1, &map); > + if (ret) > + return ret; > + > + ptr = (void *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset; > + WARN_ON_ONCE(is_iomem); > + if (write) > + memcpy(ptr, buf, bytes); > + else > + memcpy(buf, ptr, bytes); > + ttm_bo_kunmap(&map); > + > + page++; > + buf += bytes; > + bytes_left -= bytes; > + offset = 0; > + } while (bytes_left); > + > + return len; > +} > + > +/** > + * 
ttm_bo_access - Helper to access a buffer object > + * > + * @bo: ttm buffer object > + * @offset: access offset into buffer object > + * @buf: pointer to caller memory to read into or write from > + * @len: length of access > + * @write: write access > + * > + * Utility function to access a buffer object. Useful when buffer object cannot > + * be easily mapped (non-contiguous, non-visible, etc...). > + * > + * Returns: > + * 0 if successful, negative error code on failure. @len if successful Reviewed-by: Matthew Auld <matthew.auld@intel.com> > + */ > +int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset, > + void *buf, int len, int write) > +{ > + int ret; > + > + if (len < 1 || (offset + len) > bo->base.size) > + return -EIO; > + > + ret = ttm_bo_reserve(bo, true, false, NULL); > + if (ret) > + return ret; > + > + switch (bo->resource->mem_type) { > + case TTM_PL_SYSTEM: > + fallthrough; > + case TTM_PL_TT: > + ret = ttm_bo_access_kmap(bo, offset, buf, len, write); > + break; > + default: > + if (bo->bdev->funcs->access_memory) > + ret = bo->bdev->funcs->access_memory > + (bo, offset, buf, len, write); > + else > + ret = -EIO; > + } > + > + ttm_bo_unreserve(bo); > + > + return ret; > +} > +EXPORT_SYMBOL(ttm_bo_access); > diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c > index 2c699ed1963a..20b1e5f78684 100644 > --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c > +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c > @@ -366,45 +366,6 @@ void ttm_bo_vm_close(struct vm_area_struct *vma) > } > EXPORT_SYMBOL(ttm_bo_vm_close); > > -static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo, > - unsigned long offset, > - uint8_t *buf, int len, int write) > -{ > - unsigned long page = offset >> PAGE_SHIFT; > - unsigned long bytes_left = len; > - int ret; > - > - /* Copy a page at a time, that way no extra virtual address > - * mapping is needed > - */ > - offset -= page << PAGE_SHIFT; > - do { > - unsigned long bytes = min(bytes_left, PAGE_SIZE - 
offset); > - struct ttm_bo_kmap_obj map; > - void *ptr; > - bool is_iomem; > - > - ret = ttm_bo_kmap(bo, page, 1, &map); > - if (ret) > - return ret; > - > - ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset; > - WARN_ON_ONCE(is_iomem); > - if (write) > - memcpy(ptr, buf, bytes); > - else > - memcpy(buf, ptr, bytes); > - ttm_bo_kunmap(&map); > - > - page++; > - buf += bytes; > - bytes_left -= bytes; > - offset = 0; > - } while (bytes_left); > - > - return len; > -} > - > int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, > void *buf, int len, int write) > { > @@ -412,32 +373,8 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, > unsigned long offset = (addr) - vma->vm_start + > ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node)) > << PAGE_SHIFT); > - int ret; > - > - if (len < 1 || (offset + len) > bo->base.size) > - return -EIO; > > - ret = ttm_bo_reserve(bo, true, false, NULL); > - if (ret) > - return ret; > - > - switch (bo->resource->mem_type) { > - case TTM_PL_SYSTEM: > - fallthrough; > - case TTM_PL_TT: > - ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write); > - break; > - default: > - if (bo->bdev->funcs->access_memory) > - ret = bo->bdev->funcs->access_memory( > - bo, offset, buf, len, write); > - else > - ret = -EIO; > - } > - > - ttm_bo_unreserve(bo); > - > - return ret; > + return ttm_bo_access(bo, offset, buf, len, write); > } > EXPORT_SYMBOL(ttm_bo_vm_access); > > diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h > index 5804408815be..8ea11cd8df39 100644 > --- a/include/drm/ttm/ttm_bo.h > +++ b/include/drm/ttm/ttm_bo.h > @@ -421,6 +421,8 @@ void ttm_bo_unpin(struct ttm_buffer_object *bo); > int ttm_bo_evict_first(struct ttm_device *bdev, > struct ttm_resource_manager *man, > struct ttm_operation_ctx *ctx); > +int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset, > + void *buf, int len, int write); > vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, 
> struct vm_fault *vmf); > vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index d939925efa81..41bb5a7477d3 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -919,3 +919,89 @@ s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev, return progress; } + +static int ttm_bo_access_kmap(struct ttm_buffer_object *bo, + unsigned long offset, + void *buf, int len, int write) +{ + unsigned long page = offset >> PAGE_SHIFT; + unsigned long bytes_left = len; + int ret; + + /* Copy a page at a time, that way no extra virtual address + * mapping is needed + */ + offset -= page << PAGE_SHIFT; + do { + unsigned long bytes = min(bytes_left, PAGE_SIZE - offset); + struct ttm_bo_kmap_obj map; + void *ptr; + bool is_iomem; + + ret = ttm_bo_kmap(bo, page, 1, &map); + if (ret) + return ret; + + ptr = (void *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset; + WARN_ON_ONCE(is_iomem); + if (write) + memcpy(ptr, buf, bytes); + else + memcpy(buf, ptr, bytes); + ttm_bo_kunmap(&map); + + page++; + buf += bytes; + bytes_left -= bytes; + offset = 0; + } while (bytes_left); + + return len; +} + +/** + * ttm_bo_access - Helper to access a buffer object + * + * @bo: ttm buffer object + * @offset: access offset into buffer object + * @buf: pointer to caller memory to read into or write from + * @len: length of access + * @write: write access + * + * Utility function to access a buffer object. Useful when buffer object cannot + * be easily mapped (non-contiguous, non-visible, etc...). + * + * Returns: + * @len if successful, negative error code on failure. 
+ */ +int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset, + void *buf, int len, int write) +{ + int ret; + + if (len < 1 || (offset + len) > bo->base.size) + return -EIO; + + ret = ttm_bo_reserve(bo, true, false, NULL); + if (ret) + return ret; + + switch (bo->resource->mem_type) { + case TTM_PL_SYSTEM: + fallthrough; + case TTM_PL_TT: + ret = ttm_bo_access_kmap(bo, offset, buf, len, write); + break; + default: + if (bo->bdev->funcs->access_memory) + ret = bo->bdev->funcs->access_memory + (bo, offset, buf, len, write); + else + ret = -EIO; + } + + ttm_bo_unreserve(bo); + + return ret; +} +EXPORT_SYMBOL(ttm_bo_access); diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 2c699ed1963a..20b1e5f78684 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -366,45 +366,6 @@ void ttm_bo_vm_close(struct vm_area_struct *vma) } EXPORT_SYMBOL(ttm_bo_vm_close); -static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo, - unsigned long offset, - uint8_t *buf, int len, int write) -{ - unsigned long page = offset >> PAGE_SHIFT; - unsigned long bytes_left = len; - int ret; - - /* Copy a page at a time, that way no extra virtual address - * mapping is needed - */ - offset -= page << PAGE_SHIFT; - do { - unsigned long bytes = min(bytes_left, PAGE_SIZE - offset); - struct ttm_bo_kmap_obj map; - void *ptr; - bool is_iomem; - - ret = ttm_bo_kmap(bo, page, 1, &map); - if (ret) - return ret; - - ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset; - WARN_ON_ONCE(is_iomem); - if (write) - memcpy(ptr, buf, bytes); - else - memcpy(buf, ptr, bytes); - ttm_bo_kunmap(&map); - - page++; - buf += bytes; - bytes_left -= bytes; - offset = 0; - } while (bytes_left); - - return len; -} - int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write) { @@ -412,32 +373,8 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, unsigned long offset = 
(addr) - vma->vm_start + ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node)) << PAGE_SHIFT); - int ret; - - if (len < 1 || (offset + len) > bo->base.size) - return -EIO; - ret = ttm_bo_reserve(bo, true, false, NULL); - if (ret) - return ret; - - switch (bo->resource->mem_type) { - case TTM_PL_SYSTEM: - fallthrough; - case TTM_PL_TT: - ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write); - break; - default: - if (bo->bdev->funcs->access_memory) - ret = bo->bdev->funcs->access_memory( - bo, offset, buf, len, write); - else - ret = -EIO; - } - - ttm_bo_unreserve(bo); - - return ret; + return ttm_bo_access(bo, offset, buf, len, write); } EXPORT_SYMBOL(ttm_bo_vm_access); diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h index 5804408815be..8ea11cd8df39 100644 --- a/include/drm/ttm/ttm_bo.h +++ b/include/drm/ttm/ttm_bo.h @@ -421,6 +421,8 @@ void ttm_bo_unpin(struct ttm_buffer_object *bo); int ttm_bo_evict_first(struct ttm_device *bdev, struct ttm_resource_manager *man, struct ttm_operation_ctx *ctx); +int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset, + void *buf, int len, int write); vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, struct vm_fault *vmf); vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
Non-contiguous VRAM cannot easily be mapped in TTM nor can non-visible VRAM easily be accessed. Add ttm_bo_access, which is similar to ttm_bo_vm_access, to access such memory. v4: - Fix checkpatch warnings (CI) v5: - Fix checkpatch warnings (CI) Reported-by: Christoph Manszewski <christoph.manszewski@intel.com> Suggested-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> Signed-off-by: Matthew Brost <matthew.brost@intel.com> --- drivers/gpu/drm/ttm/ttm_bo_util.c | 86 +++++++++++++++++++++++++++++++ drivers/gpu/drm/ttm/ttm_bo_vm.c | 65 +---------------------- include/drm/ttm/ttm_bo.h | 2 + 3 files changed, 89 insertions(+), 64 deletions(-)