
[1/2] drm/ttm: Implement vm_operations_struct.access v2

Message ID 1500350223-4913-1-git-send-email-Felix.Kuehling@amd.com (mailing list archive)
State New, archived

Commit Message

Felix Kuehling July 18, 2017, 3:57 a.m. UTC
Allows gdb to access contents of user mode mapped BOs. System memory
is handled by TTM using kmap. Other memory pools require a new driver
callback in ttm_bo_driver.

v2:
* kmap only one page at a time
* swap in BO if needed
* make driver callback more generic to handle private memory pools
* document callback return value
* WARN_ON -> WARN_ON_ONCE

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo_vm.c | 79 ++++++++++++++++++++++++++++++++++++++++-
 include/drm/ttm/ttm_bo_driver.h | 17 +++++++++
 2 files changed, 95 insertions(+), 1 deletion(-)
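
For illustration only, a minimal sketch of what a driver-side access_memory()
implementation for a private memory pool could look like. This is not the
amdgpu change from patch 2/2; my_device, my_dev_from_bo(), my_pool_read(),
my_pool_write() and MY_PL_PRIVATE are hypothetical placeholders for whatever
the driver uses to reach its pool:

#include <drm/ttm/ttm_bo_driver.h>

static int mydrv_ttm_access_memory(struct ttm_buffer_object *bo,
				   unsigned long offset,
				   void *buf, int len, int write)
{
	struct my_device *mdev = my_dev_from_bo(bo);	/* placeholder */
	/* Position of the BO inside the pool, plus the caller's offset */
	unsigned long pos = (bo->mem.start << PAGE_SHIFT) + offset;

	if (bo->mem.mem_type != MY_PL_PRIVATE)		/* placeholder pool */
		return -EIO;

	/* The documented contract allows a short copy; the caller may
	 * invoke the callback again with the remainder of the buffer.
	 */
	if (write)
		my_pool_write(mdev, pos, buf, len);	/* placeholder */
	else
		my_pool_read(mdev, pos, buf, len);	/* placeholder */

	return len;
}

static struct ttm_bo_driver mydrv_bo_driver = {
	/* ... the driver's other callbacks ... */
	.access_memory	= mydrv_ttm_access_memory,
};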

Comments

Christian König July 18, 2017, 2:14 p.m. UTC | #1
Am 18.07.2017 um 05:57 schrieb Felix Kuehling:
> Allows gdb to access contents of user mode mapped BOs. System memory
> is handled by TTM using kmap. Other memory pools require a new driver
> callback in ttm_bo_driver.
>
> v2:
> * kmap only one page at a time
> * swap in BO if needed
> * make driver callback more generic to handle private memory pools
> * document callback return value
> * WARN_ON -> WARN_ON_ONCE
>
> Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>

Reviewed-by: Christian König <christian.koenig@amd.com> for both.

Michel Dänzer July 18, 2017, 2:21 p.m. UTC | #2
On 17/07/17 11:57 PM, Felix Kuehling wrote:
> Allows gdb to access contents of user mode mapped BOs. System memory
> is handled by TTM using kmap. Other memory pools require a new driver
> callback in ttm_bo_driver.
> 
> v2:
> * kmap only one page at a time
> * swap in BO if needed
> * make driver callback more generic to handle private memory pools
> * document callback return value
> * WARN_ON -> WARN_ON_ONCE
> 
> Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>

Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>

Patch

diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 9f53df9..945985e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -294,10 +294,87 @@  static void ttm_bo_vm_close(struct vm_area_struct *vma)
 	vma->vm_private_data = NULL;
 }
 
+static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
+				 unsigned long offset,
+				 void *buf, int len, int write)
+{
+	unsigned long page = offset >> PAGE_SHIFT;
+	unsigned long bytes_left = len;
+	int ret;
+
+	/* Copy a page at a time, that way no extra virtual address
+	 * mapping is needed
+	 */
+	offset -= page << PAGE_SHIFT;
+	do {
+		unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
+		struct ttm_bo_kmap_obj map;
+		void *ptr;
+		bool is_iomem;
+
+		ret = ttm_bo_kmap(bo, page, 1, &map);
+		if (ret)
+			return ret;
+
+		ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
+		WARN_ON_ONCE(is_iomem);
+		if (write)
+			memcpy(ptr, buf, bytes);
+		else
+			memcpy(buf, ptr, bytes);
+		ttm_bo_kunmap(&map);
+
+		page++;
+		bytes_left -= bytes;
+		offset = 0;
+	} while (bytes_left);
+
+	return len;
+}
+
+static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
+			    void *buf, int len, int write)
+{
+	unsigned long offset = (addr) - vma->vm_start;
+	struct ttm_buffer_object *bo = vma->vm_private_data;
+	int ret;
+
+	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
+		return -EIO;
+
+	ret = ttm_bo_reserve(bo, true, false, NULL);
+	if (ret)
+		return ret;
+
+	switch(bo->mem.mem_type) {
+	case TTM_PL_SYSTEM:
+		if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+			ret = ttm_tt_swapin(bo->ttm);
+			if (unlikely(ret != 0))
+				return ret;
+		}
+		/* fall through */
+	case TTM_PL_TT:
+		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
+		break;
+	default:
+		if (bo->bdev->driver->access_memory)
+			ret = bo->bdev->driver->access_memory(
+				bo, offset, buf, len, write);
+		else
+			ret = -EIO;
+	}
+
+	ttm_bo_unreserve(bo);
+
+	return ret;
+}
+
 static const struct vm_operations_struct ttm_bo_vm_ops = {
 	.fault = ttm_bo_vm_fault,
 	.open = ttm_bo_vm_open,
-	.close = ttm_bo_vm_close
+	.close = ttm_bo_vm_close,
+	.access = ttm_bo_vm_access
 };
 
 static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 6bbd34d..04380ba 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -471,6 +471,23 @@  struct ttm_bo_driver {
 	 */
 	unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
 				    unsigned long page_offset);
+
+	/**
+	 * Read/write memory buffers for ptrace access
+	 *
+	 * @bo: the BO to access
+	 * @offset: the offset from the start of the BO
+	 * @buf: pointer to source/destination buffer
+	 * @len: number of bytes to copy
+	 * @write: whether to read (0) from or write (non-0) to BO
+	 *
+	 * If successful, this function should return the number of
+	 * bytes copied, -EIO otherwise. If the number of bytes
+	 * returned is < len, the function may be called again with
+	 * the remainder of the buffer to copy.
+	 */
+	int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
+			     void *buf, int len, int write);
 };
 
 /**
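
As a usage illustration: the new .access handler is what lets ptrace-based
tools such as gdb reach BO contents, since access_process_vm() falls back to
vma->vm_ops->access() for VM_IO mappings that get_user_pages() refuses to
touch. Below is a minimal standalone reader sketch, assuming the target pid
and an address inside the BO mapping (e.g. taken from /proc/<pid>/maps) are
already known:

/* Read one word from another process at an address inside its TTM BO
 * mapping.  The kernel routes this through access_process_vm(), which
 * now reaches ttm_bo_vm_access() for such VMAs; without an .access
 * handler the ptrace read fails with EIO.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(int argc, char **argv)
{
	pid_t pid;
	unsigned long addr;
	long word;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <pid> <addr>\n", argv[0]);
		return 1;
	}
	pid = (pid_t)strtol(argv[1], NULL, 0);
	addr = strtoul(argv[2], NULL, 0);

	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1) {
		perror("PTRACE_ATTACH");
		return 1;
	}
	waitpid(pid, NULL, 0);		/* wait for the tracee to stop */

	errno = 0;
	word = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
	if (word == -1 && errno)
		perror("PTRACE_PEEKDATA");
	else
		printf("0x%lx: 0x%lx\n", addr, word);

	ptrace(PTRACE_DETACH, pid, NULL, NULL);
	return 0;
}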