[v3,2/8] ttm: turn ttm_bo_device.vma_manager into a pointer

Message ID 20190808093702.29512-3-kraxel@redhat.com (mailing list archive)
State New, archived
Series drm: add gem ttm helpers

Commit Message

Gerd Hoffmann Aug. 8, 2019, 9:36 a.m. UTC
Rename the embedded struct vma_offset_manager; it is named _vma_manager
now.  ttm_bo_device.vma_manager is now a pointer, pointing to the
embedded ttm_bo_device._vma_manager by default.

Add a ttm_bo_device_init_with_vma_manager() function which allows
initializing ttm with a different vma manager.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
 include/drm/ttm/ttm_bo_driver.h | 11 +++++++++--
 drivers/gpu/drm/ttm/ttm_bo.c    | 29 +++++++++++++++++++++--------
 drivers/gpu/drm/ttm/ttm_bo_vm.c |  6 +++---
 3 files changed, 33 insertions(+), 13 deletions(-)
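
For illustration, a minimal sketch of how a driver might call the new entry
point so that GEM and TTM share one offset manager (the foo_* names and the
dma32 flag are placeholders, not part of this series; the actual users appear
in later patches):

#include <drm/drm_device.h>
#include <drm/ttm/ttm_bo_driver.h>

/* Hypothetical driver structure, for illustration only. */
struct foo_device {
	struct drm_device drm;
	struct ttm_bo_device bdev;
};

static struct ttm_bo_driver foo_bo_driver;	/* assumed to be set up elsewhere */

static int foo_ttm_init(struct foo_device *foo)
{
	/*
	 * Pass the GEM offset manager (drm_device.vma_offset_manager) so
	 * that GEM and TTM resolve fake mmap offsets against the same
	 * drm_vma_offset_manager.
	 */
	return ttm_bo_device_init_with_vma_manager(&foo->bdev, &foo_bo_driver,
						   foo->drm.anon_inode->i_mapping,
						   foo->drm.vma_offset_manager,
						   true /* need_dma32 */);
}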

Comments

Christian König Aug. 8, 2019, 9:48 a.m. UTC | #1
Am 08.08.19 um 11:36 schrieb Gerd Hoffmann:
> Rename the embedded struct vma_offset_manager, it is named _vma_manager
> now.  ttm_bo_device.vma_manager is a pointer now, pointing to the
> embedded ttm_bo_device._vma_manager by default.
>
> Add ttm_bo_device_init_with_vma_manager() function which allows to
> initialize ttm with a different vma manager.

Can't we go down the route of completely removing the vma_manager from 
TTM? ttm_bo_mmap() would get the BO as parameter instead.

That would also make the verify_access callback completely superfluous 
and looks like a good step into the right direction of de-midlayering.

Christian.

>
> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
> ---
>   include/drm/ttm/ttm_bo_driver.h | 11 +++++++++--
>   drivers/gpu/drm/ttm/ttm_bo.c    | 29 +++++++++++++++++++++--------
>   drivers/gpu/drm/ttm/ttm_bo_vm.c |  6 +++---
>   3 files changed, 33 insertions(+), 13 deletions(-)
>
> diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
> index 3f1935c19a66..2f84d6bcd1a7 100644
> --- a/include/drm/ttm/ttm_bo_driver.h
> +++ b/include/drm/ttm/ttm_bo_driver.h
> @@ -441,7 +441,8 @@ extern struct ttm_bo_global {
>    *
>    * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
>    * @man: An array of mem_type_managers.
> - * @vma_manager: Address space manager
> + * @vma_manager: Address space manager (pointer)
> + * @_vma_manager: Address space manager (embedded)
>    * lru_lock: Spinlock that protects the buffer+device lru lists and
>    * ddestroy lists.
>    * @dev_mapping: A pointer to the struct address_space representing the
> @@ -464,7 +465,8 @@ struct ttm_bo_device {
>   	/*
>   	 * Protected by internal locks.
>   	 */
> -	struct drm_vma_offset_manager vma_manager;
> +	struct drm_vma_offset_manager *vma_manager;
> +	struct drm_vma_offset_manager _vma_manager;
>   
>   	/*
>   	 * Protected by the global:lru lock.
> @@ -597,6 +599,11 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
>   		       struct ttm_bo_driver *driver,
>   		       struct address_space *mapping,
>   		       bool need_dma32);
> +int ttm_bo_device_init_with_vma_manager(struct ttm_bo_device *bdev,
> +					struct ttm_bo_driver *driver,
> +					struct address_space *mapping,
> +					struct drm_vma_offset_manager *vma_manager,
> +					bool need_dma32);
>   
>   /**
>    * ttm_bo_unmap_virtual
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> index 10a861a1690c..0ed1a1182962 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -672,7 +672,7 @@ static void ttm_bo_release(struct kref *kref)
>   	struct ttm_bo_device *bdev = bo->bdev;
>   	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
>   
> -	drm_vma_offset_remove(&bdev->vma_manager, &bo->base.vma_node);
> +	drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
>   	ttm_mem_io_lock(man, false);
>   	ttm_mem_io_free_vm(bo);
>   	ttm_mem_io_unlock(man);
> @@ -1353,7 +1353,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
>   	 */
>   	if (bo->type == ttm_bo_type_device ||
>   	    bo->type == ttm_bo_type_sg)
> -		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->base.vma_node,
> +		ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
>   					 bo->mem.num_pages);
>   
>   	/* passed reservation objects should already be locked,
> @@ -1704,7 +1704,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
>   			pr_debug("Swap list %d was clean\n", i);
>   	spin_unlock(&glob->lru_lock);
>   
> -	drm_vma_offset_manager_destroy(&bdev->vma_manager);
> +	drm_vma_offset_manager_destroy(&bdev->_vma_manager);
>   
>   	if (!ret)
>   		ttm_bo_global_release();
> @@ -1713,10 +1713,11 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
>   }
>   EXPORT_SYMBOL(ttm_bo_device_release);
>   
> -int ttm_bo_device_init(struct ttm_bo_device *bdev,
> -		       struct ttm_bo_driver *driver,
> -		       struct address_space *mapping,
> -		       bool need_dma32)
> +int ttm_bo_device_init_with_vma_manager(struct ttm_bo_device *bdev,
> +					struct ttm_bo_driver *driver,
> +					struct address_space *mapping,
> +					struct drm_vma_offset_manager *vma_manager,
> +					bool need_dma32)
>   {
>   	struct ttm_bo_global *glob = &ttm_bo_glob;
>   	int ret;
> @@ -1737,7 +1738,8 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
>   	if (unlikely(ret != 0))
>   		goto out_no_sys;
>   
> -	drm_vma_offset_manager_init(&bdev->vma_manager,
> +	bdev->vma_manager = vma_manager;
> +	drm_vma_offset_manager_init(&bdev->_vma_manager,
>   				    DRM_FILE_PAGE_OFFSET_START,
>   				    DRM_FILE_PAGE_OFFSET_SIZE);
>   	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
> @@ -1754,6 +1756,17 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
>   	ttm_bo_global_release();
>   	return ret;
>   }
> +EXPORT_SYMBOL(ttm_bo_device_init_with_vma_manager);
> +
> +int ttm_bo_device_init(struct ttm_bo_device *bdev,
> +		       struct ttm_bo_driver *driver,
> +		       struct address_space *mapping,
> +		       bool need_dma32)
> +{
> +	return ttm_bo_device_init_with_vma_manager(bdev, driver, mapping,
> +						   &bdev->_vma_manager,
> +						   need_dma32);
> +}
>   EXPORT_SYMBOL(ttm_bo_device_init);
>   
>   /*
> diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
> index 85f5bcbe0c76..d4eecde8d050 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
> @@ -409,16 +409,16 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
>   	struct drm_vma_offset_node *node;
>   	struct ttm_buffer_object *bo = NULL;
>   
> -	drm_vma_offset_lock_lookup(&bdev->vma_manager);
> +	drm_vma_offset_lock_lookup(bdev->vma_manager);
>   
> -	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
> +	node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
>   	if (likely(node)) {
>   		bo = container_of(node, struct ttm_buffer_object,
>   				  base.vma_node);
>   		bo = ttm_bo_get_unless_zero(bo);
>   	}
>   
> -	drm_vma_offset_unlock_lookup(&bdev->vma_manager);
> +	drm_vma_offset_unlock_lookup(bdev->vma_manager);
>   
>   	if (!bo)
>   		pr_err("Could not find buffer object to map\n");
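
A minimal sketch of the direction Christian proposes, assuming a new BO-based
entry point next to ttm_bo_vm_ops in ttm_bo_vm.c (the function name is
illustrative, not from this series): the caller resolves the fake offset to a
BO itself, so TTM needs neither its own vma_manager nor the verify_access
callback for mapping.

int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	/* The caller already looked the BO up and checked access rights. */
	ttm_bo_get(bo);

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = bo;
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}
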
Gerd Hoffmann Aug. 8, 2019, 10:35 a.m. UTC | #2
On Thu, Aug 08, 2019 at 09:48:49AM +0000, Koenig, Christian wrote:
> Am 08.08.19 um 11:36 schrieb Gerd Hoffmann:
> > Rename the embedded struct vma_offset_manager, it is named _vma_manager
> > now.  ttm_bo_device.vma_manager is a pointer now, pointing to the
> > embedded ttm_bo_device._vma_manager by default.
> >
> > Add ttm_bo_device_init_with_vma_manager() function which allows to
> > initialize ttm with a different vma manager.
> 
> Can't we go down the route of completely removing the vma_manager from 
> TTM? ttm_bo_mmap() would get the BO as parameter instead.

It surely makes sense to target that.  This patch can be a first step
into that direction.  It allows gem and ttm to use the same
vma_offset_manager (see patch #3), which in turn makes various gem
functions work on ttm objects (see patch #4 for vram helpers).

> That would also make the verify_access callback completely superfluous 
> and looks like a good step into the right direction of de-midlayering.

Hmm, right, noticed that too while working on another patch series.
Guess I'll try to merge those two and see where I end up ...

cheers,
  Gerd
Daniel Vetter Aug. 8, 2019, 12:02 p.m. UTC | #3
On Thu, Aug 08, 2019 at 12:35:21PM +0200, Gerd Hoffmann wrote:
> On Thu, Aug 08, 2019 at 09:48:49AM +0000, Koenig, Christian wrote:
> > Am 08.08.19 um 11:36 schrieb Gerd Hoffmann:
> > > Rename the embedded struct vma_offset_manager, it is named _vma_manager
> > > now.  ttm_bo_device.vma_manager is a pointer now, pointing to the
> > > embedded ttm_bo_device._vma_manager by default.
> > >
> > > Add ttm_bo_device_init_with_vma_manager() function which allows to
> > > initialize ttm with a different vma manager.
> > 
> > Can't we go down the route of completely removing the vma_manager from 
> > TTM? ttm_bo_mmap() would get the BO as parameter instead.
> 
> It surely makes sense to target that.  This patch can be a first step
> into that direction.  It allows gem and ttm to use the same
> vma_offset_manager (see patch #3), which in turn makes various gem
> functions work on ttm objects (see patch #4 for vram helpers).

+1 on cleaning this up for good, at least long-term ...

> > That would also make the verify_access callback completely superfluous 
> > and looks like a good step into the right direction of de-midlayering.
> 
> Hmm, right, noticed that too while working on another patch series.
> Guess I'll try to merge those two and see where I end up ...

... but if it gets too invasive I'd vote for incremental changes. Even if
we completely rip out the vma/mmap lookup stuff from ttm, we still need to
keep a copy somewhere for vmwgfx. Or would the evil plan be the vmwgfx
would use the gem mmap helpers too?
-Daniel
Thomas Hellström (VMware) Aug. 8, 2019, 12:43 p.m. UTC | #4
On 8/8/19 2:02 PM, Daniel Vetter wrote:
> On Thu, Aug 08, 2019 at 12:35:21PM +0200, Gerd Hoffmann wrote:
>> On Thu, Aug 08, 2019 at 09:48:49AM +0000, Koenig, Christian wrote:
>>> Am 08.08.19 um 11:36 schrieb Gerd Hoffmann:
>>>> Rename the embedded struct vma_offset_manager, it is named _vma_manager
>>>> now.  ttm_bo_device.vma_manager is a pointer now, pointing to the
>>>> embedded ttm_bo_device._vma_manager by default.
>>>>
>>>> Add ttm_bo_device_init_with_vma_manager() function which allows to
>>>> initialize ttm with a different vma manager.
>>> Can't we go down the route of completely removing the vma_manager from
>>> TTM? ttm_bo_mmap() would get the BO as parameter instead.
>> It surely makes sense to target that.  This patch can be a first step
>> into that direction.  It allows gem and ttm to use the same
>> vma_offset_manager (see patch #3), which in turn makes various gem
>> functions work on ttm objects (see patch #4 for vram helpers).
> +1 on cleaning this up for good, at least long-term ...
>
>>> That would also make the verify_access callback completely superfluous
>>> and looks like a good step into the right direction of de-midlayering.
>> Hmm, right, noticed that too while working on another patch series.
>> Guess I'll try to merge those two and see where I end up ...
> ... but if it gets too invasive I'd vote for incremental changes. Even if
> we completely rip out the vma/mmap lookup stuff from ttm, we still need to
> keep a copy somewhere for vmwgfx. Or would the evil plan be the vmwgfx
> would use the gem mmap helpers too?

I don't think it would be too invasive. We could simply move 
ttm_bo_vm_lookup into a vmw_mmap.

/Thomas




> -Daniel
Christian König Aug. 8, 2019, 12:57 p.m. UTC | #5
Am 08.08.19 um 14:43 schrieb Thomas Hellström (VMware):
> On 8/8/19 2:02 PM, Daniel Vetter wrote:
>> On Thu, Aug 08, 2019 at 12:35:21PM +0200, Gerd Hoffmann wrote:
>>> On Thu, Aug 08, 2019 at 09:48:49AM +0000, Koenig, Christian wrote:
>>>> Am 08.08.19 um 11:36 schrieb Gerd Hoffmann:
>>>>> Rename the embedded struct vma_offset_manager, it is named 
>>>>> _vma_manager
>>>>> now.  ttm_bo_device.vma_manager is a pointer now, pointing to the
>>>>> embedded ttm_bo_device._vma_manager by default.
>>>>>
>>>>> Add ttm_bo_device_init_with_vma_manager() function which allows to
>>>>> initialize ttm with a different vma manager.
>>>> Can't we go down the route of completely removing the vma_manager from
>>>> TTM? ttm_bo_mmap() would get the BO as parameter instead.
>>> It surely makes sense to target that.  This patch can be a first step
>>> into that direction.  It allows gem and ttm to use the same
>>> vma_offset_manager (see patch #3), which in turn makes various gem
>>> functions work on ttm objects (see patch #4 for vram helpers).
>> +1 on cleaning this up for good, at least long-term ...
>>
>>>> That would also make the verify_access callback completely superfluous
>>>> and looks like a good step into the right direction of de-midlayering.
>>> Hmm, right, noticed that too while working on another patch series.
>>> Guess I'll try to merge those two and see where I end up ...
>> ... but if it gets too invasive I'd vote for incremental changes. 
>> Even if
>> we completely rip out the vma/mmap lookup stuff from ttm, we still 
>> need to
>> keep a copy somewhere for vmwgfx. Or would the evil plan be the vmwgfx
>> would use the gem mmap helpers too?
>
> I don't think it would be too invasive. We could simply move 
> ttm_bo_vm_lookup into a vmw_mmap.

Yeah, agree. vmwgfx would just inherit what TTM is currently doing and 
everybody else would start to use the GEM helpers.

Switching the vma_manager to a pointer might be helpful in the middle of 
the patch set, but as a stand-alone change it looks like a detour to me.

I suggest starting by adding the bo as a parameter to ttm_bo_mmap and 
moving the lookup out of that function.

Christian.

>
> /Thomas
>
>
>
>
>> -Daniel
>
>
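
To make the vmwgfx side concrete, a rough sketch of what such a vmw_mmap
could look like once the offset lookup moves out of ttm_bo_mmap (hypothetical
code: vmw_bo_vm_lookup() stands for the lookup helper inherited from today's
ttm_bo_vm_lookup(), and ttm_bo_mmap_obj() is the BO-based entry point
sketched earlier in this thread):

static int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
	struct ttm_buffer_object *bo;
	int ret;

	/* Offset -> BO lookup, moved out of TTM into the driver. */
	bo = vmw_bo_vm_lookup(&dev_priv->bdev, vma->vm_pgoff, vma_pages(vma));
	if (!bo)
		return -EINVAL;

	/* Driver-side access check replaces the verify_access callback. */
	ret = vmw_verify_access(bo, filp);
	if (ret)
		goto out_unref;

	ret = ttm_bo_mmap_obj(vma, bo);

out_unref:
	ttm_bo_put(bo);		/* drop the reference taken by the lookup */
	return ret;
}
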
Gerd Hoffmann Aug. 8, 2019, 1:40 p.m. UTC | #6
Hi,

> > > That would also make the verify_access callback completely superfluous 
> > > and looks like a good step into the right direction of de-midlayering.
> > 
> > Hmm, right, noticed that too while working on another patch series.
> > Guess I'll try to merge those two and see where I end up ...
> 
> ... but if it gets too invasive I'd vote for incremental changes.

Yep, this is what I'm up to.  Sketching things up with vram helpers and
qxl, in a way that we can switch over drivers one by one.

Once all drivers are switched removing ttm_bo_device.vma_manager
altogether should be easy.

> Even if
> we completely rip out the vma/mmap lookup stuff from ttm, we still need to
> keep a copy somewhere for vmwgfx.

If vmwgfx is the only user we can probably just move things from ttm to
vmwgfx.

> Or would the evil plan be the vmwgfx
> would use the gem mmap helpers too?

That would work as well ;)

cheers,
  Gerd

Patch

diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 3f1935c19a66..2f84d6bcd1a7 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -441,7 +441,8 @@  extern struct ttm_bo_global {
  *
  * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
  * @man: An array of mem_type_managers.
- * @vma_manager: Address space manager
+ * @vma_manager: Address space manager (pointer)
+ * @_vma_manager: Address space manager (embedded)
  * lru_lock: Spinlock that protects the buffer+device lru lists and
  * ddestroy lists.
  * @dev_mapping: A pointer to the struct address_space representing the
@@ -464,7 +465,8 @@  struct ttm_bo_device {
 	/*
 	 * Protected by internal locks.
 	 */
-	struct drm_vma_offset_manager vma_manager;
+	struct drm_vma_offset_manager *vma_manager;
+	struct drm_vma_offset_manager _vma_manager;
 
 	/*
 	 * Protected by the global:lru lock.
@@ -597,6 +599,11 @@  int ttm_bo_device_init(struct ttm_bo_device *bdev,
 		       struct ttm_bo_driver *driver,
 		       struct address_space *mapping,
 		       bool need_dma32);
+int ttm_bo_device_init_with_vma_manager(struct ttm_bo_device *bdev,
+					struct ttm_bo_driver *driver,
+					struct address_space *mapping,
+					struct drm_vma_offset_manager *vma_manager,
+					bool need_dma32);
 
 /**
  * ttm_bo_unmap_virtual
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 10a861a1690c..0ed1a1182962 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -672,7 +672,7 @@  static void ttm_bo_release(struct kref *kref)
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
-	drm_vma_offset_remove(&bdev->vma_manager, &bo->base.vma_node);
+	drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
 	ttm_mem_io_lock(man, false);
 	ttm_mem_io_free_vm(bo);
 	ttm_mem_io_unlock(man);
@@ -1353,7 +1353,7 @@  int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	 */
 	if (bo->type == ttm_bo_type_device ||
 	    bo->type == ttm_bo_type_sg)
-		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->base.vma_node,
+		ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
 					 bo->mem.num_pages);
 
 	/* passed reservation objects should already be locked,
@@ -1704,7 +1704,7 @@  int ttm_bo_device_release(struct ttm_bo_device *bdev)
 			pr_debug("Swap list %d was clean\n", i);
 	spin_unlock(&glob->lru_lock);
 
-	drm_vma_offset_manager_destroy(&bdev->vma_manager);
+	drm_vma_offset_manager_destroy(&bdev->_vma_manager);
 
 	if (!ret)
 		ttm_bo_global_release();
@@ -1713,10 +1713,11 @@  int ttm_bo_device_release(struct ttm_bo_device *bdev)
 }
 EXPORT_SYMBOL(ttm_bo_device_release);
 
-int ttm_bo_device_init(struct ttm_bo_device *bdev,
-		       struct ttm_bo_driver *driver,
-		       struct address_space *mapping,
-		       bool need_dma32)
+int ttm_bo_device_init_with_vma_manager(struct ttm_bo_device *bdev,
+					struct ttm_bo_driver *driver,
+					struct address_space *mapping,
+					struct drm_vma_offset_manager *vma_manager,
+					bool need_dma32)
 {
 	struct ttm_bo_global *glob = &ttm_bo_glob;
 	int ret;
@@ -1737,7 +1738,8 @@  int ttm_bo_device_init(struct ttm_bo_device *bdev,
 	if (unlikely(ret != 0))
 		goto out_no_sys;
 
-	drm_vma_offset_manager_init(&bdev->vma_manager,
+	bdev->vma_manager = vma_manager;
+	drm_vma_offset_manager_init(&bdev->_vma_manager,
 				    DRM_FILE_PAGE_OFFSET_START,
 				    DRM_FILE_PAGE_OFFSET_SIZE);
 	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
@@ -1754,6 +1756,17 @@  int ttm_bo_device_init(struct ttm_bo_device *bdev,
 	ttm_bo_global_release();
 	return ret;
 }
+EXPORT_SYMBOL(ttm_bo_device_init_with_vma_manager);
+
+int ttm_bo_device_init(struct ttm_bo_device *bdev,
+		       struct ttm_bo_driver *driver,
+		       struct address_space *mapping,
+		       bool need_dma32)
+{
+	return ttm_bo_device_init_with_vma_manager(bdev, driver, mapping,
+						   &bdev->_vma_manager,
+						   need_dma32);
+}
 EXPORT_SYMBOL(ttm_bo_device_init);
 
 /*
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 85f5bcbe0c76..d4eecde8d050 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -409,16 +409,16 @@  static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
 	struct drm_vma_offset_node *node;
 	struct ttm_buffer_object *bo = NULL;
 
-	drm_vma_offset_lock_lookup(&bdev->vma_manager);
+	drm_vma_offset_lock_lookup(bdev->vma_manager);
 
-	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
+	node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
 	if (likely(node)) {
 		bo = container_of(node, struct ttm_buffer_object,
 				  base.vma_node);
 		bo = ttm_bo_get_unless_zero(bo);
 	}
 
-	drm_vma_offset_unlock_lookup(&bdev->vma_manager);
+	drm_vma_offset_unlock_lookup(bdev->vma_manager);
 
 	if (!bo)
 		pr_err("Could not find buffer object to map\n");