
[v2,4/5] drm/i915: ttm backend don't provide mmap_offset for kernel buffers

Message ID 20220412151838.1298956-5-bob.beckett@collabora.com (mailing list archive)
State New, archived
Series drm/i915: ttm for stolen region

Commit Message

Bob Beckett April 12, 2022, 3:18 p.m. UTC
stolen/kernel buffers should not be mmapable by userland.
do not provide callbacks to facilitate this for these buffers.

Signed-off-by: Robert Beckett <bob.beckett@collabora.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 32 +++++++++++++++++++++----
 1 file changed, 27 insertions(+), 5 deletions(-)

Comments

Thomas Hellström April 14, 2022, 2:05 p.m. UTC | #1
On Tue, 2022-04-12 at 15:18 +0000, Robert Beckett wrote:
> stolen/kernel buffers should not be mmapable by userland.
> do not provide callbacks to facilitate this for these buffers.
> 
> Signed-off-by: Robert Beckett <bob.beckett@collabora.com>
> ---
>  drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 32 +++++++++++++++++++++--
> --
>  1 file changed, 27 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> index a878910a563c..b20f81836c54 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> @@ -1092,8 +1092,8 @@ static void i915_ttm_unmap_virtual(struct
> drm_i915_gem_object *obj)
>         ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
>  }
>  
> -static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
> -       .name = "i915_gem_object_ttm",
> +static const struct drm_i915_gem_object_ops
> i915_gem_ttm_user_obj_ops = {
> +       .name = "i915_gem_object_ttm_user",
>         .flags = I915_GEM_OBJECT_IS_SHRINKABLE |
>                  I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST,
>  
> @@ -1111,6 +1111,21 @@ static const struct drm_i915_gem_object_ops
> i915_gem_ttm_obj_ops = {
>         .mmap_ops = &vm_ops_ttm,
>  };
>  
> +static const struct drm_i915_gem_object_ops
> i915_gem_ttm_kern_obj_ops = {
> +       .name = "i915_gem_object_ttm_kern",
> +       .flags = I915_GEM_OBJECT_IS_SHRINKABLE |
> +                I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST,
> +
> +       .get_pages = i915_ttm_get_pages,
> +       .put_pages = i915_ttm_put_pages,
> +       .truncate = i915_ttm_truncate,
> +       .shrink = i915_ttm_shrink,
> +
> +       .adjust_lru = i915_ttm_adjust_lru,
> +       .delayed_free = i915_ttm_delayed_free,
> +       .migrate = i915_ttm_migrate,
> +};

Do we really need two different ops here?

Since if we don't have mmap ops, basically that tells GEM it should do
the mmapping rather than TTM. 

That might of course come in handy for the shmem backend, but I don't
fully follow why we need this for stolen.
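
For illustration, a minimal sketch of that dispatch — the helper below is hypothetical and the exact checks are assumptions, not the real i915 mmap path:

/*
 * Hypothetical sketch, not verbatim i915 code: if the object's ops
 * table provides mmap_ops, TTM's fault handlers back the mapping;
 * without them, GEM must handle (or refuse) the mapping itself.
 */
static int sketch_gem_mmap(struct drm_i915_gem_object *obj,
			   struct vm_area_struct *vma)
{
	if (obj->ops->mmap_ops) {
		/* TTM-backed object: hand the VMA to TTM's vm_ops. */
		vma->vm_ops = obj->ops->mmap_ops;
		return 0;
	}

	/*
	 * No mmap_ops: GEM's own path takes over, or the mapping is
	 * rejected outright for kernel-internal objects like stolen.
	 */
	return -ENODEV;
}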

Also for the framebuffer handed over from BIOS to fbdev, does that need
mmapping and if so, how do we handle that?


/Thomas




> +
>  void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
>  {
>         struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
> @@ -1165,10 +1180,19 @@ int __i915_gem_ttm_object_init(struct
> intel_memory_region *mem,
>                 .no_wait_gpu = false,
>         };
>         enum ttm_bo_type bo_type;
> +       const struct drm_i915_gem_object_ops *ops;
>         int ret;
>  
>         drm_gem_private_object_init(&i915->drm, &obj->base, size);
> -       i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class,
> flags);
> +
> +       if (flags & I915_BO_ALLOC_USER &&
> intel_region_to_ttm_type(mem) != I915_PL_STOLEN) {
> +               bo_type = ttm_bo_type_device;
> +               ops = &i915_gem_ttm_user_obj_ops;
> +       } else {
> +               bo_type = ttm_bo_type_kernel;
> +               ops = &i915_gem_ttm_kern_obj_ops;
> +       }
> +       i915_gem_object_init(obj, ops, &lock_class, flags);
>  
>         obj->bo_offset = offset;
>  
> @@ -1178,8 +1202,6 @@ int __i915_gem_ttm_object_init(struct
> intel_memory_region *mem,
>  
>         INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL |
> __GFP_NOWARN);
>         mutex_init(&obj->ttm.get_io_page.lock);
> -       bo_type = (obj->flags & I915_BO_ALLOC_USER) ?
> ttm_bo_type_device :
> -               ttm_bo_type_kernel;
>  
>         obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
>
Bob Beckett April 14, 2022, 4:13 p.m. UTC | #2
On 14/04/2022 15:05, Thomas Hellström wrote:
> On Tue, 2022-04-12 at 15:18 +0000, Robert Beckett wrote:
>> stolen/kernel buffers should not be mmapable by userland.
>> do not provide callbacks to facilitate this for these buffers.
>>
>> Signed-off-by: Robert Beckett <bob.beckett@collabora.com>
>> ---
>>   drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 32 +++++++++++++++++++++--
>> --
>>   1 file changed, 27 insertions(+), 5 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
>> b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
>> index a878910a563c..b20f81836c54 100644
>> --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
>> @@ -1092,8 +1092,8 @@ static void i915_ttm_unmap_virtual(struct
>> drm_i915_gem_object *obj)
>>          ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
>>   }
>>   
>> -static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
>> -       .name = "i915_gem_object_ttm",
>> +static const struct drm_i915_gem_object_ops
>> i915_gem_ttm_user_obj_ops = {
>> +       .name = "i915_gem_object_ttm_user",
>>          .flags = I915_GEM_OBJECT_IS_SHRINKABLE |
>>                   I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST,
>>   
>> @@ -1111,6 +1111,21 @@ static const struct drm_i915_gem_object_ops
>> i915_gem_ttm_obj_ops = {
>>          .mmap_ops = &vm_ops_ttm,
>>   };
>>   
>> +static const struct drm_i915_gem_object_ops
>> i915_gem_ttm_kern_obj_ops = {
>> +       .name = "i915_gem_object_ttm_kern",
>> +       .flags = I915_GEM_OBJECT_IS_SHRINKABLE |
>> +                I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST,
>> +
>> +       .get_pages = i915_ttm_get_pages,
>> +       .put_pages = i915_ttm_put_pages,
>> +       .truncate = i915_ttm_truncate,
>> +       .shrink = i915_ttm_shrink,
>> +
>> +       .adjust_lru = i915_ttm_adjust_lru,
>> +       .delayed_free = i915_ttm_delayed_free,
>> +       .migrate = i915_ttm_migrate,
>> +};
> 
> Do we really need two different ops here?
> 
> Since if we don't have mmap ops, basically that tells GEM it should do
> the mmapping rather than TTM.
> 
> That might of course come in handy for the shmem backend, but I don't
> fully follow why we need this for stolen.

The main rationale for doing this was to avoid
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c:can_mmap() presuming
that it can use I915_MMAP_TYPE_FIXED.

As the original backend also did not have mmap_offset handlers for 
stolen, this seemed like a reasonable design.

If desired, we could add a special case for the testing logic, but those
special cases have a tendency to multiply.
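
To make the presumption concrete, a rough sketch (assumed shape only, not a verbatim copy of can_mmap()):

/*
 * Hypothetical sketch of the selftest check in question: an object
 * whose ops provide mmap_ops is presumed to accept
 * I915_MMAP_TYPE_FIXED.  With separate user/kern ops tables, stolen
 * and other kernel-internal objects stop triggering that presumption.
 */
static bool sketch_can_mmap(const struct drm_i915_gem_object *obj,
			    enum i915_mmap_type type)
{
	if (type == I915_MMAP_TYPE_FIXED)
		return obj->ops->mmap_ops != NULL;

	/* Other mmap types would need their own backend checks here. */
	return false;
}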

> 
> Also for the framebuffer handed over from BIOS to fbdev, does that need
> mmapping and if so, how do we handle that?
> 

I'm not sure of the use case there. Do you know of any igt test that
tests this? I can investigate further if you do not.

> 
> /Thomas
> 
> 
> 
> 
>> +
>>   void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
>>   {
>>          struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
>> @@ -1165,10 +1180,19 @@ int __i915_gem_ttm_object_init(struct
>> intel_memory_region *mem,
>>                  .no_wait_gpu = false,
>>          };
>>          enum ttm_bo_type bo_type;
>> +       const struct drm_i915_gem_object_ops *ops;
>>          int ret;
>>   
>>          drm_gem_private_object_init(&i915->drm, &obj->base, size);
>> -       i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class,
>> flags);
>> +
>> +       if (flags & I915_BO_ALLOC_USER &&
>> intel_region_to_ttm_type(mem) != I915_PL_STOLEN) {
>> +               bo_type = ttm_bo_type_device;
>> +               ops = &i915_gem_ttm_user_obj_ops;
>> +       } else {
>> +               bo_type = ttm_bo_type_kernel;
>> +               ops = &i915_gem_ttm_kern_obj_ops;
>> +       }
>> +       i915_gem_object_init(obj, ops, &lock_class, flags);
>>   
>>          obj->bo_offset = offset;
>>   
>> @@ -1178,8 +1202,6 @@ int __i915_gem_ttm_object_init(struct
>> intel_memory_region *mem,
>>   
>>          INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL |
>> __GFP_NOWARN);
>>          mutex_init(&obj->ttm.get_io_page.lock);
>> -       bo_type = (obj->flags & I915_BO_ALLOC_USER) ?
>> ttm_bo_type_device :
>> -               ttm_bo_type_kernel;
>>   
>>          obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
>>   
> 
>
Thomas Hellström April 27, 2022, 10:14 a.m. UTC | #3
Sorry for late reply,

On Thu, 2022-04-14 at 17:13 +0100, Robert Beckett wrote:
> 
> 
> On 14/04/2022 15:05, Thomas Hellström wrote:
> > On Tue, 2022-04-12 at 15:18 +0000, Robert Beckett wrote:
> > > stolen/kernel buffers should not be mmapable by userland.
> > > do not provide callbacks to facilitate this for these buffers.
> > > 
> > > Signed-off-by: Robert Beckett <bob.beckett@collabora.com>
> > > ---
> > >   drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 32
> > > +++++++++++++++++++++--
> > > --
> > >   1 file changed, 27 insertions(+), 5 deletions(-)
> > > 
> > > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> > > b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> > > index a878910a563c..b20f81836c54 100644
> > > --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> > > +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> > > @@ -1092,8 +1092,8 @@ static void i915_ttm_unmap_virtual(struct
> > > drm_i915_gem_object *obj)
> > >          ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
> > >   }
> > >   
> > > -static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops
> > > = {
> > > -       .name = "i915_gem_object_ttm",
> > > +static const struct drm_i915_gem_object_ops
> > > i915_gem_ttm_user_obj_ops = {
> > > +       .name = "i915_gem_object_ttm_user",
> > >          .flags = I915_GEM_OBJECT_IS_SHRINKABLE |
> > >                   I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST,
> > >   
> > > @@ -1111,6 +1111,21 @@ static const struct
> > > drm_i915_gem_object_ops
> > > i915_gem_ttm_obj_ops = {
> > >          .mmap_ops = &vm_ops_ttm,
> > >   };
> > >   
> > > +static const struct drm_i915_gem_object_ops
> > > i915_gem_ttm_kern_obj_ops = {
> > > +       .name = "i915_gem_object_ttm_kern",
> > > +       .flags = I915_GEM_OBJECT_IS_SHRINKABLE |
> > > +                I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST,
> > > +
> > > +       .get_pages = i915_ttm_get_pages,
> > > +       .put_pages = i915_ttm_put_pages,
> > > +       .truncate = i915_ttm_truncate,
> > > +       .shrink = i915_ttm_shrink,
> > > +
> > > +       .adjust_lru = i915_ttm_adjust_lru,
> > > +       .delayed_free = i915_ttm_delayed_free,
> > > +       .migrate = i915_ttm_migrate,
> > > +};
> > 
> > Do we really need two different ops here?
> > 
> > Since if we don't have mmap ops, basically that tells GEM it should
> > do
> > the mmapping rather than TTM.
> > 
> > That might of course come in handy for the shmem backend, but I
> > don't
> > fully follow why we need this for stolen.
> 
> The main rationale for doing this was to avoid
> drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c:can_mmap()
> presuming
> that it can use I915_MMAP_TYPE_FIXED.
> 
> As the original backend also did not have mmap_offset handlers for 
> stolen, this seemed like a reasonable design.
> 
> If desired, we could add a special case for the testing logic, but
> those
> special cases have a tendency to multiply.
> 
> > 
> > Also for the framebuffer handed over from BIOS to fbdev, does that
> > need
> > mmapping and if so, how do we handle that?
> > 
> 
> I'm not sure of the use case there. Do you know of any igt test that
> tests this? I can investigate further if you do not.

It would be if the fbdev driver at startup inherits some image that the
BIOS has preloaded into stolen, and then a client tries to write into
it. Not sure that this is a real use case though, or whether, in that
case, it takes a separate path for user-space mappings.

/Thomas



> 
> > 
> > /Thomas
> > 
> > 
> > 
> > 
> > > +
> > >   void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
> > >   {
> > >          struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
> > > @@ -1165,10 +1180,19 @@ int __i915_gem_ttm_object_init(struct
> > > intel_memory_region *mem,
> > >                  .no_wait_gpu = false,
> > >          };
> > >          enum ttm_bo_type bo_type;
> > > +       const struct drm_i915_gem_object_ops *ops;
> > >          int ret;
> > >   
> > >          drm_gem_private_object_init(&i915->drm, &obj->base,
> > > size);
> > > -       i915_gem_object_init(obj, &i915_gem_ttm_obj_ops,
> > > &lock_class,
> > > flags);
> > > +
> > > +       if (flags & I915_BO_ALLOC_USER &&
> > > intel_region_to_ttm_type(mem) != I915_PL_STOLEN) {
> > > +               bo_type = ttm_bo_type_device;
> > > +               ops = &i915_gem_ttm_user_obj_ops;
> > > +       } else {
> > > +               bo_type = ttm_bo_type_kernel;
> > > +               ops = &i915_gem_ttm_kern_obj_ops;
> > > +       }
> > > +       i915_gem_object_init(obj, ops, &lock_class, flags);
> > >   
> > >          obj->bo_offset = offset;
> > >   
> > > @@ -1178,8 +1202,6 @@ int __i915_gem_ttm_object_init(struct
> > > intel_memory_region *mem,
> > >   
> > >          INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL
> > > |
> > > __GFP_NOWARN);
> > >          mutex_init(&obj->ttm.get_io_page.lock);
> > > -       bo_type = (obj->flags & I915_BO_ALLOC_USER) ?
> > > ttm_bo_type_device :
> > > -               ttm_bo_type_kernel;
> > >   
> > >          obj->base.vma_node.driver_private =
> > > i915_gem_to_ttm(obj);
> > >   
> > 
> >

Patch

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index a878910a563c..b20f81836c54 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -1092,8 +1092,8 @@  static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
 	ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
 }
 
-static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
-	.name = "i915_gem_object_ttm",
+static const struct drm_i915_gem_object_ops i915_gem_ttm_user_obj_ops = {
+	.name = "i915_gem_object_ttm_user",
 	.flags = I915_GEM_OBJECT_IS_SHRINKABLE |
 		 I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST,
 
@@ -1111,6 +1111,21 @@  static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
 	.mmap_ops = &vm_ops_ttm,
 };
 
+static const struct drm_i915_gem_object_ops i915_gem_ttm_kern_obj_ops = {
+	.name = "i915_gem_object_ttm_kern",
+	.flags = I915_GEM_OBJECT_IS_SHRINKABLE |
+		 I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST,
+
+	.get_pages = i915_ttm_get_pages,
+	.put_pages = i915_ttm_put_pages,
+	.truncate = i915_ttm_truncate,
+	.shrink = i915_ttm_shrink,
+
+	.adjust_lru = i915_ttm_adjust_lru,
+	.delayed_free = i915_ttm_delayed_free,
+	.migrate = i915_ttm_migrate,
+};
+
 void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
 {
 	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
@@ -1165,10 +1180,19 @@  int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
 		.no_wait_gpu = false,
 	};
 	enum ttm_bo_type bo_type;
+	const struct drm_i915_gem_object_ops *ops;
 	int ret;
 
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
-	i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
+
+	if (flags & I915_BO_ALLOC_USER && intel_region_to_ttm_type(mem) != I915_PL_STOLEN) {
+		bo_type = ttm_bo_type_device;
+		ops = &i915_gem_ttm_user_obj_ops;
+	} else {
+		bo_type = ttm_bo_type_kernel;
+		ops = &i915_gem_ttm_kern_obj_ops;
+	}
+	i915_gem_object_init(obj, ops, &lock_class, flags);
 
 	obj->bo_offset = offset;
 
@@ -1178,8 +1202,6 @@  int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
 
 	INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
 	mutex_init(&obj->ttm.get_io_page.lock);
-	bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
-		ttm_bo_type_kernel;
 
 	obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);