
[3/5] drm/vmwgfx: use core drm to extend/check vmw_execbuf_ioctl

Message ID 20190522164119.24139-3-emil.l.velikov@gmail.com (mailing list archive)
State New, archived
Series [1/5] vmwgfx: drop empty lastclose stub

Commit Message

Emil Velikov May 22, 2019, 4:41 p.m. UTC
From: Emil Velikov <emil.velikov@collabora.com>

Currently vmw_execbuf_ioctl() open-codes the permission checking, size
extending and copying that is already done in core drm.

Kill all the duplication, adding a few comments for clarity.

Cc: "VMware Graphics" <linux-graphics-maintainer@vmware.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Signed-off-by: Emil Velikov <emil.velikov@collabora.com>
---
Thomas, VMware team,

Please give this some testing on your end. I've only tested it against
mesa-master.

Thanks
Emil
---
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c     | 12 +-----
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h     |  4 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 52 +++++++++----------------
 3 files changed, 23 insertions(+), 45 deletions(-)
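
For readers who have not followed the core helper, below is a minimal sketch
of the argument handling drm_ioctl() performs before it calls a driver's
ioctl handler. The helper name and details are illustrative and simplified;
the authoritative code lives in drivers/gpu/drm/drm_ioctl.c.

#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>

/*
 * Simplified sketch (not the exact upstream code) of how core DRM prepares
 * the argument buffer for a handler such as vmw_execbuf_ioctl():
 *
 *   usize - size encoded in the userspace-supplied ioctl request number
 *   ksize - size recorded in the driver's ioctl table
 */
static int sketch_drm_copy_arg(void **kdata_out, void __user *uarg,
			       unsigned int usize, unsigned int ksize)
{
	/* The allocation covers whichever of the two sizes is larger ... */
	void *kdata = kmalloc(max(usize, ksize), GFP_KERNEL);

	if (!kdata)
		return -ENOMEM;

	/* ... userspace's view of the struct is copied in ... */
	if (copy_from_user(kdata, uarg, usize)) {
		kfree(kdata);
		return -EFAULT;
	}

	/*
	 * ... and any kernel-side fields an older userspace did not supply
	 * are zero-filled.  This is the "extended and zeropadded" behaviour
	 * the reworked vmw_execbuf_ioctl() relies on for execbuf version 1.
	 */
	if (ksize > usize)
		memset(kdata + usize, 0, ksize - usize);

	*kdata_out = kdata;
	return 0;
}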

Comments

Thomas Hellstrom May 22, 2019, 7:01 p.m. UTC | #1
Hi, Emil,

On Wed, 2019-05-22 at 17:41 +0100, Emil Velikov wrote:
> From: Emil Velikov <emil.velikov@collabora.com>
> 
> Currently vmw_execbuf_ioctl() open-codes the permission checking,
> size
> extending and copying that is already done in core drm.
> 
> Kill all the duplication, adding a few comments for clarity.

Ah, there is core functionality for this now.

What worries me with the core approach, though, is that the sizes are not
capped by the size of the kernel argument definition, which lets malicious
user-space force kmalloc()s of up to the maximum ioctl size. That should
probably be fixed before pushing this.
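
To illustrate the concern: the size that core DRM allocates and copies is
taken from the _IOC_SIZE field of the userspace-supplied request number, so
a caller can encode (almost) the largest value that field allows. A
hypothetical userspace sketch; the ioctl nr is left as a placeholder rather
than the real DRM_IOCTL_VMW_EXECBUF definition:

/* Hypothetical illustration only: userspace controls the _IOC_SIZE field. */
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <string.h>

static char big_arg[16383];	/* (1 << 14) - 1, the largest _IOC_SIZE value */

int force_large_copy(int drm_fd, unsigned int execbuf_nr /* placeholder */)
{
	/*
	 * Same direction and 'd' (DRM) type as the real execbuf request,
	 * but with the size field maxed out.
	 */
	unsigned long cmd = _IOC(_IOC_WRITE, 'd', execbuf_nr, sizeof(big_arg));

	memset(big_arg, 0, sizeof(big_arg));
	return ioctl(drm_fd, cmd, big_arg);
}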


> 
> Cc: "VMware Graphics" <linux-graphics-maintainer@vmware.com>
> Cc: Thomas Hellstrom <thellstrom@vmware.com>
> Cc: Daniel Vetter <daniel@ffwll.ch>
> Signed-off-by: Emil Velikov <emil.velikov@collabora.com>
> ---
> Thomas, VMware team,
> 
> Please give this some testing on your end. I've only tested it
> against
> mesa-master.

I'll review tomorrow and do some testing. Need to see if I can dig up
user-space apps with version 0...

Thanks,

Thomas

Daniel Vetter May 22, 2019, 7:09 p.m. UTC | #2
On Wed, May 22, 2019 at 9:01 PM Thomas Hellstrom <thellstrom@vmware.com> wrote:
>
> Hi, Emil,
>
> On Wed, 2019-05-22 at 17:41 +0100, Emil Velikov wrote:
> > From: Emil Velikov <emil.velikov@collabora.com>
> >
> > Currently vmw_execbuf_ioctl() open-codes the permission checking,
> > size
> > extending and copying that is already done in core drm.
> >
> > Kill all the duplication, adding a few comments for clarity.
>
> Ah, there is core functionality for this now.
>
> What worries me with the core approach, though, is that the sizes are not
> capped by the size of the kernel argument definition, which lets malicious
> user-space force kmalloc()s of up to the maximum ioctl size. That should
> probably be fixed before pushing this.

Hm, I always worked under the assumption that kmalloc and friends
should be hardened against userspace input. Otherwise stuff like
kmalloc_array doesn't make any sense: everyone just feeds it unchecked
input and expects that helper to handle overflows.

If we assume kmalloc isn't hardened against that, then we have a much
bigger problem than just the vmwgfx ioctls ...
-Daniel
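
For reference, a sketch of the kmalloc_array() pattern Daniel alludes to.
The struct and the count parameter are illustrative, not taken from the
vmwgfx UAPI; the point is that the helper rejects a count/size product
that would overflow:

#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative payload type for the sketch. */
struct sketch_item {
	u32 handle;
	u32 flags;
};

static struct sketch_item *sketch_alloc_items(size_t count_from_user)
{
	/*
	 * kmalloc_array() returns NULL both on allocation failure and when
	 * count_from_user * sizeof(struct sketch_item) would overflow, so an
	 * unchecked userspace count cannot silently become an undersized
	 * buffer.
	 */
	return kmalloc_array(count_from_user, sizeof(struct sketch_item),
			     GFP_KERNEL);
}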

Thomas Hellstrom May 23, 2019, 8:52 a.m. UTC | #3
On Wed, 2019-05-22 at 17:41 +0100, Emil Velikov wrote:
> From: Emil Velikov <emil.velikov@collabora.com>
> 
> Currently vmw_execbuf_ioctl() open-codes the permission checking,
> size
> extending and copying that is already done in core drm.
> 
> Kill all the duplication, adding a few comments for clarity.
> 
> Cc: "VMware Graphics" <linux-graphics-maintainer@vmware.com>
> Cc: Thomas Hellstrom <thellstrom@vmware.com>
> Cc: Daniel Vetter <daniel@ffwll.ch>
> Signed-off-by: Emil Velikov <emil.velikov@collabora.com>

Tested with the piglit quick profile, using execbuf versions 1 and 2.

Tested-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>


Thomas Hellstrom May 24, 2019, 6:05 a.m. UTC | #4
On Wed, 2019-05-22 at 21:09 +0200, Daniel Vetter wrote:
> On Wed, May 22, 2019 at 9:01 PM Thomas Hellstrom <
> thellstrom@vmware.com> wrote:
> > Hi, Emil,
> > 
> > On Wed, 2019-05-22 at 17:41 +0100, Emil Velikov wrote:
> > > From: Emil Velikov <emil.velikov@collabora.com>
> > > 
> > > Currently vmw_execbuf_ioctl() open-codes the permission checking,
> > > size
> > > extending and copying that is already done in core drm.
> > > 
> > > Kill all the duplication, adding a few comments for clarity.
> > 
> > Ah, there is core functionality for this now.
> > 
> > What worries me with the core approach, though, is that the sizes are
> > not capped by the size of the kernel argument definition, which lets
> > malicious user-space force kmalloc()s of up to the maximum ioctl size.
> > That should probably be fixed before pushing this.
> 
> Hm I always worked under the assumption that kmalloc and friends
> should be userspace hardened. Otherwise stuff like kmalloc_array
> doesn't make any sense, everyone just feeds it unchecked input and
> expects that helper to handle overflows.
> 
> If we assume kmalloc isn't hardened against that, then we have a much
> bigger problem than just vmwgfx ioctls ...

After checking the drm_ioctl code I realize that what I thought was new
behaviour has actually been around for a couple of years, so fixing it
isn't really tied to this patch series...

What caused me to react was that previously we used to have

e4fda9f264e1 ("drm: Perform ioctl command validation on the stored
kernel values")

and we seem to have lost that now, if not for the io flags then at
least for the size part. As for the size of the ioctl arguments: if the
kernel only touches a subset of the user-space specified size, I see no
reason why we should malloc / copy more than that?
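
A minimal sketch of the capping described above (hypothetical, written in
the shape of the earlier copy-in sketch rather than as a drop-in
drm_ioctl() change): allocate and copy no more than the kernel-side
argument size, and let the zeroed allocation provide the padding.

#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>

/* Hypothetical capped variant of the core copy-in sketched earlier. */
static int sketch_capped_copy_arg(void **kdata_out, void __user *uarg,
				  unsigned int usize, unsigned int ksize)
{
	/* Never allocate more than the kernel's declared argument size. */
	void *kdata = kzalloc(ksize, GFP_KERNEL);

	if (!kdata)
		return -ENOMEM;

	/*
	 * Copy at most ksize bytes; the zeroed tail already gives newer
	 * kernels the "extended with zeroes" view of an older struct.
	 */
	if (copy_from_user(kdata, uarg, min(usize, ksize))) {
		kfree(kdata);
		return -EFAULT;
	}

	*kdata_out = kdata;
	return 0;
}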

Now, given that the maximum ioctl argument size is quite limited, that
might not be a big problem, or a problem at all. Otherwise it would be
pretty easy for a malicious process to allocate most or all of a
system's resident memory?

/Thomas
Daniel Vetter May 24, 2019, 7:46 a.m. UTC | #5
On Fri, May 24, 2019 at 8:05 AM Thomas Hellstrom <thellstrom@vmware.com> wrote:
>
> On Wed, 2019-05-22 at 21:09 +0200, Daniel Vetter wrote:
> > On Wed, May 22, 2019 at 9:01 PM Thomas Hellstrom <
> > thellstrom@vmware.com> wrote:
> > > Hi, Emil,
> > >
> > > On Wed, 2019-05-22 at 17:41 +0100, Emil Velikov wrote:
> > > > From: Emil Velikov <emil.velikov@collabora.com>
> > > >
> > > > Currently vmw_execbuf_ioctl() open-codes the permission checking,
> > > > size
> > > > extending and copying that is already done in core drm.
> > > >
> > > > Kill all the duplication, adding a few comments for clarity.
> > >
> > > Ah, there is core functionality for this now.
> > >
> > > What worries me with the core approach, though, is that the sizes
> > > are not capped by the size of the kernel argument definition, which
> > > lets malicious user-space force kmalloc()s of up to the maximum ioctl
> > > size. That should probably be fixed before pushing this.
> >
> > Hm I always worked under the assumption that kmalloc and friends
> > should be userspace hardened. Otherwise stuff like kmalloc_array
> > doesn't make any sense, everyone just feeds it unchecked input and
> > expects that helper to handle overflows.
> >
> > If we assume kmalloc isn't hardened against that, then we have a much
> > bigger problem than just vmwgfx ioctls ...
>
> After checking the drm_ioctl code I realize that what I thought was new
> behaviour actually has been around for a couple of years, so
> fixing isn't really tied to this patch series...
>
> What caused me to react was that previously we used to have this
>
> e4fda9f264e1 ("drm: Perform ioctl command validation on the stored
> kernel values")
>
> and we seem to have lost that now, if not for the io flags then at
> least for the size part. For the size of the ioctl arguments, I think
> in general if the kernel only touches a subset of the user-space
> specified size I see no reason why we should malloc / copy more than
> that?

I guess we could optimize that, but we'd probably still need to zero
clear the added size for forward compat with newer userspace. Iirc
we've had some issues in this area.

> Now, given the fact that the maximum ioctl argument size is quite
> limited, that might not be a big problem or a problem at all. Otherwise
> it would be pretty easy for a malicious process to allocate most or all
> of a system's resident memory?

The biggest allocation you can get from kmalloc is limited by the
largest contiguous chunk alloc_pages gives you, which is limited by
MAX_ORDER in the page buddy allocator. You need lots of processes to
exhaust memory like that (and, like I said, the entire kernel would be
broken if we considered this a security issue). If you want to make
sure that a process group can't exhaust memory this way, then you need
to set appropriate cgroup limits.
-Daniel
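
To put rough numbers on both sides of this (illustrative only, assuming
4 KiB pages and the then-common MAX_ORDER of 11; exact limits depend on
the kernel configuration):

/*
 * Back-of-the-envelope bounds for the discussion above:
 *
 *   _IOC_SIZEBITS == 14, so the size field of an ioctl request tops out
 *   at (1 << 14) - 1 = 16383 bytes (~16 KiB) per call.
 *
 *   With 4 KiB pages and MAX_ORDER == 11, the largest buddy block is
 *   (1 << (MAX_ORDER - 1)) pages = 1024 * 4 KiB = 4 MiB, which also
 *   bounds what a single kmalloc() can hand out.
 *
 * So one oversized execbuf call pins at most ~16 KiB of kernel memory,
 * well below the largest contiguous allocation the page allocator
 * supports.
 */
#define SKETCH_MAX_IOCTL_ARG	((1u << 14) - 1)	/* 16383 bytes */
#define SKETCH_MAX_KMALLOC	(1024u * 4096u)		/* 4 MiB */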

Emil Velikov May 24, 2019, 10:53 a.m. UTC | #6
On 2019/05/24, Daniel Vetter wrote:
> On Fri, May 24, 2019 at 8:05 AM Thomas Hellstrom <thellstrom@vmware.com> wrote:
> >
> > On Wed, 2019-05-22 at 21:09 +0200, Daniel Vetter wrote:
> > > On Wed, May 22, 2019 at 9:01 PM Thomas Hellstrom <
> > > thellstrom@vmware.com> wrote:
> > > > Hi, Emil,
> > > >
> > > > On Wed, 2019-05-22 at 17:41 +0100, Emil Velikov wrote:
> > > > > From: Emil Velikov <emil.velikov@collabora.com>
> > > > >
> > > > > Currently vmw_execbuf_ioctl() open-codes the permission checking,
> > > > > size
> > > > > extending and copying that is already done in core drm.
> > > > >
> > > > > Kill all the duplication, adding a few comments for clarity.
> > > >
> > > > Ah, there is core functionality for this now.
> > > >
> > > > What worries me with the core approach, though, is that the sizes
> > > > are not capped by the size of the kernel argument definition, which
> > > > lets malicious user-space force kmalloc()s of up to the maximum
> > > > ioctl size. That should probably be fixed before pushing this.
> > >
> > > Hm I always worked under the assumption that kmalloc and friends
> > > should be userspace hardened. Otherwise stuff like kmalloc_array
> > > doesn't make any sense, everyone just feeds it unchecked input and
> > > expects that helper to handle overflows.
> > >
> > > If we assume kmalloc isn't hardened against that, then we have a much
> > > bigger problem than just vmwgfx ioctls ...
> >
> > After checking the drm_ioctl code I realize that what I thought was new
> > behaviour actually has been around for a couple of years, so
> > fixing isn't really tied to this patch series...
> >
> > What caused me to react was that previously we used to have this
> >
> > e4fda9f264e1 ("drm: Perform ioctl command validation on the stored
> > kernel values")
> >
> > and we seem to have lost that now, if not for the io flags then at
> > least for the size part. For the size of the ioctl arguments, I think
> > in general if the kernel only touches a subset of the user-space
> > specified size I see no reason why we should malloc / copy more than
> > that?
> 
> I guess we could optimize that, but we'd probably still need to zero
> clear the added size for forward compat with newer userspace. Iirc
> we've had some issues in this area.
> 
> > Now, given the fact that the maximum ioctl argument size is quite
> > limited, that might not be a big problem or a problem at all. Otherwise
> > it would be pretty easy for a malicious process to allocate most or all
> > of a system's resident memory?
> 
> The biggest you can allocate from kmalloc is limited by the largest
> contiguous chunk alloc_pages gives you, which is limited by MAX_ORDER
> from the page buddy allocator. You need lots of processes to be able to
> exhaust memory like that (and like I said, the entire kernel would be
> broken if we'd consider this a security issue). If you want to make
> sure that a process group can't exhaust memory this way then you need
> to set appropriate cgroups limits.

I do agree with the sentiment that drm_ioctl() could use some extra
optimisation and hardening. At the same time I would point out that the
code has been used as-is by vmwgfx and other drivers for years.

In other words: let's keep that work as an orthogonal series.

What do you guys think?
Emil
Thomas Hellström (VMware) May 24, 2019, 10:56 a.m. UTC | #7
On 5/24/19 12:53 PM, Emil Velikov wrote:
> On 2019/05/24, Daniel Vetter wrote:
>> On Fri, May 24, 2019 at 8:05 AM Thomas Hellstrom <thellstrom@vmware.com> wrote:
>>> On Wed, 2019-05-22 at 21:09 +0200, Daniel Vetter wrote:
>>>> On Wed, May 22, 2019 at 9:01 PM Thomas Hellstrom <
>>>> thellstrom@vmware.com> wrote:
>>>>> Hi, Emil,
>>>>>
>>>>> On Wed, 2019-05-22 at 17:41 +0100, Emil Velikov wrote:
>>>>>> From: Emil Velikov <emil.velikov@collabora.com>
>>>>>>
>>>>>> Currently vmw_execbuf_ioctl() open-codes the permission checking,
>>>>>> size
>>>>>> extending and copying that is already done in core drm.
>>>>>>
>>>>>> Kill all the duplication, adding a few comments for clarity.
>>>>> Ah, there is core functionality for this now.
>>>>>
>>>>> What worries me with the core approach, though, is that the sizes
>>>>> are not capped by the size of the kernel argument definition, which
>>>>> lets malicious user-space force kmalloc()s of up to the maximum ioctl
>>>>> size. That should probably be fixed before pushing this.
>>>> Hm I always worked under the assumption that kmalloc and friends
>>>> should be userspace hardened. Otherwise stuff like kmalloc_array
>>>> doesn't make any sense, everyone just feeds it unchecked input and
>>>> expects that helper to handle overflows.
>>>>
>>>> If we assume kmalloc isn't hardened against that, then we have a much
>>>> bigger problem than just vmwgfx ioctls ...
>>> After checking the drm_ioctl code I realize that what I thought was new
>>> behaviour actually has been around for a couple of years, so
>>> fixing isn't really tied to this patch series...
>>>
>>> What caused me to react was that previously we used to have this
>>>
>>> e4fda9f264e1 ("drm: Perform ioctl command validation on the stored
>>> kernel values")
>>>
>>> and we seem to have lost that now, if not for the io flags then at
>>> least for the size part. For the size of the ioctl arguments, I think
>>> in general if the kernel only touches a subset of the user-space
>>> specified size I see no reason why we should malloc / copy more than
>>> that?
>> I guess we could optimize that, but we'd probably still need to zero
>> clear the added size for forward compat with newer userspace. Iirc
>> we've had some issues in this area.
>>
>>> Now, given the fact that the maximum ioctl argument size is quite
>>> limited, that might not be a big problem or a problem at all. Otherwise
>>> it would be pretty easy for a malicious process to allocate most or all
>>> of a system's resident memory?
>> The biggest you can allocate from kmalloc is limited by the largest
>> contiguous chunk alloc_pages gives you, which is limited by MAX_ORDER
>> from the page buddy allocator. You need lots of processes to be able to
>> exhaust memory like that (and like I said, the entire kernel would be
>> broken if we'd consider this a security issue). If you want to make
>> sure that a process group can't exhaust memory this way then you need
>> to set appropriate cgroups limits.
> I do agree with the sentiment that drm_ioctl() could use some extra
> optimisation and hardening. At the same time I would point out that the
> code has been used as-is by vmwgfx and other drivers for years.
>
> In other words: let's keep that work as an orthogonal series.
>
> What do you guys think?

I agree. Then I only had a concern with one of the patches.

/Thomas



Patch

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index d3f108f7e52d..2cb6ae219e43 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -186,7 +186,7 @@  static const struct drm_ioctl_desc vmw_ioctls[] = {
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
 		      DRM_AUTH | DRM_RENDER_ALLOW),
-	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
+	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, DRM_AUTH |
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
 		      DRM_RENDER_ALLOW),
@@ -1140,15 +1140,7 @@  static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
 			&vmw_ioctls[nr - DRM_COMMAND_BASE];
 
 		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
-			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
-			if (unlikely(ret != 0))
-				return ret;
-
-			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
-				goto out_io_encoding;
-
-			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
-							_IOC_SIZE(cmd));
+			return ioctl_func(filp, cmd, arg);
 		} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
 			if (!drm_is_current_master(file_priv) &&
 			    !capable(CAP_SYS_ADMIN))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 9be2176cc260..f5bfac85f793 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -910,8 +910,8 @@  static inline struct page *vmw_piter_page(struct vmw_piter *viter)
  * Command submission - vmwgfx_execbuf.c
  */
 
-extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
-			     struct drm_file *file_priv, size_t size);
+extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv);
 extern int vmw_execbuf_process(struct drm_file *file_priv,
 			       struct vmw_private *dev_priv,
 			       void __user *user_commands,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 2ff7ba04d8c8..767e2b99618d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3977,54 +3977,40 @@  void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 }
 
-int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
-		      struct drm_file *file_priv, size_t size)
+int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct drm_vmw_execbuf_arg arg;
+	struct drm_vmw_execbuf_arg *arg = data;
 	int ret;
-	static const size_t copy_offset[] = {
-		offsetof(struct drm_vmw_execbuf_arg, context_handle),
-		sizeof(struct drm_vmw_execbuf_arg)};
 	struct dma_fence *in_fence = NULL;
 
-	if (unlikely(size < copy_offset[0])) {
-		VMW_DEBUG_USER("Invalid command size, ioctl %d\n",
-			       DRM_VMW_EXECBUF);
-		return -EINVAL;
-	}
-
-	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
-		return -EFAULT;
-
 	/*
 	 * Extend the ioctl argument while maintaining backwards compatibility:
-	 * We take different code paths depending on the value of arg.version.
+	 * We take different code paths depending on the value of arg->version.
+	 *
+	 * Note: The ioctl argument is extended and zeropadded by core DRM.
 	 */
-	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
-		     arg.version == 0)) {
+	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
+		     arg->version == 0)) {
 		VMW_DEBUG_USER("Incorrect execbuf version.\n");
 		return -EINVAL;
 	}
 
-	if (arg.version > 1 &&
-	    copy_from_user(&arg.context_handle,
-			   (void __user *) (data + copy_offset[0]),
-			   copy_offset[arg.version - 1] - copy_offset[0]) != 0)
-		return -EFAULT;
-
-	switch (arg.version) {
+	switch (arg->version) {
 	case 1:
-		arg.context_handle = (uint32_t) -1;
+		/* For v1 core DRM has extended + zeropadded the data */
+		arg->context_handle = (uint32_t) -1;
 		break;
 	case 2:
 	default:
+		/* For v2 and later core DRM would have correctly copied it */
 		break;
 	}
 
 	/* If imported a fence FD from elsewhere, then wait on it */
-	if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
-		in_fence = sync_file_get_fence(arg.imported_fence_fd);
+	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
+		in_fence = sync_file_get_fence(arg->imported_fence_fd);
 
 		if (!in_fence) {
 			VMW_DEBUG_USER("Cannot get imported fence\n");
@@ -4041,11 +4027,11 @@  int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
 		return ret;
 
 	ret = vmw_execbuf_process(file_priv, dev_priv,
-				  (void __user *)(unsigned long)arg.commands,
-				  NULL, arg.command_size, arg.throttle_us,
-				  arg.context_handle,
-				  (void __user *)(unsigned long)arg.fence_rep,
-				  NULL, arg.flags);
+				  (void __user *)(unsigned long)arg->commands,
+				  NULL, arg->command_size, arg->throttle_us,
+				  arg->context_handle,
+				  (void __user *)(unsigned long)arg->fence_rep,
+				  NULL, arg->flags);
 
 	ttm_read_unlock(&dev_priv->reservation_sem);
 	if (unlikely(ret != 0))