
[RFC,1/1] uio: Add dma-buf import ioctls

Message ID: 1550953697-7288-2-git-send-email-hyun.kwon@xilinx.com (mailing list archive)
State: New, archived
Series: uio: Add dmabuf import ioctl

Commit Message

Hyun Kwon Feb. 23, 2019, 8:28 p.m. UTC
Add the dmabuf map / unmap interfaces. This allows the user driver
to import an external dmabuf and use it from user space.

Signed-off-by: Hyun Kwon <hyun.kwon@xilinx.com>
---
 drivers/uio/Makefile         |   2 +-
 drivers/uio/uio.c            |  43 +++++++++
 drivers/uio/uio_dmabuf.c     | 210 +++++++++++++++++++++++++++++++++++++++++++
 drivers/uio/uio_dmabuf.h     |  26 ++++++
 include/uapi/linux/uio/uio.h |  33 +++++++
 5 files changed, 313 insertions(+), 1 deletion(-)
 create mode 100644 drivers/uio/uio_dmabuf.c
 create mode 100644 drivers/uio/uio_dmabuf.h
 create mode 100644 include/uapi/linux/uio/uio.h
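
As a rough illustration of the intended usage, the sketch below shows how a
userspace driver might exercise the proposed import path. The
UIO_IOC_MAP_DMABUF / UIO_IOC_UNMAP_DMABUF request names and the
<linux/uio/uio.h> include path are assumptions here (the new uapi header is
not reproduced on this page); only the uio_dmabuf_args field names and the
UIO_DMABUF_DIR_* values are taken from the patch.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/uio/uio.h>	/* assumed install path of the new uapi header */

/*
 * Import a dmabuf (for example one allocated from ION) into a UIO device
 * and return the DMA address the device should be programmed with.
 * UIO_IOC_MAP_DMABUF is an assumed request name; the real macro lives in
 * the uapi header that is not shown on this page.
 */
static int uio_import_dmabuf(int uio_fd, int dbuf_fd, uint64_t size,
			     uint64_t *dma_addr)
{
	struct uio_dmabuf_args args = {
		.dbuf_fd = dbuf_fd,
		.size	 = size,
		.dir	 = UIO_DMABUF_DIR_BIDIR,
	};

	if (ioctl(uio_fd, UIO_IOC_MAP_DMABUF, &args) < 0)
		return -1;

	/* The kernel is expected to fill in args.dma_addr. */
	*dma_addr = args.dma_addr;
	return 0;
}

A user driver would open the UIO node (e.g. /dev/uio0), obtain a dmabuf fd
from an allocator such as ION, call a helper like the one above, program the
returned address into the device through the usual UIO register mmap, and
later issue the corresponding unmap ioctl (UIO_IOC_UNMAP_DMABUF here, again
an assumed name) before closing.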

Comments

Greg Kroah-Hartman Feb. 26, 2019, 11:53 a.m. UTC | #1
On Sat, Feb 23, 2019 at 12:28:17PM -0800, Hyun Kwon wrote:
> Add the dmabuf map / unmap interfaces. This allows the user driver
> to be able to import the external dmabuf and use it from user space.
> 
> Signed-off-by: Hyun Kwon <hyun.kwon@xilinx.com>
> ---
>  drivers/uio/Makefile         |   2 +-
>  drivers/uio/uio.c            |  43 +++++++++
>  drivers/uio/uio_dmabuf.c     | 210 +++++++++++++++++++++++++++++++++++++++++++
>  drivers/uio/uio_dmabuf.h     |  26 ++++++
>  include/uapi/linux/uio/uio.h |  33 +++++++
>  5 files changed, 313 insertions(+), 1 deletion(-)
>  create mode 100644 drivers/uio/uio_dmabuf.c
>  create mode 100644 drivers/uio/uio_dmabuf.h
>  create mode 100644 include/uapi/linux/uio/uio.h
> 
> diff --git a/drivers/uio/Makefile b/drivers/uio/Makefile
> index c285dd2..5da16c7 100644
> --- a/drivers/uio/Makefile
> +++ b/drivers/uio/Makefile
> @@ -1,5 +1,5 @@
>  # SPDX-License-Identifier: GPL-2.0
> -obj-$(CONFIG_UIO)	+= uio.o
> +obj-$(CONFIG_UIO)	+= uio.o uio_dmabuf.o
>  obj-$(CONFIG_UIO_CIF)	+= uio_cif.o
>  obj-$(CONFIG_UIO_PDRV_GENIRQ)	+= uio_pdrv_genirq.o
>  obj-$(CONFIG_UIO_DMEM_GENIRQ)	+= uio_dmem_genirq.o
> diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
> index 1313422..6841f98 100644
> --- a/drivers/uio/uio.c
> +++ b/drivers/uio/uio.c
> @@ -24,6 +24,12 @@
>  #include <linux/kobject.h>
>  #include <linux/cdev.h>
>  #include <linux/uio_driver.h>
> +#include <linux/list.h>
> +#include <linux/mutex.h>
> +
> +#include <uapi/linux/uio/uio.h>
> +
> +#include "uio_dmabuf.h"
>  
>  #define UIO_MAX_DEVICES		(1U << MINORBITS)
>  
> @@ -454,6 +460,8 @@ static irqreturn_t uio_interrupt(int irq, void *dev_id)
>  struct uio_listener {
>  	struct uio_device *dev;
>  	s32 event_count;
> +	struct list_head dbufs;
> +	struct mutex dbufs_lock; /* protect @dbufs */
>  };
>  
>  static int uio_open(struct inode *inode, struct file *filep)
> @@ -500,6 +508,9 @@ static int uio_open(struct inode *inode, struct file *filep)
>  	if (ret)
>  		goto err_infoopen;
>  
> +	INIT_LIST_HEAD(&listener->dbufs);
> +	mutex_init(&listener->dbufs_lock);
> +
>  	return 0;
>  
>  err_infoopen:
> @@ -529,6 +540,10 @@ static int uio_release(struct inode *inode, struct file *filep)
>  	struct uio_listener *listener = filep->private_data;
>  	struct uio_device *idev = listener->dev;
>  
> +	ret = uio_dmabuf_cleanup(idev, &listener->dbufs, &listener->dbufs_lock);
> +	if (ret)
> +		dev_err(&idev->dev, "failed to clean up the dma bufs\n");
> +
>  	mutex_lock(&idev->info_lock);
>  	if (idev->info && idev->info->release)
>  		ret = idev->info->release(idev->info, inode);
> @@ -652,6 +667,33 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
>  	return retval ? retval : sizeof(s32);
>  }
>  
> +static long uio_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)

We have resisted adding a uio ioctl for a long time, can't you do this
through sysfs somehow?

A meta-comment about your ioctl structure:

> +#define UIO_DMABUF_DIR_BIDIR	1
> +#define UIO_DMABUF_DIR_TO_DEV	2
> +#define UIO_DMABUF_DIR_FROM_DEV	3
> +#define UIO_DMABUF_DIR_NONE	4

enumerated type?

> +
> +struct uio_dmabuf_args {
> +	__s32	dbuf_fd;
> +	__u64	dma_addr;
> +	__u64	size;
> +	__u32	dir;

Why the odd alignment?  Are you sure this is the best packing for such a
structure?

Why is dbuf_fd __s32?  dir can be __u8, right?
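
Purely as an illustration of these two points (this is not from the patch),
the direction values could become an enum and the struct could be laid out
with the 64-bit members first so there is no implicit padding; the enum tag
and the reserved field below are made up:

enum uio_dmabuf_dir {
	UIO_DMABUF_DIR_BIDIR	= 1,
	UIO_DMABUF_DIR_TO_DEV	= 2,
	UIO_DMABUF_DIR_FROM_DEV	= 3,
	UIO_DMABUF_DIR_NONE	= 4,
};

struct uio_dmabuf_args {
	__u64	dma_addr;	/* filled in by the kernel on map */
	__u64	size;		/* size of the dmabuf in bytes */
	__s32	dbuf_fd;	/* dmabuf fd; fds are ints, hence signed */
	__u8	dir;		/* one of enum uio_dmabuf_dir */
	__u8	reserved[3];	/* explicit padding, sizeof stays a multiple of 8 */
};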

I don't know that dma layer very well, it would be good to get some
review from others to see if this really is even a viable thing to do.
The fd handling seems a bit "odd" here, but maybe I just do not
understand it.

thanks,

greg k-h
Daniel Vetter Feb. 26, 2019, 12:06 p.m. UTC | #2
On Tue, Feb 26, 2019 at 12:53 PM Greg Kroah-Hartman
<gregkh@linuxfoundation.org> wrote:
>
> On Sat, Feb 23, 2019 at 12:28:17PM -0800, Hyun Kwon wrote:
> > Add the dmabuf map / unmap interfaces. This allows the user driver
> > to be able to import the external dmabuf and use it from user space.
> >
> > Signed-off-by: Hyun Kwon <hyun.kwon@xilinx.com>
> >
> > [snip]
> >
> > +static long uio_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
>
> We have resisted adding a uio ioctl for a long time, can't you do this
> through sysfs somehow?
>
> A meta-comment about your ioctl structure:
>
> > +#define UIO_DMABUF_DIR_BIDIR 1
> > +#define UIO_DMABUF_DIR_TO_DEV        2
> > +#define UIO_DMABUF_DIR_FROM_DEV      3
> > +#define UIO_DMABUF_DIR_NONE  4
>
> enumerated type?
>
> > +
> > +struct uio_dmabuf_args {
> > +     __s32   dbuf_fd;
> > +     __u64   dma_addr;
> > +     __u64   size;
> > +     __u32   dir;
>
> Why the odd alignment?  Are you sure this is the best packing for such a
> structure?
>
> Why is dbuf_fd __s32?  dir can be __u8, right?
>
> I don't know that dma layer very well, it would be good to get some
> review from others to see if this really is even a viable thing to do.
> The fd handling seems a bit "odd" here, but maybe I just do not
> understand it.

Frankly looks like a ploy to sidestep review by graphics folks. We'd
ask for the userspace first :-)

Also, exporting dma_addr to userspace is considered a very bad idea.
If you want to do this properly, you need a minimal in-kernel memory
manager, and those tend to be based on top of drm_gem.c and merged
through the gpu tree. The last place where we accidentally leaked a
dma addr for gpu buffers was in the fbdev code, and we plugged that
one with

commit 4be9bd10e22dfc7fc101c5cf5969ef2d3a042d8a (tag:
drm-misc-next-fixes-2018-10-03)
Author: Neil Armstrong <narmstrong@baylibre.com>
Date:   Fri Sep 28 14:05:55 2018 +0200

    drm/fb_helper: Allow leaking fbdev smem_start

Together with cuse the above patch should be enough to implement a drm
driver entirely in userspace at least.

Cheers, Daniel
Hyun Kwon Feb. 26, 2019, 10:16 p.m. UTC | #3
Hi Greg,

Thanks for the comments.

On Tue, 2019-02-26 at 03:53:11 -0800, Greg Kroah-Hartman wrote:
> On Sat, Feb 23, 2019 at 12:28:17PM -0800, Hyun Kwon wrote:
> > Add the dmabuf map / unmap interfaces. This allows the user driver
> > to be able to import the external dmabuf and use it from user space.
> > 
> > Signed-off-by: Hyun Kwon <hyun.kwon@xilinx.com>
> >
> > [snip]
> >
> > +static long uio_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
> 
> We have resisted adding a uio ioctl for a long time, can't you do this
> through sysfs somehow?
> 

The dmabuf is managed as a per-process resource, so it's hard to do it
through sysfs.

> A meta-comment about your ioctl structure:
> 
> > +#define UIO_DMABUF_DIR_BIDIR	1
> > +#define UIO_DMABUF_DIR_TO_DEV	2
> > +#define UIO_DMABUF_DIR_FROM_DEV	3
> > +#define UIO_DMABUF_DIR_NONE	4
> 
> enumerated type?
> 
> > +
> > +struct uio_dmabuf_args {
> > +	__s32	dbuf_fd;
> > +	__u64	dma_addr;
> > +	__u64	size;
> > +	__u32	dir;
> 
> Why the odd alignment?  Are you sure this is the best packing for such a
> structure?
> 
> Why is dbuf_fd __s32?  dir can be __u8, right?

The dmabuf fd is defined as an int, so __s32 seems correct. Please let me know
otherwise. The dir can be __u8. Will fix if there is a v2 at all.

> 
> I don't know that dma layer very well, it would be good to get some
> review from others to see if this really is even a viable thing to do.
> The fd handling seems a bit "odd" here, but maybe I just do not
> understand it.

Agreed. So I'm looking forward to feedback, or to a more sensible
alternative if there is one.

Thanks,
-hyun

> 
> thanks,
> 
> greg k-h
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
Hyun Kwon Feb. 26, 2019, 10:18 p.m. UTC | #4
Hi Daniel,

Thanks for the comment.

On Tue, 2019-02-26 at 04:06:13 -0800, Daniel Vetter wrote:
> On Tue, Feb 26, 2019 at 12:53 PM Greg Kroah-Hartman
> <gregkh@linuxfoundation.org> wrote:
> >
> > On Sat, Feb 23, 2019 at 12:28:17PM -0800, Hyun Kwon wrote:
> > > Add the dmabuf map / unmap interfaces. This allows the user driver
> > > to be able to import the external dmabuf and use it from user space.
> > >
> > > Signed-off-by: Hyun Kwon <hyun.kwon@xilinx.com>
> > >
> > > [snip]
> > >
> > > +static long uio_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
> >
> > We have resisted adding a uio ioctl for a long time, can't you do this
> > through sysfs somehow?
> >
> > A meta-comment about your ioctl structure:
> >
> > > +#define UIO_DMABUF_DIR_BIDIR 1
> > > +#define UIO_DMABUF_DIR_TO_DEV        2
> > > +#define UIO_DMABUF_DIR_FROM_DEV      3
> > > +#define UIO_DMABUF_DIR_NONE  4
> >
> > enumerated type?
> >
> > > +
> > > +struct uio_dmabuf_args {
> > > +     __s32   dbuf_fd;
> > > +     __u64   dma_addr;
> > > +     __u64   size;
> > > +     __u32   dir;
> >
> > Why the odd alignment?  Are you sure this is the best packing for such a
> > structure?
> >
> > Why is dbuf_fd __s32?  dir can be __u8, right?
> >
> > I don't know that dma layer very well, it would be good to get some
> > review from others to see if this really is even a viable thing to do.
> > The fd handling seems a bit "odd" here, but maybe I just do not
> > understand it.
> 
> Frankly looks like a ploy to sidestep review by graphics folks. We'd
> ask for the userspace first :-)

Please refer to pull request [1].

For anyone interested in more details: libmetal is the abstraction layer
which provides platform-independent APIs. The backend implementation
can be selected for different platforms, e.g. an RTOS, Linux, or
standalone (Xilinx). For Linux, it supports UIO / vfio as of now.
The actual user space drivers sit on top of libmetal. Such drivers can be
found in [2]. This is why I try to avoid any device-specific code in the
Linux kernel.

> 
> Also, exporting dma_addr to userspace is considered a very bad idea.

I agree, hence the RFC to pick some brains. :-) Would it make sense
if this call didn't export the physical address, but instead took
only the dmabuf fd and the register offsets to be programmed?
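
(Purely to make that idea concrete, and only as an assumption about what
such an interface might look like rather than a worked-out proposal, the
call could take a register offset instead of returning an address, so the
DMA address never leaves the kernel:)

/*
 * Hypothetical alternative: userspace never sees the DMA address. The
 * kernel imports the dmabuf and writes the resulting address into the
 * given register offset of the selected UIO memory region itself.
 */
struct uio_dmabuf_attach_args {
	__s32	dbuf_fd;	/* dmabuf to import */
	__u32	mem_index;	/* which UIO memory region holds the register */
	__u64	reg_offset;	/* offset of the address register in that region */
	__u64	size;		/* size of the dmabuf in bytes */
	__u8	dir;		/* DMA direction, as in the patch above */
	__u8	reserved[7];
};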

> If you want to do this properly, you need a minimal in-kernel memory
> manager, and those tend to be based on top of drm_gem.c and merged
> through the gpu tree. The last place where we accidentally leaked a
> dma addr for gpu buffers was in the fbdev code, and we plugged that
> one with

Could you please help me understand how having an in-kernel memory manager
helps? Isn't it just moving the same dmabuf import / paddr export functionality
into a different module: kernel memory manager vs uio? In fact, Xilinx does have
such a memory manager based on drm gem downstream. But this time we took
the approach of implementing this through a generic dmabuf allocator, ION, and
enabling the import capability in the UIO infrastructure instead.

Thanks,
-hyun

[1] https://github.com/OpenAMP/libmetal/pull/82/commits/951e2762bd487c98919ad12f2aa81773d8fe7859
[2] https://github.com/Xilinx/embeddedsw/tree/master/XilinxProcessorIPLib/drivers

> 
> commit 4be9bd10e22dfc7fc101c5cf5969ef2d3a042d8a (tag:
> drm-misc-next-fixes-2018-10-03)
> Author: Neil Armstrong <narmstrong@baylibre.com>
> Date:   Fri Sep 28 14:05:55 2018 +0200
> 
>     drm/fb_helper: Allow leaking fbdev smem_start
> 
> Together with cuse the above patch should be enough to implement a drm
> driver entirely in userspace at least.
> 
> Cheers, Daniel
> -- 
> Daniel Vetter
> Software Engineer, Intel Corporation
> +41 (0) 79 365 57 48 - http://blog.ffwll.ch
Daniel Vetter Feb. 27, 2019, 2:13 p.m. UTC | #5
On Tue, Feb 26, 2019 at 11:20 PM Hyun Kwon <hyun.kwon@xilinx.com> wrote:
>
> Hi Daniel,
>
> Thanks for the comment.
>
> On Tue, 2019-02-26 at 04:06:13 -0800, Daniel Vetter wrote:
> > On Tue, Feb 26, 2019 at 12:53 PM Greg Kroah-Hartman
> > <gregkh@linuxfoundation.org> wrote:
> > >
> > > On Sat, Feb 23, 2019 at 12:28:17PM -0800, Hyun Kwon wrote:
> > > > Add the dmabuf map / unmap interfaces. This allows the user driver
> > > > to be able to import the external dmabuf and use it from user space.
> > > >
> > > > Signed-off-by: Hyun Kwon <hyun.kwon@xilinx.com>
> > > >
> > > > [snip]
> > > >
> > > > +static long uio_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
> > >
> > > We have resisted adding a uio ioctl for a long time, can't you do this
> > > through sysfs somehow?
> > >
> > > A meta-comment about your ioctl structure:
> > >
> > > > +#define UIO_DMABUF_DIR_BIDIR 1
> > > > +#define UIO_DMABUF_DIR_TO_DEV        2
> > > > +#define UIO_DMABUF_DIR_FROM_DEV      3
> > > > +#define UIO_DMABUF_DIR_NONE  4
> > >
> > > enumerated type?
> > >
> > > > +
> > > > +struct uio_dmabuf_args {
> > > > +     __s32   dbuf_fd;
> > > > +     __u64   dma_addr;
> > > > +     __u64   size;
> > > > +     __u32   dir;
> > >
> > > Why the odd alignment?  Are you sure this is the best packing for such a
> > > structure?
> > >
> > > Why is dbuf_fd __s32?  dir can be __u8, right?
> > >
> > > I don't know that dma layer very well, it would be good to get some
> > > review from others to see if this really is even a viable thing to do.
> > > The fd handling seems a bit "odd" here, but maybe I just do not
> > > understand it.
> >
> > Frankly looks like a ploy to sidestep review by graphics folks. We'd
> > ask for the userspace first :-)
>
> Please refer to pull request [1].
>
> For any interest in more details, the libmetal is the abstraction layer
> which provides platform independent APIs. The backend implementation
> can be selected per different platforms: ex, rtos, linux,
> standalone (xilinx),,,. For Linux, it supports UIO / vfio as of now.
> The actual user space drivers sit on top of libmetal. Such drivers can be
> found in [2]. This is why I try to avoid any device specific code in
> Linux kernel.
>
> >
> > Also, exporting dma_addr to userspace is considered a very bad idea.
>
> I agree, hence the RFC to pick some brains. :-) Would it make sense
> if this call doesn't export the physicall address, but instead takes
> only the dmabuf fd and register offsets to be programmed?
>
> > If you want to do this properly, you need a minimal in-kernel memory
> > manager, and those tend to be based on top of drm_gem.c and merged
> > through the gpu tree. The last place where we accidentally leaked a
> > dma addr for gpu buffers was in the fbdev code, and we plugged that
> > one with
>
> Could you please help me understand how having a in-kernel memory manager
> helps? Isn't it just moving same dmabuf import / paddr export functionality
> in different modules: kernel memory manager vs uio. In fact, Xilinx does have
> such memory manager based on drm gem in downstream. But for this time we took
> the approach of implementing this through generic dmabuf allocator, ION, and
> enabling the import capability in the UIO infrastructure instead.

There's a group of people working on upstreaming a xilinx drm driver
already. Which driver are we talking about? Can you pls provide a link
to that xilinx drm driver?

Thanks, Daniel

> Thanks,
> -hyun
>
> [1] https://github.com/OpenAMP/libmetal/pull/82/commits/951e2762bd487c98919ad12f2aa81773d8fe7859
> [2] https://github.com/Xilinx/embeddedsw/tree/master/XilinxProcessorIPLib/drivers
>
> >
> > commit 4be9bd10e22dfc7fc101c5cf5969ef2d3a042d8a (tag:
> > drm-misc-next-fixes-2018-10-03)
> > Author: Neil Armstrong <narmstrong@baylibre.com>
> > Date:   Fri Sep 28 14:05:55 2018 +0200
> >
> >     drm/fb_helper: Allow leaking fbdev smem_start
> >
> > Together with cuse the above patch should be enough to implement a drm
> > driver entirely in userspace at least.
> >
> > Cheers, Daniel
> > --
> > Daniel Vetter
> > Software Engineer, Intel Corporation
> > +41 (0) 79 365 57 48 - http://blog.ffwll.ch
Hyun Kwon Feb. 28, 2019, 12:36 a.m. UTC | #6
Hi Daniel,

On Wed, 2019-02-27 at 06:13:45 -0800, Daniel Vetter wrote:
> On Tue, Feb 26, 2019 at 11:20 PM Hyun Kwon <hyun.kwon@xilinx.com> wrote:
> >
> > Hi Daniel,
> >
> > Thanks for the comment.
> >
> > On Tue, 2019-02-26 at 04:06:13 -0800, Daniel Vetter wrote:
> > > On Tue, Feb 26, 2019 at 12:53 PM Greg Kroah-Hartman
> > > <gregkh@linuxfoundation.org> wrote:
> > > >
> > > > On Sat, Feb 23, 2019 at 12:28:17PM -0800, Hyun Kwon wrote:
> > > > > Add the dmabuf map / unmap interfaces. This allows the user driver
> > > > > to be able to import the external dmabuf and use it from user space.
> > > > >
> > > > > Signed-off-by: Hyun Kwon <hyun.kwon@xilinx.com>
> > > > >
> > > > > [snip]
> > > > >
> > > > > +static long uio_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
> > > >
> > > > We have resisted adding a uio ioctl for a long time, can't you do this
> > > > through sysfs somehow?
> > > >
> > > > A meta-comment about your ioctl structure:
> > > >
> > > > > +#define UIO_DMABUF_DIR_BIDIR 1
> > > > > +#define UIO_DMABUF_DIR_TO_DEV        2
> > > > > +#define UIO_DMABUF_DIR_FROM_DEV      3
> > > > > +#define UIO_DMABUF_DIR_NONE  4
> > > >
> > > > enumerated type?
> > > >
> > > > > +
> > > > > +struct uio_dmabuf_args {
> > > > > +     __s32   dbuf_fd;
> > > > > +     __u64   dma_addr;
> > > > > +     __u64   size;
> > > > > +     __u32   dir;
> > > >
> > > > Why the odd alignment?  Are you sure this is the best packing for such a
> > > > structure?
> > > >
> > > > Why is dbuf_fd __s32?  dir can be __u8, right?
> > > >
> > > > I don't know that dma layer very well, it would be good to get some
> > > > review from others to see if this really is even a viable thing to do.
> > > > The fd handling seems a bit "odd" here, but maybe I just do not
> > > > understand it.
> > >
> > > Frankly looks like a ploy to sidestep review by graphics folks. We'd
> > > ask for the userspace first :-)
> >
> > Please refer to pull request [1].
> >
> > For any interest in more details, the libmetal is the abstraction layer
> > which provides platform independent APIs. The backend implementation
> > can be selected per different platforms: ex, rtos, linux,
> > standalone (xilinx),,,. For Linux, it supports UIO / vfio as of now.
> > The actual user space drivers sit on top of libmetal. Such drivers can be
> > found in [2]. This is why I try to avoid any device specific code in
> > Linux kernel.
> >
> > >
> > > Also, exporting dma_addr to userspace is considered a very bad idea.
> >
> > I agree, hence the RFC to pick some brains. :-) Would it make sense
> > if this call doesn't export the physicall address, but instead takes
> > only the dmabuf fd and register offsets to be programmed?
> >
> > > If you want to do this properly, you need a minimal in-kernel memory
> > > manager, and those tend to be based on top of drm_gem.c and merged
> > > through the gpu tree. The last place where we accidentally leaked a
> > > dma addr for gpu buffers was in the fbdev code, and we plugged that
> > > one with
> >
> > Could you please help me understand how having a in-kernel memory manager
> > helps? Isn't it just moving same dmabuf import / paddr export functionality
> > in different modules: kernel memory manager vs uio. In fact, Xilinx does have
> > such memory manager based on drm gem in downstream. But for this time we took
> > the approach of implementing this through generic dmabuf allocator, ION, and
> > enabling the import capability in the UIO infrastructure instead.
> 
> There's a group of people working on upstreaming a xilinx drm driver
> already. Which driver are we talking about? Can you pls provide a link
> to that xilinx drm driver?
> 

The one I was pushing [1] is implemented purely for display, and not
intended for anything other than that as of now. What I'm referring to above
is part of the Xilinx FPGA (acceleration) runtime [2]. As far as I know,
it's planned to be upstreamed, but that work has not yet started. The Xilinx
runtime software has its own in-kernel memory manager based on drm_cma_gem
with its own ioctls [3].

Thanks,
-hyun

[1] https://patchwork.kernel.org/patch/10513001/
[2] https://github.com/Xilinx/XRT
[3] https://github.com/Xilinx/XRT/tree/master/src/runtime_src/driver/zynq/drm

> Thanks, Daniel
> 
> > Thanks,
> > -hyun
> >
> > [1] https://github.com/OpenAMP/libmetal/pull/82/commits/951e2762bd487c98919ad12f2aa81773d8fe7859
> > [2] https://github.com/Xilinx/embeddedsw/tree/master/XilinxProcessorIPLib/drivers
> >
> > >
> > > commit 4be9bd10e22dfc7fc101c5cf5969ef2d3a042d8a (tag:
> > > drm-misc-next-fixes-2018-10-03)
> > > Author: Neil Armstrong <narmstrong@baylibre.com>
> > > Date:   Fri Sep 28 14:05:55 2018 +0200
> > >
> > >     drm/fb_helper: Allow leaking fbdev smem_start
> > >
> > > Together with cuse the above patch should be enough to implement a drm
> > > driver entirely in userspace at least.
> > >
> > > Cheers, Daniel
> > > --
> > > Daniel Vetter
> > > Software Engineer, Intel Corporation
> > > +41 (0) 79 365 57 48 - http://blog.ffwll.ch
> 
> 
> 
> -- 
> Daniel Vetter
> Software Engineer, Intel Corporation
> +41 (0) 79 365 57 48 - http://blog.ffwll.ch
Daniel Vetter Feb. 28, 2019, 10:01 a.m. UTC | #7
On Wed, Feb 27, 2019 at 04:36:06PM -0800, Hyun Kwon wrote:
> Hi Daniel,
> 
> On Wed, 2019-02-27 at 06:13:45 -0800, Daniel Vetter wrote:
> > On Tue, Feb 26, 2019 at 11:20 PM Hyun Kwon <hyun.kwon@xilinx.com> wrote:
> > >
> > > Hi Daniel,
> > >
> > > Thanks for the comment.
> > >
> > > On Tue, 2019-02-26 at 04:06:13 -0800, Daniel Vetter wrote:
> > > > On Tue, Feb 26, 2019 at 12:53 PM Greg Kroah-Hartman
> > > > <gregkh@linuxfoundation.org> wrote:
> > > > >
> > > > > On Sat, Feb 23, 2019 at 12:28:17PM -0800, Hyun Kwon wrote:
> > > > > > Add the dmabuf map / unmap interfaces. This allows the user driver
> > > > > > to be able to import the external dmabuf and use it from user space.
> > > > > >
> > > > > > Signed-off-by: Hyun Kwon <hyun.kwon@xilinx.com>
> > > > > >
> > > > > > [snip]
> > > > > >
> > > > > > +static long uio_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
> > > > >
> > > > > We have resisted adding a uio ioctl for a long time, can't you do this
> > > > > through sysfs somehow?
> > > > >
> > > > > A meta-comment about your ioctl structure:
> > > > >
> > > > > > +#define UIO_DMABUF_DIR_BIDIR 1
> > > > > > +#define UIO_DMABUF_DIR_TO_DEV        2
> > > > > > +#define UIO_DMABUF_DIR_FROM_DEV      3
> > > > > > +#define UIO_DMABUF_DIR_NONE  4
> > > > >
> > > > > enumerated type?
> > > > >
> > > > > > +
> > > > > > +struct uio_dmabuf_args {
> > > > > > +     __s32   dbuf_fd;
> > > > > > +     __u64   dma_addr;
> > > > > > +     __u64   size;
> > > > > > +     __u32   dir;
> > > > >
> > > > > Why the odd alignment?  Are you sure this is the best packing for such a
> > > > > structure?
> > > > >
> > > > > Why is dbuf_fd __s32?  dir can be __u8, right?
> > > > >
> > > > > I don't know that dma layer very well, it would be good to get some
> > > > > review from others to see if this really is even a viable thing to do.
> > > > > The fd handling seems a bit "odd" here, but maybe I just do not
> > > > > understand it.
> > > >
> > > > Frankly looks like a ploy to sidestep review by graphics folks. We'd
> > > > ask for the userspace first :-)
> > >
> > > Please refer to pull request [1].
> > >
> > > For any interest in more details, the libmetal is the abstraction layer
> > > which provides platform independent APIs. The backend implementation
> > > can be selected per different platforms: ex, rtos, linux,
> > > standalone (xilinx),,,. For Linux, it supports UIO / vfio as of now.
> > > The actual user space drivers sit on top of libmetal. Such drivers can be
> > > found in [2]. This is why I try to avoid any device specific code in
> > > Linux kernel.
> > >
> > > >
> > > > Also, exporting dma_addr to userspace is considered a very bad idea.
> > >
> > > I agree, hence the RFC to pick some brains. :-) Would it make sense
> > > if this call doesn't export the physicall address, but instead takes
> > > only the dmabuf fd and register offsets to be programmed?
> > >
> > > > If you want to do this properly, you need a minimal in-kernel memory
> > > > manager, and those tend to be based on top of drm_gem.c and merged
> > > > through the gpu tree. The last place where we accidentally leaked a
> > > > dma addr for gpu buffers was in the fbdev code, and we plugged that
> > > > one with
> > >
> > > Could you please help me understand how having a in-kernel memory manager
> > > helps? Isn't it just moving same dmabuf import / paddr export functionality
> > > in different modules: kernel memory manager vs uio. In fact, Xilinx does have
> > > such memory manager based on drm gem in downstream. But for this time we took
> > > the approach of implementing this through generic dmabuf allocator, ION, and
> > > enabling the import capability in the UIO infrastructure instead.
> > 
> > There's a group of people working on upstreaming a xilinx drm driver
> > already. Which driver are we talking about? Can you pls provide a link
> > to that xilinx drm driver?
> > 
> 
> The one I was pushing [1] is implemented purely for display, and not
> intended for anything other than that as of now. What I'm refering to above
> is part of Xilinx FPGA (acceleration) runtime [2]. As far as I know,
> it's planned to be upstreamed, but not yet started. The Xilinx runtime
> software has its own in-kernel memory manager based on drm_cma_gem with
> its own ioctls [3].
> 
> Thanks,
> -hyun
> 
> [1] https://patchwork.kernel.org/patch/10513001/
> [2] https://github.com/Xilinx/XRT
> [3] https://github.com/Xilinx/XRT/tree/master/src/runtime_src/driver/zynq/drm

I've done a very quick look only, and yes, this is kinda what I'd expect.
Doing a small drm gem driver for an fpga/accelerator that needs lots of
memory is the right architecture, since at the low level of kernel
interfaces a gpu really isn't anything other than an accelerator.

And from a very cursory look the gem driver you mentioned (I only scrolled
through the ioctl handler quickly) looks reasonable.
-Daniel
> 
> > Thanks, Daniel
> > 
> > > Thanks,
> > > -hyun
> > >
> > > [1] https://github.com/OpenAMP/libmetal/pull/82/commits/951e2762bd487c98919ad12f2aa81773d8fe7859
> > > [2] https://github.com/Xilinx/embeddedsw/tree/master/XilinxProcessorIPLib/drivers
> > >
> > > >
> > > > commit 4be9bd10e22dfc7fc101c5cf5969ef2d3a042d8a (tag:
> > > > drm-misc-next-fixes-2018-10-03)
> > > > Author: Neil Armstrong <narmstrong@baylibre.com>
> > > > Date:   Fri Sep 28 14:05:55 2018 +0200
> > > >
> > > >     drm/fb_helper: Allow leaking fbdev smem_start
> > > >
> > > > Together with cuse the above patch should be enough to implement a drm
> > > > driver entirely in userspace at least.
> > > >
> > > > Cheers, Daniel
> > > > --
> > > > Daniel Vetter
> > > > Software Engineer, Intel Corporation
> > > > +41 (0) 79 365 57 48 - http://blog.ffwll.ch
> > 
> > 
> > 
> > -- 
> > Daniel Vetter
> > Software Engineer, Intel Corporation
> > +41 (0) 79 365 57 48 - http://blog.ffwll.ch
Hyun Kwon March 1, 2019, 12:18 a.m. UTC | #8
Hi Daniel,

On Thu, 2019-02-28 at 02:01:46 -0800, Daniel Vetter wrote:
> On Wed, Feb 27, 2019 at 04:36:06PM -0800, Hyun Kwon wrote:
> > Hi Daniel,
> > 
> > On Wed, 2019-02-27 at 06:13:45 -0800, Daniel Vetter wrote:
> > > On Tue, Feb 26, 2019 at 11:20 PM Hyun Kwon <hyun.kwon@xilinx.com> wrote:
> > > >
> > > > Hi Daniel,
> > > >
> > > > Thanks for the comment.
> > > >
> > > > On Tue, 2019-02-26 at 04:06:13 -0800, Daniel Vetter wrote:
> > > > > On Tue, Feb 26, 2019 at 12:53 PM Greg Kroah-Hartman
> > > > > <gregkh@linuxfoundation.org> wrote:
> > > > > >
> > > > > > On Sat, Feb 23, 2019 at 12:28:17PM -0800, Hyun Kwon wrote:
> > > > > > > Add the dmabuf map / unmap interfaces. This allows the user driver
> > > > > > > to be able to import the external dmabuf and use it from user space.
> > > > > > >
> > > > > > > Signed-off-by: Hyun Kwon <hyun.kwon@xilinx.com>
> > > > > > > ---
> > > > > > >  drivers/uio/Makefile         |   2 +-
> > > > > > >  drivers/uio/uio.c            |  43 +++++++++
> > > > > > >  drivers/uio/uio_dmabuf.c     | 210 +++++++++++++++++++++++++++++++++++++++++++
> > > > > > >  drivers/uio/uio_dmabuf.h     |  26 ++++++
> > > > > > >  include/uapi/linux/uio/uio.h |  33 +++++++
> > > > > > >  5 files changed, 313 insertions(+), 1 deletion(-)
> > > > > > >  create mode 100644 drivers/uio/uio_dmabuf.c
> > > > > > >  create mode 100644 drivers/uio/uio_dmabuf.h
> > > > > > >  create mode 100644 include/uapi/linux/uio/uio.h
> > > > > > >
> > > > > > > diff --git a/drivers/uio/Makefile b/drivers/uio/Makefile
> > > > > > > index c285dd2..5da16c7 100644
> > > > > > > --- a/drivers/uio/Makefile
> > > > > > > +++ b/drivers/uio/Makefile
> > > > > > > @@ -1,5 +1,5 @@

[snip]

> > > > > Frankly looks like a ploy to sidestep review by graphics folks. We'd
> > > > > ask for the userspace first :-)
> > > >
> > > > Please refer to pull request [1].
> > > >
> > > > For any interest in more details, the libmetal is the abstraction layer
> > > > which provides platform independent APIs. The backend implementation
> > > > can be selected per different platforms: ex, rtos, linux,
> > > > standalone (xilinx),,,. For Linux, it supports UIO / vfio as of now.
> > > > The actual user space drivers sit on top of libmetal. Such drivers can be
> > > > found in [2]. This is why I try to avoid any device specific code in
> > > > Linux kernel.
> > > >
> > > > >
> > > > > Also, exporting dma_addr to userspace is considered a very bad idea.
> > > >
> > > > I agree, hence the RFC to pick some brains. :-) Would it make sense
> > > > if this call doesn't export the physicall address, but instead takes
> > > > only the dmabuf fd and register offsets to be programmed?
> > > >
> > > > > If you want to do this properly, you need a minimal in-kernel memory
> > > > > manager, and those tend to be based on top of drm_gem.c and merged
> > > > > through the gpu tree. The last place where we accidentally leaked a
> > > > > dma addr for gpu buffers was in the fbdev code, and we plugged that
> > > > > one with
> > > >
> > > > Could you please help me understand how having a in-kernel memory manager
> > > > helps? Isn't it just moving same dmabuf import / paddr export functionality
> > > > in different modules: kernel memory manager vs uio. In fact, Xilinx does have
> > > > such memory manager based on drm gem in downstream. But for this time we took
> > > > the approach of implementing this through generic dmabuf allocator, ION, and
> > > > enabling the import capability in the UIO infrastructure instead.
> > > 
> > > There's a group of people working on upstreaming a xilinx drm driver
> > > already. Which driver are we talking about? Can you pls provide a link
> > > to that xilinx drm driver?
> > > 
> > 
> > The one I was pushing [1] is implemented purely for display, and not
> > intended for anything other than that as of now. What I'm refering to above
> > is part of Xilinx FPGA (acceleration) runtime [2]. As far as I know,
> > it's planned to be upstreamed, but not yet started. The Xilinx runtime
> > software has its own in-kernel memory manager based on drm_cma_gem with
> > its own ioctls [3].
> > 
> > Thanks,
> > -hyun
> > 
> > [1] https://patchwork.kernel.org/patch/10513001/
> > [2] https://github.com/Xilinx/XRT
> > [3] https://github.com/Xilinx/XRT/tree/master/src/runtime_src/driver/zynq/drm
> 
> I've done a very quick look only, and yes this is kinda what I'd expect.
> Doing a small drm gem driver for an fpga/accelarator that needs lots of
> memories is the right architecture, since at the low level of kernel
> interfaces a gpu really isn't anything else than an accelarater.
> 
> And from a very cursory look the gem driver you mentioned (I only scrolled
> through the ioctl handler quickly) looks reasonable.

Thanks for taking the time to look and share input. But I'd still like to
understand why it's more reasonable for a similar ioctl to exist in drm
than in uio. Is it because such a drm ioctl is vendor-specific?

Thanks,
-hyun

> -Daniel
> > 
> > > Thanks, Daniel
> > > 
> > > > Thanks,
> > > > -hyun
> > > >
> > > > [1] https://github.com/OpenAMP/libmetal/pull/82/commits/951e2762bd487c98919ad12f2aa81773d8fe7859
> > > > [2] https://github.com/Xilinx/embeddedsw/tree/master/XilinxProcessorIPLib/drivers
> > > >
> > > > >
> > > > > commit 4be9bd10e22dfc7fc101c5cf5969ef2d3a042d8a (tag:
> > > > > drm-misc-next-fixes-2018-10-03)
> > > > > Author: Neil Armstrong <narmstrong@baylibre.com>
> > > > > Date:   Fri Sep 28 14:05:55 2018 +0200
> > > > >
> > > > >     drm/fb_helper: Allow leaking fbdev smem_start
> > > > >
> > > > > Together with cuse the above patch should be enough to implement a drm
> > > > > driver entirely in userspace at least.
> > > > >
> > > > > Cheers, Daniel
> > > > > --
> > > > > Daniel Vetter
> > > > > Software Engineer, Intel Corporation
> > > > > +41 (0) 79 365 57 48 - http://blog.ffwll.ch
> > > 
> > > 
> > > 
> > > -- 
> > > Daniel Vetter
> > > Software Engineer, Intel Corporation
> > > +41 (0) 79 365 57 48 - http://blog.ffwll.ch
> 
> -- 
> Daniel Vetter
> Software Engineer, Intel Corporation
> http://blog.ffwll.ch
Daniel Vetter March 1, 2019, 8:55 a.m. UTC | #9
On Thu, Feb 28, 2019 at 04:18:57PM -0800, Hyun Kwon wrote:
> Hi Daniel,
> 
> On Thu, 2019-02-28 at 02:01:46 -0800, Daniel Vetter wrote:
> > On Wed, Feb 27, 2019 at 04:36:06PM -0800, Hyun Kwon wrote:
> > > Hi Daniel,
> > > 
> > > On Wed, 2019-02-27 at 06:13:45 -0800, Daniel Vetter wrote:
> > > > On Tue, Feb 26, 2019 at 11:20 PM Hyun Kwon <hyun.kwon@xilinx.com> wrote:
> > > > >
> > > > > Hi Daniel,
> > > > >
> > > > > Thanks for the comment.
> > > > >
> > > > > On Tue, 2019-02-26 at 04:06:13 -0800, Daniel Vetter wrote:
> > > > > > On Tue, Feb 26, 2019 at 12:53 PM Greg Kroah-Hartman
> > > > > > <gregkh@linuxfoundation.org> wrote:
> > > > > > >
> > > > > > > On Sat, Feb 23, 2019 at 12:28:17PM -0800, Hyun Kwon wrote:
> > > > > > > > Add the dmabuf map / unmap interfaces. This allows the user driver
> > > > > > > > to be able to import the external dmabuf and use it from user space.
> > > > > > > >
> > > > > > > > Signed-off-by: Hyun Kwon <hyun.kwon@xilinx.com>
> > > > > > > > ---
> > > > > > > >  drivers/uio/Makefile         |   2 +-
> > > > > > > >  drivers/uio/uio.c            |  43 +++++++++
> > > > > > > >  drivers/uio/uio_dmabuf.c     | 210 +++++++++++++++++++++++++++++++++++++++++++
> > > > > > > >  drivers/uio/uio_dmabuf.h     |  26 ++++++
> > > > > > > >  include/uapi/linux/uio/uio.h |  33 +++++++
> > > > > > > >  5 files changed, 313 insertions(+), 1 deletion(-)
> > > > > > > >  create mode 100644 drivers/uio/uio_dmabuf.c
> > > > > > > >  create mode 100644 drivers/uio/uio_dmabuf.h
> > > > > > > >  create mode 100644 include/uapi/linux/uio/uio.h
> > > > > > > >
> > > > > > > > diff --git a/drivers/uio/Makefile b/drivers/uio/Makefile
> > > > > > > > index c285dd2..5da16c7 100644
> > > > > > > > --- a/drivers/uio/Makefile
> > > > > > > > +++ b/drivers/uio/Makefile
> > > > > > > > @@ -1,5 +1,5 @@
> 
> [snip]
> 
> > > > > > Frankly looks like a ploy to sidestep review by graphics folks. We'd
> > > > > > ask for the userspace first :-)
> > > > >
> > > > > Please refer to pull request [1].
> > > > >
> > > > > For any interest in more details, the libmetal is the abstraction layer
> > > > > which provides platform independent APIs. The backend implementation
> > > > > can be selected per different platforms: ex, rtos, linux,
> > > > > standalone (xilinx),,,. For Linux, it supports UIO / vfio as of now.
> > > > > The actual user space drivers sit on top of libmetal. Such drivers can be
> > > > > found in [2]. This is why I try to avoid any device specific code in
> > > > > Linux kernel.
> > > > >
> > > > > >
> > > > > > Also, exporting dma_addr to userspace is considered a very bad idea.
> > > > >
> > > > > I agree, hence the RFC to pick some brains. :-) Would it make sense
> > > > > if this call doesn't export the physicall address, but instead takes
> > > > > only the dmabuf fd and register offsets to be programmed?
> > > > >
> > > > > > If you want to do this properly, you need a minimal in-kernel memory
> > > > > > manager, and those tend to be based on top of drm_gem.c and merged
> > > > > > through the gpu tree. The last place where we accidentally leaked a
> > > > > > dma addr for gpu buffers was in the fbdev code, and we plugged that
> > > > > > one with
> > > > >
> > > > > Could you please help me understand how having a in-kernel memory manager
> > > > > helps? Isn't it just moving same dmabuf import / paddr export functionality
> > > > > in different modules: kernel memory manager vs uio. In fact, Xilinx does have
> > > > > such memory manager based on drm gem in downstream. But for this time we took
> > > > > the approach of implementing this through generic dmabuf allocator, ION, and
> > > > > enabling the import capability in the UIO infrastructure instead.
> > > > 
> > > > There's a group of people working on upstreaming a xilinx drm driver
> > > > already. Which driver are we talking about? Can you pls provide a link
> > > > to that xilinx drm driver?
> > > > 
> > > 
> > > The one I was pushing [1] is implemented purely for display, and not
> > > intended for anything other than that as of now. What I'm refering to above
> > > is part of Xilinx FPGA (acceleration) runtime [2]. As far as I know,
> > > it's planned to be upstreamed, but not yet started. The Xilinx runtime
> > > software has its own in-kernel memory manager based on drm_cma_gem with
> > > its own ioctls [3].
> > > 
> > > Thanks,
> > > -hyun
> > > 
> > > [1] https://patchwork.kernel.org/patch/10513001/
> > > [2] https://github.com/Xilinx/XRT
> > > [3] https://github.com/Xilinx/XRT/tree/master/src/runtime_src/driver/zynq/drm
> > 
> > I've done a very quick look only, and yes this is kinda what I'd expect.
> > Doing a small drm gem driver for an fpga/accelarator that needs lots of
> > memories is the right architecture, since at the low level of kernel
> > interfaces a gpu really isn't anything else than an accelarater.
> > 
> > And from a very cursory look the gem driver you mentioned (I only scrolled
> > through the ioctl handler quickly) looks reasonable.
> 
> Thanks for taking time to look and share input. But still I'd like to
> understand why it's more reasonable if the similar ioctl exists with drm
> than with uio. Is it because such drm ioctl is vendor specific?

We do have quite a pile of shared infrastructure in drm beyond just the
vendor-specific ioctl. So putting accelerator drivers there makes sense,
whether the thing being programmed is a GPU, some neural network
accelerator, an FPGA or something else. The one issue is that we require
open source userspace together with your driver, since just the accelerator
shim in the kernel alone is fairly useless (both for review and for doing
anything with it).

But there's also some kernel maintainers who disagree and happily take
drivers originally written for drm and then rewritten for non-drm for
upstream to avoid the drm folks (or at least it very much looks like that,
and happens fairly regularly).

Cheers, Daniel

> 
> Thanks,
> -hyun
> 
> > -Daniel
> > > 
> > > > Thanks, Daniel
> > > > 
> > > > > Thanks,
> > > > > -hyun
> > > > >
> > > > > [1] https://github.com/OpenAMP/libmetal/pull/82/commits/951e2762bd487c98919ad12f2aa81773d8fe7859
> > > > > [2] https://github.com/Xilinx/embeddedsw/tree/master/XilinxProcessorIPLib/drivers
> > > > >
> > > > > >
> > > > > > commit 4be9bd10e22dfc7fc101c5cf5969ef2d3a042d8a (tag:
> > > > > > drm-misc-next-fixes-2018-10-03)
> > > > > > Author: Neil Armstrong <narmstrong@baylibre.com>
> > > > > > Date:   Fri Sep 28 14:05:55 2018 +0200
> > > > > >
> > > > > >     drm/fb_helper: Allow leaking fbdev smem_start
> > > > > >
> > > > > > Together with cuse the above patch should be enough to implement a drm
> > > > > > driver entirely in userspace at least.
> > > > > >
> > > > > > Cheers, Daniel
> > > > > > --
> > > > > > Daniel Vetter
> > > > > > Software Engineer, Intel Corporation
> > > > > > +41 (0) 79 365 57 48 - http://blog.ffwll.ch
> > > > 
> > > > 
> > > > 
> > > > -- 
> > > > Daniel Vetter
> > > > Software Engineer, Intel Corporation
> > > > +41 (0) 79 365 57 48 - http://blog.ffwll.ch
> > 
> > -- 
> > Daniel Vetter
> > Software Engineer, Intel Corporation
> > http://blog.ffwll.ch
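
The alternative floated above -- passing only the dmabuf fd plus the register
offsets to be programmed, so the DMA address never leaves the kernel -- could
take roughly the following shape. This is only an illustrative sketch; the
struct layout and ioctl names below are hypothetical and not part of the
posted patch.

/*
 * Hypothetical alternative to struct uio_dmabuf_args: the kernel maps the
 * dmabuf and programs the resulting DMA address into a device register
 * itself, so user space never sees the address. All names here are
 * illustrative only.
 */
struct uio_dmabuf_attach_args {
	__s32	dbuf_fd;	/* dmabuf to import */
	__u32	dir;		/* UIO_DMABUF_DIR_* as in the posted patch */
	__u32	mem_index;	/* which uio_mem region holds the register */
	__u32	reg_offset;	/* offset of the address register in that region */
	__u64	size;		/* filled in by the kernel: buffer size */
};

/* Reuses UIO_IOC_BASE from the posted patch; the numbers are placeholders. */
#define UIO_IOC_ATTACH_DMABUF	_IOWR(UIO_IOC_BASE, 0x3, struct uio_dmabuf_attach_args)
#define UIO_IOC_DETACH_DMABUF	_IOW(UIO_IOC_BASE, 0x4, struct uio_dmabuf_attach_args)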

Patch

diff --git a/drivers/uio/Makefile b/drivers/uio/Makefile
index c285dd2..5da16c7 100644
--- a/drivers/uio/Makefile
+++ b/drivers/uio/Makefile
@@ -1,5 +1,5 @@ 
 # SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_UIO)	+= uio.o
+obj-$(CONFIG_UIO)	+= uio.o uio_dmabuf.o
 obj-$(CONFIG_UIO_CIF)	+= uio_cif.o
 obj-$(CONFIG_UIO_PDRV_GENIRQ)	+= uio_pdrv_genirq.o
 obj-$(CONFIG_UIO_DMEM_GENIRQ)	+= uio_dmem_genirq.o
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 1313422..6841f98 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -24,6 +24,12 @@ 
 #include <linux/kobject.h>
 #include <linux/cdev.h>
 #include <linux/uio_driver.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+#include <uapi/linux/uio/uio.h>
+
+#include "uio_dmabuf.h"
 
 #define UIO_MAX_DEVICES		(1U << MINORBITS)
 
@@ -454,6 +460,8 @@  static irqreturn_t uio_interrupt(int irq, void *dev_id)
 struct uio_listener {
 	struct uio_device *dev;
 	s32 event_count;
+	struct list_head dbufs;
+	struct mutex dbufs_lock; /* protect @dbufs */
 };
 
 static int uio_open(struct inode *inode, struct file *filep)
@@ -500,6 +508,9 @@  static int uio_open(struct inode *inode, struct file *filep)
 	if (ret)
 		goto err_infoopen;
 
+	INIT_LIST_HEAD(&listener->dbufs);
+	mutex_init(&listener->dbufs_lock);
+
 	return 0;
 
 err_infoopen:
@@ -529,6 +540,10 @@  static int uio_release(struct inode *inode, struct file *filep)
 	struct uio_listener *listener = filep->private_data;
 	struct uio_device *idev = listener->dev;
 
+	ret = uio_dmabuf_cleanup(idev, &listener->dbufs, &listener->dbufs_lock);
+	if (ret)
+		dev_err(&idev->dev, "failed to clean up the dma bufs\n");
+
 	mutex_lock(&idev->info_lock);
 	if (idev->info && idev->info->release)
 		ret = idev->info->release(idev->info, inode);
@@ -652,6 +667,33 @@  static ssize_t uio_write(struct file *filep, const char __user *buf,
 	return retval ? retval : sizeof(s32);
 }
 
+static long uio_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+	struct uio_listener *listener = filep->private_data;
+	struct uio_device *idev = listener->dev;
+	long ret;
+
+	if (!idev->info)
+		return -EIO;
+
+	switch (cmd) {
+	case UIO_IOC_MAP_DMABUF:
+		ret = uio_dmabuf_map(idev, &listener->dbufs,
+				     &listener->dbufs_lock, (void __user *)arg);
+		break;
+	case UIO_IOC_UNMAP_DMABUF:
+		ret = uio_dmabuf_unmap(idev, &listener->dbufs,
+				       &listener->dbufs_lock,
+				       (void __user *)arg);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
 static int uio_find_mem_index(struct vm_area_struct *vma)
 {
 	struct uio_device *idev = vma->vm_private_data;
@@ -821,6 +863,7 @@  static const struct file_operations uio_fops = {
 	.write		= uio_write,
 	.mmap		= uio_mmap,
 	.poll		= uio_poll,
+	.unlocked_ioctl	= uio_ioctl,
 	.fasync		= uio_fasync,
 	.llseek		= noop_llseek,
 };
diff --git a/drivers/uio/uio_dmabuf.c b/drivers/uio/uio_dmabuf.c
new file mode 100644
index 0000000..b18f146
--- /dev/null
+++ b/drivers/uio/uio_dmabuf.c
@@ -0,0 +1,210 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * DMA buf support for UIO device
+ *
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/uio_driver.h>
+#include <linux/slab.h>
+
+#include <uapi/linux/uio/uio.h>
+
+#include "uio_dmabuf.h"
+
+struct uio_dmabuf_mem {
+	int dbuf_fd;
+	struct dma_buf *dbuf;
+	struct dma_buf_attachment *dbuf_attach;
+	struct sg_table *sgt;
+	enum dma_data_direction dir;
+	struct list_head list;
+};
+
+long uio_dmabuf_map(struct uio_device *dev, struct list_head *dbufs,
+		    struct mutex *dbufs_lock, void __user *user_args)
+{
+	struct uio_dmabuf_args args;
+	struct uio_dmabuf_mem *dbuf_mem;
+	struct dma_buf *dbuf;
+	struct dma_buf_attachment *dbuf_attach;
+	enum dma_data_direction dir;
+	struct sg_table *sgt;
+	long ret;
+
+	if (copy_from_user(&args, user_args, sizeof(args))) {
+		ret = -EFAULT;
+		dev_err(dev->dev.parent, "failed to copy from user\n");
+		goto err;
+	}
+
+	dbuf = dma_buf_get(args.dbuf_fd);
+	if (IS_ERR(dbuf)) {
+		dev_err(dev->dev.parent, "failed to get dmabuf\n");
+		return PTR_ERR(dbuf);
+	}
+
+	dbuf_attach = dma_buf_attach(dbuf, dev->dev.parent);
+	if (IS_ERR(dbuf_attach)) {
+		dev_err(dev->dev.parent, "failed to attach dmabuf\n");
+		ret = PTR_ERR(dbuf_attach);
+		goto err_put;
+	}
+
+	switch (args.dir) {
+	case UIO_DMABUF_DIR_BIDIR:
+		dir = DMA_BIDIRECTIONAL;
+		break;
+	case UIO_DMABUF_DIR_TO_DEV:
+		dir = DMA_TO_DEVICE;
+		break;
+	case UIO_DMABUF_DIR_FROM_DEV:
+		dir = DMA_FROM_DEVICE;
+		break;
+	default:
+		/* Not needed with check. Just here for any future change  */
+		dev_err(dev->dev.parent, "invalid direction\n");
+		ret = -EINVAL;
+		goto err_detach;
+	}
+
+	sgt = dma_buf_map_attachment(dbuf_attach, dir);
+	if (IS_ERR(sgt)) {
+		dev_err(dev->dev.parent, "failed to get dmabuf scatterlist\n");
+		ret = PTR_ERR(sgt);
+		goto err_detach;
+	}
+
+	/* Accept only contiguous one */
+	if (sgt->nents != 1) {
+		dma_addr_t next_addr = sg_dma_address(sgt->sgl);
+		struct scatterlist *s;
+		unsigned int i;
+
+		for_each_sg(sgt->sgl, s, sgt->nents, i) {
+			if (!sg_dma_len(s))
+				continue;
+
+			if (sg_dma_address(s) != next_addr) {
+				dev_err(dev->dev.parent,
+					"dmabuf not contiguous\n");
+				ret = -EINVAL;
+				goto err_unmap;
+			}
+
+			next_addr = sg_dma_address(s) + sg_dma_len(s);
+		}
+	}
+
+	dbuf_mem = kzalloc(sizeof(*dbuf_mem), GFP_KERNEL);
+	if (!dbuf_mem) {
+		ret = -ENOMEM;
+		goto err_unmap;
+	}
+
+	dbuf_mem->dbuf_fd = args.dbuf_fd;
+	dbuf_mem->dbuf = dbuf;
+	dbuf_mem->dbuf_attach = dbuf_attach;
+	dbuf_mem->sgt = sgt;
+	dbuf_mem->dir = dir;
+	args.dma_addr = sg_dma_address(sgt->sgl);
+	args.size = dbuf->size;
+
+	if (copy_to_user(user_args, &args, sizeof(args))) {
+		ret = -EFAULT;
+		dev_err(dev->dev.parent, "failed to copy to user\n");
+		goto err_free;
+	}
+
+	mutex_lock(dbufs_lock);
+	list_add(&dbuf_mem->list, dbufs);
+	mutex_unlock(dbufs_lock);
+
+	return 0;
+
+err_free:
+	kfree(dbuf_mem);
+err_unmap:
+	dma_buf_unmap_attachment(dbuf_attach, sgt, dir);
+err_detach:
+	dma_buf_detach(dbuf, dbuf_attach);
+err_put:
+	dma_buf_put(dbuf);
+err:
+	return ret;
+}
+
+long uio_dmabuf_unmap(struct uio_device *dev, struct list_head *dbufs,
+		      struct mutex *dbufs_lock, void __user *user_args)
+
+{
+	struct uio_dmabuf_args args;
+	struct uio_dmabuf_mem *dbuf_mem;
+	long ret;
+
+	if (copy_from_user(&args, user_args, sizeof(args))) {
+		ret = -EFAULT;
+		goto err;
+	}
+
+	mutex_lock(dbufs_lock);
+	list_for_each_entry(dbuf_mem, dbufs, list) {
+		if (dbuf_mem->dbuf_fd == args.dbuf_fd)
+			break;
+	}
+
+	if (dbuf_mem->dbuf_fd != args.dbuf_fd) {
+		dev_err(dev->dev.parent, "failed to find the dmabuf (%d)\n",
+			args.dbuf_fd);
+		ret = -EINVAL;
+		goto err_unlock;
+	}
+	list_del(&dbuf_mem->list);
+	mutex_unlock(dbufs_lock);
+
+	dma_buf_unmap_attachment(dbuf_mem->dbuf_attach, dbuf_mem->sgt,
+				 dbuf_mem->dir);
+	dma_buf_detach(dbuf_mem->dbuf, dbuf_mem->dbuf_attach);
+	dma_buf_put(dbuf_mem->dbuf);
+	kfree(dbuf_mem);
+
+	memset(&args, 0x0, sizeof(args));
+
+	if (copy_to_user(user_args, &args, sizeof(args))) {
+		ret = -EFAULT;
+		goto err;
+	}
+
+	return 0;
+
+err_unlock:
+	mutex_unlock(dbufs_lock);
+err:
+	return ret;
+}
+
+int uio_dmabuf_cleanup(struct uio_device *dev, struct list_head *dbufs,
+		       struct mutex *dbufs_lock)
+{
+	struct uio_dmabuf_mem *dbuf_mem, *next;
+
+	mutex_lock(dbufs_lock);
+	list_for_each_entry_safe(dbuf_mem, next, dbufs, list) {
+		list_del(&dbuf_mem->list);
+		dma_buf_unmap_attachment(dbuf_mem->dbuf_attach, dbuf_mem->sgt,
+					 dbuf_mem->dir);
+		dma_buf_detach(dbuf_mem->dbuf, dbuf_mem->dbuf_attach);
+		dma_buf_put(dbuf_mem->dbuf);
+		kfree(dbuf_mem);
+	}
+	mutex_unlock(dbufs_lock);
+
+	return 0;
+}
diff --git a/drivers/uio/uio_dmabuf.h b/drivers/uio/uio_dmabuf.h
new file mode 100644
index 0000000..3020030
--- /dev/null
+++ b/drivers/uio/uio_dmabuf.h
@@ -0,0 +1,26 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * DMA buf support for UIO device
+ *
+ */
+
+#ifndef _UIO_DMABUF_H_
+#define _UIO_DMABUF_H_
+
+struct uio_device;
+struct list_head;
+struct mutex;
+
+long uio_dmabuf_map(struct uio_device *dev, struct list_head *dbufs,
+		    struct mutex *dbufs_lock, void __user *user_args);
+long uio_dmabuf_unmap(struct uio_device *dev, struct list_head *dbufs,
+		      struct mutex *dbufs_lock, void __user *user_args);
+
+int uio_dmabuf_cleanup(struct uio_device *dev, struct list_head *dbufs,
+		       struct mutex *dbufs_lock);
+
+#endif
diff --git a/include/uapi/linux/uio/uio.h b/include/uapi/linux/uio/uio.h
new file mode 100644
index 0000000..298bfd7
--- /dev/null
+++ b/include/uapi/linux/uio/uio.h
@@ -0,0 +1,33 @@ 
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * The header for UIO driver
+ *
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ */
+
+#ifndef _UAPI_UIO_UIO_H_
+#define _UAPI_UIO_UIO_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define UIO_DMABUF_DIR_BIDIR	1
+#define UIO_DMABUF_DIR_TO_DEV	2
+#define UIO_DMABUF_DIR_FROM_DEV	3
+#define UIO_DMABUF_DIR_NONE	4
+
+struct uio_dmabuf_args {
+	__s32	dbuf_fd;
+	__u64	dma_addr;
+	__u64	size;
+	__u32	dir;
+};
+
+#define UIO_IOC_BASE		'U'
+
+#define	UIO_IOC_MAP_DMABUF	_IOWR(UIO_IOC_BASE, 0x1, struct uio_dmabuf_args)
+#define	UIO_IOC_UNMAP_DMABUF	_IOWR(UIO_IOC_BASE, 0x2, struct uio_dmabuf_args)
+
+#endif
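
For completeness, a minimal sketch of how a user-space driver might exercise
the ioctls defined above. It assumes the dmabuf fd has already been obtained
from an exporter such as ION or a DRM GEM allocator, and that the device node
is /dev/uio0; error handling is abbreviated.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <linux/uio/uio.h>	/* the uapi header added by this patch */

/* dbuf_fd is assumed to come from a dmabuf exporter (ION, DRM GEM, ...). */
static int uio_dmabuf_example(int dbuf_fd)
{
	struct uio_dmabuf_args args;
	int uio_fd, ret;

	uio_fd = open("/dev/uio0", O_RDWR);	/* device node is an assumption */
	if (uio_fd < 0)
		return -1;

	memset(&args, 0, sizeof(args));
	args.dbuf_fd = dbuf_fd;
	args.dir = UIO_DMABUF_DIR_BIDIR;

	ret = ioctl(uio_fd, UIO_IOC_MAP_DMABUF, &args);
	if (ret) {
		close(uio_fd);
		return ret;
	}

	/*
	 * The kernel has filled in args.dma_addr and args.size; the user
	 * driver would program these into the device registers through the
	 * usual UIO mmap of the register space.
	 */
	printf("mapped dmabuf at 0x%llx, size 0x%llx\n",
	       (unsigned long long)args.dma_addr,
	       (unsigned long long)args.size);

	ret = ioctl(uio_fd, UIO_IOC_UNMAP_DMABUF, &args);
	close(uio_fd);
	return ret;
}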