diff mbox

[2/2] drm/udl: add support to export a handle to a FD on UDL.

Message ID 1415846033-3271-2-git-send-email-hshi@chromium.org (mailing list archive)
State New, archived
Headers show

Commit Message

Haixia Shi Nov. 13, 2014, 2:33 a.m. UTC
Currently, only importing an FD to a handle is supported on UDL,
but the exporting functionality is equally useful.

Change-Id: If4983041875ebf3bd2ecf996d0771eb77b0cf1dc
Signed-off-by: Haixia Shi <hshi@chromium.org>
Reviewed-by: Stéphane Marchesin <marcheu@chromium.org>
---
 drivers/gpu/drm/udl/Makefile     |   2 +-
 drivers/gpu/drm/udl/udl_dmabuf.c | 273 +++++++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/udl/udl_drv.c    |   2 +
 drivers/gpu/drm/udl/udl_drv.h    |   2 +
 drivers/gpu/drm/udl/udl_gem.c    |  71 ----------
 5 files changed, 278 insertions(+), 72 deletions(-)
 create mode 100644 drivers/gpu/drm/udl/udl_dmabuf.c

Comments

Haixia Shi Nov. 13, 2014, 2:42 a.m. UTC | #1
Sorry, I forgot to remove the Change-Id lines from these 2 patches. Please
review, and I'll send updated patches with the Change-Id lines removed.

On Wed, Nov 12, 2014 at 6:33 PM, Haixia Shi <hshi@chromium.org> wrote:

> Only importing an FD to a handle is currently supported on UDL,
> but the exporting functionality is equally useful.
>
> Change-Id: If4983041875ebf3bd2ecf996d0771eb77b0cf1dc
> Signed-off-by: Haixia Shi <hshi@chromium.org>
> Reviewed-by: Stéphane Marchesin <marcheu@chromium.org>
> ---
>  drivers/gpu/drm/udl/Makefile     |   2 +-
>  drivers/gpu/drm/udl/udl_dmabuf.c | 273
> +++++++++++++++++++++++++++++++++++++++
>  drivers/gpu/drm/udl/udl_drv.c    |   2 +
>  drivers/gpu/drm/udl/udl_drv.h    |   2 +
>  drivers/gpu/drm/udl/udl_gem.c    |  71 ----------
>  5 files changed, 278 insertions(+), 72 deletions(-)
>  create mode 100644 drivers/gpu/drm/udl/udl_dmabuf.c
>
> diff --git a/drivers/gpu/drm/udl/Makefile b/drivers/gpu/drm/udl/Makefile
> index 05c7481..195bcac 100644
> --- a/drivers/gpu/drm/udl/Makefile
> +++ b/drivers/gpu/drm/udl/Makefile
> @@ -1,6 +1,6 @@
>
>  ccflags-y := -Iinclude/drm
>
> -udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o
> udl_fb.o udl_transfer.o udl_gem.o
> +udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o
> udl_fb.o udl_transfer.o udl_gem.o udl_dmabuf.o
>
>  obj-$(CONFIG_DRM_UDL) := udl.o
> diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c
> b/drivers/gpu/drm/udl/udl_dmabuf.c
> new file mode 100644
> index 0000000..1d85c3a
> --- /dev/null
> +++ b/drivers/gpu/drm/udl/udl_dmabuf.c
> @@ -0,0 +1,273 @@
> +/*
> + * udl_dmabuf.c
> + *
> + * Copyright (c) 2014 The Chromium OS Authors
> + *
> + * This program is free software; you can redistribute  it and/or modify
> it
> + * under  the terms of  the GNU General  Public License as published by
> the
> + * Free Software Foundation;  either version 2 of the  License, or (at
> your
> + * option) any later version.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program. If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#include <drm/drmP.h>
> +#include "udl_drv.h"
> +#include <linux/shmem_fs.h>
> +#include <linux/dma-buf.h>
> +
> +struct udl_drm_dmabuf_attachment {
> +       struct sg_table sgt;
> +       enum dma_data_direction dir;
> +       bool is_mapped;
> +};
> +
> +static int udl_attach_dma_buf(struct dma_buf *dmabuf,
> +                             struct device *dev,
> +                             struct dma_buf_attachment *attach)
> +{
> +       struct udl_drm_dmabuf_attachment *udl_attach;
> +
> +       DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
> +                       attach->dmabuf->size);
> +
> +       udl_attach = kzalloc(sizeof(*udl_attach), GFP_KERNEL);
> +       if (!udl_attach)
> +               return -ENOMEM;
> +
> +       udl_attach->dir = DMA_NONE;
> +       attach->priv = udl_attach;
> +
> +       return 0;
> +}
> +
> +static void udl_detach_dma_buf(struct dma_buf *dmabuf,
> +                              struct dma_buf_attachment *attach)
> +{
> +       struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
> +       struct sg_table *sgt;
> +
> +       if (!udl_attach)
> +               return;
> +
> +       DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
> +                       attach->dmabuf->size);
> +
> +       sgt = &udl_attach->sgt;
> +
> +       if (udl_attach->dir != DMA_NONE)
> +               dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
> +                               udl_attach->dir);
> +
> +       sg_free_table(sgt);
> +       kfree(udl_attach);
> +       attach->priv = NULL;
> +}
> +
> +static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
> +                                       enum dma_data_direction dir)
> +{
> +       struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
> +       struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
> +       struct drm_device *dev = obj->base.dev;
> +       struct scatterlist *rd, *wr;
> +       struct sg_table *sgt = NULL;
> +       unsigned int i;
> +       int page_count;
> +       int nents, ret;
> +
> +       DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n",
> dev_name(attach->dev),
> +                       attach->dmabuf->size, dir);
> +
> +       /* just return current sgt if already requested. */
> +       if (udl_attach->dir == dir && udl_attach->is_mapped)
> +               return &udl_attach->sgt;
> +
> +       if (!obj->pages) {
> +               DRM_ERROR("pages is null.\n");
> +               return ERR_PTR(-ENOMEM);
> +       }
> +
> +       page_count = obj->base.size / PAGE_SIZE;
> +       obj->sg = drm_prime_pages_to_sg(obj->pages, page_count);
> +       if (!obj->sg) {
> +               DRM_ERROR("sg is null.\n");
> +               return ERR_PTR(-ENOMEM);
> +       }
> +
> +       sgt = &udl_attach->sgt;
> +
> +       ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL);
> +       if (ret) {
> +               DRM_ERROR("failed to alloc sgt.\n");
> +               return ERR_PTR(-ENOMEM);
> +       }
> +
> +       mutex_lock(&dev->struct_mutex);
> +
> +       rd = obj->sg->sgl;
> +       wr = sgt->sgl;
> +       for (i = 0; i < sgt->orig_nents; ++i) {
> +               sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
> +               rd = sg_next(rd);
> +               wr = sg_next(wr);
> +       }
> +
> +       if (dir != DMA_NONE) {
> +               nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents,
> dir);
> +               if (!nents) {
> +                       DRM_ERROR("failed to map sgl with iommu.\n");
> +                       sg_free_table(sgt);
> +                       sgt = ERR_PTR(-EIO);
> +                       goto err_unlock;
> +               }
> +       }
> +
> +       udl_attach->is_mapped = true;
> +       udl_attach->dir = dir;
> +       attach->priv = udl_attach;
> +
> +err_unlock:
> +       mutex_unlock(&dev->struct_mutex);
> +       return sgt;
> +}
> +
> +static void udl_unmap_dma_buf(struct dma_buf_attachment *attach,
> +                             struct sg_table *sgt,
> +                             enum dma_data_direction dir)
> +{
> +       /* Nothing to do. */
> +       DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir:%d\n",
> dev_name(attach->dev),
> +                       attach->dmabuf->size, dir);
> +}
> +
> +static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long
> page_num)
> +{
> +       /* TODO */
> +
> +       return NULL;
> +}
> +
> +static void *udl_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
> +                                   unsigned long page_num)
> +{
> +       /* TODO */
> +
> +       return NULL;
> +}
> +
> +static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
> +                             unsigned long page_num, void *addr)
> +{
> +       /* TODO */
> +}
> +
> +static void udl_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
> +                                    unsigned long page_num,
> +                                    void *addr)
> +{
> +       /* TODO */
> +}
> +
> +static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
> +                          struct vm_area_struct *vma)
> +{
> +       /* TODO */
> +
> +       return -EINVAL;
> +}
> +
> +static struct dma_buf_ops udl_dmabuf_ops = {
> +       .attach                 = udl_attach_dma_buf,
> +       .detach                 = udl_detach_dma_buf,
> +       .map_dma_buf            = udl_map_dma_buf,
> +       .unmap_dma_buf          = udl_unmap_dma_buf,
> +       .kmap                   = udl_dmabuf_kmap,
> +       .kmap_atomic            = udl_dmabuf_kmap_atomic,
> +       .kunmap                 = udl_dmabuf_kunmap,
> +       .kunmap_atomic          = udl_dmabuf_kunmap_atomic,
> +       .mmap                   = udl_dmabuf_mmap,
> +       .release                = drm_gem_dmabuf_release,
> +};
> +
> +struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
> +                                    struct drm_gem_object *obj, int flags)
> +{
> +       return dma_buf_export(obj, &udl_dmabuf_ops, obj->size, flags,
> NULL);
> +}
> +
> +static int udl_prime_create(struct drm_device *dev,
> +                           size_t size,
> +                           struct sg_table *sg,
> +                           struct udl_gem_object **obj_p)
> +{
> +       struct udl_gem_object *obj;
> +       int npages;
> +
> +       npages = size / PAGE_SIZE;
> +
> +       *obj_p = NULL;
> +       obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
> +       if (!obj)
> +               return -ENOMEM;
> +
> +       obj->sg = sg;
> +       obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
> +       if (obj->pages == NULL) {
> +               DRM_ERROR("obj pages is NULL %d\n", npages);
> +               return -ENOMEM;
> +       }
> +
> +       drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
> +
> +       *obj_p = obj;
> +       return 0;
> +}
> +
> +struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
> +                               struct dma_buf *dma_buf)
> +{
> +       struct dma_buf_attachment *attach;
> +       struct sg_table *sg;
> +       struct udl_gem_object *uobj;
> +       int ret;
> +
> +       /* need to attach */
> +       get_device(dev->dev);
> +       attach = dma_buf_attach(dma_buf, dev->dev);
> +       if (IS_ERR(attach)) {
> +               put_device(dev->dev);
> +               return ERR_CAST(attach);
> +       }
> +
> +       get_dma_buf(dma_buf);
> +
> +       sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
> +       if (IS_ERR(sg)) {
> +               ret = PTR_ERR(sg);
> +               goto fail_detach;
> +       }
> +
> +       ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
> +       if (ret)
> +               goto fail_unmap;
> +
> +       uobj->base.import_attach = attach;
> +       uobj->flags = UDL_BO_WC;
> +
> +       return &uobj->base;
> +
> +fail_unmap:
> +       dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
> +fail_detach:
> +       dma_buf_detach(dma_buf, attach);
> +       dma_buf_put(dma_buf);
> +       put_device(dev->dev);
> +       return ERR_PTR(ret);
> +}
> diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
> index 8607e9e..d5728ec 100644
> --- a/drivers/gpu/drm/udl/udl_drv.c
> +++ b/drivers/gpu/drm/udl/udl_drv.c
> @@ -51,7 +51,9 @@ static struct drm_driver driver = {
>         .dumb_destroy = drm_gem_dumb_destroy,
>         .fops = &udl_driver_fops,
>
> +       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
>         .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
> +       .gem_prime_export = udl_gem_prime_export,
>         .gem_prime_import = udl_gem_prime_import,
>
>         .name = DRIVER_NAME,
> diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
> index 3082780..1b132d7 100644
> --- a/drivers/gpu/drm/udl/udl_drv.h
> +++ b/drivers/gpu/drm/udl/udl_drv.h
> @@ -124,6 +124,8 @@ int udl_gem_mmap(struct drm_file *file_priv, struct
> drm_device *dev,
>  void udl_gem_free_object(struct drm_gem_object *gem_obj);
>  struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
>                                             size_t size);
> +struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
> +                                    struct drm_gem_object *obj, int
> flags);
>  struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
>                                 struct dma_buf *dma_buf);
>
> diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
> index e00459d..692d6f2 100644
> --- a/drivers/gpu/drm/udl/udl_gem.c
> +++ b/drivers/gpu/drm/udl/udl_gem.c
> @@ -240,74 +240,3 @@ unlock:
>         mutex_unlock(&dev->struct_mutex);
>         return ret;
>  }
> -
> -static int udl_prime_create(struct drm_device *dev,
> -                           size_t size,
> -                           struct sg_table *sg,
> -                           struct udl_gem_object **obj_p)
> -{
> -       struct udl_gem_object *obj;
> -       int npages;
> -
> -       npages = size / PAGE_SIZE;
> -
> -       *obj_p = NULL;
> -       obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
> -       if (!obj)
> -               return -ENOMEM;
> -
> -       obj->sg = sg;
> -       obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
> -       if (obj->pages == NULL) {
> -               DRM_ERROR("obj pages is NULL %d\n", npages);
> -               return -ENOMEM;
> -       }
> -
> -       drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
> -
> -       *obj_p = obj;
> -       return 0;
> -}
> -
> -struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
> -                               struct dma_buf *dma_buf)
> -{
> -       struct dma_buf_attachment *attach;
> -       struct sg_table *sg;
> -       struct udl_gem_object *uobj;
> -       int ret;
> -
> -       /* need to attach */
> -       get_device(dev->dev);
> -       attach = dma_buf_attach(dma_buf, dev->dev);
> -       if (IS_ERR(attach)) {
> -               put_device(dev->dev);
> -               return ERR_CAST(attach);
> -       }
> -
> -       get_dma_buf(dma_buf);
> -
> -       sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
> -       if (IS_ERR(sg)) {
> -               ret = PTR_ERR(sg);
> -               goto fail_detach;
> -       }
> -
> -       ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
> -       if (ret) {
> -               goto fail_unmap;
> -       }
> -
> -       uobj->base.import_attach = attach;
> -       uobj->flags = UDL_BO_WC;
> -
> -       return &uobj->base;
> -
> -fail_unmap:
> -       dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
> -fail_detach:
> -       dma_buf_detach(dma_buf, attach);
> -       dma_buf_put(dma_buf);
> -       put_device(dev->dev);
> -       return ERR_PTR(ret);
> -}
> --
> 2.1.0.rc2.206.gedb03e5
>
>
diff mbox

Patch

diff --git a/drivers/gpu/drm/udl/Makefile b/drivers/gpu/drm/udl/Makefile
index 05c7481..195bcac 100644
--- a/drivers/gpu/drm/udl/Makefile
+++ b/drivers/gpu/drm/udl/Makefile
@@ -1,6 +1,6 @@ 
 
 ccflags-y := -Iinclude/drm
 
-udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o udl_fb.o udl_transfer.o udl_gem.o
+udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o udl_fb.o udl_transfer.o udl_gem.o udl_dmabuf.o
 
 obj-$(CONFIG_DRM_UDL) := udl.o
diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c
new file mode 100644
index 0000000..1d85c3a
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_dmabuf.c
@@ -0,0 +1,273 @@ 
+/*
+ * udl_dmabuf.c
+ *
+ * Copyright (c) 2014 The Chromium OS Authors
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <drm/drmP.h>
+#include "udl_drv.h"
+#include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
+
+struct udl_drm_dmabuf_attachment {
+	struct sg_table sgt;
+	enum dma_data_direction dir;
+	bool is_mapped;
+};
+
+static int udl_attach_dma_buf(struct dma_buf *dmabuf,
+			      struct device *dev,
+			      struct dma_buf_attachment *attach)
+{
+	struct udl_drm_dmabuf_attachment *udl_attach;
+
+	DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
+			attach->dmabuf->size);
+
+	udl_attach = kzalloc(sizeof(*udl_attach), GFP_KERNEL);
+	if (!udl_attach)
+		return -ENOMEM;
+
+	udl_attach->dir = DMA_NONE;
+	attach->priv = udl_attach;
+
+	return 0;
+}
+
+static void udl_detach_dma_buf(struct dma_buf *dmabuf,
+			       struct dma_buf_attachment *attach)
+{
+	struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
+	struct sg_table *sgt;
+
+	if (!udl_attach)
+		return;
+
+	DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
+			attach->dmabuf->size);
+
+	sgt = &udl_attach->sgt;
+
+	if (udl_attach->dir != DMA_NONE)
+		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+				udl_attach->dir);
+
+	sg_free_table(sgt);
+	kfree(udl_attach);
+	attach->priv = NULL;
+}
+
+static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
+					enum dma_data_direction dir)
+{
+	struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
+	struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
+	struct drm_device *dev = obj->base.dev;
+	struct scatterlist *rd, *wr;
+	struct sg_table *sgt = NULL;
+	unsigned int i;
+	int page_count;
+	int nents, ret;
+
+	DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n", dev_name(attach->dev),
+			attach->dmabuf->size, dir);
+
+	/* just return current sgt if already requested. */
+	if (udl_attach->dir == dir && udl_attach->is_mapped)
+		return &udl_attach->sgt;
+
+	if (!obj->pages) {
+		DRM_ERROR("pages is null.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	page_count = obj->base.size / PAGE_SIZE;
+	obj->sg = drm_prime_pages_to_sg(obj->pages, page_count);
+	if (!obj->sg) {
+		DRM_ERROR("sg is null.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	sgt = &udl_attach->sgt;
+
+	ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL);
+	if (ret) {
+		DRM_ERROR("failed to alloc sgt.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
+	rd = obj->sg->sgl;
+	wr = sgt->sgl;
+	for (i = 0; i < sgt->orig_nents; ++i) {
+		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+		rd = sg_next(rd);
+		wr = sg_next(wr);
+	}
+
+	if (dir != DMA_NONE) {
+		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+		if (!nents) {
+			DRM_ERROR("failed to map sgl with iommu.\n");
+			sg_free_table(sgt);
+			sgt = ERR_PTR(-EIO);
+			goto err_unlock;
+		}
+	}
+
+	udl_attach->is_mapped = true;
+	udl_attach->dir = dir;
+	attach->priv = udl_attach;
+
+err_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return sgt;
+}
+
+static void udl_unmap_dma_buf(struct dma_buf_attachment *attach,
+			      struct sg_table *sgt,
+			      enum dma_data_direction dir)
+{
+	/* Nothing to do. */
+	DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir:%d\n", dev_name(attach->dev),
+			attach->dmabuf->size, dir);
+}
+
+static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
+{
+	/* TODO */
+
+	return NULL;
+}
+
+static void *udl_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+				    unsigned long page_num)
+{
+	/* TODO */
+
+	return NULL;
+}
+
+static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
+			      unsigned long page_num, void *addr)
+{
+	/* TODO */
+}
+
+static void udl_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+				     unsigned long page_num,
+				     void *addr)
+{
+	/* TODO */
+}
+
+static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
+			   struct vm_area_struct *vma)
+{
+	/* TODO */
+
+	return -EINVAL;
+}
+
+static struct dma_buf_ops udl_dmabuf_ops = {
+	.attach			= udl_attach_dma_buf,
+	.detach			= udl_detach_dma_buf,
+	.map_dma_buf		= udl_map_dma_buf,
+	.unmap_dma_buf		= udl_unmap_dma_buf,
+	.kmap			= udl_dmabuf_kmap,
+	.kmap_atomic		= udl_dmabuf_kmap_atomic,
+	.kunmap			= udl_dmabuf_kunmap,
+	.kunmap_atomic		= udl_dmabuf_kunmap_atomic,
+	.mmap			= udl_dmabuf_mmap,
+	.release		= drm_gem_dmabuf_release,
+};
+
+struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
+				     struct drm_gem_object *obj, int flags)
+{
+	return dma_buf_export(obj, &udl_dmabuf_ops, obj->size, flags, NULL);
+}
+
+static int udl_prime_create(struct drm_device *dev,
+			    size_t size,
+			    struct sg_table *sg,
+			    struct udl_gem_object **obj_p)
+{
+	struct udl_gem_object *obj;
+	int npages;
+
+	npages = size / PAGE_SIZE;
+
+	*obj_p = NULL;
+	obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
+	if (!obj)
+		return -ENOMEM;
+
+	obj->sg = sg;
+	obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+	if (obj->pages == NULL) {
+		DRM_ERROR("obj pages is NULL %d\n", npages);
+		return -ENOMEM;
+	}
+
+	drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
+
+	*obj_p = obj;
+	return 0;
+}
+
+struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
+				struct dma_buf *dma_buf)
+{
+	struct dma_buf_attachment *attach;
+	struct sg_table *sg;
+	struct udl_gem_object *uobj;
+	int ret;
+
+	/* need to attach */
+	get_device(dev->dev);
+	attach = dma_buf_attach(dma_buf, dev->dev);
+	if (IS_ERR(attach)) {
+		put_device(dev->dev);
+		return ERR_CAST(attach);
+	}
+
+	get_dma_buf(dma_buf);
+
+	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(sg)) {
+		ret = PTR_ERR(sg);
+		goto fail_detach;
+	}
+
+	ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
+	if (ret)
+		goto fail_unmap;
+
+	uobj->base.import_attach = attach;
+	uobj->flags = UDL_BO_WC;
+
+	return &uobj->base;
+
+fail_unmap:
+	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+	dma_buf_detach(dma_buf, attach);
+	dma_buf_put(dma_buf);
+	put_device(dev->dev);
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 8607e9e..d5728ec 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -51,7 +51,9 @@  static struct drm_driver driver = {
 	.dumb_destroy = drm_gem_dumb_destroy,
 	.fops = &udl_driver_fops,
 
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_export = udl_gem_prime_export,
 	.gem_prime_import = udl_gem_prime_import,
 
 	.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 3082780..1b132d7 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -124,6 +124,8 @@  int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
 void udl_gem_free_object(struct drm_gem_object *gem_obj);
 struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
 					    size_t size);
+struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
+				     struct drm_gem_object *obj, int flags);
 struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
 				struct dma_buf *dma_buf);
 
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index e00459d..692d6f2 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -240,74 +240,3 @@  unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
-
-static int udl_prime_create(struct drm_device *dev,
-			    size_t size,
-			    struct sg_table *sg,
-			    struct udl_gem_object **obj_p)
-{
-	struct udl_gem_object *obj;
-	int npages;
-
-	npages = size / PAGE_SIZE;
-
-	*obj_p = NULL;
-	obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
-	if (!obj)
-		return -ENOMEM;
-
-	obj->sg = sg;
-	obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
-	if (obj->pages == NULL) {
-		DRM_ERROR("obj pages is NULL %d\n", npages);
-		return -ENOMEM;
-	}
-
-	drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
-
-	*obj_p = obj;
-	return 0;
-}
-
-struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
-				struct dma_buf *dma_buf)
-{
-	struct dma_buf_attachment *attach;
-	struct sg_table *sg;
-	struct udl_gem_object *uobj;
-	int ret;
-
-	/* need to attach */
-	get_device(dev->dev);
-	attach = dma_buf_attach(dma_buf, dev->dev);
-	if (IS_ERR(attach)) {
-		put_device(dev->dev);
-		return ERR_CAST(attach);
-	}
-
-	get_dma_buf(dma_buf);
-
-	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
-	if (IS_ERR(sg)) {
-		ret = PTR_ERR(sg);
-		goto fail_detach;
-	}
-
-	ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
-	if (ret) {
-		goto fail_unmap;
-	}
-
-	uobj->base.import_attach = attach;
-	uobj->flags = UDL_BO_WC;
-
-	return &uobj->base;
-
-fail_unmap:
-	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
-fail_detach:
-	dma_buf_detach(dma_buf, attach);
-	dma_buf_put(dma_buf);
-	put_device(dev->dev);
-	return ERR_PTR(ret);
-}