diff mbox

[2/3] drm/vgem: Enable dmabuf interface for export

Message ID 1466692534-28303-2-git-send-email-chris@chris-wilson.co.uk (mailing list archive)
State New, archived
Headers show

Commit Message

Chris Wilson June 23, 2016, 2:35 p.m. UTC
Enable the standard GEM dma-buf interface provided by the DRM core, but
only for exporting the VGEM object. This allows passing around the VGEM
objects created from the dumb interface and using them as sources
elsewhere. Creating a VGEM object for a foreign handle is not supported.

v2: With additional completeness.
v3: Need to clear the CPU cache upon exporting the dma-addresses.
v4: Use drm_gem_put_pages() as well.

Testcase: igt/vgem_basic/dmabuf-*
Testcase: igt/prime_vgem
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Sean Paul <seanpaul@chromium.org>
Cc: Zach Reizner <zachr@google.com>
---
 drivers/gpu/drm/vgem/vgem_drv.c | 104 +++++++++++++++++++++++++++++++++++++++-
 1 file changed, 103 insertions(+), 1 deletion(-)

Comments

Matthew Auld July 1, 2016, 4:56 p.m. UTC | #1
> +static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
> +{
> +       long n_pages = obj->size >> PAGE_SHIFT;
> +       struct sg_table *st;
> +       struct page **pages;
> +       int ret;
> +
> +       st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
> +       if (st == NULL)
> +               return ERR_PTR(-ENOMEM);
> +
> +       pages = drm_gem_get_pages(obj);
> +       if (IS_ERR(pages)) {
> +               ret = PTR_ERR(pages);
> +               goto err;
> +       }
> +
> +       ret = sg_alloc_table_from_pages(st, pages, n_pages,
> +                                       0, obj->size, GFP_KERNEL);
> +       drm_gem_put_pages(obj, pages, false, false);
> +       if (ret)
> +               goto err;
> +
> +       return st;
> +
> +err:
> +       kfree(st);
> +       return ERR_PTR(ret);
> +}
> +
Couldn't this be written more simply as:

pages = drm_gem_get_pages(obj);
if (IS_ERR(pages))
        return ERR_CAST(pages);

st = drm_prime_pages_to_sg(pages, n_pages);
drm_gem_put_pages(obj, pages, false, false);

return st;

But either way:
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Chris Wilson July 1, 2016, 8:13 p.m. UTC | #2
On Fri, Jul 01, 2016 at 05:56:25PM +0100, Matthew Auld wrote:
> > +static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
> > +{
> > +       long n_pages = obj->size >> PAGE_SHIFT;
> > +       struct sg_table *st;
> > +       struct page **pages;
> > +       int ret;
> > +
> > +       st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
> > +       if (st == NULL)
> > +               return ERR_PTR(-ENOMEM);
> > +
> > +       pages = drm_gem_get_pages(obj);
> > +       if (IS_ERR(pages)) {
> > +               ret = PTR_ERR(pages);
> > +               goto err;
> > +       }
> > +
> > +       ret = sg_alloc_table_from_pages(st, pages, n_pages,
> > +                                       0, obj->size, GFP_KERNEL);
> > +       drm_gem_put_pages(obj, pages, false, false);
> > +       if (ret)
> > +               goto err;
> > +
> > +       return st;
> > +
> > +err:
> > +       kfree(st);
> > +       return ERR_PTR(ret);
> > +}
> > +
> Couldn't this be written more simply as:
> 
> pages = drm_gem_get_pages(obj);
> if (IS_ERR(pages))
>         return ERR_CAST(pages);
> 
> st = drm_prime_pages_to_sg(pages, n_pages);
> drm_gem_put_pages(obj, pages, false, false);
> 
> return st;

That looks better, thanks.
-Chris
diff mbox

Patch

diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index c161b6d7e427..69468b5f3d82 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -192,14 +192,116 @@  static const struct file_operations vgem_driver_fops = {
 	.release	= drm_release,
 };
 
+static int vgem_prime_pin(struct drm_gem_object *obj)
+{
+	long n_pages = obj->size >> PAGE_SHIFT;
+	struct page **pages;
+
+	/* Flush the object from the CPU cache so that importers can rely
+	 * on coherent indirect access via the exported dma-address.
+	 */
+	pages = drm_gem_get_pages(obj);
+	if (IS_ERR(pages))
+		return PTR_ERR(pages);
+
+	drm_clflush_pages(pages, n_pages);
+	drm_gem_put_pages(obj, pages, true, false);
+
+	return 0;
+}
+
+static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+	long n_pages = obj->size >> PAGE_SHIFT;
+	struct sg_table *st;
+	struct page **pages;
+	int ret;
+
+	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (st == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	pages = drm_gem_get_pages(obj);
+	if (IS_ERR(pages)) {
+		ret = PTR_ERR(pages);
+		goto err;
+	}
+
+	ret = sg_alloc_table_from_pages(st, pages, n_pages,
+					0, obj->size, GFP_KERNEL);
+	drm_gem_put_pages(obj, pages, false, false);
+	if (ret)
+		goto err;
+
+	return st;
+
+err:
+	kfree(st);
+	return ERR_PTR(ret);
+}
+
+static void *vgem_prime_vmap(struct drm_gem_object *obj)
+{
+	long n_pages = obj->size >> PAGE_SHIFT;
+	struct page **pages;
+	void *addr;
+
+	pages = drm_gem_get_pages(obj);
+	if (IS_ERR(pages))
+		return NULL;
+
+	addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL_IO));
+	drm_gem_put_pages(obj, pages, false, false);
+
+	return addr;
+}
+
+static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	vunmap(vaddr);
+}
+
+static int vgem_prime_mmap(struct drm_gem_object *obj,
+			   struct vm_area_struct *vma)
+{
+	int ret;
+
+	if (obj->size < vma->vm_end - vma->vm_start)
+		return -EINVAL;
+
+	if (!obj->filp)
+		return -ENODEV;
+
+	ret = obj->filp->f_op->mmap(obj->filp, vma);
+	if (ret)
+		return ret;
+
+	fput(vma->vm_file);
+	vma->vm_file = get_file(obj->filp);
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+	return 0;
+}
+
 static struct drm_driver vgem_driver = {
-	.driver_features		= DRIVER_GEM,
+	.driver_features		= DRIVER_GEM | DRIVER_PRIME,
 	.gem_free_object_unlocked	= vgem_gem_free_object,
 	.gem_vm_ops			= &vgem_gem_vm_ops,
 	.ioctls				= vgem_ioctls,
 	.fops				= &vgem_driver_fops,
+
 	.dumb_create			= vgem_gem_dumb_create,
 	.dumb_map_offset		= vgem_gem_dumb_map,
+
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.gem_prime_pin = vgem_prime_pin,
+	.gem_prime_export = drm_gem_prime_export,
+	.gem_prime_get_sg_table = vgem_prime_get_sg_table,
+	.gem_prime_vmap = vgem_prime_vmap,
+	.gem_prime_vunmap = vgem_prime_vunmap,
+	.gem_prime_mmap = vgem_prime_mmap,
+
 	.name	= DRIVER_NAME,
 	.desc	= DRIVER_DESC,
 	.date	= DRIVER_DATE,