
[v2,5/5] drm: GEM CMA: Add DRM PRIME support

Message ID 1370312422-25027-6-git-send-email-laurent.pinchart+renesas@ideasonboard.com (mailing list archive)
State Superseded

Commit Message

Laurent Pinchart June 4, 2013, 2:20 a.m. UTC
Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
---
 drivers/gpu/drm/drm_gem_cma_helper.c | 321 ++++++++++++++++++++++++++++++++++-
 include/drm/drm_gem_cma_helper.h     |   9 +
 2 files changed, 327 insertions(+), 3 deletions(-)

Comments

Rob Clark June 4, 2013, 9:56 p.m. UTC | #1
couple small comments, other than those it looks ok

BR,
-R

On Mon, Jun 3, 2013 at 10:20 PM, Laurent Pinchart
<laurent.pinchart+renesas@ideasonboard.com> wrote:
> Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
> ---
>  drivers/gpu/drm/drm_gem_cma_helper.c | 321 ++++++++++++++++++++++++++++++++++-
>  include/drm/drm_gem_cma_helper.h     |   9 +
>  2 files changed, 327 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
> index 7a4db4e..1dc2157 100644
> --- a/drivers/gpu/drm/drm_gem_cma_helper.c
> +++ b/drivers/gpu/drm/drm_gem_cma_helper.c
> @@ -21,6 +21,9 @@
>  #include <linux/slab.h>
>  #include <linux/mutex.h>
>  #include <linux/export.h>
> +#if CONFIG_DMA_SHARED_BUFFER
> +#include <linux/dma-buf.h>
> +#endif

I don't think we need the #if, since drm selects DMA_SHARED_BUFFER

and same for other spot below
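
fwiw, config DRM in drivers/gpu/drm/Kconfig already carries a "select DMA_SHARED_BUFFER" line (if I remember the Kconfig right), so the header include can simply be unconditional:

    #include <linux/dma-buf.h>

and the #if/#endif around the dma-buf helpers further down can go away as well.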

>  #include <linux/dma-mapping.h>
>
>  #include <drm/drmP.h>
> @@ -82,6 +85,8 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
>                 unsigned int size)
>  {
>         struct drm_gem_cma_object *cma_obj;
> +       struct sg_table *sgt = NULL;
> +       int ret;
>
>         size = round_up(size, PAGE_SIZE);
>
> @@ -94,11 +99,29 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
>         if (!cma_obj->vaddr) {
>                 dev_err(drm->dev, "failed to allocate buffer with size %d\n",
>                         size);
> -               drm_gem_cma_free_object(&cma_obj->base);
> -               return ERR_PTR(-ENOMEM);
> +               ret = -ENOMEM;
> +               goto error;
>         }
>
> +       sgt = kzalloc(sizeof(*cma_obj->sgt), GFP_KERNEL);
> +       if (sgt == NULL) {
> +               ret = -ENOMEM;
> +               goto error;
> +       }
> +
> +       ret = dma_get_sgtable(drm->dev, sgt, cma_obj->vaddr,
> +                             cma_obj->paddr, size);
> +       if (ret < 0)
> +               goto error;
> +
> +       cma_obj->sgt = sgt;
> +
>         return cma_obj;
> +
> +error:
> +       kfree(sgt);
> +       drm_gem_cma_free_object(&cma_obj->base);
> +       return ERR_PTR(ret);
>  }
>  EXPORT_SYMBOL_GPL(drm_gem_cma_create);
>
> @@ -156,9 +179,16 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
>
>         cma_obj = to_drm_gem_cma_obj(gem_obj);
>
> -       if (cma_obj->vaddr)
> +       if (cma_obj->vaddr) {
>                 dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size,
>                                       cma_obj->vaddr, cma_obj->paddr);
> +               if (cma_obj->sgt) {
> +                       sg_free_table(cma_obj->sgt);
> +                       kfree(cma_obj->sgt);
> +               }
> +       } else if (gem_obj->import_attach) {
> +               drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
> +       }
>
>         drm_gem_object_release(gem_obj);
>
> @@ -291,3 +321,288 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m
>  }
>  EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
>  #endif
> +
> +/* -----------------------------------------------------------------------------
> + * DMA-BUF
> + */
> +
> +#if CONFIG_DMA_SHARED_BUFFER
> +struct drm_gem_cma_dmabuf_attachment {
> +       struct sg_table sgt;
> +       enum dma_data_direction dir;
> +};
> +
> +static int drm_gem_cma_dmabuf_attach(struct dma_buf *dmabuf, struct device *dev,
> +                                    struct dma_buf_attachment *attach)
> +{
> +       struct drm_gem_cma_dmabuf_attachment *cma_attach;
> +
> +       cma_attach = kzalloc(sizeof(*cma_attach), GFP_KERNEL);
> +       if (!cma_attach)
> +               return -ENOMEM;
> +
> +       cma_attach->dir = DMA_NONE;
> +       attach->priv = cma_attach;
> +
> +       return 0;
> +}
> +
> +static void drm_gem_cma_dmabuf_detach(struct dma_buf *dmabuf,
> +                                     struct dma_buf_attachment *attach)
> +{
> +       struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
> +       struct sg_table *sgt;
> +
> +       if (cma_attach == NULL)
> +               return;
> +
> +       sgt = &cma_attach->sgt;
> +
> +       if (cma_attach->dir != DMA_NONE)
> +               dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
> +                               cma_attach->dir);
> +
> +       sg_free_table(sgt);
> +       kfree(cma_attach);
> +       attach->priv = NULL;
> +}
> +
> +static struct sg_table *
> +drm_gem_cma_dmabuf_map(struct dma_buf_attachment *attach,
> +                      enum dma_data_direction dir)
> +{
> +       struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
> +       struct drm_gem_cma_object *cma_obj = attach->dmabuf->priv;
> +       struct drm_device *drm = cma_obj->base.dev;
> +       struct scatterlist *rd, *wr;
> +       struct sg_table *sgt;
> +       unsigned int i;
> +       int nents, ret;
> +
> +       DRM_DEBUG_PRIME("\n");
> +
> +       if (WARN_ON(dir == DMA_NONE))
> +               return ERR_PTR(-EINVAL);
> +
> +       /* Return the cached mapping when possible. */
> +       if (cma_attach->dir == dir)
> +               return &cma_attach->sgt;
> +
> +       /* Two mappings with different directions for the same attachment are
> +        * not allowed.
> +        */
> +       if (WARN_ON(cma_attach->dir != DMA_NONE))
> +               return ERR_PTR(-EBUSY);
> +
> +       sgt = &cma_attach->sgt;
> +
> +       ret = sg_alloc_table(sgt, cma_obj->sgt->orig_nents, GFP_KERNEL);
> +       if (ret) {
> +               DRM_ERROR("failed to alloc sgt.\n");
> +               return ERR_PTR(-ENOMEM);
> +       }
> +
> +       mutex_lock(&drm->struct_mutex);
> +
> +       rd = cma_obj->sgt->sgl;
> +       wr = sgt->sgl;
> +       for (i = 0; i < sgt->orig_nents; ++i) {
> +               sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
> +               rd = sg_next(rd);
> +               wr = sg_next(wr);
> +       }
> +
> +       nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
> +       if (!nents) {
> +               DRM_ERROR("failed to map sgl with iommu.\n");
> +               sg_free_table(sgt);
> +               sgt = ERR_PTR(-EIO);
> +               goto done;
> +       }
> +
> +       cma_attach->dir = dir;
> +       attach->priv = cma_attach;
> +
> +       DRM_DEBUG_PRIME("buffer size = %zu\n", cma_obj->base.size);
> +
> +done:
> +       mutex_unlock(&drm->struct_mutex);
> +       return sgt;
> +}
> +
> +static void drm_gem_cma_dmabuf_unmap(struct dma_buf_attachment *attach,
> +                                    struct sg_table *sgt,
> +                                    enum dma_data_direction dir)
> +{
> +       /* Nothing to do. */

I kinda think that if you don't support multiple mappings with
different direction, that you should at least support unmap and then
let someone else map with other direction
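
something along these lines (untested, just to illustrate the idea, at the cost of losing the cached mapping) would let an importer unmap and then re-map with the other direction:

    static void drm_gem_cma_dmabuf_unmap(struct dma_buf_attachment *attach,
                                         struct sg_table *sgt,
                                         enum dma_data_direction dir)
    {
            struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;

            if (cma_attach->dir == DMA_NONE)
                    return;

            /* Drop the cached mapping so a later map can use another direction. */
            dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, cma_attach->dir);
            sg_free_table(sgt);
            cma_attach->dir = DMA_NONE;
    }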

> +}
> +
> +static void drm_gem_cma_dmabuf_release(struct dma_buf *dmabuf)
> +{
> +       struct drm_gem_cma_object *cma_obj = dmabuf->priv;
> +
> +       DRM_DEBUG_PRIME("%s\n", __FILE__);
> +
> +       /*
> +        * This function is called when the dma-buf file's f_count drops to
> +        * zero. Drop the GEM object reference that was taken when the buffer
> +        * was exported through drm_prime_handle_to_fd(), so that the buffer
> +        * and its resources can be released.
> +        */
> +       if (cma_obj->base.export_dma_buf == dmabuf) {
> +               cma_obj->base.export_dma_buf = NULL;
> +
> +               /*
> +                * drop this gem object refcount to release allocated buffer
> +                * and resources.
> +                */
> +               drm_gem_object_unreference_unlocked(&cma_obj->base);
> +       }
> +}
> +
> +static void *drm_gem_cma_dmabuf_kmap_atomic(struct dma_buf *dmabuf,
> +                                           unsigned long page_num)
> +{
> +       /* TODO */
> +
> +       return NULL;
> +}
> +
> +static void drm_gem_cma_dmabuf_kunmap_atomic(struct dma_buf *dmabuf,
> +                                            unsigned long page_num, void *addr)
> +{
> +       /* TODO */
> +}
> +
> +static void *drm_gem_cma_dmabuf_kmap(struct dma_buf *dmabuf,
> +                                    unsigned long page_num)
> +{
> +       /* TODO */
> +
> +       return NULL;
> +}
> +
> +static void drm_gem_cma_dmabuf_kunmap(struct dma_buf *dmabuf,
> +                                     unsigned long page_num, void *addr)
> +{
> +       /* TODO */
> +}
> +
> +static int drm_gem_cma_dmabuf_mmap(struct dma_buf *dmabuf,
> +                                  struct vm_area_struct *vma)
> +{
> +       struct drm_gem_cma_object *cma_obj = dmabuf->priv;
> +       struct drm_gem_object *gem_obj = &cma_obj->base;
> +       int ret;
> +
> +       ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
> +       if (ret < 0)
> +               return ret;
> +
> +       return drm_gem_cma_mmap_obj(cma_obj, vma);
> +}
> +
> +static void *drm_gem_cma_dmabuf_vmap(struct dma_buf *dmabuf)
> +{
> +       struct drm_gem_cma_object *cma_obj = dmabuf->priv;
> +
> +       return cma_obj->vaddr;
> +}
> +
> +static struct dma_buf_ops drm_gem_cma_dmabuf_ops = {
> +       .attach                 = drm_gem_cma_dmabuf_attach,
> +       .detach                 = drm_gem_cma_dmabuf_detach,
> +       .map_dma_buf            = drm_gem_cma_dmabuf_map,
> +       .unmap_dma_buf          = drm_gem_cma_dmabuf_unmap,
> +       .kmap                   = drm_gem_cma_dmabuf_kmap,
> +       .kmap_atomic            = drm_gem_cma_dmabuf_kmap_atomic,
> +       .kunmap                 = drm_gem_cma_dmabuf_kunmap,
> +       .kunmap_atomic          = drm_gem_cma_dmabuf_kunmap_atomic,
> +       .mmap                   = drm_gem_cma_dmabuf_mmap,
> +       .vmap                   = drm_gem_cma_dmabuf_vmap,
> +       .release                = drm_gem_cma_dmabuf_release,
> +};
> +
> +struct dma_buf *drm_gem_cma_dmabuf_export(struct drm_device *drm,
> +                                         struct drm_gem_object *obj, int flags)
> +{
> +       struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
> +
> +       return dma_buf_export(cma_obj, &drm_gem_cma_dmabuf_ops,
> +                             cma_obj->base.size, flags);
> +}
> +EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_export);
> +
> +struct drm_gem_object *drm_gem_cma_dmabuf_import(struct drm_device *drm,
> +                                                struct dma_buf *dma_buf)
> +{
> +       struct drm_gem_cma_object *cma_obj;
> +       struct dma_buf_attachment *attach;
> +       struct sg_table *sgt;
> +       int ret;
> +
> +       DRM_DEBUG_PRIME("%s\n", __FILE__);
> +
> +       /* is this one of own objects? */
> +       if (dma_buf->ops == &drm_gem_cma_dmabuf_ops) {
> +               struct drm_gem_object *obj;
> +
> +               cma_obj = dma_buf->priv;
> +               obj = &cma_obj->base;
> +
> +               /* is it from our device? */
> +               if (obj->dev == drm) {
> +                       /*
> +                        * Importing dmabuf exported from our own gem increases
> +                        * refcount on gem itself instead of f_count of dmabuf.
> +                        */
> +                       drm_gem_object_reference(obj);
> +                       dma_buf_put(dma_buf);
> +                       return obj;
> +               }
> +       }
> +
> +       /* Create a CMA GEM buffer. */
> +       cma_obj = __drm_gem_cma_create(drm, dma_buf->size);
> +       if (IS_ERR(cma_obj))
> +               return ERR_PTR(PTR_ERR(cma_obj));
> +
> +       /* Attach to the buffer and map it. Make sure the mapping is contiguous
> +        * on the device memory bus, as that's all we support.
> +        */
> +       attach = dma_buf_attach(dma_buf, drm->dev);
> +       if (IS_ERR(attach)) {
> +               ret = -EINVAL;
> +               goto error_gem_free;
> +       }
> +
> +       sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
> +       if (IS_ERR_OR_NULL(sgt)) {
> +               ret = sgt ? PTR_ERR(sgt) : -ENOMEM;
> +               goto error_buf_detach;
> +       }
> +
> +       if (sgt->nents != 1) {
> +               ret = -EINVAL;
> +               goto error_buf_unmap;
> +       }
> +
> +       cma_obj->base.import_attach = attach;
> +       cma_obj->paddr = sg_dma_address(sgt->sgl);
> +       cma_obj->sgt = sgt;
> +
> +       DRM_DEBUG_PRIME("dma_addr = 0x%x, size = %zu\n", cma_obj->paddr,
> +                       dma_buf->size);
> +
> +       return &cma_obj->base;
> +
> +error_buf_unmap:
> +       dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
> +error_buf_detach:
> +       dma_buf_detach(dma_buf, attach);
> +error_gem_free:
> +       drm_gem_cma_free_object(&cma_obj->base);
> +       return ERR_PTR(ret);
> +}
> +EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_import);
> +#endif
> diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
> index 63397ce..6e17251 100644
> --- a/include/drm/drm_gem_cma_helper.h
> +++ b/include/drm/drm_gem_cma_helper.h
> @@ -4,6 +4,9 @@
>  struct drm_gem_cma_object {
>         struct drm_gem_object base;
>         dma_addr_t paddr;
> +       struct sg_table *sgt;
> +
> +       /* For objects with DMA memory allocated by GEM CMA */
>         void *vaddr;
>  };
>
> @@ -45,4 +48,10 @@ extern const struct vm_operations_struct drm_gem_cma_vm_ops;
>  void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m);
>  #endif
>
> +struct dma_buf *drm_gem_cma_dmabuf_export(struct drm_device *drm_dev,
> +                                         struct drm_gem_object *obj,
> +                                         int flags);
> +struct drm_gem_object *drm_gem_cma_dmabuf_import(struct drm_device *drm_dev,
> +                                                struct dma_buf *dma_buf);
> +
>  #endif /* __DRM_GEM_CMA_HELPER_H__ */
> --
> 1.8.1.5
>
Laurent Pinchart June 5, 2013, 1:22 a.m. UTC | #2
Hi Rob,

On Tuesday 04 June 2013 17:56:36 Rob Clark wrote:
> couple small comments, other than those it looks ok

Thanks for the review.

> On Mon, Jun 3, 2013 at 10:20 PM, Laurent Pinchart wrote:
> > Signed-off-by: Laurent Pinchart
> > <laurent.pinchart+renesas@ideasonboard.com>
> > ---
> > 
> >  drivers/gpu/drm/drm_gem_cma_helper.c | 321 +++++++++++++++++++++++++++++-
> >  include/drm/drm_gem_cma_helper.h     |   9 +
> >  2 files changed, 327 insertions(+), 3 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c
> > b/drivers/gpu/drm/drm_gem_cma_helper.c index 7a4db4e..1dc2157 100644
> > --- a/drivers/gpu/drm/drm_gem_cma_helper.c
> > +++ b/drivers/gpu/drm/drm_gem_cma_helper.c
> > @@ -21,6 +21,9 @@
> >  #include <linux/slab.h>
> >  #include <linux/mutex.h>
> >  #include <linux/export.h>
> > +#if CONFIG_DMA_SHARED_BUFFER
> > +#include <linux/dma-buf.h>
> > +#endif
> 
> I don't think we need the #if, since drm selects DMA_SHARED_BUFFER
> 
> and same for other spot below

Indeed. Will be fixed in the next version.

> >  #include <linux/dma-mapping.h>
> >  
> >  #include <drm/drmP.h>

[snip]

> > @@ -291,3 +321,288 @@ void drm_gem_cma_describe(struct drm_gem_cma_object
> > *cma_obj, struct seq_file *m> 
> >  }
> >  EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
> >  #endif
> > 
> > +
> > +/*
> > -------------------------------------------------------------------------
> > ---- + * DMA-BUF
> > + */
> > +
> > +#if CONFIG_DMA_SHARED_BUFFER
> > +struct drm_gem_cma_dmabuf_attachment {
> > +       struct sg_table sgt;
> > +       enum dma_data_direction dir;
> > +};
> > +
> > +static int drm_gem_cma_dmabuf_attach(struct dma_buf *dmabuf, struct
> > device *dev, +                                    struct
> > dma_buf_attachment *attach) +{
> > +       struct drm_gem_cma_dmabuf_attachment *cma_attach;
> > +
> > +       cma_attach = kzalloc(sizeof(*cma_attach), GFP_KERNEL);
> > +       if (!cma_attach)
> > +               return -ENOMEM;
> > +
> > +       cma_attach->dir = DMA_NONE;
> > +       attach->priv = cma_attach;
> > +
> > +       return 0;
> > +}
> > +
> > +static void drm_gem_cma_dmabuf_detach(struct dma_buf *dmabuf,
> > +                                     struct dma_buf_attachment *attach)
> > +{
> > +       struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
> > +       struct sg_table *sgt;
> > +
> > +       if (cma_attach == NULL)
> > +               return;
> > +
> > +       sgt = &cma_attach->sgt;
> > +
> > +       if (cma_attach->dir != DMA_NONE)
> > +               dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
> > +                               cma_attach->dir);
> > +
> > +       sg_free_table(sgt);
> > +       kfree(cma_attach);
> > +       attach->priv = NULL;
> > +}
> > +
> > +static struct sg_table *
> > +drm_gem_cma_dmabuf_map(struct dma_buf_attachment *attach,
> > +                      enum dma_data_direction dir)
> > +{
> > +       struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
> > +       struct drm_gem_cma_object *cma_obj = attach->dmabuf->priv;
> > +       struct drm_device *drm = cma_obj->base.dev;
> > +       struct scatterlist *rd, *wr;
> > +       struct sg_table *sgt;
> > +       unsigned int i;
> > +       int nents, ret;
> > +
> > +       DRM_DEBUG_PRIME("\n");
> > +
> > +       if (WARN_ON(dir == DMA_NONE))
> > +               return ERR_PTR(-EINVAL);
> > +
> > +       /* Return the cached mapping when possible. */
> > +       if (cma_attach->dir == dir)
> > +               return &cma_attach->sgt;
> > +
> > +       /* Two mappings with different directions for the same attachment
> > are +        * not allowed.
> > +        */
> > +       if (WARN_ON(cma_attach->dir != DMA_NONE))
> > +               return ERR_PTR(-EBUSY);
> > +
> > +       sgt = &cma_attach->sgt;
> > +
> > +       ret = sg_alloc_table(sgt, cma_obj->sgt->orig_nents, GFP_KERNEL);
> > +       if (ret) {
> > +               DRM_ERROR("failed to alloc sgt.\n");
> > +               return ERR_PTR(-ENOMEM);
> > +       }
> > +
> > +       mutex_lock(&drm->struct_mutex);
> > +
> > +       rd = cma_obj->sgt->sgl;
> > +       wr = sgt->sgl;
> > +       for (i = 0; i < sgt->orig_nents; ++i) {
> > +               sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
> > +               rd = sg_next(rd);
> > +               wr = sg_next(wr);
> > +       }
> > +
> > +       nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
> > +       if (!nents) {
> > +               DRM_ERROR("failed to map sgl with iommu.\n");
> > +               sg_free_table(sgt);
> > +               sgt = ERR_PTR(-EIO);
> > +               goto done;
> > +       }
> > +
> > +       cma_attach->dir = dir;
> > +       attach->priv = cma_attach;
> > +
> > +       DRM_DEBUG_PRIME("buffer size = %zu\n", cma_obj->base.size);
> > +
> > +done:
> > +       mutex_unlock(&drm->struct_mutex);
> > +       return sgt;
> > +}
> > +
> > +static void drm_gem_cma_dmabuf_unmap(struct dma_buf_attachment *attach,
> > +                                    struct sg_table *sgt,
> > +                                    enum dma_data_direction dir)
> > +{
> > +       /* Nothing to do. */
> 
> I kinda think that if you don't support multiple mappings with
> different direction, that you should at least support unmap and then
> let someone else map with other direction

That would make sense, but in that case I wouldn't be able to cache the 
mapping. It would probably be better to add proper support for multiple 
mappings with different directions.

Given that the mapping is cached in the attachment, this will only be an issue 
if a driver tries to map the same attachment twice with different directions. 
Isn't that an uncommon use case that could be fixed later ? :-) I'd like to 
get this set in v3.11 if possible.

> > +}
Rob Clark June 5, 2013, 3:05 a.m. UTC | #3
On Tue, Jun 4, 2013 at 9:22 PM, Laurent Pinchart
<laurent.pinchart@ideasonboard.com> wrote:
> Hi Rob,
>
> On Tuesday 04 June 2013 17:56:36 Rob Clark wrote:
>> couple small comments, other than those it looks ok
>
> Thanks for the review.
>
>> On Mon, Jun 3, 2013 at 10:20 PM, Laurent Pinchart wrote:
>> > Signed-off-by: Laurent Pinchart
>> > <laurent.pinchart+renesas@ideasonboard.com>
>> > ---
>> >
>> >  drivers/gpu/drm/drm_gem_cma_helper.c | 321 +++++++++++++++++++++++++++++-
>> >  include/drm/drm_gem_cma_helper.h     |   9 +
>> >  2 files changed, 327 insertions(+), 3 deletions(-)
>> >
>> > diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c
>> > b/drivers/gpu/drm/drm_gem_cma_helper.c index 7a4db4e..1dc2157 100644
>> > --- a/drivers/gpu/drm/drm_gem_cma_helper.c
>> > +++ b/drivers/gpu/drm/drm_gem_cma_helper.c
>> > @@ -21,6 +21,9 @@
>> >  #include <linux/slab.h>
>> >  #include <linux/mutex.h>
>> >  #include <linux/export.h>
>> > +#if CONFIG_DMA_SHARED_BUFFER
>> > +#include <linux/dma-buf.h>
>> > +#endif
>>
>> I don't think we need the #if, since drm selects DMA_SHARED_BUFFER
>>
>> and same for other spot below
>
> Indeed. Will be fixed in the next version.
>
>> >  #include <linux/dma-mapping.h>
>> >
>> >  #include <drm/drmP.h>
>
> [snip]
>
>> > @@ -291,3 +321,288 @@ void drm_gem_cma_describe(struct drm_gem_cma_object
>> > *cma_obj, struct seq_file *m>
>> >  }
>> >  EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
>> >  #endif
>> >
>> > +
>> > +/*
>> > -------------------------------------------------------------------------
>> > ---- + * DMA-BUF
>> > + */
>> > +
>> > +#if CONFIG_DMA_SHARED_BUFFER
>> > +struct drm_gem_cma_dmabuf_attachment {
>> > +       struct sg_table sgt;
>> > +       enum dma_data_direction dir;
>> > +};
>> > +
>> > +static int drm_gem_cma_dmabuf_attach(struct dma_buf *dmabuf, struct
>> > device *dev, +                                    struct
>> > dma_buf_attachment *attach) +{
>> > +       struct drm_gem_cma_dmabuf_attachment *cma_attach;
>> > +
>> > +       cma_attach = kzalloc(sizeof(*cma_attach), GFP_KERNEL);
>> > +       if (!cma_attach)
>> > +               return -ENOMEM;
>> > +
>> > +       cma_attach->dir = DMA_NONE;
>> > +       attach->priv = cma_attach;
>> > +
>> > +       return 0;
>> > +}
>> > +
>> > +static void drm_gem_cma_dmabuf_detach(struct dma_buf *dmabuf,
>> > +                                     struct dma_buf_attachment *attach)
>> > +{
>> > +       struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
>> > +       struct sg_table *sgt;
>> > +
>> > +       if (cma_attach == NULL)
>> > +               return;
>> > +
>> > +       sgt = &cma_attach->sgt;
>> > +
>> > +       if (cma_attach->dir != DMA_NONE)
>> > +               dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
>> > +                               cma_attach->dir);
>> > +
>> > +       sg_free_table(sgt);
>> > +       kfree(cma_attach);
>> > +       attach->priv = NULL;
>> > +}
>> > +
>> > +static struct sg_table *
>> > +drm_gem_cma_dmabuf_map(struct dma_buf_attachment *attach,
>> > +                      enum dma_data_direction dir)
>> > +{
>> > +       struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
>> > +       struct drm_gem_cma_object *cma_obj = attach->dmabuf->priv;
>> > +       struct drm_device *drm = cma_obj->base.dev;
>> > +       struct scatterlist *rd, *wr;
>> > +       struct sg_table *sgt;
>> > +       unsigned int i;
>> > +       int nents, ret;
>> > +
>> > +       DRM_DEBUG_PRIME("\n");
>> > +
>> > +       if (WARN_ON(dir == DMA_NONE))
>> > +               return ERR_PTR(-EINVAL);
>> > +
>> > +       /* Return the cached mapping when possible. */
>> > +       if (cma_attach->dir == dir)
>> > +               return &cma_attach->sgt;
>> > +
>> > +       /* Two mappings with different directions for the same attachment
>> > are +        * not allowed.
>> > +        */
>> > +       if (WARN_ON(cma_attach->dir != DMA_NONE))
>> > +               return ERR_PTR(-EBUSY);
>> > +
>> > +       sgt = &cma_attach->sgt;
>> > +
>> > +       ret = sg_alloc_table(sgt, cma_obj->sgt->orig_nents, GFP_KERNEL);
>> > +       if (ret) {
>> > +               DRM_ERROR("failed to alloc sgt.\n");
>> > +               return ERR_PTR(-ENOMEM);
>> > +       }
>> > +
>> > +       mutex_lock(&drm->struct_mutex);
>> > +
>> > +       rd = cma_obj->sgt->sgl;
>> > +       wr = sgt->sgl;
>> > +       for (i = 0; i < sgt->orig_nents; ++i) {
>> > +               sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
>> > +               rd = sg_next(rd);
>> > +               wr = sg_next(wr);
>> > +       }
>> > +
>> > +       nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
>> > +       if (!nents) {
>> > +               DRM_ERROR("failed to map sgl with iommu.\n");
>> > +               sg_free_table(sgt);
>> > +               sgt = ERR_PTR(-EIO);
>> > +               goto done;
>> > +       }
>> > +
>> > +       cma_attach->dir = dir;
>> > +       attach->priv = cma_attach;
>> > +
>> > +       DRM_DEBUG_PRIME("buffer size = %zu\n", cma_obj->base.size);
>> > +
>> > +done:
>> > +       mutex_unlock(&drm->struct_mutex);
>> > +       return sgt;
>> > +}
>> > +
>> > +static void drm_gem_cma_dmabuf_unmap(struct dma_buf_attachment *attach,
>> > +                                    struct sg_table *sgt,
>> > +                                    enum dma_data_direction dir)
>> > +{
>> > +       /* Nothing to do. */
>>
>> I kinda think that if you don't support multiple mappings with
>> different direction, that you should at least support unmap and then
>> let someone else map with other direction
>
> That would make sense, but in that case I wouldn't be able to cache the
> mapping. It would probably be better to add proper support for multiple
> mappings with different directions.

well, you could still cache it, you'd just have to invalidate that
cache on transition in direction
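
i.e. roughly (untested), at the top of drm_gem_cma_dmabuf_map(), instead of the -EBUSY check:

    /* Direction changed: invalidate the cached mapping and rebuild it below. */
    if (cma_attach->dir != DMA_NONE && cma_attach->dir != dir) {
            dma_unmap_sg(attach->dev, cma_attach->sgt.sgl,
                         cma_attach->sgt.nents, cma_attach->dir);
            sg_free_table(&cma_attach->sgt);
            cma_attach->dir = DMA_NONE;
    }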

> Given that the mapping is cached in the attachment, this will only be an issue
> if a driver tries to map the same attachment twice with different directions.
> Isn't that an uncommon use case that could be fixed later ? :-) I'd like to
> get this set in v3.11 if possible.

I don't feel strongly that this should block merging, vs fixing at a
(not too much later) date

BR,
-R

>> > +}
>
> --
> Regards,
>
> Laurent Pinchart
>
Daniel Vetter June 5, 2013, 8:44 a.m. UTC | #4
On Tue, Jun 04, 2013 at 11:05:36PM -0400, Rob Clark wrote:
> On Tue, Jun 4, 2013 at 9:22 PM, Laurent Pinchart
> <laurent.pinchart@ideasonboard.com> wrote:
> > Hi Rob,
> >
> > On Tuesday 04 June 2013 17:56:36 Rob Clark wrote:
> >> couple small comments, other than those it looks ok
> >
> > Thanks for the review.
> >
> >> On Mon, Jun 3, 2013 at 10:20 PM, Laurent Pinchart wrote:
> >> > Signed-off-by: Laurent Pinchart
> >> > <laurent.pinchart+renesas@ideasonboard.com>
> >> > ---
> >> >
> >> >  drivers/gpu/drm/drm_gem_cma_helper.c | 321 +++++++++++++++++++++++++++++-
> >> >  include/drm/drm_gem_cma_helper.h     |   9 +
> >> >  2 files changed, 327 insertions(+), 3 deletions(-)
> >> >
> >> > diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c
> >> > b/drivers/gpu/drm/drm_gem_cma_helper.c index 7a4db4e..1dc2157 100644
> >> > --- a/drivers/gpu/drm/drm_gem_cma_helper.c
> >> > +++ b/drivers/gpu/drm/drm_gem_cma_helper.c
> >> > @@ -21,6 +21,9 @@
> >> >  #include <linux/slab.h>
> >> >  #include <linux/mutex.h>
> >> >  #include <linux/export.h>
> >> > +#if CONFIG_DMA_SHARED_BUFFER
> >> > +#include <linux/dma-buf.h>
> >> > +#endif
> >>
> >> I don't think we need the #if, since drm selects DMA_SHARED_BUFFER
> >>
> >> and same for other spot below
> >
> > Indeed. Will be fixed in the next version.
> >
> >> >  #include <linux/dma-mapping.h>
> >> >
> >> >  #include <drm/drmP.h>
> >
> > [snip]
> >
> >> > @@ -291,3 +321,288 @@ void drm_gem_cma_describe(struct drm_gem_cma_object
> >> > *cma_obj, struct seq_file *m>
> >> >  }
> >> >  EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
> >> >  #endif
> >> >
> >> > +
> >> > +/*
> >> > -------------------------------------------------------------------------
> >> > ---- + * DMA-BUF
> >> > + */
> >> > +
> >> > +#if CONFIG_DMA_SHARED_BUFFER
> >> > +struct drm_gem_cma_dmabuf_attachment {
> >> > +       struct sg_table sgt;
> >> > +       enum dma_data_direction dir;
> >> > +};
> >> > +
> >> > +static int drm_gem_cma_dmabuf_attach(struct dma_buf *dmabuf, struct
> >> > device *dev, +                                    struct
> >> > dma_buf_attachment *attach) +{
> >> > +       struct drm_gem_cma_dmabuf_attachment *cma_attach;
> >> > +
> >> > +       cma_attach = kzalloc(sizeof(*cma_attach), GFP_KERNEL);
> >> > +       if (!cma_attach)
> >> > +               return -ENOMEM;
> >> > +
> >> > +       cma_attach->dir = DMA_NONE;
> >> > +       attach->priv = cma_attach;
> >> > +
> >> > +       return 0;
> >> > +}
> >> > +
> >> > +static void drm_gem_cma_dmabuf_detach(struct dma_buf *dmabuf,
> >> > +                                     struct dma_buf_attachment *attach)
> >> > +{
> >> > +       struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
> >> > +       struct sg_table *sgt;
> >> > +
> >> > +       if (cma_attach == NULL)
> >> > +               return;
> >> > +
> >> > +       sgt = &cma_attach->sgt;
> >> > +
> >> > +       if (cma_attach->dir != DMA_NONE)
> >> > +               dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
> >> > +                               cma_attach->dir);
> >> > +
> >> > +       sg_free_table(sgt);
> >> > +       kfree(cma_attach);
> >> > +       attach->priv = NULL;
> >> > +}
> >> > +
> >> > +static struct sg_table *
> >> > +drm_gem_cma_dmabuf_map(struct dma_buf_attachment *attach,
> >> > +                      enum dma_data_direction dir)
> >> > +{
> >> > +       struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
> >> > +       struct drm_gem_cma_object *cma_obj = attach->dmabuf->priv;
> >> > +       struct drm_device *drm = cma_obj->base.dev;
> >> > +       struct scatterlist *rd, *wr;
> >> > +       struct sg_table *sgt;
> >> > +       unsigned int i;
> >> > +       int nents, ret;
> >> > +
> >> > +       DRM_DEBUG_PRIME("\n");
> >> > +
> >> > +       if (WARN_ON(dir == DMA_NONE))
> >> > +               return ERR_PTR(-EINVAL);
> >> > +
> >> > +       /* Return the cached mapping when possible. */
> >> > +       if (cma_attach->dir == dir)
> >> > +               return &cma_attach->sgt;
> >> > +
> >> > +       /* Two mappings with different directions for the same attachment
> >> > are +        * not allowed.
> >> > +        */
> >> > +       if (WARN_ON(cma_attach->dir != DMA_NONE))
> >> > +               return ERR_PTR(-EBUSY);
> >> > +
> >> > +       sgt = &cma_attach->sgt;
> >> > +
> >> > +       ret = sg_alloc_table(sgt, cma_obj->sgt->orig_nents, GFP_KERNEL);
> >> > +       if (ret) {
> >> > +               DRM_ERROR("failed to alloc sgt.\n");
> >> > +               return ERR_PTR(-ENOMEM);
> >> > +       }
> >> > +
> >> > +       mutex_lock(&drm->struct_mutex);
> >> > +
> >> > +       rd = cma_obj->sgt->sgl;
> >> > +       wr = sgt->sgl;
> >> > +       for (i = 0; i < sgt->orig_nents; ++i) {
> >> > +               sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
> >> > +               rd = sg_next(rd);
> >> > +               wr = sg_next(wr);
> >> > +       }
> >> > +
> >> > +       nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
> >> > +       if (!nents) {
> >> > +               DRM_ERROR("failed to map sgl with iommu.\n");
> >> > +               sg_free_table(sgt);
> >> > +               sgt = ERR_PTR(-EIO);
> >> > +               goto done;
> >> > +       }
> >> > +
> >> > +       cma_attach->dir = dir;
> >> > +       attach->priv = cma_attach;
> >> > +
> >> > +       DRM_DEBUG_PRIME("buffer size = %zu\n", cma_obj->base.size);
> >> > +
> >> > +done:
> >> > +       mutex_unlock(&drm->struct_mutex);
> >> > +       return sgt;
> >> > +}
> >> > +
> >> > +static void drm_gem_cma_dmabuf_unmap(struct dma_buf_attachment *attach,
> >> > +                                    struct sg_table *sgt,
> >> > +                                    enum dma_data_direction dir)
> >> > +{
> >> > +       /* Nothing to do. */
> >>
> >> I kinda think that if you don't support multiple mappings with
> >> different direction, that you should at least support unmap and then
> >> let someone else map with other direction
> >
> > That would make sense, but in that case I wouldn't be able to cache the
> > mapping. It would probably be better to add proper support for multiple
> > mappings with different directions.
> 
> well, you could still cache it, you'd just have to invalidate that
> cache on transition in direction
> 
> > Given that the mapping is cached in the attachment, this will only be an issue
> > if a driver tries to map the same attachment twice with different directions.
> > Isn't that an uncommon use case that could be fixed later ? :-) I'd like to
> > get this set in v3.11 if possible.
> 
> I don't feel strongly that this should block merging, vs fixing at a
> (not too much later) date

I'm not sure whether we should even allow this at all. If an importer
wants to switch dma directions he should probably map it bidirectional and
we should finally bite the bullet and add a attachment_sync callback to
flush caches without dropping the mapping on the floor ...
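
(no such callback exists yet; purely as a sketch of the idea, something like

    /* hypothetical dma_buf_ops addition, name and signature for illustration only */
    int (*attachment_sync)(struct dma_buf_attachment *attach,
                           enum dma_data_direction dir);

so an importer keeps a single DMA_BIDIRECTIONAL mapping and only asks for cache
maintenance when its access direction changes)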

Tbh I'm not even sure whether multiple mappings make any sense at all for
one attachment. Imo that sounds like the importer seriously lost track of
things ;-)
-Daniel
Rob Clark June 5, 2013, 11 a.m. UTC | #5
On Wed, Jun 5, 2013 at 4:44 AM, Daniel Vetter <daniel@ffwll.ch> wrote:
> On Tue, Jun 04, 2013 at 11:05:36PM -0400, Rob Clark wrote:
>> On Tue, Jun 4, 2013 at 9:22 PM, Laurent Pinchart
>> <laurent.pinchart@ideasonboard.com> wrote:
>> > Hi Rob,
>> >
>> > On Tuesday 04 June 2013 17:56:36 Rob Clark wrote:
>> >> couple small comments, other than those it looks ok
>> >
>> > Thanks for the review.
>> >
>> >> On Mon, Jun 3, 2013 at 10:20 PM, Laurent Pinchart wrote:
>> >> > Signed-off-by: Laurent Pinchart
>> >> > <laurent.pinchart+renesas@ideasonboard.com>
>> >> > ---
>> >> >
>> >> >  drivers/gpu/drm/drm_gem_cma_helper.c | 321 +++++++++++++++++++++++++++++-
>> >> >  include/drm/drm_gem_cma_helper.h     |   9 +
>> >> >  2 files changed, 327 insertions(+), 3 deletions(-)
>> >> >
>> >> > diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c
>> >> > b/drivers/gpu/drm/drm_gem_cma_helper.c index 7a4db4e..1dc2157 100644
>> >> > --- a/drivers/gpu/drm/drm_gem_cma_helper.c
>> >> > +++ b/drivers/gpu/drm/drm_gem_cma_helper.c
>> >> > @@ -21,6 +21,9 @@
>> >> >  #include <linux/slab.h>
>> >> >  #include <linux/mutex.h>
>> >> >  #include <linux/export.h>
>> >> > +#if CONFIG_DMA_SHARED_BUFFER
>> >> > +#include <linux/dma-buf.h>
>> >> > +#endif
>> >>
>> >> I don't think we need the #if, since drm selects DMA_SHARED_BUFFER
>> >>
>> >> and same for other spot below
>> >
>> > Indeed. Will be fixed in the next version.
>> >
>> >> >  #include <linux/dma-mapping.h>
>> >> >
>> >> >  #include <drm/drmP.h>
>> >
>> > [snip]
>> >
>> >> > @@ -291,3 +321,288 @@ void drm_gem_cma_describe(struct drm_gem_cma_object
>> >> > *cma_obj, struct seq_file *m>
>> >> >  }
>> >> >  EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
>> >> >  #endif
>> >> >
>> >> > +
>> >> > +/*
>> >> > -------------------------------------------------------------------------
>> >> > ---- + * DMA-BUF
>> >> > + */
>> >> > +
>> >> > +#if CONFIG_DMA_SHARED_BUFFER
>> >> > +struct drm_gem_cma_dmabuf_attachment {
>> >> > +       struct sg_table sgt;
>> >> > +       enum dma_data_direction dir;
>> >> > +};
>> >> > +
>> >> > +static int drm_gem_cma_dmabuf_attach(struct dma_buf *dmabuf, struct
>> >> > device *dev, +                                    struct
>> >> > dma_buf_attachment *attach) +{
>> >> > +       struct drm_gem_cma_dmabuf_attachment *cma_attach;
>> >> > +
>> >> > +       cma_attach = kzalloc(sizeof(*cma_attach), GFP_KERNEL);
>> >> > +       if (!cma_attach)
>> >> > +               return -ENOMEM;
>> >> > +
>> >> > +       cma_attach->dir = DMA_NONE;
>> >> > +       attach->priv = cma_attach;
>> >> > +
>> >> > +       return 0;
>> >> > +}
>> >> > +
>> >> > +static void drm_gem_cma_dmabuf_detach(struct dma_buf *dmabuf,
>> >> > +                                     struct dma_buf_attachment *attach)
>> >> > +{
>> >> > +       struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
>> >> > +       struct sg_table *sgt;
>> >> > +
>> >> > +       if (cma_attach == NULL)
>> >> > +               return;
>> >> > +
>> >> > +       sgt = &cma_attach->sgt;
>> >> > +
>> >> > +       if (cma_attach->dir != DMA_NONE)
>> >> > +               dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
>> >> > +                               cma_attach->dir);
>> >> > +
>> >> > +       sg_free_table(sgt);
>> >> > +       kfree(cma_attach);
>> >> > +       attach->priv = NULL;
>> >> > +}
>> >> > +
>> >> > +static struct sg_table *
>> >> > +drm_gem_cma_dmabuf_map(struct dma_buf_attachment *attach,
>> >> > +                      enum dma_data_direction dir)
>> >> > +{
>> >> > +       struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
>> >> > +       struct drm_gem_cma_object *cma_obj = attach->dmabuf->priv;
>> >> > +       struct drm_device *drm = cma_obj->base.dev;
>> >> > +       struct scatterlist *rd, *wr;
>> >> > +       struct sg_table *sgt;
>> >> > +       unsigned int i;
>> >> > +       int nents, ret;
>> >> > +
>> >> > +       DRM_DEBUG_PRIME("\n");
>> >> > +
>> >> > +       if (WARN_ON(dir == DMA_NONE))
>> >> > +               return ERR_PTR(-EINVAL);
>> >> > +
>> >> > +       /* Return the cached mapping when possible. */
>> >> > +       if (cma_attach->dir == dir)
>> >> > +               return &cma_attach->sgt;
>> >> > +
>> >> > +       /* Two mappings with different directions for the same attachment
>> >> > are +        * not allowed.
>> >> > +        */
>> >> > +       if (WARN_ON(cma_attach->dir != DMA_NONE))
>> >> > +               return ERR_PTR(-EBUSY);
>> >> > +
>> >> > +       sgt = &cma_attach->sgt;
>> >> > +
>> >> > +       ret = sg_alloc_table(sgt, cma_obj->sgt->orig_nents, GFP_KERNEL);
>> >> > +       if (ret) {
>> >> > +               DRM_ERROR("failed to alloc sgt.\n");
>> >> > +               return ERR_PTR(-ENOMEM);
>> >> > +       }
>> >> > +
>> >> > +       mutex_lock(&drm->struct_mutex);
>> >> > +
>> >> > +       rd = cma_obj->sgt->sgl;
>> >> > +       wr = sgt->sgl;
>> >> > +       for (i = 0; i < sgt->orig_nents; ++i) {
>> >> > +               sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
>> >> > +               rd = sg_next(rd);
>> >> > +               wr = sg_next(wr);
>> >> > +       }
>> >> > +
>> >> > +       nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
>> >> > +       if (!nents) {
>> >> > +               DRM_ERROR("failed to map sgl with iommu.\n");
>> >> > +               sg_free_table(sgt);
>> >> > +               sgt = ERR_PTR(-EIO);
>> >> > +               goto done;
>> >> > +       }
>> >> > +
>> >> > +       cma_attach->dir = dir;
>> >> > +       attach->priv = cma_attach;
>> >> > +
>> >> > +       DRM_DEBUG_PRIME("buffer size = %zu\n", cma_obj->base.size);
>> >> > +
>> >> > +done:
>> >> > +       mutex_unlock(&drm->struct_mutex);
>> >> > +       return sgt;
>> >> > +}
>> >> > +
>> >> > +static void drm_gem_cma_dmabuf_unmap(struct dma_buf_attachment *attach,
>> >> > +                                    struct sg_table *sgt,
>> >> > +                                    enum dma_data_direction dir)
>> >> > +{
>> >> > +       /* Nothing to do. */
>> >>
>> >> I kinda think that if you don't support multiple mappings with
>> >> different direction, that you should at least support unmap and then
>> >> let someone else map with other direction
>> >
>> > That would make sense, but in that case I wouldn't be able to cache the
>> > mapping. It would probably be better to add proper support for multiple
>> > mappings with different directions.
>>
>> well, you could still cache it, you'd just have to invalidate that
>> cache on transition in direction
>>
>> > Given that the mapping is cached in the attachment, this will only be an issue
>> > if a driver tries to map the same attachment twice with different directions.
>> > Isn't that an uncommon use case that could be fixed later ? :-) I'd like to
>> > get this set in v3.11 if possible.
>>
>> I don't feel strongly that this should block merging, vs fixing at a
>> (not too much later) date
>
> I'm not sure whether we should even allow this at all. If an importer
> wants to switch dma directions he should probably map it bidirectional and
> we should finally bite the bullet and add a attachment_sync callback to
> flush caches without dropping the mapping on the floor ...
>
> Tbh I'm not even sure whether multiple mappings make any sense at all for
> one attachment. Imo that sounds like the importer seriously lost track of
> things ;-)

oh, good point.. I was thinking more the case of multiple importers,
but you are right, there would be multiple attachments in that case so
this wouldn't be the problem
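
each importer does its own dma_buf_attach(), so each gets a private
drm_gem_cma_dmabuf_attachment with its own cached sg_table and direction, e.g.
(buf/dev_a/dev_b purely illustrative):

    struct dma_buf_attachment *attach_a, *attach_b;
    struct sg_table *sgt_a, *sgt_b;

    attach_a = dma_buf_attach(buf, dev_a);      /* importer A's attachment */
    attach_b = dma_buf_attach(buf, dev_b);      /* importer B's attachment */
    sgt_a = dma_buf_map_attachment(attach_a, DMA_TO_DEVICE);
    sgt_b = dma_buf_map_attachment(attach_b, DMA_FROM_DEVICE);

the -EBUSY path only triggers if a single importer maps the *same* attachment
with two different directions.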

BR,
-R

> -Daniel
> --
> Daniel Vetter
> Software Engineer, Intel Corporation
> +41 (0) 79 365 57 48 - http://blog.ffwll.ch
Laurent Pinchart June 7, 2013, 7:25 p.m. UTC | #6
On Wednesday 05 June 2013 07:00:45 Rob Clark wrote:
> On Wed, Jun 5, 2013 at 4:44 AM, Daniel Vetter <daniel@ffwll.ch> wrote:
> > On Tue, Jun 04, 2013 at 11:05:36PM -0400, Rob Clark wrote:
> >> On Tue, Jun 4, 2013 at 9:22 PM, Laurent Pinchart wrote:
> >> > On Tuesday 04 June 2013 17:56:36 Rob Clark wrote:
> >> >> couple small comments, other than those it looks ok
> >> > 
> >> > Thanks for the review.
> >> > 
> >> >> On Mon, Jun 3, 2013 at 10:20 PM, Laurent Pinchart wrote:
> >> >> > Signed-off-by: Laurent Pinchart
> >> >> > <laurent.pinchart+renesas@ideasonboard.com>
> >> >> > ---
> >> >> > 
> >> >> >  drivers/gpu/drm/drm_gem_cma_helper.c | 321
> >> >> >  +++++++++++++++++++++++++++++-
> >> >> >  include/drm/drm_gem_cma_helper.h     |   9 +
> >> >> >  2 files changed, 327 insertions(+), 3 deletions(-)
> >> >> > 
> >> >> > diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c
> >> >> > b/drivers/gpu/drm/drm_gem_cma_helper.c index 7a4db4e..1dc2157 100644
> >> >> > --- a/drivers/gpu/drm/drm_gem_cma_helper.c
> >> >> > +++ b/drivers/gpu/drm/drm_gem_cma_helper.c
> >> >> > @@ -21,6 +21,9 @@
> >> >> > 
> >> >> >  #include <linux/slab.h>
> >> >> >  #include <linux/mutex.h>
> >> >> >  #include <linux/export.h>
> >> >> > 
> >> >> > +#if CONFIG_DMA_SHARED_BUFFER
> >> >> > +#include <linux/dma-buf.h>
> >> >> > +#endif
> >> >> 
> >> >> I don't think we need the #if, since drm selects DMA_SHARED_BUFFER
> >> >> 
> >> >> and same for other spot below
> >> > 
> >> > Indeed. Will be fixed in the next version.
> >> > 
> >> >> >  #include <linux/dma-mapping.h>
> >> >> >  
> >> >> >  #include <drm/drmP.h>
> >> > 
> >> > [snip]
> >> > 
> >> >> > @@ -291,3 +321,288 @@ void drm_gem_cma_describe(struct
> >> >> > drm_gem_cma_object
> >> >> > *cma_obj, struct seq_file *m>
> >> >> > 
> >> >> >  }
> >> >> >  EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
> >> >> >  #endif
> >> >> > 
> >> >> > +
> >> >> > +/*
> >> >> > --------------------------------------------------------------------
> >> >> > -----
> >> >> > ---- + * DMA-BUF
> >> >> > + */
> >> >> > +
> >> >> > +#if CONFIG_DMA_SHARED_BUFFER
> >> >> > +struct drm_gem_cma_dmabuf_attachment {
> >> >> > +       struct sg_table sgt;
> >> >> > +       enum dma_data_direction dir;
> >> >> > +};
> >> >> > +
> >> >> > +static int drm_gem_cma_dmabuf_attach(struct dma_buf *dmabuf, struct
> >> >> > device *dev, +                                    struct
> >> >> > dma_buf_attachment *attach) +{
> >> >> > +       struct drm_gem_cma_dmabuf_attachment *cma_attach;
> >> >> > +
> >> >> > +       cma_attach = kzalloc(sizeof(*cma_attach), GFP_KERNEL);
> >> >> > +       if (!cma_attach)
> >> >> > +               return -ENOMEM;
> >> >> > +
> >> >> > +       cma_attach->dir = DMA_NONE;
> >> >> > +       attach->priv = cma_attach;
> >> >> > +
> >> >> > +       return 0;
> >> >> > +}
> >> >> > +
> >> >> > +static void drm_gem_cma_dmabuf_detach(struct dma_buf *dmabuf,
> >> >> > +                                     struct dma_buf_attachment
> >> >> > *attach)
> >> >> > +{
> >> >> > +       struct drm_gem_cma_dmabuf_attachment *cma_attach =
> >> >> > attach->priv;
> >> >> > +       struct sg_table *sgt;
> >> >> > +
> >> >> > +       if (cma_attach == NULL)
> >> >> > +               return;
> >> >> > +
> >> >> > +       sgt = &cma_attach->sgt;
> >> >> > +
> >> >> > +       if (cma_attach->dir != DMA_NONE)
> >> >> > +               dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
> >> >> > +                               cma_attach->dir);
> >> >> > +
> >> >> > +       sg_free_table(sgt);
> >> >> > +       kfree(cma_attach);
> >> >> > +       attach->priv = NULL;
> >> >> > +}
> >> >> > +
> >> >> > +static struct sg_table *
> >> >> > +drm_gem_cma_dmabuf_map(struct dma_buf_attachment *attach,
> >> >> > +                      enum dma_data_direction dir)
> >> >> > +{
> >> >> > +       struct drm_gem_cma_dmabuf_attachment *cma_attach =
> >> >> > attach->priv;
> >> >> > +       struct drm_gem_cma_object *cma_obj = attach->dmabuf->priv;
> >> >> > +       struct drm_device *drm = cma_obj->base.dev;
> >> >> > +       struct scatterlist *rd, *wr;
> >> >> > +       struct sg_table *sgt;
> >> >> > +       unsigned int i;
> >> >> > +       int nents, ret;
> >> >> > +
> >> >> > +       DRM_DEBUG_PRIME("\n");
> >> >> > +
> >> >> > +       if (WARN_ON(dir == DMA_NONE))
> >> >> > +               return ERR_PTR(-EINVAL);
> >> >> > +
> >> >> > +       /* Return the cached mapping when possible. */
> >> >> > +       if (cma_attach->dir == dir)
> >> >> > +               return &cma_attach->sgt;
> >> >> > +
> >> >> > +       /* Two mappings with different directions for the same
> >> >> > attachment
> >> >> > are +        * not allowed.
> >> >> > +        */
> >> >> > +       if (WARN_ON(cma_attach->dir != DMA_NONE))
> >> >> > +               return ERR_PTR(-EBUSY);
> >> >> > +
> >> >> > +       sgt = &cma_attach->sgt;
> >> >> > +
> >> >> > +       ret = sg_alloc_table(sgt, cma_obj->sgt->orig_nents,
> >> >> > GFP_KERNEL);
> >> >> > +       if (ret) {
> >> >> > +               DRM_ERROR("failed to alloc sgt.\n");
> >> >> > +               return ERR_PTR(-ENOMEM);
> >> >> > +       }
> >> >> > +
> >> >> > +       mutex_lock(&drm->struct_mutex);
> >> >> > +
> >> >> > +       rd = cma_obj->sgt->sgl;
> >> >> > +       wr = sgt->sgl;
> >> >> > +       for (i = 0; i < sgt->orig_nents; ++i) {
> >> >> > +               sg_set_page(wr, sg_page(rd), rd->length,
> >> >> > rd->offset);
> >> >> > +               rd = sg_next(rd);
> >> >> > +               wr = sg_next(wr);
> >> >> > +       }
> >> >> > +
> >> >> > +       nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents,
> >> >> > dir);
> >> >> > +       if (!nents) {
> >> >> > +               DRM_ERROR("failed to map sgl with iommu.\n");
> >> >> > +               sg_free_table(sgt);
> >> >> > +               sgt = ERR_PTR(-EIO);
> >> >> > +               goto done;
> >> >> > +       }
> >> >> > +
> >> >> > +       cma_attach->dir = dir;
> >> >> > +       attach->priv = cma_attach;
> >> >> > +
> >> >> > +       DRM_DEBUG_PRIME("buffer size = %zu\n", cma_obj->base.size);
> >> >> > +
> >> >> > +done:
> >> >> > +       mutex_unlock(&drm->struct_mutex);
> >> >> > +       return sgt;
> >> >> > +}
> >> >> > +
> >> >> > +static void drm_gem_cma_dmabuf_unmap(struct dma_buf_attachment
> >> >> > *attach,
> >> >> > +                                    struct sg_table *sgt,
> >> >> > +                                    enum dma_data_direction dir)
> >> >> > +{
> >> >> > +       /* Nothing to do. */
> >> >> 
> >> >> I kinda think that if you don't support multiple mappings with
> >> >> different direction, that you should at least support unmap and then
> >> >> let someone else map with other direction
> >> > 
> >> > That would make sense, but in that case I wouldn't be able to cache the
> >> > mapping. It would probably be better to add proper support for multiple
> >> > mappings with different directions.
> >> 
> >> well, you could still cache it, you'd just have to invalidate that
> >> cache on transition in direction
> >> 
> >> > Given that the mapping is cached in the attachment, this will only be
> >> > an issue if a driver tries to map the same attachment twice with
> >> > different directions. Isn't that an uncommon use case that could be
> >> > fixed later ? :-) I'd like to get this set in v3.11 if possible.
> >> 
> >> I don't feel strongly that this should block merging, vs fixing at a
> >> (not too much later) date
> > 
> > I'm not sure whether we should even allow this at all. If an importer
> > wants to switch dma directions he should probably map it bidirectional and
> > we should finally bite the bullet and add a attachment_sync callback to
> > flush caches without dropping the mapping on the floor ...
> > 
> > Tbh I'm not even sure whether multiple mappings make any sense at all for
> > one attachment. Imo that sounds like the importer seriously lost track of
> > things ;-)
> 
> oh, good point.. I was thinking more the case of multiple importers,
> but you are right, there would be multiple attachments in that case so
> this wouldn't be the problem

Problem solved, great :-) I've posted v3.

Patch

diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 7a4db4e..1dc2157 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -21,6 +21,9 @@ 
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/export.h>
+#if CONFIG_DMA_SHARED_BUFFER
+#include <linux/dma-buf.h>
+#endif
 #include <linux/dma-mapping.h>
 
 #include <drm/drmP.h>
@@ -82,6 +85,8 @@  struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
 		unsigned int size)
 {
 	struct drm_gem_cma_object *cma_obj;
+	struct sg_table *sgt = NULL;
+	int ret;
 
 	size = round_up(size, PAGE_SIZE);
 
@@ -94,11 +99,29 @@  struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
 	if (!cma_obj->vaddr) {
 		dev_err(drm->dev, "failed to allocate buffer with size %d\n",
 			size);
-		drm_gem_cma_free_object(&cma_obj->base);
-		return ERR_PTR(-ENOMEM);
+		ret = -ENOMEM;
+		goto error;
 	}
 
+	sgt = kzalloc(sizeof(*cma_obj->sgt), GFP_KERNEL);
+	if (sgt == NULL) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	ret = dma_get_sgtable(drm->dev, sgt, cma_obj->vaddr,
+			      cma_obj->paddr, size);
+	if (ret < 0)
+		goto error;
+
+	cma_obj->sgt = sgt;
+
 	return cma_obj;
+
+error:
+	kfree(sgt);
+	drm_gem_cma_free_object(&cma_obj->base);
+	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_create);
 
@@ -156,9 +179,16 @@  void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
 
 	cma_obj = to_drm_gem_cma_obj(gem_obj);
 
-	if (cma_obj->vaddr)
+	if (cma_obj->vaddr) {
 		dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size,
 				      cma_obj->vaddr, cma_obj->paddr);
+		if (cma_obj->sgt) {
+			sg_free_table(cma_obj->sgt);
+			kfree(cma_obj->sgt);
+		}
+	} else if (gem_obj->import_attach) {
+		drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
+	}
 
 	drm_gem_object_release(gem_obj);
 
@@ -291,3 +321,288 @@  void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
 #endif
+
+/* -----------------------------------------------------------------------------
+ * DMA-BUF
+ */
+
+#if CONFIG_DMA_SHARED_BUFFER
+struct drm_gem_cma_dmabuf_attachment {
+	struct sg_table sgt;
+	enum dma_data_direction dir;
+};
+
+static int drm_gem_cma_dmabuf_attach(struct dma_buf *dmabuf, struct device *dev,
+				     struct dma_buf_attachment *attach)
+{
+	struct drm_gem_cma_dmabuf_attachment *cma_attach;
+
+	cma_attach = kzalloc(sizeof(*cma_attach), GFP_KERNEL);
+	if (!cma_attach)
+		return -ENOMEM;
+
+	cma_attach->dir = DMA_NONE;
+	attach->priv = cma_attach;
+
+	return 0;
+}
+
+static void drm_gem_cma_dmabuf_detach(struct dma_buf *dmabuf,
+				      struct dma_buf_attachment *attach)
+{
+	struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
+	struct sg_table *sgt;
+
+	if (cma_attach == NULL)
+		return;
+
+	sgt = &cma_attach->sgt;
+
+	if (cma_attach->dir != DMA_NONE)
+		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+				cma_attach->dir);
+
+	sg_free_table(sgt);
+	kfree(cma_attach);
+	attach->priv = NULL;
+}
+
+static struct sg_table *
+drm_gem_cma_dmabuf_map(struct dma_buf_attachment *attach,
+		       enum dma_data_direction dir)
+{
+	struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
+	struct drm_gem_cma_object *cma_obj = attach->dmabuf->priv;
+	struct drm_device *drm = cma_obj->base.dev;
+	struct scatterlist *rd, *wr;
+	struct sg_table *sgt;
+	unsigned int i;
+	int nents, ret;
+
+	DRM_DEBUG_PRIME("\n");
+
+	if (WARN_ON(dir == DMA_NONE))
+		return ERR_PTR(-EINVAL);
+
+	/* Return the cached mapping when possible. */
+	if (cma_attach->dir == dir)
+		return &cma_attach->sgt;
+
+	/* Two mappings with different directions for the same attachment are
+	 * not allowed.
+	 */
+	if (WARN_ON(cma_attach->dir != DMA_NONE))
+		return ERR_PTR(-EBUSY);
+
+	sgt = &cma_attach->sgt;
+
+	ret = sg_alloc_table(sgt, cma_obj->sgt->orig_nents, GFP_KERNEL);
+	if (ret) {
+		DRM_ERROR("failed to alloc sgt.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	mutex_lock(&drm->struct_mutex);
+
+	rd = cma_obj->sgt->sgl;
+	wr = sgt->sgl;
+	for (i = 0; i < sgt->orig_nents; ++i) {
+		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+		rd = sg_next(rd);
+		wr = sg_next(wr);
+	}
+
+	nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+	if (!nents) {
+		DRM_ERROR("failed to map sgl with iommu.\n");
+		sg_free_table(sgt);
+		sgt = ERR_PTR(-EIO);
+		goto done;
+	}
+
+	cma_attach->dir = dir;
+	attach->priv = cma_attach;
+
+	DRM_DEBUG_PRIME("buffer size = %zu\n", cma_obj->base.size);
+
+done:
+	mutex_unlock(&drm->struct_mutex);
+	return sgt;
+}
+
+static void drm_gem_cma_dmabuf_unmap(struct dma_buf_attachment *attach,
+				     struct sg_table *sgt,
+				     enum dma_data_direction dir)
+{
+	/* Nothing to do. */
+}
+
+static void drm_gem_cma_dmabuf_release(struct dma_buf *dmabuf)
+{
+	struct drm_gem_cma_object *cma_obj = dmabuf->priv;
+
+	DRM_DEBUG_PRIME("%s\n", __FILE__);
+
+	/*
+	 * This function is called when the dma-buf file's f_count drops to
+	 * zero. Drop the GEM object reference that was taken when the buffer
+	 * was exported through drm_prime_handle_to_fd(), so that the buffer
+	 * and its resources can be released.
+	 */
+	if (cma_obj->base.export_dma_buf == dmabuf) {
+		cma_obj->base.export_dma_buf = NULL;
+
+		/*
+		 * drop this gem object refcount to release allocated buffer
+		 * and resources.
+		 */
+		drm_gem_object_unreference_unlocked(&cma_obj->base);
+	}
+}
+
+static void *drm_gem_cma_dmabuf_kmap_atomic(struct dma_buf *dmabuf,
+					    unsigned long page_num)
+{
+	/* TODO */
+
+	return NULL;
+}
+
+static void drm_gem_cma_dmabuf_kunmap_atomic(struct dma_buf *dmabuf,
+					     unsigned long page_num, void *addr)
+{
+	/* TODO */
+}
+
+static void *drm_gem_cma_dmabuf_kmap(struct dma_buf *dmabuf,
+				     unsigned long page_num)
+{
+	/* TODO */
+
+	return NULL;
+}
+
+static void drm_gem_cma_dmabuf_kunmap(struct dma_buf *dmabuf,
+				      unsigned long page_num, void *addr)
+{
+	/* TODO */
+}
+
+static int drm_gem_cma_dmabuf_mmap(struct dma_buf *dmabuf,
+				   struct vm_area_struct *vma)
+{
+	struct drm_gem_cma_object *cma_obj = dmabuf->priv;
+	struct drm_gem_object *gem_obj = &cma_obj->base;
+	int ret;
+
+	ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
+	if (ret < 0)
+		return ret;
+
+	return drm_gem_cma_mmap_obj(cma_obj, vma);
+}
+
+static void *drm_gem_cma_dmabuf_vmap(struct dma_buf *dmabuf)
+{
+	struct drm_gem_cma_object *cma_obj = dmabuf->priv;
+
+	return cma_obj->vaddr;
+}
+
+static struct dma_buf_ops drm_gem_cma_dmabuf_ops = {
+	.attach			= drm_gem_cma_dmabuf_attach,
+	.detach			= drm_gem_cma_dmabuf_detach,
+	.map_dma_buf		= drm_gem_cma_dmabuf_map,
+	.unmap_dma_buf		= drm_gem_cma_dmabuf_unmap,
+	.kmap			= drm_gem_cma_dmabuf_kmap,
+	.kmap_atomic		= drm_gem_cma_dmabuf_kmap_atomic,
+	.kunmap			= drm_gem_cma_dmabuf_kunmap,
+	.kunmap_atomic		= drm_gem_cma_dmabuf_kunmap_atomic,
+	.mmap			= drm_gem_cma_dmabuf_mmap,
+	.vmap			= drm_gem_cma_dmabuf_vmap,
+	.release		= drm_gem_cma_dmabuf_release,
+};
+
+struct dma_buf *drm_gem_cma_dmabuf_export(struct drm_device *drm,
+					  struct drm_gem_object *obj, int flags)
+{
+	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+
+	return dma_buf_export(cma_obj, &drm_gem_cma_dmabuf_ops,
+			      cma_obj->base.size, flags);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_export);
+
+struct drm_gem_object *drm_gem_cma_dmabuf_import(struct drm_device *drm,
+						 struct dma_buf *dma_buf)
+{
+	struct drm_gem_cma_object *cma_obj;
+	struct dma_buf_attachment *attach;
+	struct sg_table *sgt;
+	int ret;
+
+	DRM_DEBUG_PRIME("%s\n", __FILE__);
+
+	/* is this one of own objects? */
+	if (dma_buf->ops == &drm_gem_cma_dmabuf_ops) {
+		struct drm_gem_object *obj;
+
+		cma_obj = dma_buf->priv;
+		obj = &cma_obj->base;
+
+		/* is it from our device? */
+		if (obj->dev == drm) {
+			/*
+			 * Importing dmabuf exported from our own gem increases
+			 * refcount on gem itself instead of f_count of dmabuf.
+			 */
+			drm_gem_object_reference(obj);
+			dma_buf_put(dma_buf);
+			return obj;
+		}
+	}
+
+	/* Create a CMA GEM buffer. */
+	cma_obj = __drm_gem_cma_create(drm, dma_buf->size);
+	if (IS_ERR(cma_obj))
+		return ERR_PTR(PTR_ERR(cma_obj));
+
+	/* Attach to the buffer and map it. Make sure the mapping is contiguous
+	 * on the device memory bus, as that's all we support.
+	 */
+	attach = dma_buf_attach(dma_buf, drm->dev);
+	if (IS_ERR(attach)) {
+		ret = -EINVAL;
+		goto error_gem_free;
+	}
+
+	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR_OR_NULL(sgt)) {
+		ret = sgt ? PTR_ERR(sgt) : -ENOMEM;
+		goto error_buf_detach;
+	}
+
+	if (sgt->nents != 1) {
+		ret = -EINVAL;
+		goto error_buf_unmap;
+	}
+
+	cma_obj->base.import_attach = attach;
+	cma_obj->paddr = sg_dma_address(sgt->sgl);
+	cma_obj->sgt = sgt;
+
+	DRM_DEBUG_PRIME("dma_addr = 0x%x, size = %zu\n", cma_obj->paddr,
+			dma_buf->size);
+
+	return &cma_obj->base;
+
+error_buf_unmap:
+	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+error_buf_detach:
+	dma_buf_detach(dma_buf, attach);
+error_gem_free:
+	drm_gem_cma_free_object(&cma_obj->base);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_import);
+#endif
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 63397ce..6e17251 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -4,6 +4,9 @@ 
 struct drm_gem_cma_object {
 	struct drm_gem_object base;
 	dma_addr_t paddr;
+	struct sg_table *sgt;
+
+	/* For objects with DMA memory allocated by GEM CMA */
 	void *vaddr;
 };
 
@@ -45,4 +48,10 @@  extern const struct vm_operations_struct drm_gem_cma_vm_ops;
 void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m);
 #endif
 
+struct dma_buf *drm_gem_cma_dmabuf_export(struct drm_device *drm_dev,
+					  struct drm_gem_object *obj,
+					  int flags);
+struct drm_gem_object *drm_gem_cma_dmabuf_import(struct drm_device *drm_dev,
+						 struct dma_buf *dma_buf);
+
 #endif /* __DRM_GEM_CMA_HELPER_H__ */