diff mbox series

drm-buf: Add debug option

Message ID 20210115130219.3915789-1-daniel.vetter@ffwll.ch (mailing list archive)
State New, archived
Headers show
Series drm-buf: Add debug option | expand

Commit Message

Daniel Vetter Jan. 15, 2021, 1:02 p.m. UTC
We have too many people abusing the struct page they can get at but
really shouldn't in importers. Aside from that the backing page might
simply not exist (for dynamic p2p mappings) looking at it and using it
e.g. for mmap can also wreck the page handling of the exporter
completely. Importers really must go through the proper interface like
dma_buf_mmap for everything.

I'm semi-tempted to enforce this for dynamic importers since those
really have no excuse at all to break the rules.

Unfortunately we can't store the right pointers somewhere safe to make
sure we oops on something recognizable, so best is to just wrangle
them a bit by flipping all the bits. At least on x86 kernel addresses
have all their high bits sets and the struct page array is fairly low
in the kernel mapping, so flipping all the bits gives us a very high
pointer in userspace and hence excellent chances for an invalid
dereference.

v2: Add a note to the @map_dma_buf hook that exporters shouldn't do
fancy caching tricks, which would blow up with this address scrambling
trick here (Chris)

Enable by default when CONFIG_DMA_API_DEBUG is enabled.

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: "Christian König" <christian.koenig@amd.com>
Cc: David Stevens <stevensd@chromium.org>
Cc: linux-media@vger.kernel.org
Cc: linaro-mm-sig@lists.linaro.org
---
 drivers/dma-buf/Kconfig   |  8 +++++++
 drivers/dma-buf/dma-buf.c | 49 +++++++++++++++++++++++++++++++++++----
 include/linux/dma-buf.h   |  6 +++++
 3 files changed, 59 insertions(+), 4 deletions(-)

Comments

Christian König Jan. 15, 2021, 1:09 p.m. UTC | #1
Am 15.01.21 um 14:02 schrieb Daniel Vetter:
> We have too many people abusing the struct page they can get at but
> really shouldn't in importers. Aside from that the backing page might
> simply not exist (for dynamic p2p mappings) looking at it and using it
> e.g. for mmap can also wreak the page handling of the exporter
> completely. Importers really must go through the proper interface like
> dma_buf_mmap for everything.
>
> I'm semi-tempted to enforce this for dynamic importers since those
> really have no excuse at all to break the rules.
>
> Unfortunately we can't store the right pointers somewhere safe to make
> sure we oops on something recognizable, so best is to just wrangle
> them a bit by flipping all the bits. At least on x86 kernel addresses
> have all their high bits sets and the struct page array is fairly low
> in the kernel mapping, so flipping all the bits gives us a very high
> pointer in userspace and hence excellent chances for an invalid
> dereference.
>
> v2: Add a note to the @map_dma_buf hook that exporters shouldn't do
> fancy caching tricks, which would blow up with this address scrambling
> trick here (Chris)
>
> Enable by default when CONFIG_DMA_API_DEBUG is enabled.
>
> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
> Cc: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Sumit Semwal <sumit.semwal@linaro.org>
> Cc: "Christian König" <christian.koenig@amd.com>
> Cc: David Stevens <stevensd@chromium.org>
> Cc: linux-media@vger.kernel.org
> Cc: linaro-mm-sig@lists.linaro.org
> ---
>   drivers/dma-buf/Kconfig   |  8 +++++++
>   drivers/dma-buf/dma-buf.c | 49 +++++++++++++++++++++++++++++++++++----
>   include/linux/dma-buf.h   |  6 +++++
>   3 files changed, 59 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
> index 4f8224a6ac95..4e16c71c24b7 100644
> --- a/drivers/dma-buf/Kconfig
> +++ b/drivers/dma-buf/Kconfig
> @@ -50,6 +50,14 @@ config DMABUF_MOVE_NOTIFY
>   	  This is marked experimental because we don't yet have a consistent
>   	  execution context and memory management between drivers.
>   
> +config DMABUF_DEBUG
> +	bool "DMA-BUF debug checks"
> +	default y if DMA_API_DEBUG
> +	help
> +	  This option enables additional checks for DMA-BUF importers and
> +	  exporters. Specifically it validates that importers do not peek at the
> +	  underlying struct page when they import a buffer.
> +
>   config DMABUF_SELFTESTS
>   	tristate "Selftests for the dma-buf interfaces"
>   	default n
> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> index 1c9bd51db110..6e4725f7dfde 100644
> --- a/drivers/dma-buf/dma-buf.c
> +++ b/drivers/dma-buf/dma-buf.c
> @@ -666,6 +666,30 @@ void dma_buf_put(struct dma_buf *dmabuf)
>   }
>   EXPORT_SYMBOL_GPL(dma_buf_put);
>   
> +static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
> +				       enum dma_data_direction direction)
> +{
> +	struct sg_table *sg_table;
> +
> +	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
> +
> +#if CONFIG_DMABUF_DEBUG
> +	if (sg_table) {
> +		int i;
> +		struct scatterlist *sg;
> +
> +		/* To catch abuse of the underlying struct page by importers mix
> +		 * up the bits, but take care to preserve the low SG_ bits to
> +		 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
> +		 * before passing the sgt back to the exporter. */
> +		for_each_sgtable_sg(sg_table, sg, i)
> +			sg->page_link ^= ~0xffUL;
> +	}
> +#endif
> +
> +	return sg_table;
> +}
> +
>   /**
>    * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
>    * @dmabuf:		[in]	buffer to attach device to.
> @@ -737,7 +761,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
>   				goto err_unlock;
>   		}
>   
> -		sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
> +		sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
>   		if (!sgt)
>   			sgt = ERR_PTR(-ENOMEM);
>   		if (IS_ERR(sgt)) {
> @@ -784,6 +808,23 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
>   }
>   EXPORT_SYMBOL_GPL(dma_buf_attach);
>   
> +static void __unmap_dma_buf(struct dma_buf_attachment *attach,
> +			    struct sg_table *sg_table,
> +			    enum dma_data_direction direction)
> +{
> +
> +#if CONFIG_DMABUF_DEBUG
> +	if (sg_table) {
> +		int i;
> +		struct scatterlist *sg;
> +
> +		for_each_sgtable_sg(sg_table, sg, i)
> +			sg->page_link ^= ~0xffUL;
> +	}
> +#endif

Instead of duplicating this I would rather structure the code so that we 
have a helper to mangle the sgt when necessary.

This can then be called from both the map() as well as the unmap() path.

Apart from that looks good to me,
Christian.

> +	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
> +}
> +
>   /**
>    * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
>    * @dmabuf:	[in]	buffer to detach from.
> @@ -802,7 +843,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
>   		if (dma_buf_is_dynamic(attach->dmabuf))
>   			dma_resv_lock(attach->dmabuf->resv, NULL);
>   
> -		dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
> +		__unmap_dma_buf(attach, attach->sgt, attach->dir);
>   
>   		if (dma_buf_is_dynamic(attach->dmabuf)) {
>   			dma_buf_unpin(attach);
> @@ -924,7 +965,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
>   		}
>   	}
>   
> -	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
> +	sg_table = __map_dma_buf(attach, direction);
>   	if (!sg_table)
>   		sg_table = ERR_PTR(-ENOMEM);
>   
> @@ -987,7 +1028,7 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
>   	if (dma_buf_is_dynamic(attach->dmabuf))
>   		dma_resv_assert_held(attach->dmabuf->resv);
>   
> -	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
> +	__unmap_dma_buf(attach, sg_table, direction);
>   
>   	if (dma_buf_is_dynamic(attach->dmabuf) &&
>   	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
> diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
> index 628681bf6c99..efdc56b9d95f 100644
> --- a/include/linux/dma-buf.h
> +++ b/include/linux/dma-buf.h
> @@ -154,6 +154,12 @@ struct dma_buf_ops {
>   	 * On failure, returns a negative error value wrapped into a pointer.
>   	 * May also return -EINTR when a signal was received while being
>   	 * blocked.
> +	 *
> +	 * Note that exporters should not try to cache the scatter list, or
> +	 * return the same one for multiple calls. Caching is done either by the
> +	 * DMA-BUF code (for non-dynamic importers) or the importer. Ownership
> +	 * of the scatter list is transferred to the caller, and returned by
> +	 * @unmap_dma_buf.
>   	 */
>   	struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
>   					 enum dma_data_direction);
Daniel Vetter Jan. 15, 2021, 1:22 p.m. UTC | #2
On Fri, Jan 15, 2021 at 2:09 PM Christian König
<ckoenig.leichtzumerken@gmail.com> wrote:
>
> Am 15.01.21 um 14:02 schrieb Daniel Vetter:
> > We have too many people abusing the struct page they can get at but
> > really shouldn't in importers. Aside from that the backing page might
> > simply not exist (for dynamic p2p mappings) looking at it and using it
> > e.g. for mmap can also wreak the page handling of the exporter
> > completely. Importers really must go through the proper interface like
> > dma_buf_mmap for everything.
> >
> > I'm semi-tempted to enforce this for dynamic importers since those
> > really have no excuse at all to break the rules.
> >
> > Unfortunately we can't store the right pointers somewhere safe to make
> > sure we oops on something recognizable, so best is to just wrangle
> > them a bit by flipping all the bits. At least on x86 kernel addresses
> > have all their high bits sets and the struct page array is fairly low
> > in the kernel mapping, so flipping all the bits gives us a very high
> > pointer in userspace and hence excellent chances for an invalid
> > dereference.
> >
> > v2: Add a note to the @map_dma_buf hook that exporters shouldn't do
> > fancy caching tricks, which would blow up with this address scrambling
> > trick here (Chris)
> >
> > Enable by default when CONFIG_DMA_API_DEBUG is enabled.
> >
> > Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
> > Cc: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Sumit Semwal <sumit.semwal@linaro.org>
> > Cc: "Christian König" <christian.koenig@amd.com>
> > Cc: David Stevens <stevensd@chromium.org>
> > Cc: linux-media@vger.kernel.org
> > Cc: linaro-mm-sig@lists.linaro.org
> > ---
> >   drivers/dma-buf/Kconfig   |  8 +++++++
> >   drivers/dma-buf/dma-buf.c | 49 +++++++++++++++++++++++++++++++++++----
> >   include/linux/dma-buf.h   |  6 +++++
> >   3 files changed, 59 insertions(+), 4 deletions(-)
> >
> > diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
> > index 4f8224a6ac95..4e16c71c24b7 100644
> > --- a/drivers/dma-buf/Kconfig
> > +++ b/drivers/dma-buf/Kconfig
> > @@ -50,6 +50,14 @@ config DMABUF_MOVE_NOTIFY
> >         This is marked experimental because we don't yet have a consistent
> >         execution context and memory management between drivers.
> >
> > +config DMABUF_DEBUG
> > +     bool "DMA-BUF debug checks"
> > +     default y if DMA_API_DEBUG
> > +     help
> > +       This option enables additional checks for DMA-BUF importers and
> > +       exporters. Specifically it validates that importers do not peek at the
> > +       underlying struct page when they import a buffer.
> > +
> >   config DMABUF_SELFTESTS
> >       tristate "Selftests for the dma-buf interfaces"
> >       default n
> > diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> > index 1c9bd51db110..6e4725f7dfde 100644
> > --- a/drivers/dma-buf/dma-buf.c
> > +++ b/drivers/dma-buf/dma-buf.c
> > @@ -666,6 +666,30 @@ void dma_buf_put(struct dma_buf *dmabuf)
> >   }
> >   EXPORT_SYMBOL_GPL(dma_buf_put);
> >
> > +static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
> > +                                    enum dma_data_direction direction)
> > +{
> > +     struct sg_table *sg_table;
> > +
> > +     sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
> > +
> > +#if CONFIG_DMABUF_DEBUG
> > +     if (sg_table) {
> > +             int i;
> > +             struct scatterlist *sg;
> > +
> > +             /* To catch abuse of the underlying struct page by importers mix
> > +              * up the bits, but take care to preserve the low SG_ bits to
> > +              * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
> > +              * before passing the sgt back to the exporter. */
> > +             for_each_sgtable_sg(sg_table, sg, i)
> > +                     sg->page_link ^= ~0xffUL;
> > +     }
> > +#endif
> > +
> > +     return sg_table;
> > +}
> > +
> >   /**
> >    * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
> >    * @dmabuf:         [in]    buffer to attach device to.
> > @@ -737,7 +761,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
> >                               goto err_unlock;
> >               }
> >
> > -             sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
> > +             sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
> >               if (!sgt)
> >                       sgt = ERR_PTR(-ENOMEM);
> >               if (IS_ERR(sgt)) {
> > @@ -784,6 +808,23 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
> >   }
> >   EXPORT_SYMBOL_GPL(dma_buf_attach);
> >
> > +static void __unmap_dma_buf(struct dma_buf_attachment *attach,
> > +                         struct sg_table *sg_table,
> > +                         enum dma_data_direction direction)
> > +{
> > +
> > +#if CONFIG_DMABUF_DEBUG
> > +     if (sg_table) {
> > +             int i;
> > +             struct scatterlist *sg;
> > +
> > +             for_each_sgtable_sg(sg_table, sg, i)
> > +                     sg->page_link ^= ~0xffUL;
> > +     }
> > +#endif
>
> Instead of duplicating this I would rather structure the code so that we
> have a helper to mangle the sgt when necessary.
>
> This can then be called from both the map() as well as the unmap() path.

Well that's why I extracted the helper functions (it would be 4 copies
otherwise). It's true that it's still 2x the same operation, but
conceptually one of them mangles, the other unmangles the pointers.
It's just that with XOR mangling, that's both the same code.
Readability feels better that way to me, but I guess I can do another
tiny helper function extraction if you insist?
-Daniel

> Apart from that looks good to me,
> Christian.
>
> > +     attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
> > +}
> > +
> >   /**
> >    * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
> >    * @dmabuf: [in]    buffer to detach from.
> > @@ -802,7 +843,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
> >               if (dma_buf_is_dynamic(attach->dmabuf))
> >                       dma_resv_lock(attach->dmabuf->resv, NULL);
> >
> > -             dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
> > +             __unmap_dma_buf(attach, attach->sgt, attach->dir);
> >
> >               if (dma_buf_is_dynamic(attach->dmabuf)) {
> >                       dma_buf_unpin(attach);
> > @@ -924,7 +965,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
> >               }
> >       }
> >
> > -     sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
> > +     sg_table = __map_dma_buf(attach, direction);
> >       if (!sg_table)
> >               sg_table = ERR_PTR(-ENOMEM);
> >
> > @@ -987,7 +1028,7 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
> >       if (dma_buf_is_dynamic(attach->dmabuf))
> >               dma_resv_assert_held(attach->dmabuf->resv);
> >
> > -     attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
> > +     __unmap_dma_buf(attach, sg_table, direction);
> >
> >       if (dma_buf_is_dynamic(attach->dmabuf) &&
> >           !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
> > diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
> > index 628681bf6c99..efdc56b9d95f 100644
> > --- a/include/linux/dma-buf.h
> > +++ b/include/linux/dma-buf.h
> > @@ -154,6 +154,12 @@ struct dma_buf_ops {
> >        * On failure, returns a negative error value wrapped into a pointer.
> >        * May also return -EINTR when a signal was received while being
> >        * blocked.
> > +      *
> > +      * Note that exporters should not try to cache the scatter list, or
> > +      * return the same one for multiple calls. Caching is done either by the
> > +      * DMA-BUF code (for non-dynamic importers) or the importer. Ownership
> > +      * of the scatter list is transferred to the caller, and returned by
> > +      * @unmap_dma_buf.
> >        */
> >       struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
> >                                        enum dma_data_direction);
>
Christian König Jan. 15, 2021, 1:24 p.m. UTC | #3
Am 15.01.21 um 14:22 schrieb Daniel Vetter:
> On Fri, Jan 15, 2021 at 2:09 PM Christian König
> <ckoenig.leichtzumerken@gmail.com> wrote:
>> Am 15.01.21 um 14:02 schrieb Daniel Vetter:
>>> We have too many people abusing the struct page they can get at but
>>> really shouldn't in importers. Aside from that the backing page might
>>> simply not exist (for dynamic p2p mappings) looking at it and using it
>>> e.g. for mmap can also wreak the page handling of the exporter
>>> completely. Importers really must go through the proper interface like
>>> dma_buf_mmap for everything.
>>>
>>> I'm semi-tempted to enforce this for dynamic importers since those
>>> really have no excuse at all to break the rules.
>>>
>>> Unfortunately we can't store the right pointers somewhere safe to make
>>> sure we oops on something recognizable, so best is to just wrangle
>>> them a bit by flipping all the bits. At least on x86 kernel addresses
>>> have all their high bits sets and the struct page array is fairly low
>>> in the kernel mapping, so flipping all the bits gives us a very high
>>> pointer in userspace and hence excellent chances for an invalid
>>> dereference.
>>>
>>> v2: Add a note to the @map_dma_buf hook that exporters shouldn't do
>>> fancy caching tricks, which would blow up with this address scrambling
>>> trick here (Chris)
>>>
>>> Enable by default when CONFIG_DMA_API_DEBUG is enabled.
>>>
>>> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
>>> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
>>> Cc: Chris Wilson <chris@chris-wilson.co.uk>
>>> Cc: Sumit Semwal <sumit.semwal@linaro.org>
>>> Cc: "Christian König" <christian.koenig@amd.com>
>>> Cc: David Stevens <stevensd@chromium.org>
>>> Cc: linux-media@vger.kernel.org
>>> Cc: linaro-mm-sig@lists.linaro.org
>>> ---
>>>    drivers/dma-buf/Kconfig   |  8 +++++++
>>>    drivers/dma-buf/dma-buf.c | 49 +++++++++++++++++++++++++++++++++++----
>>>    include/linux/dma-buf.h   |  6 +++++
>>>    3 files changed, 59 insertions(+), 4 deletions(-)
>>>
>>> diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
>>> index 4f8224a6ac95..4e16c71c24b7 100644
>>> --- a/drivers/dma-buf/Kconfig
>>> +++ b/drivers/dma-buf/Kconfig
>>> @@ -50,6 +50,14 @@ config DMABUF_MOVE_NOTIFY
>>>          This is marked experimental because we don't yet have a consistent
>>>          execution context and memory management between drivers.
>>>
>>> +config DMABUF_DEBUG
>>> +     bool "DMA-BUF debug checks"
>>> +     default y if DMA_API_DEBUG
>>> +     help
>>> +       This option enables additional checks for DMA-BUF importers and
>>> +       exporters. Specifically it validates that importers do not peek at the
>>> +       underlying struct page when they import a buffer.
>>> +
>>>    config DMABUF_SELFTESTS
>>>        tristate "Selftests for the dma-buf interfaces"
>>>        default n
>>> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
>>> index 1c9bd51db110..6e4725f7dfde 100644
>>> --- a/drivers/dma-buf/dma-buf.c
>>> +++ b/drivers/dma-buf/dma-buf.c
>>> @@ -666,6 +666,30 @@ void dma_buf_put(struct dma_buf *dmabuf)
>>>    }
>>>    EXPORT_SYMBOL_GPL(dma_buf_put);
>>>
>>> +static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
>>> +                                    enum dma_data_direction direction)
>>> +{
>>> +     struct sg_table *sg_table;
>>> +
>>> +     sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
>>> +
>>> +#if CONFIG_DMABUF_DEBUG
>>> +     if (sg_table) {
>>> +             int i;
>>> +             struct scatterlist *sg;
>>> +
>>> +             /* To catch abuse of the underlying struct page by importers mix
>>> +              * up the bits, but take care to preserve the low SG_ bits to
>>> +              * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
>>> +              * before passing the sgt back to the exporter. */
>>> +             for_each_sgtable_sg(sg_table, sg, i)
>>> +                     sg->page_link ^= ~0xffUL;
>>> +     }
>>> +#endif
>>> +
>>> +     return sg_table;
>>> +}
>>> +
>>>    /**
>>>     * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
>>>     * @dmabuf:         [in]    buffer to attach device to.
>>> @@ -737,7 +761,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
>>>                                goto err_unlock;
>>>                }
>>>
>>> -             sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
>>> +             sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
>>>                if (!sgt)
>>>                        sgt = ERR_PTR(-ENOMEM);
>>>                if (IS_ERR(sgt)) {
>>> @@ -784,6 +808,23 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
>>>    }
>>>    EXPORT_SYMBOL_GPL(dma_buf_attach);
>>>
>>> +static void __unmap_dma_buf(struct dma_buf_attachment *attach,
>>> +                         struct sg_table *sg_table,
>>> +                         enum dma_data_direction direction)
>>> +{
>>> +
>>> +#if CONFIG_DMABUF_DEBUG
>>> +     if (sg_table) {
>>> +             int i;
>>> +             struct scatterlist *sg;
>>> +
>>> +             for_each_sgtable_sg(sg_table, sg, i)
>>> +                     sg->page_link ^= ~0xffUL;
>>> +     }
>>> +#endif
>> Instead of duplicating this I would rather structure the code so that we
>> have a helper to mangle the sgt when necessary.
>>
>> This can then be called from both the map() as well as the unmap() path.
> Well that's why extracted the helper functions (it would be 4 copies
> otherwise). It's true that it's still 2x the same operation, but
> conceptually one of them mangles, the other unmangles the pointers.
> It's just that with XOR mangling, that's both the same code.
> Readability feels better that way to me, but I guess I can do another
> tiny helper function extraction if you insist?

I think it would be better to have only one.

And I insist that the mangle value is only once somewhere, either use 
just one function or a define/constant.

Christian.

> -Daniel
>
>> Apart from that looks good to me,
>> Christian.
>>
>>> +     attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
>>> +}
>>> +
>>>    /**
>>>     * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
>>>     * @dmabuf: [in]    buffer to detach from.
>>> @@ -802,7 +843,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
>>>                if (dma_buf_is_dynamic(attach->dmabuf))
>>>                        dma_resv_lock(attach->dmabuf->resv, NULL);
>>>
>>> -             dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
>>> +             __unmap_dma_buf(attach, attach->sgt, attach->dir);
>>>
>>>                if (dma_buf_is_dynamic(attach->dmabuf)) {
>>>                        dma_buf_unpin(attach);
>>> @@ -924,7 +965,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
>>>                }
>>>        }
>>>
>>> -     sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
>>> +     sg_table = __map_dma_buf(attach, direction);
>>>        if (!sg_table)
>>>                sg_table = ERR_PTR(-ENOMEM);
>>>
>>> @@ -987,7 +1028,7 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
>>>        if (dma_buf_is_dynamic(attach->dmabuf))
>>>                dma_resv_assert_held(attach->dmabuf->resv);
>>>
>>> -     attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
>>> +     __unmap_dma_buf(attach, sg_table, direction);
>>>
>>>        if (dma_buf_is_dynamic(attach->dmabuf) &&
>>>            !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
>>> diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
>>> index 628681bf6c99..efdc56b9d95f 100644
>>> --- a/include/linux/dma-buf.h
>>> +++ b/include/linux/dma-buf.h
>>> @@ -154,6 +154,12 @@ struct dma_buf_ops {
>>>         * On failure, returns a negative error value wrapped into a pointer.
>>>         * May also return -EINTR when a signal was received while being
>>>         * blocked.
>>> +      *
>>> +      * Note that exporters should not try to cache the scatter list, or
>>> +      * return the same one for multiple calls. Caching is done either by the
>>> +      * DMA-BUF code (for non-dynamic importers) or the importer. Ownership
>>> +      * of the scatter list is transferred to the caller, and returned by
>>> +      * @unmap_dma_buf.
>>>         */
>>>        struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
>>>                                         enum dma_data_direction);
>
kernel test robot Jan. 15, 2021, 3:36 p.m. UTC | #4
Hi Daniel,

I love your patch! Perhaps something to improve:

[auto build test WARNING on next-20210115]
[also build test WARNING on v5.11-rc3]
[cannot apply to tegra-drm/drm/tegra/for-next linus/master v5.11-rc3 v5.11-rc2 v5.11-rc1]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Daniel-Vetter/drm-buf-Add-debug-option/20210115-210650
base:    b3a3cbdec55b090d22a09f75efb7c7d34cb97f25
config: i386-randconfig-a012-20210115 (attached as .config)
compiler: gcc-9 (Debian 9.3.0-15) 9.3.0
reproduce (this is a W=1 build):
        # https://github.com/0day-ci/linux/commit/a0f2603f574f0a1aedd7719cbb47b807796d2367
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Daniel-Vetter/drm-buf-Add-debug-option/20210115-210650
        git checkout a0f2603f574f0a1aedd7719cbb47b807796d2367
        # save the attached .config to linux build tree
        make W=1 ARCH=i386 

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

   drivers/dma-buf/dma-buf.c: In function '__map_dma_buf':
>> drivers/dma-buf/dma-buf.c:676:5: warning: "CONFIG_DMABUF_DEBUG" is not defined, evaluates to 0 [-Wundef]
     676 | #if CONFIG_DMABUF_DEBUG
         |     ^~~~~~~~~~~~~~~~~~~
   drivers/dma-buf/dma-buf.c: In function '__unmap_dma_buf':
   drivers/dma-buf/dma-buf.c:816:5: warning: "CONFIG_DMABUF_DEBUG" is not defined, evaluates to 0 [-Wundef]
     816 | #if CONFIG_DMABUF_DEBUG
         |     ^~~~~~~~~~~~~~~~~~~


vim +/CONFIG_DMABUF_DEBUG +676 drivers/dma-buf/dma-buf.c

   668	
   669	static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
   670					       enum dma_data_direction direction)
   671	{
   672		struct sg_table *sg_table;
   673	
   674		sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
   675	
 > 676	#if CONFIG_DMABUF_DEBUG
   677		if (sg_table) {
   678			int i;
   679			struct scatterlist *sg;
   680	
   681			/* To catch abuse of the underlying struct page by importers mix
   682			 * up the bits, but take care to preserve the low SG_ bits to
   683			 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
   684			 * before passing the sgt back to the exporter. */
   685			for_each_sgtable_sg(sg_table, sg, i)
   686				sg->page_link ^= ~0xffUL;
   687		}
   688	#endif
   689	
   690		return sg_table;
   691	}
   692	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
diff mbox series

Patch

diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 4f8224a6ac95..4e16c71c24b7 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -50,6 +50,14 @@  config DMABUF_MOVE_NOTIFY
 	  This is marked experimental because we don't yet have a consistent
 	  execution context and memory management between drivers.
 
+config DMABUF_DEBUG
+	bool "DMA-BUF debug checks"
+	default y if DMA_API_DEBUG
+	help
+	  This option enables additional checks for DMA-BUF importers and
+	  exporters. Specifically it validates that importers do not peek at the
+	  underlying struct page when they import a buffer.
+
 config DMABUF_SELFTESTS
 	tristate "Selftests for the dma-buf interfaces"
 	default n
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 1c9bd51db110..6e4725f7dfde 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -666,6 +666,30 @@  void dma_buf_put(struct dma_buf *dmabuf)
 }
 EXPORT_SYMBOL_GPL(dma_buf_put);
 
+static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
+				       enum dma_data_direction direction)
+{
+	struct sg_table *sg_table;
+
+	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+
+#ifdef CONFIG_DMABUF_DEBUG
+	if (sg_table) {
+		int i;
+		struct scatterlist *sg;
+
+		/* To catch abuse of the underlying struct page by importers mix
+		 * up the bits, but take care to preserve the low SG_ bits to
+		 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
+		 * before passing the sgt back to the exporter. */
+		for_each_sgtable_sg(sg_table, sg, i)
+			sg->page_link ^= ~0xffUL;
+	}
+#endif
+
+	return sg_table;
+}
+
 /**
  * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
  * @dmabuf:		[in]	buffer to attach device to.
@@ -737,7 +761,7 @@  dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
 				goto err_unlock;
 		}
 
-		sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
+		sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
 		if (!sgt)
 			sgt = ERR_PTR(-ENOMEM);
 		if (IS_ERR(sgt)) {
@@ -784,6 +808,23 @@  struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
 }
 EXPORT_SYMBOL_GPL(dma_buf_attach);
 
+static void __unmap_dma_buf(struct dma_buf_attachment *attach,
+			    struct sg_table *sg_table,
+			    enum dma_data_direction direction)
+{
+
+#ifdef CONFIG_DMABUF_DEBUG
+	if (sg_table) {
+		int i;
+		struct scatterlist *sg;
+
+		for_each_sgtable_sg(sg_table, sg, i)
+			sg->page_link ^= ~0xffUL;
+	}
+#endif
+	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+}
+
 /**
  * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
  * @dmabuf:	[in]	buffer to detach from.
@@ -802,7 +843,7 @@  void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 		if (dma_buf_is_dynamic(attach->dmabuf))
 			dma_resv_lock(attach->dmabuf->resv, NULL);
 
-		dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
+		__unmap_dma_buf(attach, attach->sgt, attach->dir);
 
 		if (dma_buf_is_dynamic(attach->dmabuf)) {
 			dma_buf_unpin(attach);
@@ -924,7 +965,7 @@  struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 		}
 	}
 
-	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+	sg_table = __map_dma_buf(attach, direction);
 	if (!sg_table)
 		sg_table = ERR_PTR(-ENOMEM);
 
@@ -987,7 +1028,7 @@  void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 	if (dma_buf_is_dynamic(attach->dmabuf))
 		dma_resv_assert_held(attach->dmabuf->resv);
 
-	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+	__unmap_dma_buf(attach, sg_table, direction);
 
 	if (dma_buf_is_dynamic(attach->dmabuf) &&
 	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 628681bf6c99..efdc56b9d95f 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -154,6 +154,12 @@  struct dma_buf_ops {
 	 * On failure, returns a negative error value wrapped into a pointer.
 	 * May also return -EINTR when a signal was received while being
 	 * blocked.
+	 *
+	 * Note that exporters should not try to cache the scatter list, or
+	 * return the same one for multiple calls. Caching is done either by the
+	 * DMA-BUF code (for non-dynamic importers) or the importer. Ownership
+	 * of the scatter list is transferred to the caller, and returned by
+	 * @unmap_dma_buf.
 	 */
 	struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
 					 enum dma_data_direction);