
[v4,4/7] dma-buf: heaps: restricted_heap: Add dma_ops

Message ID 20240112092014.23999-5-yong.wu@mediatek.com (mailing list archive)
State New, archived
Series dma-buf: heaps: Add restricted heap

Commit Message

Yong Wu (吴勇) Jan. 12, 2024, 9:20 a.m. UTC
Add the dma_ops for this restricted heap. For restricted buffers,
cache_ops/mmap are not allowed, thus return -EPERM for them.

Signed-off-by: Yong Wu <yong.wu@mediatek.com>
---
 drivers/dma-buf/heaps/restricted_heap.c | 103 ++++++++++++++++++++++++
 1 file changed, 103 insertions(+)

Comments

Daniel Vetter Jan. 12, 2024, 9:41 a.m. UTC | #1
On Fri, Jan 12, 2024 at 05:20:11PM +0800, Yong Wu wrote:
> Add the dma_ops for this restricted heap. For restricted buffers,
> cache_ops/mmap are not allowed, thus return -EPERM for them.
> 
> Signed-off-by: Yong Wu <yong.wu@mediatek.com>
> ---
>  drivers/dma-buf/heaps/restricted_heap.c | 103 ++++++++++++++++++++++++
>  1 file changed, 103 insertions(+)
> 
> diff --git a/drivers/dma-buf/heaps/restricted_heap.c b/drivers/dma-buf/heaps/restricted_heap.c
> index 8c266a0f6192..ec4c63d2112d 100644
> --- a/drivers/dma-buf/heaps/restricted_heap.c
> +++ b/drivers/dma-buf/heaps/restricted_heap.c
> @@ -12,6 +12,10 @@
>  
>  #include "restricted_heap.h"
>  
> +struct restricted_heap_attachment {
> +	struct sg_table			*table;
> +};
> +
>  static int
>  restricted_heap_memory_allocate(struct restricted_heap *heap, struct restricted_buffer *buf)
>  {
> @@ -45,6 +49,104 @@ restricted_heap_memory_free(struct restricted_heap *heap, struct restricted_buff
>  	ops->memory_free(heap, buf);
>  }
>  
> +static int restricted_heap_attach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment)
> +{
> +	struct restricted_buffer *restricted_buf = dmabuf->priv;
> +	struct restricted_heap_attachment *a;
> +	struct sg_table *table;
> +	int ret;
> +
> +	a = kzalloc(sizeof(*a), GFP_KERNEL);
> +	if (!a)
> +		return -ENOMEM;
> +
> +	table = kzalloc(sizeof(*table), GFP_KERNEL);
> +	if (!table) {
> +		ret = -ENOMEM;
> +		goto err_free_attach;
> +	}
> +
> +	ret = sg_alloc_table(table, 1, GFP_KERNEL);
> +	if (ret)
> +		goto err_free_sgt;
> +	sg_set_page(table->sgl, NULL, restricted_buf->size, 0);

So this is definitely broken and violating the dma-buf api rules. You
cannot let attach succeed and supply a dummy/invalid sg table.

Two options:

- Reject ->attach for all these buffers with -EBUSY and provide instead a
  private api for these secure buffers, similar to how virtio_dma_buf has
  private virtio-specific apis. This interface would need to be
  standardized across all arm TEE users, so that we don't have a
  disastrous proliferation of apis.

- Allow ->attach, but _only_ for drivers/devices which can access the
  secure buffer correctly, and only if you can put the right secure buffer
  address into the sg table directly. If dma to a secure buffer for a
  given struct device * will not work correctly (i.e. without data
  corruption), you _must_ reject the attach attempt with -EBUSY.

The 2nd approach would be my preferred one, if it's technically possible.
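
To make the second option concrete, the heap's ->attach could look roughly
like the sketch below. This is only an illustrative sketch against the code
in this patch: restricted_heap_dev_is_trusted() and the restricted_buf->pa
field are hypothetical placeholders for however a given heap decides that a
device may access the protected memory, and for the real secure buffer
address it gets from its TEE/firmware.

static int restricted_heap_attach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment)
{
	struct restricted_buffer *restricted_buf = dmabuf->priv;
	struct restricted_heap_attachment *a;
	struct sg_table *table;
	int ret;

	/*
	 * Option 2: only devices that can really do DMA to the protected
	 * region may attach; everything else is rejected up front.
	 * restricted_heap_dev_is_trusted() is a hypothetical helper.
	 */
	if (!restricted_heap_dev_is_trusted(attachment->dev))
		return -EBUSY;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto err_free_attach;
	}

	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret)
		goto err_free_sgt;

	/*
	 * Point the single entry at the real restricted buffer instead of a
	 * NULL page; restricted_buf->pa is a placeholder for the secure
	 * buffer's physical address.
	 */
	sg_set_page(table->sgl, pfn_to_page(PHYS_PFN(restricted_buf->pa)),
		    restricted_buf->size, 0);

	a->table = table;
	attachment->priv = a;
	return 0;

err_free_sgt:
	kfree(table);
err_free_attach:
	kfree(a);
	return ret;
}

With real pages in the table, map_dma_buf can then go through the normal DMA
API, and devices that cannot access the buffer never get handed a dummy
mapping in the first place.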

Also my understanding is that arm TEE is standardized, so I think we'll at
least want some acks from other soc people whether this will work for them
too.

Finally the usual drill:
- this also needs the driver side support, if there are any changes needed.
  Just the new heap isn't enough.
- and for drm you need open userspace for this. Doesn't have to be the
  full content protection decode pipeline, the drivers in drm that landed
  secure buffer support thus far enabled it using the
  EGL_EXT_protected_content extension using gl, which side steps all the
  complications around content decryption keys and support

Cheers, Sima

> +
> +	a->table = table;
> +	attachment->priv = a;
> +
> +	return 0;
> +
> +err_free_sgt:
> +	kfree(table);
> +err_free_attach:
> +	kfree(a);
> +	return ret;
> +}
> +
> +static void restricted_heap_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment)
> +{
> +	struct restricted_heap_attachment *a = attachment->priv;
> +
> +	sg_free_table(a->table);
> +	kfree(a->table);
> +	kfree(a);
> +}
> +
> +static struct sg_table *
> +restricted_heap_map_dma_buf(struct dma_buf_attachment *attachment, enum dma_data_direction direct)
> +{
> +	struct restricted_heap_attachment *a = attachment->priv;
> +	struct sg_table *table = a->table;
> +
> +	return table;
> +}
> +
> +static void
> +restricted_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *table,
> +			      enum dma_data_direction direction)
> +{
> +	struct restricted_heap_attachment *a = attachment->priv;
> +
> +	WARN_ON(a->table != table);
> +}
> +
> +static int
> +restricted_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction)
> +{
> +	return -EPERM;
> +}
> +
> +static int
> +restricted_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction)
> +{
> +	return -EPERM;
> +}
> +
> +static int restricted_heap_dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
> +{
> +	return -EPERM;
> +}
> +
> +static void restricted_heap_free(struct dma_buf *dmabuf)
> +{
> +	struct restricted_buffer *restricted_buf = dmabuf->priv;
> +	struct restricted_heap *heap = dma_heap_get_drvdata(restricted_buf->heap);
> +
> +	restricted_heap_memory_free(heap, restricted_buf);
> +	kfree(restricted_buf);
> +}
> +
> +static const struct dma_buf_ops restricted_heap_buf_ops = {
> +	.attach		= restricted_heap_attach,
> +	.detach		= restricted_heap_detach,
> +	.map_dma_buf	= restricted_heap_map_dma_buf,
> +	.unmap_dma_buf	= restricted_heap_unmap_dma_buf,
> +	.begin_cpu_access = restricted_heap_dma_buf_begin_cpu_access,
> +	.end_cpu_access	= restricted_heap_dma_buf_end_cpu_access,
> +	.mmap		= restricted_heap_dma_buf_mmap,
> +	.release	= restricted_heap_free,
> +};
> +
>  static struct dma_buf *
>  restricted_heap_allocate(struct dma_heap *heap, unsigned long size,
>  			 unsigned long fd_flags, unsigned long heap_flags)
> @@ -66,6 +168,7 @@ restricted_heap_allocate(struct dma_heap *heap, unsigned long size,
>  	if (ret)
>  		goto err_free_buf;
>  	exp_info.exp_name = dma_heap_get_name(heap);
> +	exp_info.ops = &restricted_heap_buf_ops;
>  	exp_info.size = restricted_buf->size;
>  	exp_info.flags = fd_flags;
>  	exp_info.priv = restricted_buf;
> -- 
> 2.25.1
>
Daniel Vetter Jan. 12, 2024, 9:49 a.m. UTC | #2
On Fri, Jan 12, 2024 at 10:41:14AM +0100, Daniel Vetter wrote:
> On Fri, Jan 12, 2024 at 05:20:11PM +0800, Yong Wu wrote:
> > Add the dma_ops for this restricted heap. For restricted buffers,
> > cache_ops/mmap are not allowed, thus return -EPERM for them.
> > 
> > Signed-off-by: Yong Wu <yong.wu@mediatek.com>
> > ---
> >  drivers/dma-buf/heaps/restricted_heap.c | 103 ++++++++++++++++++++++++
> >  1 file changed, 103 insertions(+)
> > 
> > diff --git a/drivers/dma-buf/heaps/restricted_heap.c b/drivers/dma-buf/heaps/restricted_heap.c
> > index 8c266a0f6192..ec4c63d2112d 100644
> > --- a/drivers/dma-buf/heaps/restricted_heap.c
> > +++ b/drivers/dma-buf/heaps/restricted_heap.c
> > @@ -12,6 +12,10 @@
> >  
> >  #include "restricted_heap.h"
> >  
> > +struct restricted_heap_attachment {
> > +	struct sg_table			*table;
> > +};
> > +
> >  static int
> >  restricted_heap_memory_allocate(struct restricted_heap *heap, struct restricted_buffer *buf)
> >  {
> > @@ -45,6 +49,104 @@ restricted_heap_memory_free(struct restricted_heap *heap, struct restricted_buff
> >  	ops->memory_free(heap, buf);
> >  }
> >  
> > +static int restricted_heap_attach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment)
> > +{
> > +	struct restricted_buffer *restricted_buf = dmabuf->priv;
> > +	struct restricted_heap_attachment *a;
> > +	struct sg_table *table;
> > +	int ret;
> > +
> > +	a = kzalloc(sizeof(*a), GFP_KERNEL);
> > +	if (!a)
> > +		return -ENOMEM;
> > +
> > +	table = kzalloc(sizeof(*table), GFP_KERNEL);
> > +	if (!table) {
> > +		ret = -ENOMEM;
> > +		goto err_free_attach;
> > +	}
> > +
> > +	ret = sg_alloc_table(table, 1, GFP_KERNEL);
> > +	if (ret)
> > +		goto err_free_sgt;
> > +	sg_set_page(table->sgl, NULL, restricted_buf->size, 0);
> 
> So this is definitely broken and violating the dma-buf api rules. You
> cannot let attach succeed and supply a dummy/invalid sg table.
> 
> Two options:
> 
> - Reject ->attach for all these buffers with -EBUSY and provide instead a
>   private api for these secure buffers, similar to how virtio_dma_buf has
>   private virtio-specific apis. This interface would need to be
>   standardized across all arm TEE users, so that we don't have a
>   disastrous proliferation of apis.
> 
> - Allow ->attach, but _only_ for drivers/devices which can access the
>   secure buffer correctly, and only if you can put the right secure buffer
>   address into the sg table directly. If dma to a secure buffer for a
>   given struct device * will not work correctly (i.e. without data
>   corruption), you _must_ reject the attach attempt with -EBUSY.
> 
> The 2nd approach would be my preferred one, if it's technically possible.
> 
> Also my understanding is that arm TEE is standardized, so I think we'll at
> least want some acks from other soc people whether this will work for them
> too.
> 
> Finally the usual drill:
> - this also needs the driver side support, if there are any changes needed.
>   Just the new heap isn't enough.

Ok I quickly scrolled through your drm patches and that confirms that the
current dma-buf interface you're implementing is just completely breaking
the api. And you need to paper over that with all kinds of very icky
special-casing.

So definitely need to rethink the overall design between dma-buf heaps and
drivers here.
-Sima

> - and for drm you need open userspace for this. Doesn't have to be the
>   full content protection decode pipeline, the drivers in drm that landed
>   secure buffer support thus far enabled it using the
>   EGL_EXT_protected_content extension using gl, which side steps all the
>   complications around content decryption keys and support
> 
> Cheers, Sima
> 
> > +
> > +	a->table = table;
> > +	attachment->priv = a;
> > +
> > +	return 0;
> > +
> > +err_free_sgt:
> > +	kfree(table);
> > +err_free_attach:
> > +	kfree(a);
> > +	return ret;
> > +}
> > +
> > +static void restricted_heap_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment)
> > +{
> > +	struct restricted_heap_attachment *a = attachment->priv;
> > +
> > +	sg_free_table(a->table);
> > +	kfree(a->table);
> > +	kfree(a);
> > +}
> > +
> > +static struct sg_table *
> > +restricted_heap_map_dma_buf(struct dma_buf_attachment *attachment, enum dma_data_direction direct)
> > +{
> > +	struct restricted_heap_attachment *a = attachment->priv;
> > +	struct sg_table *table = a->table;
> > +
> > +	return table;
> > +}
> > +
> > +static void
> > +restricted_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *table,
> > +			      enum dma_data_direction direction)
> > +{
> > +	struct restricted_heap_attachment *a = attachment->priv;
> > +
> > +	WARN_ON(a->table != table);
> > +}
> > +
> > +static int
> > +restricted_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction)
> > +{
> > +	return -EPERM;
> > +}
> > +
> > +static int
> > +restricted_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction)
> > +{
> > +	return -EPERM;
> > +}
> > +
> > +static int restricted_heap_dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
> > +{
> > +	return -EPERM;
> > +}
> > +
> > +static void restricted_heap_free(struct dma_buf *dmabuf)
> > +{
> > +	struct restricted_buffer *restricted_buf = dmabuf->priv;
> > +	struct restricted_heap *heap = dma_heap_get_drvdata(restricted_buf->heap);
> > +
> > +	restricted_heap_memory_free(heap, restricted_buf);
> > +	kfree(restricted_buf);
> > +}
> > +
> > +static const struct dma_buf_ops restricted_heap_buf_ops = {
> > +	.attach		= restricted_heap_attach,
> > +	.detach		= restricted_heap_detach,
> > +	.map_dma_buf	= restricted_heap_map_dma_buf,
> > +	.unmap_dma_buf	= restricted_heap_unmap_dma_buf,
> > +	.begin_cpu_access = restricted_heap_dma_buf_begin_cpu_access,
> > +	.end_cpu_access	= restricted_heap_dma_buf_end_cpu_access,
> > +	.mmap		= restricted_heap_dma_buf_mmap,
> > +	.release	= restricted_heap_free,
> > +};
> > +
> >  static struct dma_buf *
> >  restricted_heap_allocate(struct dma_heap *heap, unsigned long size,
> >  			 unsigned long fd_flags, unsigned long heap_flags)
> > @@ -66,6 +168,7 @@ restricted_heap_allocate(struct dma_heap *heap, unsigned long size,
> >  	if (ret)
> >  		goto err_free_buf;
> >  	exp_info.exp_name = dma_heap_get_name(heap);
> > +	exp_info.ops = &restricted_heap_buf_ops;
> >  	exp_info.size = restricted_buf->size;
> >  	exp_info.flags = fd_flags;
> >  	exp_info.priv = restricted_buf;
> > -- 
> > 2.25.1
> > 
> 
> -- 
> Daniel Vetter
> Software Engineer, Intel Corporation
> http://blog.ffwll.ch
Yong Wu (吴勇) May 15, 2024, 5:35 a.m. UTC | #3
On Fri, 2024-01-12 at 10:49 +0100, Daniel Vetter wrote:
> On Fri, Jan 12, 2024 at 10:41:14AM +0100, Daniel Vetter wrote:
> > On Fri, Jan 12, 2024 at 05:20:11PM +0800, Yong Wu wrote:
> > > Add the dma_ops for this restricted heap. For restricted buffers,
> > > cache_ops/mmap are not allowed, thus return -EPERM for them.
> > > 
> > > Signed-off-by: Yong Wu <yong.wu@mediatek.com>
> > > ---
> > >  drivers/dma-buf/heaps/restricted_heap.c | 103 ++++++++++++++++++++++++
> > >  1 file changed, 103 insertions(+)
> > > 
> > > diff --git a/drivers/dma-buf/heaps/restricted_heap.c b/drivers/dma-buf/heaps/restricted_heap.c
> > > index 8c266a0f6192..ec4c63d2112d 100644
> > > --- a/drivers/dma-buf/heaps/restricted_heap.c
> > > +++ b/drivers/dma-buf/heaps/restricted_heap.c
> > > @@ -12,6 +12,10 @@
> > >  
> > >  #include "restricted_heap.h"
> > >  
> > > +struct restricted_heap_attachment {
> > > +	struct sg_table			*table;
> > > +};
> > > +
> > >  static int
> > >  restricted_heap_memory_allocate(struct restricted_heap *heap, struct restricted_buffer *buf)
> > >  {
> > > @@ -45,6 +49,104 @@ restricted_heap_memory_free(struct restricted_heap *heap, struct restricted_buff
> > >  	ops->memory_free(heap, buf);
> > >  }
> > >  
> > > +static int restricted_heap_attach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment)
> > > +{
> > > +	struct restricted_buffer *restricted_buf = dmabuf->priv;
> > > +	struct restricted_heap_attachment *a;
> > > +	struct sg_table *table;
> > > +	int ret;
> > > +
> > > +	a = kzalloc(sizeof(*a), GFP_KERNEL);
> > > +	if (!a)
> > > +		return -ENOMEM;
> > > +
> > > +	table = kzalloc(sizeof(*table), GFP_KERNEL);
> > > +	if (!table) {
> > > +		ret = -ENOMEM;
> > > +		goto err_free_attach;
> > > +	}
> > > +
> > > +	ret = sg_alloc_table(table, 1, GFP_KERNEL);
> > > +	if (ret)
> > > +		goto err_free_sgt;
> > > +	sg_set_page(table->sgl, NULL, restricted_buf->size, 0);
> > 
> > So this is definitely broken and violating the dma-buf api rules. You
> > cannot let attach succeed and supply a dummy/invalid sg table.
> > 
> > Two options:
> > 
> > - Reject ->attach for all these buffers with -EBUSY and provide instead a
> >   private api for these secure buffers, similar to how virtio_dma_buf has
> >   private virtio-specific apis. This interface would need to be
> >   standardized across all arm TEE users, so that we don't have a
> >   disastrous proliferation of apis.
> > 
> > - Allow ->attach, but _only_ for drivers/devices which can access the
> >   secure buffer correctly, and only if you can put the right secure buffer
> >   address into the sg table directly. If dma to a secure buffer for a
> >   given struct device * will not work correctly (i.e. without data
> >   corruption), you _must_ reject the attach attempt with -EBUSY.
> > 
> > The 2nd approach would be my preferred one, if it's technically possible.
> > 
> > Also my understanding is that arm TEE is standardized, so I think we'll at
> > least want some acks from other soc people whether this will work for them
> > too.
> > 
> > Finally the usual drill:
> > - this also needs the driver side support, if there are any changes needed.
> >   Just the new heap isn't enough.
> 
> Ok I quickly scrolled through your drm patches and that confirms that the
> current dma-buf interface you're implementing is just completely breaking
> the api. And you need to paper over that with all kinds of very icky
> special-casing.
> 
> So definitely need to rethink the overall design between dma-buf heaps and
> drivers here.

Hi,

Thanks very much for the review, and sorry for replying so late. We
reconstructed our TEE commands so that the kernel can obtain the valid
PA/pages, and the sg operations can then run normally.

I will send the next version.
Thanks.
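
For illustration only (nothing below is taken from the actual next version):
once the reworked TEE command hands back a valid PA and ->attach fills the
sg_table with the real page(s), map_dma_buf/unmap_dma_buf can simply go
through the regular DMA API, e.g.:

static struct sg_table *
restricted_heap_map_dma_buf(struct dma_buf_attachment *attachment,
			    enum dma_data_direction direction)
{
	struct restricted_heap_attachment *a = attachment->priv;
	int ret;

	/* Skip CPU sync: the CPU is not allowed to touch this buffer anyway. */
	ret = dma_map_sgtable(attachment->dev, a->table, direction,
			      DMA_ATTR_SKIP_CPU_SYNC);
	if (ret)
		return ERR_PTR(ret);

	return a->table;
}

static void
restricted_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
	struct restricted_heap_attachment *a = attachment->priv;

	WARN_ON(a->table != table);
	dma_unmap_sgtable(attachment->dev, table, direction,
			  DMA_ATTR_SKIP_CPU_SYNC);
}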

> -Sima
> 
> > - and for drm you need open userspace for this. Doesn't have to be the
> >   full content protection decode pipeline, the drivers in drm that landed
> >   secure buffer support thus far enabled it using the
> >   EGL_EXT_protected_content extension using gl, which side steps all the
> >   complications around content decryption keys and support
> > 
> > Cheers, Sima
> > 
> > > +
> > > +	a->table = table;
> > > +	attachment->priv = a;
> > > +
> > > +	return 0;
> > > +
> > > +err_free_sgt:
> > > +	kfree(table);
> > > +err_free_attach:
> > > +	kfree(a);
> > > +	return ret;
> > > +}
> > > +
> > > +static void restricted_heap_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment)
> > > +{
> > > +	struct restricted_heap_attachment *a = attachment->priv;
> > > +
> > > +	sg_free_table(a->table);
> > > +	kfree(a->table);
> > > +	kfree(a);
> > > +}
> > > +
> > > +static struct sg_table *
> > > +restricted_heap_map_dma_buf(struct dma_buf_attachment *attachment, enum dma_data_direction direct)
> > > +{
> > > +	struct restricted_heap_attachment *a = attachment->priv;
> > > +	struct sg_table *table = a->table;
> > > +
> > > +	return table;
> > > +}
> > > +
> > > +static void
> > > +restricted_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *table,
> > > +			      enum dma_data_direction direction)
> > > +{
> > > +	struct restricted_heap_attachment *a = attachment->priv;
> > > +
> > > +	WARN_ON(a->table != table);
> > > +}
> > > +
> > > +static int
> > > +restricted_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction)
> > > +{
> > > +	return -EPERM;
> > > +}
> > > +
> > > +static int
> > > +restricted_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction)
> > > +{
> > > +	return -EPERM;
> > > +}
> > > +
> > > +static int restricted_heap_dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
> > > +{
> > > +	return -EPERM;
> > > +}
> > > +
> > > +static void restricted_heap_free(struct dma_buf *dmabuf)
> > > +{
> > > +	struct restricted_buffer *restricted_buf = dmabuf->priv;
> > > +	struct restricted_heap *heap = dma_heap_get_drvdata(restricted_buf->heap);
> > > +
> > > +	restricted_heap_memory_free(heap, restricted_buf);
> > > +	kfree(restricted_buf);
> > > +}
> > > +
> > > +static const struct dma_buf_ops restricted_heap_buf_ops = {
> > > +	.attach		= restricted_heap_attach,
> > > +	.detach		= restricted_heap_detach,
> > > +	.map_dma_buf	= restricted_heap_map_dma_buf,
> > > +	.unmap_dma_buf	= restricted_heap_unmap_dma_buf,
> > > +	.begin_cpu_access = restricted_heap_dma_buf_begin_cpu_access,
> > > +	.end_cpu_access	= restricted_heap_dma_buf_end_cpu_access,
> > > +	.mmap		= restricted_heap_dma_buf_mmap,
> > > +	.release	= restricted_heap_free,
> > > +};
> > > +
> > >  static struct dma_buf *
> > >  restricted_heap_allocate(struct dma_heap *heap, unsigned long size,
> > >  			 unsigned long fd_flags, unsigned long heap_flags)
> > > @@ -66,6 +168,7 @@ restricted_heap_allocate(struct dma_heap *heap, unsigned long size,
> > >  	if (ret)
> > >  		goto err_free_buf;
> > >  	exp_info.exp_name = dma_heap_get_name(heap);
> > > +	exp_info.ops = &restricted_heap_buf_ops;
> > >  	exp_info.size = restricted_buf->size;
> > >  	exp_info.flags = fd_flags;
> > >  	exp_info.priv = restricted_buf;
> > > -- 
> > > 2.25.1
> > > 
> > 
> > -- 
> > Daniel Vetter
> > Software Engineer, Intel Corporation
> > http://blog.ffwll.ch
> 

Patch

diff --git a/drivers/dma-buf/heaps/restricted_heap.c b/drivers/dma-buf/heaps/restricted_heap.c
index 8c266a0f6192..ec4c63d2112d 100644
--- a/drivers/dma-buf/heaps/restricted_heap.c
+++ b/drivers/dma-buf/heaps/restricted_heap.c
@@ -12,6 +12,10 @@ 
 
 #include "restricted_heap.h"
 
+struct restricted_heap_attachment {
+	struct sg_table			*table;
+};
+
 static int
 restricted_heap_memory_allocate(struct restricted_heap *heap, struct restricted_buffer *buf)
 {
@@ -45,6 +49,104 @@  restricted_heap_memory_free(struct restricted_heap *heap, struct restricted_buff
 	ops->memory_free(heap, buf);
 }
 
+static int restricted_heap_attach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment)
+{
+	struct restricted_buffer *restricted_buf = dmabuf->priv;
+	struct restricted_heap_attachment *a;
+	struct sg_table *table;
+	int ret;
+
+	a = kzalloc(sizeof(*a), GFP_KERNEL);
+	if (!a)
+		return -ENOMEM;
+
+	table = kzalloc(sizeof(*table), GFP_KERNEL);
+	if (!table) {
+		ret = -ENOMEM;
+		goto err_free_attach;
+	}
+
+	ret = sg_alloc_table(table, 1, GFP_KERNEL);
+	if (ret)
+		goto err_free_sgt;
+	sg_set_page(table->sgl, NULL, restricted_buf->size, 0);
+
+	a->table = table;
+	attachment->priv = a;
+
+	return 0;
+
+err_free_sgt:
+	kfree(table);
+err_free_attach:
+	kfree(a);
+	return ret;
+}
+
+static void restricted_heap_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment)
+{
+	struct restricted_heap_attachment *a = attachment->priv;
+
+	sg_free_table(a->table);
+	kfree(a->table);
+	kfree(a);
+}
+
+static struct sg_table *
+restricted_heap_map_dma_buf(struct dma_buf_attachment *attachment, enum dma_data_direction direct)
+{
+	struct restricted_heap_attachment *a = attachment->priv;
+	struct sg_table *table = a->table;
+
+	return table;
+}
+
+static void
+restricted_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *table,
+			      enum dma_data_direction direction)
+{
+	struct restricted_heap_attachment *a = attachment->priv;
+
+	WARN_ON(a->table != table);
+}
+
+static int
+restricted_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction)
+{
+	return -EPERM;
+}
+
+static int
+restricted_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction)
+{
+	return -EPERM;
+}
+
+static int restricted_heap_dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+	return -EPERM;
+}
+
+static void restricted_heap_free(struct dma_buf *dmabuf)
+{
+	struct restricted_buffer *restricted_buf = dmabuf->priv;
+	struct restricted_heap *heap = dma_heap_get_drvdata(restricted_buf->heap);
+
+	restricted_heap_memory_free(heap, restricted_buf);
+	kfree(restricted_buf);
+}
+
+static const struct dma_buf_ops restricted_heap_buf_ops = {
+	.attach		= restricted_heap_attach,
+	.detach		= restricted_heap_detach,
+	.map_dma_buf	= restricted_heap_map_dma_buf,
+	.unmap_dma_buf	= restricted_heap_unmap_dma_buf,
+	.begin_cpu_access = restricted_heap_dma_buf_begin_cpu_access,
+	.end_cpu_access	= restricted_heap_dma_buf_end_cpu_access,
+	.mmap		= restricted_heap_dma_buf_mmap,
+	.release	= restricted_heap_free,
+};
+
 static struct dma_buf *
 restricted_heap_allocate(struct dma_heap *heap, unsigned long size,
 			 unsigned long fd_flags, unsigned long heap_flags)
@@ -66,6 +168,7 @@  restricted_heap_allocate(struct dma_heap *heap, unsigned long size,
 	if (ret)
 		goto err_free_buf;
 	exp_info.exp_name = dma_heap_get_name(heap);
+	exp_info.ops = &restricted_heap_buf_ops;
 	exp_info.size = restricted_buf->size;
 	exp_info.flags = fd_flags;
 	exp_info.priv = restricted_buf;