
[RESEND,1/3] drm/i915/dmabuf: Implement pwrite() callback

Message ID 20191031082958.10755-1-janusz.krzysztofik@linux.intel.com (mailing list archive)
State New, archived
Series [RESEND,1/3] drm/i915/dmabuf: Implement pwrite() callback

Commit Message

Janusz Krzysztofik Oct. 31, 2019, 8:29 a.m. UTC
We need a dmabuf-specific pwrite() callback utilizing the dma-buf API,
otherwise the GEM_PWRITE IOCTL will no longer work with dma-buf backed
(i.e., PRIME imported) objects on hardware with no mappable aperture.

Signed-off-by: Janusz Krzysztofik <janusz.krzysztofik@linux.intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 55 ++++++++++++++++++++++
 1 file changed, 55 insertions(+)
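
For context, the affected uAPI path is a plain I915_GEM_PWRITE ioctl issued on
a PRIME-imported handle.  A minimal userspace sketch (not part of the patch;
error paths trimmed, and the already-open "i915_fd" and imported dma-buf
"prime_fd" descriptors are hypothetical names used only for illustration):

/*
 * Illustrative only -- not from the patch.  Writes a payload into a
 * PRIME-imported object through DRM_IOCTL_I915_GEM_PWRITE.  "i915_fd"
 * (an open i915 node) and "prime_fd" (a dma-buf exported by some other
 * driver) are assumed to already exist.
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int pwrite_imported(int i915_fd, int prime_fd,
			   const void *data, uint64_t size)
{
	struct drm_i915_gem_pwrite pwrite;
	uint32_t handle;
	int ret;

	/* PRIME import: turn the dma-buf fd into an i915 GEM handle. */
	ret = drmPrimeFDToHandle(i915_fd, prime_fd, &handle);
	if (ret)
		return ret;

	memset(&pwrite, 0, sizeof(pwrite));
	pwrite.handle = handle;
	pwrite.offset = 0;
	pwrite.size = size;
	pwrite.data_ptr = (uintptr_t)data;

	/*
	 * Without a dmabuf pwrite() backend, this is the call that stops
	 * working on hardware with no mappable aperture.
	 */
	return drmIoctl(i915_fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}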

Comments

Daniel Vetter Nov. 5, 2019, 2:27 p.m. UTC | #1
On Thu, Oct 31, 2019 at 09:29:56AM +0100, Janusz Krzysztofik wrote:
> We need dmabuf specific pwrite() callback utilizing dma-buf API,
> otherwise GEM_PWRITE IOCTL will no longer work with dma-buf backed
> (i.e., PRIME imported) objects on hardware with no mappable aperture.
> 
> Signed-off-by: Janusz Krzysztofik <janusz.krzysztofik@linux.intel.com>
> Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
> Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>

Do we have userspace for this (aside from igts)? Specifically for the
gen12 + dma-buf import + pwrite/read/whatever case you're fixing in this
series here.
-Daniel

> ---
>  drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 55 ++++++++++++++++++++++
>  1 file changed, 55 insertions(+)
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
> index 96ce95c8ac5a..93eea1031c82 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
> @@ -248,9 +248,64 @@ static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
>  				 DMA_BIDIRECTIONAL);
>  }
>  
> +static int i915_gem_object_pwrite_dmabuf(struct drm_i915_gem_object *obj,
> +					 const struct drm_i915_gem_pwrite *args)
> +{
> +	struct dma_buf *dmabuf = obj->base.import_attach->dmabuf;
> +	void __user *user_data = u64_to_user_ptr(args->data_ptr);
> +	struct file *file = dmabuf->file;
> +	const struct file_operations *fop = file->f_op;
> +	void __force *vaddr;
> +	int ret;
> +
> +	if (fop->write) {
> +		loff_t offset = args->offset;
> +
> +		/*
> +		 * fop->write() is supposed to call dma_buf_begin_cpu_access()
> +		 * if O_SYNC flag is set, avoid calling it twice
> +		 */
> +		if (!(file->f_flags & O_SYNC)) {
> +			ret = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
> +			if (ret)
> +				return ret;
> +		}
> +
> +		ret = fop->write(file, user_data, args->size, &offset);
> +
> +		if (!(file->f_flags & O_SYNC))
> +			dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
> +
> +		if (!ret)
> +			return 0;
> +	}
> +
> +	/* dma-buf file .write() not supported or failed, try dma_buf_vmap() */
> +	ret = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
> +	if (ret)
> +		return ret;
> +
> +	vaddr = dma_buf_vmap(dmabuf);
> +	if (!vaddr)
> +		goto out_err;
> +
> +	ret = copy_from_user(vaddr + args->offset, user_data, args->size);
> +	dma_buf_vunmap(dmabuf, vaddr);
> +	if (!ret)
> +		goto out_end;
> +
> +out_err:
> +	/* fall back to GTT mapping */
> +	ret = -ENODEV;
> +out_end:
> +	dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
> +	return ret;
> +}
> +
>  static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
>  	.get_pages = i915_gem_object_get_pages_dmabuf,
>  	.put_pages = i915_gem_object_put_pages_dmabuf,
> +	.pwrite = i915_gem_object_pwrite_dmabuf,
>  };
>  
>  struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
> -- 
> 2.21.0
>
Janusz Krzysztofik Nov. 5, 2019, 3:08 p.m. UTC | #2
Hi Daniel,

On Tuesday, November 5, 2019 3:27:55 PM CET Daniel Vetter wrote:
> On Thu, Oct 31, 2019 at 09:29:56AM +0100, Janusz Krzysztofik wrote:
> > We need dmabuf specific pwrite() callback utilizing dma-buf API,
> > otherwise GEM_PWRITE IOCTL will no longer work with dma-buf backed
> > (i.e., PRIME imported) objects on hardware with no mappable aperture.
> > 
> > Signed-off-by: Janusz Krzysztofik <janusz.krzysztofik@linux.intel.com>
> > Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
> > Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
> 
> Do we have userspace for this (aside from igts)? Specifically for the
> gen12 + dma-buf import + pwrite/read/whatever case you're fixing in this
> series here.

I don't know the answer, sorry.  I can only say that the prime_vgem IGT test
uses gem_read()/gem_write(), which issue the I915_GEM_PREAD/I915_GEM_PWRITE
DRM IOCTLs respectively, on PRIME imported vgem objects.
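
For illustration, a rough sketch of that prime_vgem-style flow using raw
ioctls instead of the IGT helpers (the "vgem_fd" and "i915_fd" descriptors
are hypothetical, and sizing and cleanup are simplified):

/*
 * Illustrative only: create a vgem (dumb) buffer, export it as a
 * dma-buf, import it into i915, then write through I915_GEM_PWRITE --
 * roughly what gem_write() does on the imported handle in prime_vgem.
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm.h>
#include <i915_drm.h>

static int vgem_import_and_pwrite(int vgem_fd, int i915_fd,
				  const void *data, uint64_t size)
{
	struct drm_mode_create_dumb create;
	struct drm_i915_gem_pwrite pwrite;
	uint32_t i915_handle;
	int dmabuf_fd, ret;

	/* vgem exposes buffers through the dumb-buffer ioctl. */
	memset(&create, 0, sizeof(create));
	create.width = 1024;
	create.height = (size + 4095) / 4096;	/* 4 KiB per row at 32 bpp */
	create.bpp = 32;
	ret = drmIoctl(vgem_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
	if (ret)
		return ret;

	/* Export from vgem and import into i915 over PRIME. */
	ret = drmPrimeHandleToFD(vgem_fd, create.handle, DRM_CLOEXEC, &dmabuf_fd);
	if (ret)
		return ret;
	ret = drmPrimeFDToHandle(i915_fd, dmabuf_fd, &i915_handle);
	if (ret)
		return ret;

	/* gem_write() ultimately boils down to this ioctl. */
	memset(&pwrite, 0, sizeof(pwrite));
	pwrite.handle = i915_handle;
	pwrite.size = size;
	pwrite.data_ptr = (uintptr_t)data;
	return drmIoctl(i915_fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}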

Thanks,
Janusz


> -Daniel
> 
> > ---
> >  drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 55 ++++++++++++++++++++++
> >  1 file changed, 55 insertions(+)
> > 
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
> > index 96ce95c8ac5a..93eea1031c82 100644
> > --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
> > @@ -248,9 +248,64 @@ static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
> >  				 DMA_BIDIRECTIONAL);
> >  }
> >  
> > +static int i915_gem_object_pwrite_dmabuf(struct drm_i915_gem_object *obj,
> > +					 const struct drm_i915_gem_pwrite *args)
> > +{
> > +	struct dma_buf *dmabuf = obj->base.import_attach->dmabuf;
> > +	void __user *user_data = u64_to_user_ptr(args->data_ptr);
> > +	struct file *file = dmabuf->file;
> > +	const struct file_operations *fop = file->f_op;
> > +	void __force *vaddr;
> > +	int ret;
> > +
> > +	if (fop->write) {
> > +		loff_t offset = args->offset;
> > +
> > +		/*
> > +		 * fop->write() is supposed to call dma_buf_begin_cpu_access()
> > +		 * if O_SYNC flag is set, avoid calling it twice
> > +		 */
> > +		if (!(file->f_flags & O_SYNC)) {
> > +			ret = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
> > +			if (ret)
> > +				return ret;
> > +		}
> > +
> > +		ret = fop->write(file, user_data, args->size, &offset);
> > +
> > +		if (!(file->f_flags & O_SYNC))
> > +			dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
> > +
> > +		if (!ret)
> > +			return 0;
> > +	}
> > +
> > +	/* dma-buf file .write() not supported or failed, try dma_buf_vmap() */
> > +	ret = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
> > +	if (ret)
> > +		return ret;
> > +
> > +	vaddr = dma_buf_vmap(dmabuf);
> > +	if (!vaddr)
> > +		goto out_err;
> > +
> > +	ret = copy_from_user(vaddr + args->offset, user_data, args->size);
> > +	dma_buf_vunmap(dmabuf, vaddr);
> > +	if (!ret)
> > +		goto out_end;
> > +
> > +out_err:
> > +	/* fall back to GTT mapping */
> > +	ret = -ENODEV;
> > +out_end:
> > +	dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
> > +	return ret;
> > +}
> > +
> >  static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
> >  	.get_pages = i915_gem_object_get_pages_dmabuf,
> >  	.put_pages = i915_gem_object_put_pages_dmabuf,
> > +	.pwrite = i915_gem_object_pwrite_dmabuf,
> >  };
> >  
> >  struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
> 
>
Janusz Krzysztofik Nov. 8, 2019, 5:20 p.m. UTC | #3
Hi,

On Tuesday, November 5, 2019 3:27:55 PM CET Daniel Vetter wrote:
> On Thu, Oct 31, 2019 at 09:29:56AM +0100, Janusz Krzysztofik wrote:
> > We need dmabuf specific pwrite() callback utilizing dma-buf API,
> > otherwise GEM_PWRITE IOCTL will no longer work with dma-buf backed
> > (i.e., PRIME imported) objects on hardware with no mappable aperture.
> > 
> > Signed-off-by: Janusz Krzysztofik <janusz.krzysztofik@linux.intel.com>
> > Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
> > Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
> 
> Do we have userspace for this (aside from igts)? Specifically for the
> gen12 + dma-buf import + pwrite/read/whatever case you're fixing in this
> series here.

I've discussed that on IRC with Daniel and Chris; it looks like
I915_GEM_PREAD/PWRITE IOCTL support is provided on PRIME imported dma-buf
objects mainly for completeness of the uAPI, and is useful for multi-device
tests.  The conclusion was that we should ask Dave and Jonas for their
position on whether that support should still be provided (and fixed for the
no-mappable-aperture case) or dropped as unused (with the related (sub)tests
dropped as well).

Dave, Jonas, could you please give your comments?

Thanks,
Janusz


> -Daniel
> 
> > ---
> >  drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 55 ++++++++++++++++++++++
> >  1 file changed, 55 insertions(+)
> > 
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
> > index 96ce95c8ac5a..93eea1031c82 100644
> > --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
> > @@ -248,9 +248,64 @@ static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
> >  				 DMA_BIDIRECTIONAL);
> >  }
> >  
> > +static int i915_gem_object_pwrite_dmabuf(struct drm_i915_gem_object *obj,
> > +					 const struct drm_i915_gem_pwrite *args)
> > +{
> > +	struct dma_buf *dmabuf = obj->base.import_attach->dmabuf;
> > +	void __user *user_data = u64_to_user_ptr(args->data_ptr);
> > +	struct file *file = dmabuf->file;
> > +	const struct file_operations *fop = file->f_op;
> > +	void __force *vaddr;
> > +	int ret;
> > +
> > +	if (fop->write) {
> > +		loff_t offset = args->offset;
> > +
> > +		/*
> > +		 * fop->write() is supposed to call dma_buf_begin_cpu_access()
> > +		 * if O_SYNC flag is set, avoid calling it twice
> > +		 */
> > +		if (!(file->f_flags & O_SYNC)) {
> > +			ret = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
> > +			if (ret)
> > +				return ret;
> > +		}
> > +
> > +		ret = fop->write(file, user_data, args->size, &offset);
> > +
> > +		if (!(file->f_flags & O_SYNC))
> > +			dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
> > +
> > +		if (!ret)
> > +			return 0;
> > +	}
> > +
> > +	/* dma-buf file .write() not supported or failed, try dma_buf_vmap() */
> > +	ret = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
> > +	if (ret)
> > +		return ret;
> > +
> > +	vaddr = dma_buf_vmap(dmabuf);
> > +	if (!vaddr)
> > +		goto out_err;
> > +
> > +	ret = copy_from_user(vaddr + args->offset, user_data, args->size);
> > +	dma_buf_vunmap(dmabuf, vaddr);
> > +	if (!ret)
> > +		goto out_end;
> > +
> > +out_err:
> > +	/* fall back to GTT mapping */
> > +	ret = -ENODEV;
> > +out_end:
> > +	dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
> > +	return ret;
> > +}
> > +
> >  static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
> >  	.get_pages = i915_gem_object_get_pages_dmabuf,
> >  	.put_pages = i915_gem_object_put_pages_dmabuf,
> > +	.pwrite = i915_gem_object_pwrite_dmabuf,
> >  };
> >  
> >  struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
> 
>

Patch

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 96ce95c8ac5a..93eea1031c82 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -248,9 +248,64 @@  static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
 				 DMA_BIDIRECTIONAL);
 }
 
+static int i915_gem_object_pwrite_dmabuf(struct drm_i915_gem_object *obj,
+					 const struct drm_i915_gem_pwrite *args)
+{
+	struct dma_buf *dmabuf = obj->base.import_attach->dmabuf;
+	void __user *user_data = u64_to_user_ptr(args->data_ptr);
+	struct file *file = dmabuf->file;
+	const struct file_operations *fop = file->f_op;
+	void __force *vaddr;
+	int ret;
+
+	if (fop->write) {
+		loff_t offset = args->offset;
+
+		/*
+		 * fop->write() is supposed to call dma_buf_begin_cpu_access()
+		 * if O_SYNC flag is set, avoid calling it twice
+		 */
+		if (!(file->f_flags & O_SYNC)) {
+			ret = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
+			if (ret)
+				return ret;
+		}
+
+		ret = fop->write(file, user_data, args->size, &offset);
+
+		if (!(file->f_flags & O_SYNC))
+			dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
+
+		if (!ret)
+			return 0;
+	}
+
+	/* dma-buf file .write() not supported or failed, try dma_buf_vmap() */
+	ret = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
+	if (ret)
+		return ret;
+
+	vaddr = dma_buf_vmap(dmabuf);
+	if (!vaddr)
+		goto out_err;
+
+	ret = copy_from_user(vaddr + args->offset, user_data, args->size);
+	dma_buf_vunmap(dmabuf, vaddr);
+	if (!ret)
+		goto out_end;
+
+out_err:
+	/* fall back to GTT mapping */
+	ret = -ENODEV;
+out_end:
+	dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
+	return ret;
+}
+
 static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
 	.get_pages = i915_gem_object_get_pages_dmabuf,
 	.put_pages = i915_gem_object_put_pages_dmabuf,
+	.pwrite = i915_gem_object_pwrite_dmabuf,
 };
 
 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,