diff mbox series

[v3,5/7] drm/panfrost: Add a new ioctl to submit batches

Message ID 20210702143225.3347980-6-boris.brezillon@collabora.com (mailing list archive)
State New, archived
Headers show
Series drm/panfrost: drm/panfrost: Add a new submit ioctl | expand

Commit Message

Boris Brezillon July 2, 2021, 2:32 p.m. UTC
This should help limit the number of ioctls when submitting multiple
jobs. The new ioctl also supports syncobj timelines and BO access flags.

v3:
* Re-use panfrost_get_job_bos() and panfrost_get_job_in_syncs() in the
  old submit path

Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
---
 drivers/gpu/drm/panfrost/panfrost_drv.c | 366 +++++++++++++++++++-----
 drivers/gpu/drm/panfrost/panfrost_job.c |   3 +
 include/uapi/drm/panfrost_drm.h         |  84 ++++++
 3 files changed, 375 insertions(+), 78 deletions(-)

Comments

Alyssa Rosenzweig July 2, 2021, 3:13 p.m. UTC | #1
```
> +/* Syncobj reference passed at job submission time to encode explicit
> + * input/output fences.
> + */
> +struct drm_panfrost_syncobj_ref {
> +	__u32 handle;
> +	__u32 pad;
> +	__u64 point;
> +};
```

What is handle? What is point? Why is there padding instead of putting
point first?

```
>  #define PANFROST_BO_REF_EXCLUSIVE	0x1
> +#define PANFROST_BO_REF_NO_IMPLICIT_DEP	0x2
```

This seems logically backwards. NO_IMPLICIT_DEP makes sense if we're
trying to keep backwards compatibility, but here you're crafting a new
interface totally from scratch. If anything, isn't BO_REF_IMPLICIT_DEP
the flag you'd want?

```
> +	/**
> +	 * Stride of the jobs array (needed to ease extension of the
> +	 * BATCH_SUBMIT ioctl). Should be set to
> +	 * sizeof(struct drm_panfrost_job).
> +	 */
> +	__u32 job_stride;
...
> +	/**
> +	 * Stride of the BO and syncobj reference arrays (needed to ease
> +	 * extension of the BATCH_SUBMIT ioctl). Should be set to
> +	 * sizeof(struct drm_panfrost_bo_ref).
> +	 */
> +	__u32 bo_ref_stride;
> +	__u32 syncobj_ref_stride;
```

Hmm. I'm not /opposed/ and I know kbase uses strides but it seems like
somewhat unwarranted complexity, and there is a combinatoric explosion
here (if jobs, bo refs, and syncobj refs use 3 different versions, as
this encoding permits... as opposed to just specifying a UABI version or
something like that)

```
> +	/**
> +	 * If the submission fails, this encodes the index of the job
> +	 * failed.
> +	 */
> +	__u32 fail_idx;
```

What if multiple jobs fail?

```
> +	/**
> +	 * ID of the queue to submit those jobs to. 0 is the default
> +	 * submit queue and should always exists. If you need a dedicated
> +	 * queue, create it with DRM_IOCTL_PANFROST_CREATE_SUBMITQUEUE.
> +	 */
> +	__u32 queue;
```

s/exists/exist/
Steven Price July 2, 2021, 3:31 p.m. UTC | #2
On 02/07/2021 15:32, Boris Brezillon wrote:
> This should help limit the number of ioctls when submitting multiple
> jobs. The new ioctl also supports syncobj timelines and BO access flags.
> 
> v3:
> * Re-use panfrost_get_job_bos() and panfrost_get_job_in_syncs() in the
>   old submit path
> 
> Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>

Better, but I was hoping we can mostly delete panfrost_ioctl_submit(),
leaving something along the lines of:

static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct panfrost_submitqueue *queue;
	struct drm_panfrost_submit *args = data;
	struct drm_panfrost_job submit_args = {
		.head = args->jc,
		.bos = args->bo_handles,
		.in_syncs = args->in_syncs,
		.out_syncs = &args->out_sync, // FIXME
		.in_sync_count = args->in_sync_count,
		.out_sync_count = args->out_sync > 0 ? 1 : 0,
		.bo_count = args->bo_handle_count,
		.requirements = args->requirements
	};
	int ret;

	queue = panfrost_submitqueue_get(file->driver_priv, 0);

	ret = panfrost_submit_job(dev, file, queue, &submit_args,
				  sizeof(u32), ...);

	return ret;
}

But obviously the out_sync part needs special handling as we can't just
pass a kernel pointer in like that ;)

I'd prefer the above to the duplication of things like this:

> +	kref_init(&job->refcount);
> +
> +	job->pfdev = pfdev;
> +	job->jc = args->head;
> +	job->requirements = args->requirements;
> +	job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
> +	job->file_priv = file_priv->driver_priv;
> +	xa_init_flags(&job->deps, XA_FLAGS_ALLOC);

As otherwise someone is going to mess up in the future and this is going
to diverge between the two ioctls.

Steve

> ---
>  drivers/gpu/drm/panfrost/panfrost_drv.c | 366 +++++++++++++++++++-----
>  drivers/gpu/drm/panfrost/panfrost_job.c |   3 +
>  include/uapi/drm/panfrost_drm.h         |  84 ++++++
>  3 files changed, 375 insertions(+), 78 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
> index 6529e5972b47..e2897de6e77d 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_drv.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
> @@ -138,111 +138,95 @@ panfrost_get_job_mappings(struct drm_file *file_priv, struct panfrost_job *job)
>  	return 0;
>  }
>  
> -/**
> - * panfrost_lookup_bos() - Sets up job->bo[] with the GEM objects
> - * referenced by the job.
> - * @dev: DRM device
> - * @file_priv: DRM file for this fd
> - * @args: IOCTL args
> - * @job: job being set up
> - *
> - * Resolve handles from userspace to BOs and attach them to job.
> - *
> - * Note that this function doesn't need to unreference the BOs on
> - * failure, because that will happen at panfrost_job_cleanup() time.
> - */
> +#define PANFROST_BO_REF_ALLOWED_FLAGS \
> +	(PANFROST_BO_REF_EXCLUSIVE | PANFROST_BO_REF_NO_IMPLICIT_DEP)
> +
>  static int
> -panfrost_lookup_bos(struct drm_device *dev,
> -		  struct drm_file *file_priv,
> -		  struct drm_panfrost_submit *args,
> -		  struct panfrost_job *job)
> +panfrost_get_job_bos(struct drm_file *file_priv,
> +		     u64 refs, u32 ref_stride, u32 count,
> +		     struct panfrost_job *job)
>  {
> +	void __user *in = u64_to_user_ptr(refs);
>  	unsigned int i;
> -	int ret;
>  
> -	job->bo_count = args->bo_handle_count;
> +	job->bo_count = count;
>  
> -	if (!job->bo_count)
> +	if (!count)
>  		return 0;
>  
> +	job->bos = kvmalloc_array(job->bo_count, sizeof(*job->bos),
> +				  GFP_KERNEL | __GFP_ZERO);
>  	job->bo_flags = kvmalloc_array(job->bo_count,
>  				       sizeof(*job->bo_flags),
>  				       GFP_KERNEL | __GFP_ZERO);
> -	if (!job->bo_flags)
> +	if (!job->bos || !job->bo_flags)
>  		return -ENOMEM;
>  
> -	for (i = 0; i < job->bo_count; i++)
> -		job->bo_flags[i] = PANFROST_BO_REF_EXCLUSIVE;
> +	for (i = 0; i < count; i++) {
> +		struct drm_panfrost_bo_ref ref = { };
> +		int ret;
>  
> -	ret = drm_gem_objects_lookup(file_priv,
> -				     (void __user *)(uintptr_t)args->bo_handles,
> -				     job->bo_count, &job->bos);
> -	if (ret)
> -		return ret;
> +		ret = copy_struct_from_user(&ref, sizeof(ref),
> +					    in + (i * ref_stride),
> +					    ref_stride);
> +		if (ret)
> +			return ret;
>  
> -	return panfrost_get_job_mappings(file_priv, job);
> +		/* Prior to the BATCH_SUBMIT ioctl all accessed BOs were
> +		 * treated as exclusive.
> +		 */
> +		if (ref_stride == sizeof(u32))
> +			ref.flags = PANFROST_BO_REF_EXCLUSIVE;
> +
> +		if ((ref.flags & ~PANFROST_BO_REF_ALLOWED_FLAGS))
> +			return -EINVAL;
> +
> +		job->bos[i] = drm_gem_object_lookup(file_priv, ref.handle);
> +		if (!job->bos[i])
> +			return -EINVAL;
> +
> +		job->bo_flags[i] = ref.flags;
> +	}
> +
> +	return 0;
>  }
>  
> -/**
> - * panfrost_copy_in_sync() - Sets up job->deps with the sync objects
> - * referenced by the job.
> - * @dev: DRM device
> - * @file_priv: DRM file for this fd
> - * @args: IOCTL args
> - * @job: job being set up
> - *
> - * Resolve syncobjs from userspace to fences and attach them to job.
> - *
> - * Note that this function doesn't need to unreference the fences on
> - * failure, because that will happen at panfrost_job_cleanup() time.
> - */
>  static int
> -panfrost_copy_in_sync(struct drm_device *dev,
> -		  struct drm_file *file_priv,
> -		  struct drm_panfrost_submit *args,
> -		  struct panfrost_job *job)
> +panfrost_get_job_in_syncs(struct drm_file *file_priv,
> +			  u64 refs, u32 ref_stride,
> +			  u32 count, struct panfrost_job *job)
>  {
> -	u32 *handles;
> -	int ret = 0;
> -	int i, in_fence_count;
> +	const void __user *in = u64_to_user_ptr(refs);
> +	unsigned int i;
> +	int ret;
>  
> -	in_fence_count = args->in_sync_count;
> -
> -	if (!in_fence_count)
> +	if (!count)
>  		return 0;
>  
> -	handles = kvmalloc_array(in_fence_count, sizeof(u32), GFP_KERNEL);
> -	if (!handles) {
> -		ret = -ENOMEM;
> -		DRM_DEBUG("Failed to allocate incoming syncobj handles\n");
> -		goto fail;
> -	}
> -
> -	if (copy_from_user(handles,
> -			   (void __user *)(uintptr_t)args->in_syncs,
> -			   in_fence_count * sizeof(u32))) {
> -		ret = -EFAULT;
> -		DRM_DEBUG("Failed to copy in syncobj handles\n");
> -		goto fail;
> -	}
> -
> -	for (i = 0; i < in_fence_count; i++) {
> +	for (i = 0; i < count; i++) {
> +		struct drm_panfrost_syncobj_ref ref = { };
>  		struct dma_fence *fence;
>  
> -		ret = drm_syncobj_find_fence(file_priv, handles[i], 0, 0,
> -					     &fence);
> +		ret = copy_struct_from_user(&ref, sizeof(ref),
> +					    in + (i * ref_stride),
> +					    ref_stride);
>  		if (ret)
> -			goto fail;
> +			return ret;
> +
> +		if (ref.pad)
> +			return -EINVAL;
> +
> +		ret = drm_syncobj_find_fence(file_priv, ref.handle, ref.point,
> +					     0, &fence);
> +		if (ret)
> +			return ret;
>  
>  		ret = drm_gem_fence_array_add(&job->deps, fence);
> -
>  		if (ret)
> -			goto fail;
> +			return ret;
>  	}
>  
> -fail:
> -	kvfree(handles);
> -	return ret;
> +	return 0;
>  }
>  
>  static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
> @@ -289,11 +273,17 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
>  	job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
>  	job->file_priv = file->driver_priv;
>  
> -	ret = panfrost_copy_in_sync(dev, file, args, job);
> +	ret = panfrost_get_job_in_syncs(file, args->in_syncs, sizeof(u32),
> +					args->in_sync_count, job);
>  	if (ret)
>  		goto fail_job;
>  
> -	ret = panfrost_lookup_bos(dev, file, args, job);
> +	ret = panfrost_get_job_bos(file, args->bo_handles, sizeof(u32),
> +				   args->bo_handle_count, job);
> +	if (ret)
> +		goto fail_job;
> +
> +	ret = panfrost_get_job_mappings(file, job);
>  	if (ret)
>  		goto fail_job;
>  
> @@ -491,6 +481,225 @@ panfrost_ioctl_destroy_submitqueue(struct drm_device *dev, void *data,
>  	return panfrost_submitqueue_destroy(priv, id);
>  }
>  
> +struct panfrost_job_out_sync {
> +	struct drm_syncobj *syncobj;
> +	struct dma_fence_chain *chain;
> +	u64 point;
> +};
> +
> +static void
> +panfrost_put_job_out_syncs(struct panfrost_job_out_sync *out_syncs, u32 count)
> +{
> +	unsigned int i;
> +
> +	for (i = 0; i < count; i++) {
> +		if (!out_syncs[i].syncobj)
> +			break;
> +
> +		drm_syncobj_put(out_syncs[i].syncobj);
> +		kvfree(out_syncs[i].chain);
> +	}
> +
> +	kvfree(out_syncs);
> +}
> +
> +static struct panfrost_job_out_sync *
> +panfrost_get_job_out_syncs(struct drm_file *file_priv,
> +			   u64 refs, u32 ref_stride,
> +			   u32 count)
> +{
> +	void __user *in = u64_to_user_ptr(refs);
> +	struct panfrost_job_out_sync *out_syncs;
> +	unsigned int i;
> +	int ret;
> +
> +	if (!count)
> +		return NULL;
> +
> +	out_syncs = kvmalloc_array(count, sizeof(*out_syncs),
> +				   GFP_KERNEL | __GFP_ZERO);
> +	if (!out_syncs)
> +		return ERR_PTR(-ENOMEM);
> +
> +	for (i = 0; i < count; i++) {
> +		struct drm_panfrost_syncobj_ref ref = { };
> +
> +		ret = copy_struct_from_user(&ref, sizeof(ref),
> +					    in + (i * ref_stride),
> +					    ref_stride);
> +		if (ret)
> +			goto err_free_out_syncs;
> +
> +		if (ref.pad) {
> +			ret = -EINVAL;
> +			goto err_free_out_syncs;
> +		}
> +
> +		out_syncs[i].syncobj = drm_syncobj_find(file_priv, ref.handle);
> +		if (!out_syncs[i].syncobj) {
> +			ret = -EINVAL;
> +			goto err_free_out_syncs;
> +		}
> +
> +		out_syncs[i].point = ref.point;
> +		if (!out_syncs[i].point)
> +			continue;
> +
> +		out_syncs[i].chain = kmalloc(sizeof(*out_syncs[i].chain),
> +					     GFP_KERNEL);
> +		if (!out_syncs[i].chain) {
> +			ret = -ENOMEM;
> +			goto err_free_out_syncs;
> +		}
> +	}
> +
> +	return out_syncs;
> +
> +err_free_out_syncs:
> +	panfrost_put_job_out_syncs(out_syncs, count);
> +	return ERR_PTR(ret);
> +}
> +
> +static void
> +panfrost_set_job_out_fence(struct panfrost_job_out_sync *out_syncs,
> +			   unsigned int count, struct dma_fence *fence)
> +{
> +	unsigned int i;
> +
> +	for (i = 0; i < count; i++) {
> +		if (out_syncs[i].chain) {
> +			drm_syncobj_add_point(out_syncs[i].syncobj,
> +					      out_syncs[i].chain,
> +					      fence, out_syncs[i].point);
> +			out_syncs[i].chain = NULL;
> +		} else {
> +			drm_syncobj_replace_fence(out_syncs[i].syncobj,
> +						  fence);
> +		}
> +	}
> +}
> +
> +#define PANFROST_JD_ALLOWED_REQS PANFROST_JD_REQ_FS
> +
> +static int
> +panfrost_submit_job(struct drm_device *dev, struct drm_file *file_priv,
> +		    struct panfrost_submitqueue *queue,
> +		    const struct drm_panfrost_job *args,
> +		    u32 bo_stride, u32 syncobj_stride)
> +{
> +	struct panfrost_device *pfdev = dev->dev_private;
> +	struct panfrost_job_out_sync *out_syncs;
> +	struct panfrost_job *job;
> +	int ret;
> +
> +	if (!args->head)
> +		return -EINVAL;
> +
> +	if (args->requirements & ~PANFROST_JD_ALLOWED_REQS)
> +		return -EINVAL;
> +
> +	job = kzalloc(sizeof(*job), GFP_KERNEL);
> +	if (!job)
> +		return -ENOMEM;
> +
> +	kref_init(&job->refcount);
> +
> +	job->pfdev = pfdev;
> +	job->jc = args->head;
> +	job->requirements = args->requirements;
> +	job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
> +	job->file_priv = file_priv->driver_priv;
> +	xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
> +
> +	ret = panfrost_get_job_in_syncs(file_priv,
> +					args->in_syncs,
> +					syncobj_stride,
> +					args->in_sync_count,
> +					job);
> +	if (ret)
> +		goto err_put_job;
> +
> +	out_syncs = panfrost_get_job_out_syncs(file_priv,
> +					       args->out_syncs,
> +					       syncobj_stride,
> +					       args->out_sync_count);
> +	if (IS_ERR(out_syncs)) {
> +		ret = PTR_ERR(out_syncs);
> +		goto err_put_job;
> +	}
> +
> +	ret = panfrost_get_job_bos(file_priv, args->bos, bo_stride,
> +				   args->bo_count, job);
> +	if (ret)
> +		goto err_put_job;
> +
> +	ret = panfrost_get_job_mappings(file_priv, job);
> +	if (ret)
> +		goto err_put_job;
> +
> +	ret = panfrost_job_push(queue, job);
> +	if (ret) {
> +		panfrost_put_job_out_syncs(out_syncs, args->out_sync_count);
> +		goto err_put_job;
> +	}
> +
> +	panfrost_set_job_out_fence(out_syncs, args->out_sync_count,
> +				   job->render_done_fence);
> +	panfrost_put_job_out_syncs(out_syncs, args->out_sync_count);
> +	return 0;
> +
> +err_put_job:
> +	panfrost_job_put(job);
> +	return ret;
> +}
> +
> +static int
> +panfrost_ioctl_batch_submit(struct drm_device *dev, void *data,
> +			    struct drm_file *file_priv)
> +{
> +	struct drm_panfrost_batch_submit *args = data;
> +	void __user *jobs_args = u64_to_user_ptr(args->jobs);
> +	struct panfrost_submitqueue *queue;
> +	unsigned int i;
> +	int ret;
> +
> +	/* Relax this test if new fields are added to
> +	 * drm_panfrost_{bo_ref,syncobj_ref,job}.
> +	 */
> +	if (args->job_stride < sizeof(struct drm_panfrost_job) ||
> +	    args->bo_ref_stride < sizeof(struct drm_panfrost_bo_ref) ||
> +	    args->syncobj_ref_stride < sizeof(struct drm_panfrost_syncobj_ref))
> +		return -EINVAL;
> +
> +	queue = panfrost_submitqueue_get(file_priv->driver_priv, args->queue);
> +	if (IS_ERR(queue))
> +		return PTR_ERR(queue);
> +
> +	for (i = 0; i < args->job_count; i++) {
> +		struct drm_panfrost_job job_args = { };
> +
> +		ret = copy_struct_from_user(&job_args, sizeof(job_args),
> +					    jobs_args + (i * args->job_stride),
> +					    args->job_stride);
> +		if (ret) {
> +			args->fail_idx = i;
> +			goto out_put_queue;
> +		}
> +
> +		ret = panfrost_submit_job(dev, file_priv, queue, &job_args,
> +					  args->bo_ref_stride,
> +					  args->syncobj_ref_stride);
> +		if (ret) {
> +			args->fail_idx = i;
> +			goto out_put_queue;
> +		}
> +	}
> +
> +out_put_queue:
> +	panfrost_submitqueue_put(queue);
> +	return 0;
> +}
> +
>  int panfrost_unstable_ioctl_check(void)
>  {
>  	if (!unstable_ioctls)
> @@ -570,6 +779,7 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
>  	PANFROST_IOCTL(MADVISE,		madvise,	DRM_RENDER_ALLOW),
>  	PANFROST_IOCTL(CREATE_SUBMITQUEUE, create_submitqueue, DRM_RENDER_ALLOW),
>  	PANFROST_IOCTL(DESTROY_SUBMITQUEUE, destroy_submitqueue, DRM_RENDER_ALLOW),
> +	PANFROST_IOCTL(BATCH_SUBMIT,	batch_submit,	DRM_RENDER_ALLOW),
>  };
>  
>  DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops);
> diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
> index 56ae89272e19..4e1540bce865 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_job.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_job.c
> @@ -254,6 +254,9 @@ static int panfrost_acquire_object_fences(struct panfrost_job *job)
>  				return ret;
>  		}
>  
> +		if (job->bo_flags[i] & PANFROST_BO_REF_NO_IMPLICIT_DEP)
> +			continue;
> +
>  		ret = drm_gem_fence_array_add_implicit(&job->deps, job->bos[i],
>  						       exclusive);
>  		if (ret)
> diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
> index 7ee02fd1ac75..ce0c1b96a58c 100644
> --- a/include/uapi/drm/panfrost_drm.h
> +++ b/include/uapi/drm/panfrost_drm.h
> @@ -23,6 +23,7 @@ extern "C" {
>  #define DRM_PANFROST_MADVISE			0x08
>  #define DRM_PANFROST_CREATE_SUBMITQUEUE		0x09
>  #define DRM_PANFROST_DESTROY_SUBMITQUEUE	0x0a
> +#define DRM_PANFROST_BATCH_SUBMIT		0x0b
>  
>  #define DRM_IOCTL_PANFROST_SUBMIT		DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit)
>  #define DRM_IOCTL_PANFROST_WAIT_BO		DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo)
> @@ -33,6 +34,7 @@ extern "C" {
>  #define DRM_IOCTL_PANFROST_MADVISE		DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MADVISE, struct drm_panfrost_madvise)
>  #define DRM_IOCTL_PANFROST_CREATE_SUBMITQUEUE	DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_CREATE_SUBMITQUEUE, struct drm_panfrost_create_submitqueue)
>  #define DRM_IOCTL_PANFROST_DESTROY_SUBMITQUEUE	DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_DESTROY_SUBMITQUEUE, __u32)
> +#define DRM_IOCTL_PANFROST_BATCH_SUBMIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_BATCH_SUBMIT, struct drm_panfrost_batch_submit)
>  
>  /*
>   * Unstable ioctl(s): only exposed when the unsafe unstable_ioctls module
> @@ -241,7 +243,89 @@ struct drm_panfrost_create_submitqueue {
>  	__u32 id;	/* out, identifier */
>  };
>  
> +/* Syncobj reference passed at job submission time to encode explicit
> + * input/output fences.
> + */
> +struct drm_panfrost_syncobj_ref {
> +	__u32 handle;
> +	__u32 pad;
> +	__u64 point;
> +};
> +
>  #define PANFROST_BO_REF_EXCLUSIVE	0x1
> +#define PANFROST_BO_REF_NO_IMPLICIT_DEP	0x2
> +
> +/* Describes a BO referenced by a job and the type of access. */
> +struct drm_panfrost_bo_ref {
> +	__u32 handle;
> +	__u32 flags;
> +};
> +
> +/* Describes a GPU job and the resources attached to it. */
> +struct drm_panfrost_job {
> +	/** GPU pointer to the head of the job chain. */
> +	__u64 head;
> +
> +	/**
> +	 * Array of drm_panfrost_bo_ref objects describing the BOs referenced
> +	 * by this job.
> +	 */
> +	__u64 bos;
> +
> +	/**
> +	 * Arrays of drm_panfrost_syncobj_ref objects describing the input
> +	 * and output fences.
> +	 */
> +	__u64 in_syncs;
> +	__u64 out_syncs;
> +
> +	/** Syncobj reference array sizes. */
> +	__u32 in_sync_count;
> +	__u32 out_sync_count;
> +
> +	/** BO reference array size. */
> +	__u32 bo_count;
> +
> +	/** Combination of PANFROST_JD_REQ_* flags. */
> +	__u32 requirements;
> +};
> +
> +/* Used to submit multiple jobs in one call */
> +struct drm_panfrost_batch_submit {
> +	/**
> +	 * Stride of the jobs array (needed to ease extension of the
> +	 * BATCH_SUBMIT ioctl). Should be set to
> +	 * sizeof(struct drm_panfrost_job).
> +	 */
> +	__u32 job_stride;
> +
> +	/** Number of jobs to submit. */
> +	__u32 job_count;
> +
> +	/* Pointer to a job array. */
> +	__u64 jobs;
> +
> +	/**
> +	 * Stride of the BO and syncobj reference arrays (needed to ease
> +	 * extension of the BATCH_SUBMIT ioctl). Should be set to
> +	 * sizeof(struct drm_panfrost_bo_ref).
> +	 */
> +	__u32 bo_ref_stride;
> +	__u32 syncobj_ref_stride;
> +
> +	/**
> +	 * If the submission fails, this encodes the index of the job
> +	 * failed.
> +	 */
> +	__u32 fail_idx;
> +
> +	/**
> +	 * ID of the queue to submit those jobs to. 0 is the default
> +	 * submit queue and should always exists. If you need a dedicated
> +	 * queue, create it with DRM_IOCTL_PANFROST_CREATE_SUBMITQUEUE.
> +	 */
> +	__u32 queue;
> +};
>  
>  #if defined(__cplusplus)
>  }
>
Alyssa Rosenzweig July 2, 2021, 3:34 p.m. UTC | #3
> Better, but I was hoping we can mostly delete panfrost_ioctl_submit(),
> leaving something along the lines of:
> 
> static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
> 		struct drm_file *file)
> {
> 	struct panfrost_submitqueue *queue;
> 	struct drm_panfrost_submit *args = data;
> 	struct drm_panfrost_job submit_args = {
> 		.head = args->jc,
> 		.bos = args->bo_handles,
> 		.in_syncs = args->in_syncs,
> 		.out_syncs = &args->out_sync, // FIXME
> 		.in_sync_count = args->in_sync_count,
> 		.out_sync_count = args->out_sync > 0 ? 1 : 0,
> 		.bo_count = args->bo_handle_count,
> 		.requirements = args->requirements
> 	};
> 	int ret;
> 
> 	queue = panfrost_submitqueue_get(file->driver_priv, 0);
> 
> 	ret = panfrost_submit_job(dev, file, queue, &submit_args,
> 				  sizeof(u32), ...);
> 
> 	return ret;
> }
> 
> But obviously the out_sync part needs special handling as we can't just
> pass a kernel pointer in like that ;)

This, a dozen times this.
Boris Brezillon July 2, 2021, 3:38 p.m. UTC | #4
On Fri, 2 Jul 2021 11:13:16 -0400
Alyssa Rosenzweig <alyssa@collabora.com> wrote:

> ```
> > +/* Syncobj reference passed at job submission time to encode explicit
> > + * input/output fences.
> > + */
> > +struct drm_panfrost_syncobj_ref {
> > +	__u32 handle;
> > +	__u32 pad;
> > +	__u64 point;
> > +};  
> ```
> 
> What is handle? What is point?

Handle is a syncobj handle, point is the point in a syncobj timeline.
I'll document those fields.

> Why is there padding instead of putting point first?

We can move the point field first, but we need to keep the explicit
padding: the struct has to be 64bit aligned because of the __u64 field
(which the compiler takes care of) but if we don't have an explicit
padding, the unused 32bits are undefined, which might cause trouble if
we extend the struct at some point, since we sort of expect that old
userspace keep this unused 32bit slot to 0, while new users set
non-zero values if they have to.

> 
> ```
> >  #define PANFROST_BO_REF_EXCLUSIVE	0x1
> > +#define PANFROST_BO_REF_NO_IMPLICIT_DEP	0x2  
> ```
> 
> This seems logically backwards. NO_IMPLICIT_DEP makes sense if we're
> trying to keep backwards compatibility, but here you're crafting a new
> interface totally from scratch. If anything, isn't BO_REF_IMPLICIT_DEP
> the flag you'd want?

AFAICT, all other drivers make the no-implicit-dep an opt-in, and I
didn't want to do things differently in panfrost. But if that's really
an issue, I can make it an opt-out.

> 
> ```
> > +	/**
> > +	 * Stride of the jobs array (needed to ease extension of the
> > +	 * BATCH_SUBMIT ioctl). Should be set to
> > +	 * sizeof(struct drm_panfrost_job).
> > +	 */
> > +	__u32 job_stride;  
> ...
> > +	/**
> > +	 * Stride of the BO and syncobj reference arrays (needed to ease
> > +	 * extension of the BATCH_SUBMIT ioctl). Should be set to
> > +	 * sizeof(struct drm_panfrost_bo_ref).
> > +	 */
> > +	__u32 bo_ref_stride;
> > +	__u32 syncobj_ref_stride;  
> ```
> 
> Hmm. I'm not /opposed/ and I know kbase uses strides but it seems like
> somewhat unwarranted complexity, and there is a combinatoric explosion
> here (if jobs, bo refs, and syncobj refs use 3 different versions, as
> this encoding permits... as opposed to just specifying a UABI version or
> something like that)

Sounds like a good idea. I'll add a version field and map that
to a <job_stride,bo_ref_stride,syncobj_ref_stride> tuple.

> 
> ```
> > +	/**
> > +	 * If the submission fails, this encodes the index of the job
> > +	 * failed.
> > +	 */
> > +	__u32 fail_idx;  
> ```
> 
> What if multiple jobs fail?

We stop at the first failure. Note that it's not an execution failure,
but a submission failure (AKA, userspace passed wrong params, like
invalid BO or syncobj handles).
Alyssa Rosenzweig July 2, 2021, 4:49 p.m. UTC | #5
> > What is handle? What is point?
> 
> Handle is a syncobj handle, point is the point in a syncobj timeline.
> I'll document those fields.

OK.

> > Why is there padding instead of putting point first?
> 
> We can move the point field first, but we need to keep the explicit
> padding: the struct has to be 64bit aligned because of the __u64 field
> (which the compiler takes care of) but if we don't have an explicit
> padding, the unused 32bits are undefined, which might cause trouble if
> we extend the struct at some point, since we sort of expect that old
> userspace keep this unused 32bit slot to 0, while new users set
> non-zero values if they have to.

Makes sense. Reordering still probably makes sense.

> > ```
> > >  #define PANFROST_BO_REF_EXCLUSIVE	0x1
> > > +#define PANFROST_BO_REF_NO_IMPLICIT_DEP	0x2  
> > ```
> > 
> > This seems logically backwards. NO_IMPLICIT_DEP makes sense if we're
> > trying to keep backwards compatibility, but here you're crafting a new
> > interface totally from scratch. If anything, isn't BO_REF_IMPLICIT_DEP
> > the flag you'd want?
> 
> AFAICT, all other drivers make the no-implicit-dep an opt-in, and I
> didn't want to do things differently in panfrost. But if that's really
> an issue, I can make it an opt-out.

I don't have strong feelings either way. I was just under the
impressions other drivers did this for b/w compat reasons which don't
apply here.

> > Hmm. I'm not /opposed/ and I know kbase uses strides but it seems like
> > somewhat unwarranted complexity, and there is a combinatoric explosion
> > here (if jobs, bo refs, and syncobj refs use 3 different versions, as
> > this encoding permits... as opposed to just specifying a UABI version or
> > something like that)
> 
> Sounds like a good idea. I'll add a version field and map that
> to a <job_stride,bo_ref_stride,syncobj_ref_stride> tuple.

Cc Steven, does this make sense?

> > > +	/**
> > > +	 * If the submission fails, this encodes the index of the job
> > > +	 * failed.
> > > +	 */
> > > +	__u32 fail_idx;  
> > ```
> > 
> > What if multiple jobs fail?
> 
> We stop at the first failure. Note that it's not an execution failure,
> but a submission failure (AKA, userspace passed wrong params, like
> invalid BO or synobj handles).

I see, ok.
Boris Brezillon July 2, 2021, 5:54 p.m. UTC | #6
On Fri, 2 Jul 2021 12:49:55 -0400
Alyssa Rosenzweig <alyssa@collabora.com> wrote:

> > > Why is there padding instead of putting point first?  
> > 
> > We can move the point field first, but we need to keep the explicit
> > padding: the struct has to be 64bit aligned because of the __u64 field
> > (which the compiler takes care of) but if we don't have an explicit
> > padding, the unused 32bits are undefined, which might cause trouble if
> > we extend the struct at some point, since we sort of expect that old
> > userspace keep this unused 32bit slot to 0, while new users set
> > non-zero values if they have to.  
> 
> Makes sense. Reordering still probably makes sense.

Actually, I can't re-order if I want the new in_syncs parser to work
with the old ioctl(), which you and Steven asked me to do :-).
Boris Brezillon July 2, 2021, 6:11 p.m. UTC | #7
On Fri, 2 Jul 2021 12:49:55 -0400
Alyssa Rosenzweig <alyssa@collabora.com> wrote:

> > > ```  
> > > >  #define PANFROST_BO_REF_EXCLUSIVE	0x1
> > > > +#define PANFROST_BO_REF_NO_IMPLICIT_DEP	0x2    
> > > ```
> > > 
> > > This seems logically backwards. NO_IMPLICIT_DEP makes sense if we're
> > > trying to keep backwards compatibility, but here you're crafting a new
> > > interface totally from scratch. If anything, isn't BO_REF_IMPLICIT_DEP
> > > the flag you'd want?  
> > 
> > AFAICT, all other drivers make the no-implicit-dep an opt-in, and I
> > didn't want to do things differently in panfrost. But if that's really
> > an issue, I can make it an opt-out.  
> 
> I don't have strong feelings either way. I was just under the
> impressions other drivers did this for b/w compat reasons which don't
> apply here.

Okay, I think I'll keep it like that unless there's a strong reason to
make no-implicit dep the default. It's safer to oversync than to skip
the synchronization, so it does feel like something the user should
explicitly enable.

> 
> > > Hmm. I'm not /opposed/ and I know kbase uses strides but it seems like
> > > somewhat unwarranted complexity, and there is a combinatoric explosion
> > > here (if jobs, bo refs, and syncobj refs use 3 different versions, as
> > > this encoding permits... as opposed to just specifying a UABI version or
> > > something like that)  
> > 
> > Sounds like a good idea. I'll add a version field and map that
> > to a <job_stride,bo_ref_stride,syncobj_ref_stride> tuple.  
> 
> Cc Steven, does this make sense?

I have this approach working, and I must admit I prefer it to the
per-object stride field passed to the submit struct.
Steven Price July 5, 2021, 8:22 a.m. UTC | #8
On 02/07/2021 19:11, Boris Brezillon wrote:
> On Fri, 2 Jul 2021 12:49:55 -0400
> Alyssa Rosenzweig <alyssa@collabora.com> wrote:
> 
>>>> ```  
>>>>>  #define PANFROST_BO_REF_EXCLUSIVE	0x1
>>>>> +#define PANFROST_BO_REF_NO_IMPLICIT_DEP	0x2    
>>>> ```
>>>>
>>>> This seems logically backwards. NO_IMPLICIT_DEP makes sense if we're
>>>> trying to keep backwards compatibility, but here you're crafting a new
>>>> interface totally from scratch. If anything, isn't BO_REF_IMPLICIT_DEP
>>>> the flag you'd want?  
>>>
>>> AFAICT, all other drivers make the no-implicit-dep an opt-in, and I
>>> didn't want to do things differently in panfrost. But if that's really
>>> an issue, I can make it an opt-out.  
>>
>> I don't have strong feelings either way. I was just under the
>> impressions other drivers did this for b/w compat reasons which don't
>> apply here.
> 
> Okay, I think I'll keep it like that unless there's a strong reason to
> make no-implicit dep the default. It's safer to oversync than the skip
> the synchronization, so it does feel like something the user should
> explicitly enable.

I don't have strong feelings - ultimately the number of projects caring
about the uABI is so limited the "default" is pretty irrelevant (it's
not as if we are needing to guide random developers who are new to the
interface). But a conservative default seems sensible.

>>
>>>> Hmm. I'm not /opposed/ and I know kbase uses strides but it seems like
>>>> somewhat unwarranted complexity, and there is a combinatoric explosion
>>>> here (if jobs, bo refs, and syncobj refs use 3 different versions, as
>>>> this encoding permits... as opposed to just specifying a UABI version or
>>>> something like that)  
>>>
>>> Sounds like a good idea. I'll add a version field and map that
>>> to a <job_stride,bo_ref_stride,syncobj_ref_stride> tuple.  
>>
>> Cc Steven, does this make sense?
> 
> I have this approach working, and I must admit I prefer it to the
> per-object stride field passed to the submit struct.
> 

There are benefits both ways:

 * Version number: easier to think about, and less worries about
combinatorial explosion of possible options to test.

 * Explicit structure sizes/strides: much harder to accidentally forget
to change; clients 'naturally' move to newer versions just by recompiling.

For now I'd be tempted to go for the version number, but I suspect we
should also ensure there's a generic 'flags' field in there. That would
allow us to introduce new features/behaviours in a way which can be
backported more easily if necessary.

The main benefit of structure sizes/strides is if you can break binary
backwards compatibility after a few years - because source compatibility
can easily be maintained while dropping the code for the shorter/older
structs. But Linux tries to maintain binary compatibility so this isn't
so relevant.

Steve
Boris Brezillon July 5, 2021, 8:43 a.m. UTC | #9
Hi Steven,

On Mon, 5 Jul 2021 09:22:39 +0100
Steven Price <steven.price@arm.com> wrote:

> On 02/07/2021 19:11, Boris Brezillon wrote:
> > On Fri, 2 Jul 2021 12:49:55 -0400
> > Alyssa Rosenzweig <alyssa@collabora.com> wrote:
> >   
> >>>> ```    
> >>>>>  #define PANFROST_BO_REF_EXCLUSIVE	0x1
> >>>>> +#define PANFROST_BO_REF_NO_IMPLICIT_DEP	0x2      
> >>>> ```
> >>>>
> >>>> This seems logically backwards. NO_IMPLICIT_DEP makes sense if we're
> >>>> trying to keep backwards compatibility, but here you're crafting a new
> >>>> interface totally from scratch. If anything, isn't BO_REF_IMPLICIT_DEP
> >>>> the flag you'd want?    
> >>>
> >>> AFAICT, all other drivers make the no-implicit-dep an opt-in, and I
> >>> didn't want to do things differently in panfrost. But if that's really
> >>> an issue, I can make it an opt-out.    
> >>
> >> I don't have strong feelings either way. I was just under the
> >> impressions other drivers did this for b/w compat reasons which don't
> >> apply here.  
> > 
> > Okay, I think I'll keep it like that unless there's a strong reason to
> > make no-implicit dep the default. It's safer to oversync than the skip
> > the synchronization, so it does feel like something the user should
> > explicitly enable.  
> 
> I don't have strong feelings - ultimately the number of projects caring
> about the uABI is so limited the "default" is pretty irrelevant (it's
> not as if we are needing to guide random developers who are new to the
> interface). But a conservative default seems sensible.
> 
> >>  
> >>>> Hmm. I'm not /opposed/ and I know kbase uses strides but it seems like
> >>>> somewhat unwarranted complexity, and there is a combinatoric explosion
> >>>> here (if jobs, bo refs, and syncobj refs use 3 different versions, as
> >>>> this encoding permits... as opposed to just specifying a UABI version or
> >>>> something like that)    
> >>>
> >>> Sounds like a good idea. I'll add a version field and map that
> >>> to a <job_stride,bo_ref_stride,syncobj_ref_stride> tuple.    
> >>
> >> Cc Steven, does this make sense?  
> > 
> > I have this approach working, and I must admit I prefer it to the
> > per-object stride field passed to the submit struct.
> >   
> 
> There are benefits both ways:
> 
>  * Version number: easier to think about, and less worries about
> combinatorial explosion of possible options to test.
> 
>  * Explicit structure sizes/strides: much harder to accidentally forgot
> to change, clients 'naturally' move to newer versions just with recompiling.

The version I just sent has a PANFROST_SUBMIT_BATCH_VERSION macro
defined in the uAPI header, so getting it right without changing the
code should be fine (same as with the sizeof(struct xx) trick in the
per-desc stride approach).

> 
> For now I'd be tempted to go for the version number, but I suspect we
> should also ensure there's a generic 'flags' field in there. That would
> allow us to introduce new features/behaviours in a way which can be
> backported more easily if necessary.

Adding features at the submit level without changing the version number
is already possible (we can extend drm_panfrost_batch_submit without
breaking the uABI), but I'm not sure that's a good idea...

If you mean adding a flags field at the job level, I can add it, but I
wonder what you have in mind when you say some features might be
interesting to backport. I really thought we'd force people to update
their kernel when they want those new features.

Regards,

Boris
Steven Price July 5, 2021, 8:51 a.m. UTC | #10
On 05/07/2021 09:43, Boris Brezillon wrote:
> Hi Steven,
> 
> On Mon, 5 Jul 2021 09:22:39 +0100
> Steven Price <steven.price@arm.com> wrote:
> 
>> On 02/07/2021 19:11, Boris Brezillon wrote:
>>> On Fri, 2 Jul 2021 12:49:55 -0400
>>> Alyssa Rosenzweig <alyssa@collabora.com> wrote:
>>>   
>>>>>> ```    
>>>>>>>  #define PANFROST_BO_REF_EXCLUSIVE	0x1
>>>>>>> +#define PANFROST_BO_REF_NO_IMPLICIT_DEP	0x2      
>>>>>> ```
>>>>>>
>>>>>> This seems logically backwards. NO_IMPLICIT_DEP makes sense if we're
>>>>>> trying to keep backwards compatibility, but here you're crafting a new
>>>>>> interface totally from scratch. If anything, isn't BO_REF_IMPLICIT_DEP
>>>>>> the flag you'd want?    
>>>>>
>>>>> AFAICT, all other drivers make the no-implicit-dep an opt-in, and I
>>>>> didn't want to do things differently in panfrost. But if that's really
>>>>> an issue, I can make it an opt-out.    
>>>>
>>>> I don't have strong feelings either way. I was just under the
>>>> impressions other drivers did this for b/w compat reasons which don't
>>>> apply here.  
>>>
>>> Okay, I think I'll keep it like that unless there's a strong reason to
>>> make no-implicit dep the default. It's safer to oversync than the skip
>>> the synchronization, so it does feel like something the user should
>>> explicitly enable.  
>>
>> I don't have strong feelings - ultimately the number of projects caring
>> about the uABI is so limited the "default" is pretty irrelevant (it's
>> not as if we are needing to guide random developers who are new to the
>> interface). But a conservative default seems sensible.
>>
>>>>  
>>>>>> Hmm. I'm not /opposed/ and I know kbase uses strides but it seems like
>>>>>> somewhat unwarranted complexity, and there is a combinatoric explosion
>>>>>> here (if jobs, bo refs, and syncobj refs use 3 different versions, as
>>>>>> this encoding permits... as opposed to just specifying a UABI version or
>>>>>> something like that)    
>>>>>
>>>>> Sounds like a good idea. I'll add a version field and map that
>>>>> to a <job_stride,bo_ref_stride,syncobj_ref_stride> tuple.    
>>>>
>>>> Cc Steven, does this make sense?  
>>>
>>> I have this approach working, and I must admit I prefer it to the
>>> per-object stride field passed to the submit struct.
>>>   
>>
>> There are benefits both ways:
>>
>>  * Version number: easier to think about, and less worries about
>> combinatorial explosion of possible options to test.
>>
>>  * Explicit structure sizes/strides: much harder to accidentally forgot
>> to change, clients 'naturally' move to newer versions just with recompiling.
> 
> The version I just sent has a PANFROST_SUBMIT_BATCH_VERSION macro
> defined in the the uAPI header, so getting right without changing the
> code should be fine (same has with the sizeof(struct xx)) trick with
> the per-desc stride approach).
> 
>>
>> For now I'd be tempted to go for the version number, but I suspect we
>> should also ensure there's a generic 'flags' field in there. That would
>> allow us to introduce new features/behaviours in a way which can be
>> backported more easily if necessary.
> 
> Adding features at the submit level without changing the version number
> is already possible (we can extend drm_panfrost_batch_submit without
> breaking the uABI), but I'm not sure that's a good idea...

Ah, yes I'd forgotten the ioctl itself already had the implicit
sizeof(struct) encoding. I guess there's no need for flags (now) because
it can be added later if it ever becomes useful.

> If you mean adding a flags field at the job level, I can add it, but I
> wonder what you have in mind when you say some features might be
> interesting to backport. I really thought we'd force people to update
> their kernel when they want those new features.

My concern is if we ever find a security bug which requires new
information/behaviour in the submit ABI to properly fix. In this case it
would be appropriate to backport a 'feature' (bug fix) which provides a
new ABI but it would need to be a small change. A flags field where we
can set a "PANFROST_ACTUALLY_BE_SECURE" bit would be useful then - but
we wouldn't want to start bumping version numbers in the backport.

But at least for now we could just assume we'll expand the ioctl struct
if we ever hit that situation, so no need for an explicit flags field.

Steve
Alyssa Rosenzweig July 6, 2021, 12:48 p.m. UTC | #11
> My concern is if we ever find a security bug which requires new
> information/behaviour in the submit ABI to properly fix. In this case it
> would be appropriate to backport a 'feature' (bug fix) which provides a
> new ABI but it would need to be a small change. A flags field where we
> can set a "PANFROST_ACTUALLY_BE_SECURE" bit would be useful then - but
> we wouldn't want to start bumping version numbers in the backport.
> 
> But at least for now we could just assume we'll expand the ioctl struct
> if we ever hit that situation, so no need for an explicit flags field.

I'm curious if kbase ever hit something like this? It wouldn't have
occurred to me as a possibility.
Steven Price July 7, 2021, 9:09 a.m. UTC | #12
On 06/07/2021 13:48, Alyssa Rosenzweig wrote:
>> My concern is if we ever find a security bug which requires new
>> information/behaviour in the submit ABI to properly fix. In this case it
>> would be appropriate to backport a 'feature' (bug fix) which provides a
>> new ABI but it would need to be a small change. A flags field where we
>> can set a "PANFROST_ACTUALLY_BE_SECURE" bit would be useful then - but
>> we wouldn't want to start bumping version numbers in the backport.
>>
>> But at least for now we could just assume we'll expand the ioctl struct
>> if we ever hit that situation, so no need for an explicit flags field.
> 
> I'm curious if kbase ever hit something like this? It wouldn't have
> occurred to me as a possibility.
> 

kbase (at least historically) didn't care about backwards compatibility
- so has tended to just break the ABI if necessary.

We have had workarounds such as BASE_HW_ISSUE_8987 (with the lovely
named DEFAULT_SECURE_BUT_LOSS_OF_PERFORMANCE flag) where the isolation
between address spaces was broken. It might have been reasonable in that
situation to have exposed a new flag which allows security sensitive
applications (e.g. the on-screen keyboard) to force the more secure mode
of operation (taking the performance hit) while not penalising other
applications.

But it's probably just my paranoia ;) All the serious security bugs I
can think of were genuine software bugs and could just be fixed.

Steve
diff mbox series

Patch

diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 6529e5972b47..e2897de6e77d 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -138,111 +138,95 @@  panfrost_get_job_mappings(struct drm_file *file_priv, struct panfrost_job *job)
 	return 0;
 }
 
-/**
- * panfrost_lookup_bos() - Sets up job->bo[] with the GEM objects
- * referenced by the job.
- * @dev: DRM device
- * @file_priv: DRM file for this fd
- * @args: IOCTL args
- * @job: job being set up
- *
- * Resolve handles from userspace to BOs and attach them to job.
- *
- * Note that this function doesn't need to unreference the BOs on
- * failure, because that will happen at panfrost_job_cleanup() time.
- */
+#define PANFROST_BO_REF_ALLOWED_FLAGS \
+	(PANFROST_BO_REF_EXCLUSIVE | PANFROST_BO_REF_NO_IMPLICIT_DEP)
+
 static int
-panfrost_lookup_bos(struct drm_device *dev,
-		  struct drm_file *file_priv,
-		  struct drm_panfrost_submit *args,
-		  struct panfrost_job *job)
+panfrost_get_job_bos(struct drm_file *file_priv,
+		     u64 refs, u32 ref_stride, u32 count,
+		     struct panfrost_job *job)
 {
+	void __user *in = u64_to_user_ptr(refs);
 	unsigned int i;
-	int ret;
 
-	job->bo_count = args->bo_handle_count;
+	job->bo_count = count;
 
-	if (!job->bo_count)
+	if (!count)
 		return 0;
 
+	job->bos = kvmalloc_array(job->bo_count, sizeof(*job->bos),
+				  GFP_KERNEL | __GFP_ZERO);
 	job->bo_flags = kvmalloc_array(job->bo_count,
 				       sizeof(*job->bo_flags),
 				       GFP_KERNEL | __GFP_ZERO);
-	if (!job->bo_flags)
+	if (!job->bos || !job->bo_flags)
 		return -ENOMEM;
 
-	for (i = 0; i < job->bo_count; i++)
-		job->bo_flags[i] = PANFROST_BO_REF_EXCLUSIVE;
+	for (i = 0; i < count; i++) {
+		struct drm_panfrost_bo_ref ref = { };
+		int ret;
 
-	ret = drm_gem_objects_lookup(file_priv,
-				     (void __user *)(uintptr_t)args->bo_handles,
-				     job->bo_count, &job->bos);
-	if (ret)
-		return ret;
+		ret = copy_struct_from_user(&ref, sizeof(ref),
+					    in + (i * ref_stride),
+					    ref_stride);
+		if (ret)
+			return ret;
 
-	return panfrost_get_job_mappings(file_priv, job);
+		/* Prior to the BATCH_SUBMIT ioctl all accessed BOs were
+		 * treated as exclusive.
+		 */
+		if (ref_stride == sizeof(u32))
+			ref.flags = PANFROST_BO_REF_EXCLUSIVE;
+
+		if ((ref.flags & ~PANFROST_BO_REF_ALLOWED_FLAGS))
+			return -EINVAL;
+
+		job->bos[i] = drm_gem_object_lookup(file_priv, ref.handle);
+		if (!job->bos[i])
+			return -EINVAL;
+
+		job->bo_flags[i] = ref.flags;
+	}
+
+	return 0;
 }
 
-/**
- * panfrost_copy_in_sync() - Sets up job->deps with the sync objects
- * referenced by the job.
- * @dev: DRM device
- * @file_priv: DRM file for this fd
- * @args: IOCTL args
- * @job: job being set up
- *
- * Resolve syncobjs from userspace to fences and attach them to job.
- *
- * Note that this function doesn't need to unreference the fences on
- * failure, because that will happen at panfrost_job_cleanup() time.
- */
 static int
-panfrost_copy_in_sync(struct drm_device *dev,
-		  struct drm_file *file_priv,
-		  struct drm_panfrost_submit *args,
-		  struct panfrost_job *job)
+panfrost_get_job_in_syncs(struct drm_file *file_priv,
+			  u64 refs, u32 ref_stride,
+			  u32 count, struct panfrost_job *job)
 {
-	u32 *handles;
-	int ret = 0;
-	int i, in_fence_count;
+	const void __user *in = u64_to_user_ptr(refs);
+	unsigned int i;
+	int ret;
 
-	in_fence_count = args->in_sync_count;
-
-	if (!in_fence_count)
+	if (!count)
 		return 0;
 
-	handles = kvmalloc_array(in_fence_count, sizeof(u32), GFP_KERNEL);
-	if (!handles) {
-		ret = -ENOMEM;
-		DRM_DEBUG("Failed to allocate incoming syncobj handles\n");
-		goto fail;
-	}
-
-	if (copy_from_user(handles,
-			   (void __user *)(uintptr_t)args->in_syncs,
-			   in_fence_count * sizeof(u32))) {
-		ret = -EFAULT;
-		DRM_DEBUG("Failed to copy in syncobj handles\n");
-		goto fail;
-	}
-
-	for (i = 0; i < in_fence_count; i++) {
+	for (i = 0; i < count; i++) {
+		struct drm_panfrost_syncobj_ref ref = { };
 		struct dma_fence *fence;
 
-		ret = drm_syncobj_find_fence(file_priv, handles[i], 0, 0,
-					     &fence);
+		ret = copy_struct_from_user(&ref, sizeof(ref),
+					    in + (i * ref_stride),
+					    ref_stride);
 		if (ret)
-			goto fail;
+			return ret;
+
+		if (ref.pad)
+			return -EINVAL;
+
+		ret = drm_syncobj_find_fence(file_priv, ref.handle, ref.point,
+					     0, &fence);
+		if (ret)
+			return ret;
 
 		ret = drm_gem_fence_array_add(&job->deps, fence);
-
 		if (ret)
-			goto fail;
+			return ret;
 	}
 
-fail:
-	kvfree(handles);
-	return ret;
+	return 0;
 }
 
 static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
@@ -289,11 +273,17 @@  static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
 	job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
 	job->file_priv = file->driver_priv;
 
-	ret = panfrost_copy_in_sync(dev, file, args, job);
+	ret = panfrost_get_job_in_syncs(file, args->in_syncs, sizeof(u32),
+					args->in_sync_count, job);
 	if (ret)
 		goto fail_job;
 
-	ret = panfrost_lookup_bos(dev, file, args, job);
+	ret = panfrost_get_job_bos(file, args->bo_handles, sizeof(u32),
+				   args->bo_handle_count, job);
+	if (ret)
+		goto fail_job;
+
+	ret = panfrost_get_job_mappings(file, job);
 	if (ret)
 		goto fail_job;
 
@@ -491,6 +481,225 @@  panfrost_ioctl_destroy_submitqueue(struct drm_device *dev, void *data,
 	return panfrost_submitqueue_destroy(priv, id);
 }
 
+struct panfrost_job_out_sync {
+	struct drm_syncobj *syncobj;
+	struct dma_fence_chain *chain;
+	u64 point;
+};
+
+static void
+panfrost_put_job_out_syncs(struct panfrost_job_out_sync *out_syncs, u32 count)
+{
+	unsigned int i;
+
+	for (i = 0; i < count; i++) {
+		if (!out_syncs[i].syncobj)
+			break;
+
+		drm_syncobj_put(out_syncs[i].syncobj);
+		kvfree(out_syncs[i].chain);
+	}
+
+	kvfree(out_syncs);
+}
+
+static struct panfrost_job_out_sync *
+panfrost_get_job_out_syncs(struct drm_file *file_priv,
+			   u64 refs, u32 ref_stride,
+			   u32 count)
+{
+	void __user *in = u64_to_user_ptr(refs);
+	struct panfrost_job_out_sync *out_syncs;
+	unsigned int i;
+	int ret;
+
+	if (!count)
+		return NULL;
+
+	out_syncs = kvmalloc_array(count, sizeof(*out_syncs),
+				   GFP_KERNEL | __GFP_ZERO);
+	if (!out_syncs)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < count; i++) {
+		struct drm_panfrost_syncobj_ref ref = { };
+
+		ret = copy_struct_from_user(&ref, sizeof(ref),
+					    in + (i * ref_stride),
+					    ref_stride);
+		if (ret)
+			goto err_free_out_syncs;
+
+		if (ref.pad) {
+			ret = -EINVAL;
+			goto err_free_out_syncs;
+		}
+
+		out_syncs[i].syncobj = drm_syncobj_find(file_priv, ref.handle);
+		if (!out_syncs[i].syncobj) {
+			ret = -EINVAL;
+			goto err_free_out_syncs;
+		}
+
+		out_syncs[i].point = ref.point;
+		if (!out_syncs[i].point)
+			continue;
+
+		out_syncs[i].chain = kmalloc(sizeof(*out_syncs[i].chain),
+					     GFP_KERNEL);
+		if (!out_syncs[i].chain) {
+			ret = -ENOMEM;
+			goto err_free_out_syncs;
+		}
+	}
+
+	return out_syncs;
+
+err_free_out_syncs:
+	panfrost_put_job_out_syncs(out_syncs, count);
+	return ERR_PTR(ret);
+}
+
+static void
+panfrost_set_job_out_fence(struct panfrost_job_out_sync *out_syncs,
+			   unsigned int count, struct dma_fence *fence)
+{
+	unsigned int i;
+
+	for (i = 0; i < count; i++) {
+		if (out_syncs[i].chain) {
+			drm_syncobj_add_point(out_syncs[i].syncobj,
+					      out_syncs[i].chain,
+					      fence, out_syncs[i].point);
+			out_syncs[i].chain = NULL;
+		} else {
+			drm_syncobj_replace_fence(out_syncs[i].syncobj,
+						  fence);
+		}
+	}
+}
+
+#define PANFROST_JD_ALLOWED_REQS PANFROST_JD_REQ_FS
+
+static int
+panfrost_submit_job(struct drm_device *dev, struct drm_file *file_priv,
+		    struct panfrost_submitqueue *queue,
+		    const struct drm_panfrost_job *args,
+		    u32 bo_stride, u32 syncobj_stride)
+{
+	struct panfrost_device *pfdev = dev->dev_private;
+	struct panfrost_job_out_sync *out_syncs;
+	struct panfrost_job *job;
+	int ret;
+
+	if (!args->head)
+		return -EINVAL;
+
+	if (args->requirements & ~PANFROST_JD_ALLOWED_REQS)
+		return -EINVAL;
+
+	job = kzalloc(sizeof(*job), GFP_KERNEL);
+	if (!job)
+		return -ENOMEM;
+
+	kref_init(&job->refcount);
+
+	job->pfdev = pfdev;
+	job->jc = args->head;
+	job->requirements = args->requirements;
+	job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
+	job->file_priv = file_priv->driver_priv;
+	xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
+
+	ret = panfrost_get_job_in_syncs(file_priv,
+					args->in_syncs,
+					syncobj_stride,
+					args->in_sync_count,
+					job);
+	if (ret)
+		goto err_put_job;
+
+	out_syncs = panfrost_get_job_out_syncs(file_priv,
+					       args->out_syncs,
+					       syncobj_stride,
+					       args->out_sync_count);
+	if (IS_ERR(out_syncs)) {
+		ret = PTR_ERR(out_syncs);
+		goto err_put_job;
+	}
+
+	ret = panfrost_get_job_bos(file_priv, args->bos, bo_stride,
+				   args->bo_count, job);
+	if (ret)
+		goto err_put_out_syncs; /* don't leak the out-sync refs */
+
+	ret = panfrost_get_job_mappings(file_priv, job);
+	if (ret)
+		goto err_put_out_syncs;
+
+	ret = panfrost_job_push(queue, job);
+	if (ret)
+		goto err_put_out_syncs;
+
+	panfrost_set_job_out_fence(out_syncs, args->out_sync_count,
+				   job->render_done_fence);
+	panfrost_put_job_out_syncs(out_syncs, args->out_sync_count);
+	return 0;
+
+err_put_out_syncs: /* release syncobj refs and preallocated chains */
+	panfrost_put_job_out_syncs(out_syncs, args->out_sync_count);
+err_put_job:
+	panfrost_job_put(job);
+	return ret;
+}
+
+static int
+panfrost_ioctl_batch_submit(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv)
+{
+	struct drm_panfrost_batch_submit *args = data;
+	void __user *jobs_args = u64_to_user_ptr(args->jobs);
+	struct panfrost_submitqueue *queue;
+	unsigned int i;
+	int ret = 0; /* stays 0 when job_count == 0 */
+
+	/* Relax this test if new fields are added to
+	 * drm_panfrost_{bo_ref,syncobj_ref,job}.
+	 */
+	if (args->job_stride < sizeof(struct drm_panfrost_job) ||
+	    args->bo_ref_stride < sizeof(struct drm_panfrost_bo_ref) ||
+	    args->syncobj_ref_stride < sizeof(struct drm_panfrost_syncobj_ref))
+		return -EINVAL;
+
+	queue = panfrost_submitqueue_get(file_priv->driver_priv, args->queue);
+	if (IS_ERR(queue))
+		return PTR_ERR(queue);
+
+	for (i = 0; i < args->job_count; i++) {
+		struct drm_panfrost_job job_args = { };
+
+		ret = copy_struct_from_user(&job_args, sizeof(job_args),
+					    jobs_args + (i * args->job_stride),
+					    args->job_stride);
+		if (ret) {
+			args->fail_idx = i;
+			goto out_put_queue;
+		}
+
+		ret = panfrost_submit_job(dev, file_priv, queue, &job_args,
+					  args->bo_ref_stride,
+					  args->syncobj_ref_stride);
+		if (ret) {
+			args->fail_idx = i;
+			goto out_put_queue;
+		}
+	}
+
+out_put_queue:
+	panfrost_submitqueue_put(queue);
+	return ret; /* was 'return 0': errors must reach userspace */
+}
+
 int panfrost_unstable_ioctl_check(void)
 {
 	if (!unstable_ioctls)
@@ -570,6 +779,7 @@  static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
 	PANFROST_IOCTL(MADVISE,		madvise,	DRM_RENDER_ALLOW),
 	PANFROST_IOCTL(CREATE_SUBMITQUEUE, create_submitqueue, DRM_RENDER_ALLOW),
 	PANFROST_IOCTL(DESTROY_SUBMITQUEUE, destroy_submitqueue, DRM_RENDER_ALLOW),
+	PANFROST_IOCTL(BATCH_SUBMIT,	batch_submit,	DRM_RENDER_ALLOW),
 };
 
 DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops);
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 56ae89272e19..4e1540bce865 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -254,6 +254,9 @@  static int panfrost_acquire_object_fences(struct panfrost_job *job)
 				return ret;
 		}
 
+		if (job->bo_flags[i] & PANFROST_BO_REF_NO_IMPLICIT_DEP)
+			continue;
+
 		ret = drm_gem_fence_array_add_implicit(&job->deps, job->bos[i],
 						       exclusive);
 		if (ret)
diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
index 7ee02fd1ac75..ce0c1b96a58c 100644
--- a/include/uapi/drm/panfrost_drm.h
+++ b/include/uapi/drm/panfrost_drm.h
@@ -23,6 +23,7 @@  extern "C" {
 #define DRM_PANFROST_MADVISE			0x08
 #define DRM_PANFROST_CREATE_SUBMITQUEUE		0x09
 #define DRM_PANFROST_DESTROY_SUBMITQUEUE	0x0a
+#define DRM_PANFROST_BATCH_SUBMIT		0x0b
 
 #define DRM_IOCTL_PANFROST_SUBMIT		DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit)
 #define DRM_IOCTL_PANFROST_WAIT_BO		DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo)
@@ -33,6 +34,7 @@  extern "C" {
 #define DRM_IOCTL_PANFROST_MADVISE		DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MADVISE, struct drm_panfrost_madvise)
 #define DRM_IOCTL_PANFROST_CREATE_SUBMITQUEUE	DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_CREATE_SUBMITQUEUE, struct drm_panfrost_create_submitqueue)
 #define DRM_IOCTL_PANFROST_DESTROY_SUBMITQUEUE	DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_DESTROY_SUBMITQUEUE, __u32)
+#define DRM_IOCTL_PANFROST_BATCH_SUBMIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_BATCH_SUBMIT, struct drm_panfrost_batch_submit)
 
 /*
  * Unstable ioctl(s): only exposed when the unsafe unstable_ioctls module
@@ -241,7 +243,89 @@  struct drm_panfrost_create_submitqueue {
 	__u32 id;	/* out, identifier */
 };
 
+/* Syncobj reference passed at job submission time to encode explicit
+ * input/output fences.
+ */
+struct drm_panfrost_syncobj_ref {
+	__u32 handle;
+	__u32 pad;
+	__u64 point;
+};
+
 #define PANFROST_BO_REF_EXCLUSIVE	0x1
+#define PANFROST_BO_REF_NO_IMPLICIT_DEP	0x2
+
+/* Describes a BO referenced by a job and the type of access. */
+struct drm_panfrost_bo_ref {
+	__u32 handle;
+	__u32 flags;
+};
+
+/* Describes a GPU job and the resources attached to it. */
+struct drm_panfrost_job {
+	/** GPU pointer to the head of the job chain. */
+	__u64 head;
+
+	/**
+	 * Array of drm_panfrost_bo_ref objects describing the BOs referenced
+	 * by this job.
+	 */
+	__u64 bos;
+
+	/**
+	 * Arrays of drm_panfrost_syncobj_ref objects describing the input
+	 * and output fences.
+	 */
+	__u64 in_syncs;
+	__u64 out_syncs;
+
+	/** Syncobj reference array sizes. */
+	__u32 in_sync_count;
+	__u32 out_sync_count;
+
+	/** BO reference array size. */
+	__u32 bo_count;
+
+	/** Combination of PANFROST_JD_REQ_* flags. */
+	__u32 requirements;
+};
+
+/* Used to submit multiple jobs in one call */
+struct drm_panfrost_batch_submit {
+	/**
+	 * Stride of the jobs array (needed to ease extension of the
+	 * BATCH_SUBMIT ioctl). Should be set to
+	 * sizeof(struct drm_panfrost_job).
+	 */
+	__u32 job_stride;
+
+	/** Number of jobs to submit. */
+	__u32 job_count;
+
+	/* Pointer to a job array. */
+	__u64 jobs;
+
+	/**
+	 * Stride of the BO and syncobj reference arrays (needed to ease
+	 * extension of the BATCH_SUBMIT ioctl). Should be set to
+	 * sizeof(struct drm_panfrost_bo_ref).
+	 */
+	__u32 bo_ref_stride;
+	__u32 syncobj_ref_stride;
+
+	/**
+	 * If the submission fails, this encodes the index of the job
+	 * failed.
+	 */
+	__u32 fail_idx;
+
+	/**
+	 * ID of the queue to submit those jobs to. 0 is the default
+	 * submit queue and should always exists. If you need a dedicated
+	 * queue, create it with DRM_IOCTL_PANFROST_CREATE_SUBMITQUEUE.
+	 */
+	__u32 queue;
+};
 
 #if defined(__cplusplus)
 }