
[09/13] drm/msm: Add drawqueues

Message ID 1494275709-25782-10-git-send-email-jcrouse@codeaurora.org (mailing list archive)
State New, archived

Commit Message

Jordan Crouse May 8, 2017, 8:35 p.m. UTC
Currently the priority and other behavior of a command stream are
specified by the user application during submission, and the
application is expected to internally maintain the settings for
each 'context' or 'rendering queue' and specify the correct ones
with every submission.

This works okay for simple cases, but as applications become more
complex we might want to start doing permission checks for some
operations. For example, we may want to restrict access to the
highest priority ringbuffer to processes identified as the
master (usually the compositor). This would be easiest if we did
the check once and then reused the settings for each submission.
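
(As a purely hypothetical sketch of such a check, this is roughly
what it could look like in the drawqueue creation path; the helper
drm_is_current_master() exists in DRM core, but the check itself and
the 'max_prio' value are illustrative only and not part of this
patch:)

  /* hypothetical: reserve the highest priority level for the master */
  if (args->prio == max_prio && !drm_is_current_master(file))
          return -EPERM;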

Add kernel-side draw queues that are analogous to the 'contexts' or
'rendering queues' on the application side. Each file descriptor
instance maintains its own list of draw queues, with IDs allocated
starting at 1. Draw queues cannot be shared between file descriptors.

For backwards compatibility, context id '0' is defined as a default
context with middle priority and no special flags. This is intended
to be the usual configuration for 99% of applications, so that a
garden-variety application can function correctly without creating
a draw queue. Only applications that intend to use different
priority levels or submission flags need to create and use a "new"
draw queue.
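
For illustration, here is a minimal userspace sketch of the new
interface (hypothetical example code, not part of the patch; it
assumes libdrm's drmIoctl() and an already-open render node fd, plus
the uapi definitions added below):

  #include <errno.h>
  #include <xf86drm.h>
  #include "msm_drm.h"

  /* Create a draw queue at the given priority; returns its id on
   * success or a negative error code on failure.
   */
  static int drawqueue_create(int fd, __u32 prio)
  {
          struct drm_msm_drawqueue req = {
                  .flags = 0,   /* no MSM_DRAWQUEUE_* flags defined yet */
                  .prio = prio,
          };

          if (drmIoctl(fd, DRM_IOCTL_MSM_DRAWQUEUE_NEW, &req))
                  return -errno;

          return req.id;        /* per-instance ids start at 1 */
  }

A submission then selects the queue through the new 'drawid' field of
struct drm_msm_gem_submit (leaving it at 0 uses the default
middle-priority context), and the queue is torn down by passing the
same id to DRM_IOCTL_MSM_DRAWQUEUE_CLOSE.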

Missing from this patch is the 'queue' part of a 'drawqueue'. The
code isn't yet at the point where we need individual render queues
per file descriptor instance, but if we did, this is where they
would go, so there is no harm in laying the groundwork for the
future.

Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
---
 drivers/gpu/drm/msm/msm_drv.c        | 99 +++++++++++++++++++++++++++++++++---
 drivers/gpu/drm/msm/msm_drv.h        |  8 ++-
 drivers/gpu/drm/msm/msm_gem_submit.c | 32 ++++++++++++
 drivers/gpu/drm/msm/msm_gpu.h        |  7 +++
 include/uapi/drm/msm_drm.h           | 20 ++++++++
 5 files changed, 155 insertions(+), 11 deletions(-)

Patch

diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 005ae06..3e97123 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -509,22 +509,51 @@  static void load_gpu(struct drm_device *dev)
 	mutex_unlock(&init_lock);
 }
 
-static int msm_open(struct drm_device *dev, struct drm_file *file)
+static int context_init(struct drm_file *file)
 {
 	struct msm_file_private *ctx;
 
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&ctx->drawqueue_list);
+	rwlock_init(&ctx->lock);
+
+	ctx->drawqueueid = 1;
+
+	file->driver_priv = ctx;
+
+	return 0;
+}
+
+static int msm_open(struct drm_device *dev, struct drm_file *file)
+{
 	/* For now, load gpu on open.. to avoid the requirement of having
 	 * firmware in the initrd.
 	 */
 	load_gpu(dev);
 
-	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	return context_init(file);
+}
+
+static void context_close(struct msm_file_private *ctx)
+{
+	struct msm_gpu_drawqueue *entry, *tmp;
+
 	if (!ctx)
-		return -ENOMEM;
+		return;
 
-	file->driver_priv = ctx;
+	/*
+	 * A lock isn't needed here because we are in postclose and we
+	 * don't have to worry about any ioctls coming along
+	 */
+	list_for_each_entry_safe(entry, tmp, &ctx->drawqueue_list, node) {
+		list_del(&entry->node);
+		kfree(entry);
+	}
 
-	return 0;
+	kfree(ctx);
 }
 
 static void msm_postclose(struct drm_device *dev, struct drm_file *file)
@@ -537,7 +566,7 @@  static void msm_postclose(struct drm_device *dev, struct drm_file *file)
 		priv->lastctx = NULL;
 	mutex_unlock(&dev->struct_mutex);
 
-	kfree(ctx);
+	context_close(ctx);
 }
 
 static void msm_lastclose(struct drm_device *dev)
@@ -782,6 +811,62 @@  static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
 	return ret;
 }
 
+static int msm_ioctl_drawqueue_new(struct drm_device *dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_msm_drawqueue *args = data;
+	struct msm_file_private *ctx = file->driver_priv;
+	struct msm_gpu_drawqueue *drawqueue;
+
+	if (args->flags & ~MSM_DRAWQUEUE_FLAGS)
+		return -EINVAL;
+
+	drawqueue = kzalloc(sizeof(*drawqueue), GFP_KERNEL);
+	if (!drawqueue)
+		return -ENOMEM;
+
+	drawqueue->flags = args->flags;
+	drawqueue->prio = args->prio;
+
+	write_lock(&ctx->lock);
+
+	/* per-instance IDs are allocated starting at 1 */
+	drawqueue->id = ctx->drawqueueid++;
+	list_add_tail(&drawqueue->node, &ctx->drawqueue_list);
+	args->id = drawqueue->id;
+
+	write_unlock(&ctx->lock);
+
+	return 0;
+}
+
+static int msm_ioctl_drawqueue_close(struct drm_device *dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_msm_drawqueue *args = data;
+	struct msm_file_private *ctx = file->driver_priv;
+	struct msm_gpu_drawqueue *entry;
+	int ret = -EINVAL;
+
+	if (!args->id)
+		return -EINVAL;
+
+	write_lock(&ctx->lock);
+
+	list_for_each_entry(entry, &ctx->drawqueue_list, node) {
+		if (entry->id == args->id) {
+			list_del(&entry->node);
+			kfree(entry);
+			ret = 0;
+			break;
+		}
+	}
+
+	write_unlock(&ctx->lock);
+
+	return ret;
+}
+
 static const struct drm_ioctl_desc msm_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
@@ -791,6 +876,8 @@  static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
 	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_DRAWQUEUE_NEW,   msm_ioctl_drawqueue_new,   DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_DRAWQUEUE_CLOSE, msm_ioctl_drawqueue_close, DRM_AUTH|DRM_RENDER_ALLOW),
 };
 
 static const struct vm_operations_struct vm_ops = {
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 69e839c..192147c 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -58,11 +58,9 @@ 
 #define NUM_DOMAINS 2    /* one for KMS, then one per gpu core (?) */
 
 struct msm_file_private {
-	/* currently we don't do anything useful with this.. but when
-	 * per-context address spaces are supported we'd keep track of
-	 * the context's page-tables here.
-	 */
-	int dummy;
+	rwlock_t lock;
+	struct list_head drawqueue_list;
+	int drawqueueid;
 };
 
 enum msm_mdp_plane_property {
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 4c2525e..0129ca2 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -378,6 +378,33 @@  static void submit_cleanup(struct msm_gem_submit *submit)
 	ww_acquire_fini(&submit->ticket);
 }
 
+static int drawqueue_get_info(struct msm_file_private *ctx,
+		u32 id, u32 *prio)
+{
+	struct msm_gpu_drawqueue *entry;
+	int ret = -EINVAL;
+
+	if (!id) {
+		*prio = 0;
+		return 0;
+	}
+
+	read_lock(&ctx->lock);
+
+	list_for_each_entry(entry, &ctx->drawqueue_list, node) {
+		if (entry->id != id)
+			continue;
+
+		*prio = entry->prio;
+
+		ret = 0;
+		break;
+	}
+
+	read_unlock(&ctx->lock);
+	return ret;
+}
+
 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		struct drm_file *file)
 {
@@ -390,6 +417,7 @@  int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	struct sync_file *sync_file = NULL;
 	int out_fence_fd = -1;
 	unsigned i;
+	u32 prio = 0;
 	int ret;
 
 	if (!gpu)
@@ -404,6 +432,10 @@  int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
 		return -EINVAL;
 
+	ret = drawqueue_get_info(ctx, args->drawid, &prio);
+	if (ret)
+		return ret;
+
 	if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
 		in_fence = sync_file_get_fence(args->fence_fd);
 
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index df4e277..ca07a21 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -132,6 +132,13 @@  struct msm_gpu {
 	struct list_head submit_list;
 };
 
+struct msm_gpu_drawqueue {
+	int id;
+	u32 flags;
+	u32 prio;
+	struct list_head node;
+};
+
 static inline bool msm_gpu_active(struct msm_gpu *gpu)
 {
 	return gpu->fctx->last_fence > gpu->funcs->last_fence(gpu);
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 26c54f6..cad0133 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -218,6 +218,7 @@  struct drm_msm_gem_submit {
 	__u64 __user bos;     /* in, ptr to array of submit_bo's */
 	__u64 __user cmds;    /* in, ptr to array of submit_cmd's */
 	__s32 fence_fd;       /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
+	__u32 drawid;         /* in, drawqueue id */
 };
 
 /* The normal way to synchronize with the GPU is just to CPU_PREP on
@@ -254,6 +255,21 @@  struct drm_msm_gem_madvise {
 	__u32 retained;       /* out, whether backing store still exists */
 };
 
+/*
+ * Draw queues allow the user to set specific submission parameters. Each
+ * command submission specifies the drawqueue to use.  ID 0 is reserved for
+ * backwards compatibility as a "default" drawqueue with medium priority and
+ * no special flags.
+ */
+
+#define MSM_DRAWQUEUE_FLAGS (0)
+
+struct drm_msm_drawqueue {
+	__u32 flags;   /* in, MSM_DRAWQUEUE_x */
+	__u32 prio;    /* in, Priority level */
+	__u32 id;      /* out, identifier */
+};
+
 #define DRM_MSM_GET_PARAM              0x00
 /* placeholder:
 #define DRM_MSM_SET_PARAM              0x01
@@ -265,6 +281,8 @@  struct drm_msm_gem_madvise {
 #define DRM_MSM_GEM_SUBMIT             0x06
 #define DRM_MSM_WAIT_FENCE             0x07
 #define DRM_MSM_GEM_MADVISE            0x08
+#define DRM_MSM_DRAWQUEUE_NEW          0x09
+#define DRM_MSM_DRAWQUEUE_CLOSE        0x0A
 
 #define DRM_IOCTL_MSM_GET_PARAM        DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
 #define DRM_IOCTL_MSM_GEM_NEW          DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
@@ -274,6 +292,8 @@  struct drm_msm_gem_madvise {
 #define DRM_IOCTL_MSM_GEM_SUBMIT       DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
 #define DRM_IOCTL_MSM_WAIT_FENCE       DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
 #define DRM_IOCTL_MSM_GEM_MADVISE      DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise)
+#define DRM_IOCTL_MSM_DRAWQUEUE_NEW    DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_DRAWQUEUE_NEW, struct drm_msm_drawqueue)
+#define DRM_IOCTL_MSM_DRAWQUEUE_CLOSE  DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_DRAWQUEUE_CLOSE, struct drm_msm_drawqueue)
 
 #if defined(__cplusplus)
 }