@@ -46,7 +46,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
if (!submit)
return ERR_PTR(-ENOMEM);
- ret = drm_sched_job_init(&submit->base, &queue->entity, queue);
+ ret = drm_sched_job_init(&submit->base, queue->entity, queue);
if (ret) {
kfree(submit);
return ERR_PTR(ret);
@@ -290,6 +290,19 @@ struct msm_file_private {
struct msm_gem_address_space *aspace;
struct kref ref;
int seqno;
+
+ /**
+ * entities:
+ *
+ * Table of per-priority-level sched entities used by submitqueues
+ * associated with this &drm_file. Because some userspace apps
+ * make assumptions about rendering from multiple gl contexts
+ * (of the same priority) within the process happening in FIFO
+ * order without requiring any fencing beyond MakeCurrent(), we
+ * create at most one &drm_sched_entity per-process per-priority-
+ * level.
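+ *
+ * Entries are created on demand (see get_sched_entity()) and
+ * released in __msm_file_private_destroy().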
+ */
+ struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
};
/**
@@ -370,7 +383,7 @@ struct msm_gpu_submitqueue {
struct idr fence_idr;
struct mutex lock;
struct kref ref;
- struct drm_sched_entity entity;
+ struct drm_sched_entity *entity;
};
struct msm_gpu_state_bo {
@@ -471,14 +484,7 @@ void msm_submitqueue_close(struct msm_file_private *ctx);
void msm_submitqueue_destroy(struct kref *kref);
-static inline void __msm_file_private_destroy(struct kref *kref)
-{
- struct msm_file_private *ctx = container_of(kref,
- struct msm_file_private, ref);
-
- msm_gem_address_space_put(ctx->aspace);
- kfree(ctx);
-}
+void __msm_file_private_destroy(struct kref *kref);
static inline void msm_file_private_put(struct msm_file_private *ctx)
{
@@ -7,6 +7,24 @@
#include "msm_gpu.h"
+void __msm_file_private_destroy(struct kref *kref)
+{
+ struct msm_file_private *ctx = container_of(kref,
+ struct msm_file_private, ref);
+ int i;
+
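+ /* Entities are only allocated when a submitqueue first uses a
+ * given (ring, priority) pair, so unused slots are still NULL.
+ */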
+ for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) {
+ if (!ctx->entities[i])
+ continue;
+
+ drm_sched_entity_destroy(ctx->entities[i]);
+ kfree(ctx->entities[i]);
+ }
+
+ msm_gem_address_space_put(ctx->aspace);
+ kfree(ctx);
+}
+
void msm_submitqueue_destroy(struct kref *kref)
{
struct msm_gpu_submitqueue *queue = container_of(kref,
@@ -14,8 +32,6 @@ void msm_submitqueue_destroy(struct kref *kref)
idr_destroy(&queue->fence_idr);
- drm_sched_entity_destroy(&queue->entity);
-
msm_file_private_put(queue->ctx);
kfree(queue);
@@ -61,13 +77,47 @@ void msm_submitqueue_close(struct msm_file_private *ctx)
}
}
+static struct drm_sched_entity *
+get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
+ unsigned ring_nr, enum drm_sched_priority sched_prio)
+{
+ static DEFINE_MUTEX(entity_lock);
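+ /* The entity table is ring-major: ring_nr selects a block of
+ * NR_SCHED_PRIORITIES slots and sched_prio indexes within it.
+ */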
+ unsigned idx = (ring_nr * NR_SCHED_PRIORITIES) + sched_prio;
+
+ /* We should have already validated that the requested priority is
+ * valid by the time we get here.
+ */
+ if (WARN_ON(idx >= ARRAY_SIZE(ctx->entities)))
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&entity_lock);
+
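+ /* Create the entity on first use of this (ring, priority) pair;
+ * subsequent submitqueues in the process with the same priority
+ * reuse it.
+ */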
+ if (!ctx->entities[idx]) {
+ struct drm_sched_entity *entity;
+ struct drm_gpu_scheduler *sched = &ring->sched;
+ int ret;
+
+ entity = kzalloc(sizeof(*ctx->entities[idx]), GFP_KERNEL);
+ if (!entity) {
+ mutex_unlock(&entity_lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL);
+ if (ret) {
+ mutex_unlock(&entity_lock);
+ kfree(entity);
+ return ERR_PTR(ret);
+ }
+
+ ctx->entities[idx] = entity;
+ }
+
+ mutex_unlock(&entity_lock);
+
+ return ctx->entities[idx];
+}
+
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
u32 prio, u32 flags, u32 *id)
{
struct msm_drm_private *priv = drm->dev_private;
struct msm_gpu_submitqueue *queue;
- struct msm_ringbuffer *ring;
- struct drm_gpu_scheduler *sched;
enum drm_sched_priority sched_prio;
unsigned ring_nr;
int ret;
@@ -91,12 +141,10 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
queue->flags = flags;
queue->ring_nr = ring_nr;
- ring = priv->gpu->rb[ring_nr];
- sched = &ring->sched;
-
- ret = drm_sched_entity_init(&queue->entity,
- sched_prio, &sched, 1, NULL);
- if (ret) {
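+ /* The entity is shared with other submitqueues of the same
+ * priority in this process and is only torn down with the
+ * msm_file_private, so the queue must not destroy it itself.
+ */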
+ queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
+ ring_nr, sched_prio);
+ if (IS_ERR(queue->entity)) {
+ ret = PTR_ERR(queue->entity);
kfree(queue);
return ret;
}