@@ -90,8 +90,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
if (ring == &adev->gfx.kiq.ring)
continue;
- r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
- rq, &ctx->guilty);
+ r = drm_sched_entity_init(&ctx->rings[i].entity,
+ &rq, 1, &ctx->guilty);
if (r)
goto failed;
}
@@ -140,8 +140,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
ring = adev->mman.buffer_funcs_ring;
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
- r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
- rq, NULL);
+ r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
if (r) {
DRM_ERROR("Failed setting up TTM BO move run queue.\n");
goto error_entity;
@@ -266,8 +266,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
ring = &adev->uvd.inst[j].ring;
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
- rq, NULL);
+ r = drm_sched_entity_init(&adev->uvd.inst[j].entity, &rq,
+ 1, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
return r;
@@ -190,8 +190,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
ring = &adev->vce.ring[0];
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
- rq, NULL);
+ r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up VCE run queue.\n");
return r;
@@ -2562,8 +2562,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
ring_instance %= adev->vm_manager.vm_pte_num_rings;
ring = adev->vm_manager.vm_pte_rings[ring_instance];
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
- r = drm_sched_entity_init(&ring->sched, &vm->entity,
- rq, NULL);
+ r = drm_sched_entity_init(&vm->entity, &rq, 1, NULL);
if (r)
return r;
@@ -429,8 +429,8 @@ static int uvd_v6_0_sw_init(void *handle)
struct drm_sched_rq *rq;
ring = &adev->uvd.inst->ring_enc[0];
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc,
- rq, NULL);
+ r = drm_sched_entity_init(&adev->uvd.inst->entity_enc,
+ &rq, 1, NULL);
if (r) {
DRM_ERROR("Failed setting up UVD ENC run queue.\n");
return r;
@@ -431,8 +431,8 @@ static int uvd_v7_0_sw_init(void *handle)
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
ring = &adev->uvd.inst[j].ring_enc[0];
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity_enc,
- rq, NULL);
+ r = drm_sched_entity_init(&adev->uvd.inst[j].entity_enc,
+ &rq, 1, NULL);
if (r) {
DRM_ERROR("(%d)Failed setting up UVD ENC run queue.\n", j);
return r;
@@ -49,12 +49,12 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
for (i = 0; i < ETNA_MAX_PIPES; i++) {
struct etnaviv_gpu *gpu = priv->gpu[i];
+ struct drm_sched_rq *rq;
if (gpu) {
- drm_sched_entity_init(&gpu->sched,
- &ctx->sched_entity[i],
- &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
- NULL);
+ rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ drm_sched_entity_init(&ctx->sched_entity[i],
+ &rq, 1, NULL);
}
}
@@ -162,26 +162,30 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
* drm_sched_entity_init - Init a context entity used by the scheduler
* when submitting jobs to a HW ring.
*
- * @sched: scheduler instance
* @entity: scheduler entity to init
- * @rq: the run queue this entity belongs
+ * @rq_list: the list of run queues on which jobs from this
+ * entity can be submitted
+ * @num_rq_list: number of run queues in rq_list
* @guilty: atomic_t set to 1 when a job on this queue
* is found to be guilty causing a timeout
*
+ * Note: the rq_list should have at least one element for the
+ * entity to be scheduled
+ *
* Returns 0 on success or a negative error code on failure.
*/
-int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity,
- struct drm_sched_rq *rq,
+int drm_sched_entity_init(struct drm_sched_entity *entity,
+ struct drm_sched_rq **rq_list,
+ unsigned int num_rq_list,
atomic_t *guilty)
{
- if (!(sched && entity && rq))
+ if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
return -EINVAL;
memset(entity, 0, sizeof(struct drm_sched_entity));
INIT_LIST_HEAD(&entity->list);
- entity->rq = rq;
- entity->sched = sched;
+ entity->rq = rq_list[0];
+ entity->sched = rq_list[0]->sched;
entity->guilty = guilty;
entity->last_scheduled = NULL;
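
Every call site converted above still passes a single run queue as a one-element array (the `&rq, 1` pattern in the etnaviv and v3d hunks). The new signature also accepts a longer list of candidate run queues; the sketch below is hypothetical and not part of this patch (the `sched0`, `sched1`, and `entity` names are illustrative). Note that with this patch only rq_list[0] is consumed at init time; the array form is what later scheduler work can pick from:

	/* Hypothetical driver code, not from this patch: init an entity
	 * whose jobs could be placed on either scheduler's NORMAL run
	 * queue. drm_sched_entity_init() records rq_list[0] as the
	 * initial run queue and derives the scheduler from it.
	 */
	struct drm_sched_rq *rq_list[] = {
		&sched0->sched_rq[DRM_SCHED_PRIORITY_NORMAL],
		&sched1->sched_rq[DRM_SCHED_PRIORITY_NORMAL],
	};
	int r;

	r = drm_sched_entity_init(&entity, rq_list, ARRAY_SIZE(rq_list), NULL);
	if (r)
		return r;
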
@@ -123,6 +123,7 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv;
+ struct drm_sched_rq *rq;
int i;
v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
@@ -132,10 +133,8 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
v3d_priv->v3d = v3d;
for (i = 0; i < V3D_MAX_QUEUES; i++) {
- drm_sched_entity_init(&v3d->queue[i].sched,
- &v3d_priv->sched_entity[i],
- &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
- NULL);
+ rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ drm_sched_entity_init(&v3d_priv->sched_entity[i], &rq, 1, NULL);
}
file->driver_priv = v3d_priv;
@@ -282,9 +282,9 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
const char *name);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
-int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity,
- struct drm_sched_rq *rq,
+int drm_sched_entity_init(struct drm_sched_entity *entity,
+ struct drm_sched_rq **rq_list,
+ unsigned int num_rq_list,
atomic_t *guilty);
long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity, long timeout);