@@ -284,7 +284,7 @@ static ktime_t amdgpu_ctx_fini_entity(struct amdgpu_device *adev,
 	}
 
 	amdgpu_xcp_release_sched(adev, entity);
-
+	drm_sched_entity_destroy(&entity->entity);
 	kfree(entity);
 	return res;
 }
@@ -503,24 +503,6 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
 	return r;
 }
 
-static void amdgpu_ctx_do_release(struct kref *ref)
-{
-	struct amdgpu_ctx *ctx;
-	u32 i, j;
-
-	ctx = container_of(ref, struct amdgpu_ctx, refcount);
-	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
-		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
-			if (!ctx->entities[i][j])
-				continue;
-
-			drm_sched_entity_destroy(&ctx->entities[i][j]->entity);
-		}
-	}
-
-	amdgpu_ctx_fini(ref);
-}
-
 static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
 {
 	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
@@ -529,7 +511,7 @@ static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
 	mutex_lock(&mgr->lock);
 	ctx = idr_remove(&mgr->ctx_handles, id);
 	if (ctx)
-		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
+		kref_put(&ctx->refcount, amdgpu_ctx_fini);
 	mutex_unlock(&mgr->lock);
 	return ctx ? 0 : -EINVAL;
 }
@@ -742,7 +724,7 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
 	if (ctx == NULL)
 		return -EINVAL;
 
-	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
+	kref_put(&ctx->refcount, amdgpu_ctx_fini);
 	return 0;
 }
 
@@ -911,45 +893,12 @@ long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
 	return timeout;
 }
 
-void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
-{
-	struct amdgpu_ctx *ctx;
-	struct idr *idp;
-	uint32_t id, i, j;
-
-	idp = &mgr->ctx_handles;
-
-	idr_for_each_entry(idp, ctx, id) {
-		if (kref_read(&ctx->refcount) != 1) {
-			DRM_ERROR("ctx %p is still alive\n", ctx);
-			continue;
-		}
-
-		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
-			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
-				struct drm_sched_entity *entity;
-
-				if (!ctx->entities[i][j])
-					continue;
-
-				entity = &ctx->entities[i][j]->entity;
-				drm_sched_entity_fini(entity);
-			}
-		}
-	}
-}
-
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
 {
 	struct amdgpu_ctx *ctx;
-	struct idr *idp;
 	uint32_t id;
 
-	amdgpu_ctx_mgr_entity_fini(mgr);
-
-	idp = &mgr->ctx_handles;
-
-	idr_for_each_entry(idp, ctx, id) {
+	idr_for_each_entry(&mgr->ctx_handles, ctx, id) {
 		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
 			DRM_ERROR("ctx %p is still alive\n", ctx);
 	}
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -92,7 +92,6 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
 
 void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
 			 struct amdgpu_device *adev);
-void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
 long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
 void amdgpu_ctx_mgr_usage(struct amdgpu_ctx_mgr *mgr,
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -1401,6 +1401,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 		return;
 
 	pm_runtime_get_sync(dev->dev);
+	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
 
 	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
 		amdgpu_uvd_free_handles(adev, file_priv);
@@ -1424,7 +1425,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 		amdgpu_bo_unreserve(pd);
 	}
 
-	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
 	amdgpu_vm_fini(adev, &fpriv->vm);
 
 	if (pasid)
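
For reference, the cleanup above relies on the standard kref release-callback pattern: once drm_sched_entity_destroy() is called from amdgpu_ctx_fini_entity(), the plain amdgpu_ctx_fini() release function is enough and the amdgpu_ctx_do_release() wrapper can be dropped, so both kref_put() call sites pass amdgpu_ctx_fini directly. A minimal sketch of that pattern follows; it is illustrative only, and the names my_ctx, my_ctx_fini and my_ctx_put are made up rather than taken from the driver.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_ctx {
	struct kref refcount;
	/* per-context state would live here */
};

/* Release callback: kref_put() invokes this when the refcount hits zero. */
static void my_ctx_fini(struct kref *ref)
{
	struct my_ctx *ctx = container_of(ref, struct my_ctx, refcount);

	/* tear down everything the context owns, then free it */
	kfree(ctx);
}

static void my_ctx_put(struct my_ctx *ctx)
{
	/* No intermediate wrapper: the release callback does all the cleanup. */
	kref_put(&ctx->refcount, my_ctx_fini);
}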