@@ -127,23 +127,18 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link)
kref_put(&file_priv->ref, file_priv_release);
}
-static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
+bool ivpu_is_capable(struct ivpu_device *vdev, u32 capability)
{
- switch (args->index) {
+ switch (capability) {
case DRM_IVPU_CAP_METRIC_STREAMER:
- args->value = 1;
- break;
+ return true;
case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
- args->value = 1;
- break;
+ return true;
case DRM_IVPU_CAP_MANAGE_CMDQ:
- args->value = 1;
- break;
+ return vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW;
default:
- return -EINVAL;
+ return false;
}
-
- return 0;
}
static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
@@ -203,7 +198,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
args->value = vdev->hw->sku;
break;
case DRM_IVPU_PARAM_CAPABILITIES:
- ret = ivpu_get_capabilities(vdev, args);
+ args->value = ivpu_is_capable(vdev, args->index);
break;
default:
ret = -EINVAL;
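
These two hunks change an observable GET_PARAM detail: an unrecognized
capability index used to fail with -EINVAL, whereas it now succeeds with
value set to 0. For reference, a minimal userspace sketch querying the new
capability (not part of the patch; assumes the uapi names from
drm/ivpu_accel.h in this series and the usual /dev/accel/accel0 node):

    /* Query whether explicit command queue management is available. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <drm/ivpu_accel.h>

    int main(void)
    {
            struct drm_ivpu_param args = {
                    .param = DRM_IVPU_PARAM_CAPABILITIES,
                    .index = DRM_IVPU_CAP_MANAGE_CMDQ,
            };
            int fd = open("/dev/accel/accel0", O_RDWR);

            if (fd < 0 || ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &args))
                    return 1;
            printf("MANAGE_CMDQ: %s\n", args.value ? "supported" : "unsupported");
            return 0;
    }

With the rewritten helper, args.value is 1 only when the firmware runs in
HW scheduling mode, which is what gates the cmdq ioctls below.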
@@ -213,6 +213,7 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link);
int ivpu_boot(struct ivpu_device *vdev);
int ivpu_shutdown(struct ivpu_device *vdev);
void ivpu_prepare_for_reset(struct ivpu_device *vdev);
+bool ivpu_is_capable(struct ivpu_device *vdev, u32 capability);
static inline u8 ivpu_revision(struct ivpu_device *vdev)
{
@@ -123,7 +123,6 @@ static struct ivpu_cmdq *ivpu_cmdq_create(struct ivpu_file_priv *file_priv, u8 p
cmdq->priority = priority;
cmdq->is_legacy = is_legacy;
- cmdq->is_valid = true;
ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
&file_priv->cmdq_id_next, GFP_KERNEL);
@@ -307,7 +306,7 @@ static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u32
lockdep_assert_held(&file_priv->lock);
cmdq = xa_load(&file_priv->cmdq_xa, cmdq_id);
- if (!cmdq || !cmdq->is_valid) {
+ if (!cmdq) {
ivpu_warn_ratelimited(vdev, "Failed to find command queue with ID: %u\n", cmdq_id);
return NULL;
}
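
The dropped is_valid test works because the XArray itself now encodes
validity: a destroyed queue is expected to be erased from cmdq_xa (in
ivpu_cmdq_destroy()) rather than merely flagged. A self-contained sketch of
that pattern, with illustrative names not taken from the patch:

    #include <linux/slab.h>
    #include <linux/xarray.h>

    static DEFINE_XARRAY_ALLOC(queue_xa);  /* stand-in for cmdq_xa */

    static void queue_destroy(u32 id)
    {
            void *q = xa_erase(&queue_xa, id);  /* erased entry or NULL */

            kfree(q);
    }

    static void *queue_acquire(u32 id)
    {
            /* xa_load() returns NULL for an empty slot, so presence in
             * the array is the single source of truth for validity. */
            return xa_load(&queue_xa, id);
    }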
@@ -832,6 +831,9 @@ int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *
struct ivpu_file_priv *file_priv = file->driver_priv;
struct drm_ivpu_cmdq_submit *args = data;
+ if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+ return -ENODEV;
+
if (args->cmdq_id < IVPU_CMDQ_MIN_ID || args->cmdq_id > IVPU_CMDQ_MAX_ID)
return -EINVAL;
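
With this gate, firmware running in OS scheduling mode rejects all three
cmdq ioctls up front with -ENODEV instead of partially honoring them. A
hedged userspace fallback sketch (the ioctl macro and struct field names
are assumed to match this series' uapi header):

    #include <errno.h>
    #include <sys/ioctl.h>
    #include <drm/ivpu_accel.h>

    /* Returns an explicit queue ID, or 0 meaning "use the legacy
     * DRM_IOCTL_IVPU_SUBMIT path that manages queues implicitly". */
    static __u32 try_create_cmdq(int fd)
    {
            struct drm_ivpu_cmdq_create args = {
                    .priority = DRM_IVPU_JOB_PRIORITY_DEFAULT,
            };

            if (ioctl(fd, DRM_IOCTL_IVPU_CMDQ_CREATE, &args))
                    return 0;  /* ENODEV here means no MANAGE_CMDQ */
            return args.cmdq_id;
    }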
@@ -857,6 +859,9 @@ int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
struct drm_ivpu_cmdq_create *args = data;
struct ivpu_cmdq *cmdq;
+ if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+ return -ENODEV;
+
if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
return -EINVAL;
@@ -880,24 +885,17 @@ int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file
u32 cmdq_id;
int ret = 0;
+ if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+ return -ENODEV;
+
mutex_lock(&file_priv->lock);
cmdq = xa_load(&file_priv->cmdq_xa, args->cmdq_id);
- if (!cmdq || !cmdq->is_valid || cmdq->is_legacy) {
+ if (!cmdq || cmdq->is_legacy) {
ret = -ENOENT;
goto unlock;
}
- /*
- * There is no way to stop executing jobs per command queue
- * in OS scheduling mode, mark command queue as invalid instead
- * and it will be freed together with context release.
- */
- if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_OS) {
- cmdq->is_valid = false;
- goto unlock;
- }
-
cmdq_id = cmdq->id;
ivpu_cmdq_destroy(file_priv, cmdq);
ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id);
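
The deleted OS-mode branch is exactly what the new capability gate makes
unreachable: MANAGE_CMDQ is reported only under HW scheduling, so once the
ioctl body runs, the queue can always be torn down and its jobs aborted.
Stated as an assertion (illustrative only, not part of the change):

    /* Holds right after the ivpu_is_capable() check above. */
    drm_WARN_ON(&vdev->drm, vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW);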
@@ -31,7 +31,6 @@ struct ivpu_cmdq {
u32 id;
u32 db_id;
u8 priority;
- bool is_valid;
bool is_legacy;
};