kref_put(&file_priv->ref, file_priv_release);
 }
 
-static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
+bool ivpu_is_capable(struct ivpu_device *vdev, u32 capability)
 {
-       switch (args->index) {
+       switch (capability) {
        case DRM_IVPU_CAP_METRIC_STREAMER:
-               args->value = 1;
-               break;
+               return true;
        case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
-               args->value = 1;
-               break;
+               return true;
        case DRM_IVPU_CAP_MANAGE_CMDQ:
-               args->value = 1;
-               break;
+               return vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW; /* user cmdq management only with HW scheduler */
        default:
-               return -EINVAL;
+               return false;
        }
-
-       return 0;
 }
 
 static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                args->value = vdev->hw->sku;
                break;
        case DRM_IVPU_PARAM_CAPABILITIES:
-               ret = ivpu_get_capabilities(vdev, args);
+               args->value = ivpu_is_capable(vdev, args->index);
                break;
        default:
                ret = -EINVAL;
 
 
        cmdq->priority = priority;
        cmdq->is_legacy = is_legacy;
-       cmdq->is_valid = true;
 
        ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
                              &file_priv->cmdq_id_next, GFP_KERNEL);
        lockdep_assert_held(&file_priv->lock);
 
        cmdq = xa_load(&file_priv->cmdq_xa, cmdq_id);
-       if (!cmdq || !cmdq->is_valid) {
+       if (!cmdq) {
                ivpu_warn_ratelimited(vdev, "Failed to find command queue with ID: %u\n", cmdq_id);
                return NULL;
        }
        struct ivpu_file_priv *file_priv = file->driver_priv;
        struct drm_ivpu_cmdq_submit *args = data;
 
+       if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+               return -ENODEV;
+
        if (args->cmdq_id < IVPU_CMDQ_MIN_ID || args->cmdq_id > IVPU_CMDQ_MAX_ID)
                return -EINVAL;
 
        struct drm_ivpu_cmdq_create *args = data;
        struct ivpu_cmdq *cmdq;
 
+       if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+               return -ENODEV;
+
        if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
                return -EINVAL;
 
        u32 cmdq_id;
        int ret = 0;
 
+       if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+               return -ENODEV;
+
        mutex_lock(&file_priv->lock);
 
        cmdq = xa_load(&file_priv->cmdq_xa, args->cmdq_id);
-       if (!cmdq || !cmdq->is_valid || cmdq->is_legacy) {
+       if (!cmdq || cmdq->is_legacy) {
                ret = -ENOENT;
                goto unlock;
        }
 
-       /*
-        * There is no way to stop executing jobs per command queue
-        * in OS scheduling mode, mark command queue as invalid instead
-        * and it will be freed together with context release.
-        */
-       if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_OS) {
-               cmdq->is_valid = false;
-               goto unlock;
-       }
-
        cmdq_id = cmdq->id;
        ivpu_cmdq_destroy(file_priv, cmdq);
        ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id);