 	int r, queue_type;
 	uint64_t wptr_addr_off;
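+	/* nothing to hand to MES while scheduling is stopped or halted */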
+	if (!dqm->sched_running || dqm->sched_halt)
+		return 0;
+
 	if (!down_read_trylock(&adev->reset_domain->sem))
 		return -EIO;
 	int r;
 	struct mes_remove_queue_input queue_input;
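+	/* queue is already gone from MES while scheduling is stopped or halted */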
+	if (!dqm->sched_running || dqm->sched_halt)
+		return 0;
+
 	if (!down_read_trylock(&adev->reset_domain->sem))
 		return -EIO;
 	return r;
 }
-static int remove_all_queues_mes(struct device_queue_manager *dqm)
+static int remove_all_kfd_queues_mes(struct device_queue_manager *dqm)
 {
 	struct device_process_node *cur;
 	struct device *dev = dqm->dev->adev->dev;
 	return retval;
 }
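+/*
+ * Counterpart of remove_all_kfd_queues_mes(): walk all processes on the
+ * device and re-register each active KFD queue with the MES scheduler,
+ * e.g. when scheduling is unhalted.
+ */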
+static int add_all_kfd_queues_mes(struct device_queue_manager *dqm)
+{
+	struct device_process_node *cur;
+	struct device *dev = dqm->dev->adev->dev;
+	struct qcm_process_device *qpd;
+	struct queue *q;
+	int retval = 0;
+
+	list_for_each_entry(cur, &dqm->queues, list) {
+		qpd = cur->qpd;
+		list_for_each_entry(q, &qpd->queues_list, list) {
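+			/* only active queues are registered with MES */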
+			if (!q->properties.is_active)
+				continue;
+			retval = add_queue_mes(dqm, q, qpd);
+			if (retval) {
+				dev_err(dev, "%s: Failed to add queue %d for dev %d\n",
+					__func__,
+					q->properties.queue_id,
+					dqm->dev->id);
+				return retval;
+			}
+		}
+	}
+
+	return retval;
+}
+
 static int suspend_all_queues_mes(struct device_queue_manager *dqm)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
 						 KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
 						 USE_DEFAULT_GRACE_PERIOD, false);
 		else
-			ret = remove_all_queues_mes(dqm);
+			ret = remove_all_kfd_queues_mes(dqm);
 	}
 	dqm->sched_halt = true;
 	dqm_unlock(dqm);
 		ret = execute_queues_cpsch(dqm,
 					   KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
 					   0, USE_DEFAULT_GRACE_PERIOD);
+	else
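+		/* with MES, queues were removed on halt; register them again */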
+		ret = add_all_kfd_queues_mes(dqm);
+
 	dqm_unlock(dqm);
 	return ret;
 	if (!dqm->dev->kfd->shared_resources.enable_mes)
 		unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
 				   USE_DEFAULT_GRACE_PERIOD, false);
 	else
-		remove_all_queues_mes(dqm);
+		remove_all_kfd_queues_mes(dqm);
 	dqm->sched_running = false;