author    Alex Deucher    2019-03-28 16:15:49 +0100
committer Alex Deucher    2019-03-28 16:15:49 +0100
commit    20d059278ebef66d9a915893d67e5e76f92e633f (patch)
tree      76a74e8289f8852b83dcc7c229c89cf8c3936d17 /drivers/gpu/drm/amd/amdkfd
parent    Revert "drm/amdgpu: replace get_user_pages with HMM mirror helpers" (diff)
Revert "drm/amdkfd: avoid HMM change cause circular lock"
This reverts commit 8dd69e69f42397c9b17764a951c44480b340858e.

This depends on an HMM fix which is not upstream yet.

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
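The comment removed in the first hunk below spells out what the reverted patch was working around: with MQD initialization done under the dqm lock, the queue-create path takes lock(dqm) -> bo::reserve, while the HMM-related changes apparently introduced a path that acquires the two in the opposite order, hence the "circular lock". A minimal, self-contained C illustration of that kind of ABBA inversion follows; the pthread mutexes only stand in for the kernel locks, and every name here is a placeholder, not the driver's actual code.

/*
 * Stand-alone sketch (not kernel code) of the ABBA lock inversion the
 * reverted patch avoided.  "dqm" stands in for the device queue manager
 * lock, "bo" for the buffer-object reservation taken during MQD init.
 *
 * Thread A mimics create_queue_cpsch() after this revert:
 *   lock(dqm) -> reserve(bo)
 * Thread B mimics a hypothetical path that reserves the BO first and then
 * needs the dqm lock (the other half of the "circular" ordering implied by
 * the reverted commit title).  Trylock is used so the demo reports the
 * inversion instead of actually hanging.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t dqm = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t bo  = PTHREAD_MUTEX_INITIALIZER;

static void *queue_create_path(void *arg)
{
	pthread_mutex_lock(&dqm);         /* dqm_lock(dqm)                  */
	usleep(100 * 1000);               /* widen the race window          */
	if (pthread_mutex_trylock(&bo)) { /* init_mqd() -> bo::reserve      */
		printf("queue-create path: bo already reserved -> ABBA deadlock risk\n");
		pthread_mutex_unlock(&dqm);
		return NULL;
	}
	pthread_mutex_unlock(&bo);
	pthread_mutex_unlock(&dqm);
	return NULL;
}

static void *eviction_like_path(void *arg)
{
	pthread_mutex_lock(&bo);           /* bo::reserve first ...          */
	usleep(100 * 1000);
	if (pthread_mutex_trylock(&dqm)) { /* ... then needs the dqm lock    */
		printf("eviction-like path: dqm held -> ABBA deadlock risk\n");
		pthread_mutex_unlock(&bo);
		return NULL;
	}
	pthread_mutex_unlock(&dqm);
	pthread_mutex_unlock(&bo);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, queue_create_path, NULL);
	pthread_create(&b, NULL, eviction_like_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}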
Diffstat (limited to 'drivers/gpu/drm/amd/amdkfd')
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c  |  32
1 file changed, 15 insertions, 17 deletions
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 1d6b15788ebf..c6c9530e704e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1162,17 +1162,21 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
int retval;
struct mqd_manager *mqd_mgr;
+ retval = 0;
+
+ dqm_lock(dqm);
+
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("Can't create new usermode queue because %d queues were already created\n",
dqm->total_queue_count);
retval = -EPERM;
- goto out;
+ goto out_unlock;
}
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
retval = allocate_sdma_queue(dqm, &q->sdma_id);
if (retval)
- goto out;
+ goto out_unlock;
q->properties.sdma_queue_id =
q->sdma_id / get_num_sdma_engines(dqm);
q->properties.sdma_engine_id =
@@ -1183,9 +1187,6 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
if (retval)
goto out_deallocate_sdma_queue;
- /* Do init_mqd before dqm_lock(dqm) to avoid circular locking order:
- * lock(dqm) -> bo::reserve
- */
mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
@@ -1193,7 +1194,6 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
retval = -ENOMEM;
goto out_deallocate_doorbell;
}
-
/*
* Eviction state logic: we only mark active queues as evicted
* to avoid the overhead of restoring inactive queues later
@@ -1202,7 +1202,9 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
q->properties.is_evicted = (q->properties.queue_size > 0 &&
q->properties.queue_percent > 0 &&
q->properties.queue_address != 0);
+
dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
+
q->properties.tba_addr = qpd->tba_addr;
q->properties.tma_addr = qpd->tma_addr;
retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
@@ -1210,8 +1212,6 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
if (retval)
goto out_deallocate_doorbell;
- dqm_lock(dqm);
-
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
if (q->properties.is_active) {
@@ -1239,7 +1239,9 @@ out_deallocate_doorbell:
out_deallocate_sdma_queue:
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
deallocate_sdma_queue(dqm, q->sdma_id);
-out:
+out_unlock:
+ dqm_unlock(dqm);
+
return retval;
}
@@ -1402,6 +1404,8 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
qpd->reset_wavefronts = true;
}
+ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
+
/*
* Unconditionally decrement this counter, regardless of the queue's
* type
@@ -1412,9 +1416,6 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
dqm_unlock(dqm);
- /* Do uninit_mqd after dqm_unlock(dqm) to avoid circular locking */
- mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
-
return retval;
failed:
@@ -1636,11 +1637,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
qpd->reset_wavefronts = false;
}
- dqm_unlock(dqm);
-
- /* Lastly, free mqd resources.
- * Do uninit_mqd() after dqm_unlock to avoid circular locking.
- */
+ /* lastly, free mqd resources */
list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
@@ -1654,6 +1651,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
}
out:
+ dqm_unlock(dqm);
return retval;
}
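As the hunks above show, the revert puts create_queue_cpsch() back into the usual goto-unwind shape: dqm_lock() is taken once at the top and every failure funnels through labels that release only what was already allocated, ending at a single out_unlock that drops the lock. The same reasoning moves uninit_mqd() and the MQD cleanup in destroy_queue_cpsch() and process_termination_cpsch() back under the lock. A small self-contained sketch of that idiom follows, under the same caveats as the previous example: all names are invented for illustration, and a pthread mutex stands in for dqm_lock()/dqm_unlock().

/*
 * Stand-alone sketch (not kernel code) of the goto-unwind shape restored
 * in create_queue_cpsch(): lock up front, send every failure to labels
 * that undo only what was already set up, ending at a single unlock.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t dqm = PTHREAD_MUTEX_INITIALIZER;

static int alloc_resource(void **res)
{
	*res = malloc(16);
	return *res ? 0 : -ENOMEM;
}

/* fail_at selects an injected failure point so each error path can be seen. */
static int create_queue_like(int fail_at)
{
	void *sdma = NULL, *mqd = NULL;
	int retval = 0;

	pthread_mutex_lock(&dqm);                  /* dqm_lock(dqm)              */

	if (fail_at == 1) {                        /* e.g. queue limit reached   */
		retval = -EPERM;
		goto out_unlock;                   /* nothing allocated yet      */
	}

	retval = alloc_resource(&sdma);            /* allocate_sdma_queue()-ish  */
	if (retval)
		goto out_unlock;

	retval = fail_at == 2 ? -ENOMEM : alloc_resource(&mqd); /* init_mqd()-ish */
	if (retval)
		goto out_free_sdma;                /* undo only the earlier step */

	free(mqd);                                 /* demo only: real code keeps */
	free(sdma);                                /* these on the success path  */
	goto out_unlock;

out_free_sdma:
	free(sdma);
out_unlock:
	pthread_mutex_unlock(&dqm);                /* dqm_unlock(dqm)            */
	return retval;
}

int main(void)
{
	printf("success:        %d\n", create_queue_like(0));
	printf("limit exceeded: %d\n", create_queue_like(1));
	printf("mqd failure:    %d\n", create_queue_like(2));
	return 0;
}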