author		Christian König		2015-11-05 19:49:48 +0100
committer	Alex Deucher		2015-11-16 17:05:58 +0100
commit		e284022163716ecf11c37fd1057c35d689ef2c11 (patch)
tree		90269611e4360eab59eeb534fcf8725d4835ed42 /drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
parent		drm/amdgpu: cleanup scheduler fence get/put dance (diff)
drm/amdgpu: fix incorrect mutex usage v3
Before this patch the scheduler fence was created when we pushed the job into the queue, so we could only get the fence after pushing it.

The mutex was necessary to prevent the thread pushing the jobs to the hardware from running faster than the thread pushing the jobs into the queue. Otherwise the thread pushing jobs into the queue could have accessed possibly freed memory when it tried to get a reference to the fence.

So what you get in the end is thread A:

    mutex_lock(&job->lock);
    ...
    Kick off thread B.
    ...
    mutex_unlock(&job->lock);

And thread B:

    mutex_lock(&job->lock);
    ...
    mutex_unlock(&job->lock);
    kfree(job);

I'm actually not sure if I'm still up to date on this, but this usage pattern used to be disallowed with mutexes. See https://lwn.net/Articles/575460/ as well.

v2: remove unrelated changes, fix missing owner
v3: rebased, add more commit message

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
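To make the hazard concrete, here is a minimal userspace sketch of the same pattern. It uses pthreads and a hypothetical struct job rather than the kernel mutex API, and it deliberately reproduces the anti-pattern described above, where the lock lives inside the object the second thread frees:

    #include <pthread.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for struct amdgpu_job: the mutex is
     * embedded in the object that thread B frees. */
    struct job {
            pthread_mutex_t lock;
    };

    static void *thread_b(void *arg)
    {
            struct job *job = arg;

            pthread_mutex_lock(&job->lock);
            /* ... process the job ... */
            pthread_mutex_unlock(&job->lock);
            free(job);      /* frees the memory holding the mutex */
            return NULL;
    }

    int main(void)
    {
            struct job *job = malloc(sizeof(*job));
            pthread_t b;

            if (!job)
                    return 1;
            pthread_mutex_init(&job->lock, NULL);

            pthread_mutex_lock(&job->lock);
            pthread_create(&b, NULL, thread_b, job); /* kick off thread B */
            /* ... */
            pthread_mutex_unlock(&job->lock);
            /* Hazard: the unlock above may still touch job->lock after
             * thread B has been granted the mutex; if thread B has run
             * free(job) by then, that access is a use-after-free. */

            pthread_join(b, NULL);
            return 0;
    }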
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c	43
1 file changed, 24 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 2ae73d5232dd..44cf977ae4f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -845,8 +845,9 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		goto out;
 
 	if (amdgpu_enable_scheduler && parser.num_ibs) {
-		struct amdgpu_job *job;
 		struct amdgpu_ring * ring = parser.ibs->ring;
+		struct amd_sched_fence *fence;
+		struct amdgpu_job *job;
 
 		job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
 		if (!job) {
@@ -859,37 +860,41 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		job->adev = parser.adev;
 		job->ibs = parser.ibs;
 		job->num_ibs = parser.num_ibs;
-		job->base.owner = parser.filp;
-		mutex_init(&job->job_lock);
+		job->owner = parser.filp;
+		job->free_job = amdgpu_cs_free_job;
+
 		if (job->ibs[job->num_ibs - 1].user) {
 			job->uf = parser.uf;
 			job->ibs[job->num_ibs - 1].user = &job->uf;
 			parser.uf.bo = NULL;
 		}
 
-		parser.ibs = NULL;
-		parser.num_ibs = 0;
-
-		job->free_job = amdgpu_cs_free_job;
-		mutex_lock(&job->job_lock);
-		r = amd_sched_entity_push_job(&job->base);
-		if (r) {
-			mutex_unlock(&job->job_lock);
+		fence = amd_sched_fence_create(job->base.s_entity,
+					       parser.filp);
+		if (!fence) {
+			r = -ENOMEM;
 			amdgpu_cs_free_job(job);
 			kfree(job);
 			goto out;
 		}
-		cs->out.handle =
-			amdgpu_ctx_add_fence(parser.ctx, ring,
-					     &job->base.s_fence->base);
+		job->base.s_fence = fence;
+		fence_get(&fence->base);
+
+		cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring,
+						      &fence->base);
 		job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
 
-		list_sort(NULL, &parser.validated, cmp_size_smaller_first);
-		ttm_eu_fence_buffer_objects(&parser.ticket,
-					    &parser.validated,
-					    &job->base.s_fence->base);
+		parser.ibs = NULL;
+		parser.num_ibs = 0;
+
 		trace_amdgpu_cs_ioctl(job);
-		mutex_unlock(&job->job_lock);
+		amd_sched_entity_push_job(&job->base);
+
+		list_sort(NULL, &parser.validated, cmp_size_smaller_first);
+		ttm_eu_fence_buffer_objects(&parser.ticket, &parser.validated,
+					    &fence->base);
+		fence_put(&fence->base);
+
 		amdgpu_cs_parser_fini_late(&parser);
 		mutex_unlock(&vm->mutex);
 		return 0;
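For context on what replaces the job_lock: the fence is now created before the job is pushed, the submitting thread takes an extra reference with fence_get() so the fence stays valid however fast the scheduler consumes the job, and that reference is dropped with fence_put() once the buffer objects are fenced. A minimal userspace sketch of that reference-counting pattern, with hypothetical sketch_fence_* names standing in for the kernel's fence_get()/fence_put(), might look like this:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical refcounted object standing in for the scheduler
     * fence; this is not the kernel fence API. */
    struct sketch_fence {
            atomic_int refcount;
    };

    static struct sketch_fence *sketch_fence_create(void)
    {
            struct sketch_fence *f = malloc(sizeof(*f));

            if (f)
                    atomic_init(&f->refcount, 1); /* creator's reference */
            return f;
    }

    static void sketch_fence_get(struct sketch_fence *f)
    {
            atomic_fetch_add(&f->refcount, 1);
    }

    static void sketch_fence_put(struct sketch_fence *f)
    {
            /* Free when the last reference is dropped. */
            if (atomic_fetch_sub(&f->refcount, 1) == 1)
                    free(f);
    }

    int main(void)
    {
            struct sketch_fence *fence = sketch_fence_create();

            if (!fence)
                    return 1;

            /* Take a local reference BEFORE publishing the job: once
             * the job is pushed, the consumer may drop its reference
             * and free the fence at any time. */
            sketch_fence_get(fence);

            /* push_job(job) would happen here; the consumer thread
             * calls sketch_fence_put() when done. Simulated inline: */
            sketch_fence_put(fence);

            /* Still safe to use: we hold our own reference. */
            printf("fence still valid after push\n");

            sketch_fence_put(fence); /* drop the local reference */
            return 0;
    }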