From 573edb241b44162a1478cc74429f94df86e6e71d Mon Sep 17 00:00:00 2001
From: Christian König
Date: Tue, 7 Aug 2018 14:52:13 +0200
Subject: drm/scheduler: fix last_scheduled handling

Make sure we access last_scheduled only after checking that there are
no more jobs on the entity.

Signed-off-by: Christian König
Reviewed-by: Nayan Deshmukh
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 6be554499be9..f40a504e3d68 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -565,19 +565,20 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 			       struct drm_sched_entity *entity)
 {
 	struct drm_sched_rq *rq = entity->rq;
-	bool first, reschedule, idle;
+	bool first;
 
-	idle = entity->last_scheduled == NULL ||
-	       dma_fence_is_signaled(entity->last_scheduled);
 	first = spsc_queue_count(&entity->job_queue) == 0;
-	reschedule = idle && first && (entity->num_rq_list > 1);
+	if (first && (entity->num_rq_list > 1)) {
+		struct dma_fence *fence;
 
-	if (reschedule) {
-		rq = drm_sched_entity_get_free_sched(entity);
-		spin_lock(&entity->rq_lock);
-		drm_sched_rq_remove_entity(entity->rq, entity);
-		entity->rq = rq;
-		spin_unlock(&entity->rq_lock);
+		fence = READ_ONCE(entity->last_scheduled);
+		if (fence == NULL || dma_fence_is_signaled(fence)) {
+			rq = drm_sched_entity_get_free_sched(entity);
+			spin_lock(&entity->rq_lock);
+			drm_sched_rq_remove_entity(entity->rq, entity);
+			entity->rq = rq;
+			spin_unlock(&entity->rq_lock);
+		}
 	}
 
 	sched_job->sched = entity->rq->sched;
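
For context, the sketch below is a minimal stand-alone illustration of the
pattern the fixed code follows: do the cheap "is the queue empty / do we even
need to consider moving run queues" test first, then take a single snapshot of
the shared last-scheduled pointer and perform every check against that
snapshot. All names here (struct fence, last_scheduled, entity_is_idle(),
fence_is_signaled()) are simplified stand-ins invented for illustration, and
READ_ONCE() is approximated with a C11 relaxed atomic load; this is not the
kernel implementation.

/*
 * Stand-alone sketch (NOT kernel code) of the snapshot-then-check pattern.
 * A concurrent writer may replace last_scheduled at any time, so the
 * pointer is loaded exactly once and all later checks use the local copy.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for struct dma_fence. */
struct fence {
	atomic_bool signaled;
};

/* Shared slot, analogous to entity->last_scheduled. */
static _Atomic(struct fence *) last_scheduled;

/* Stand-in for dma_fence_is_signaled(). */
static bool fence_is_signaled(struct fence *f)
{
	return atomic_load_explicit(&f->signaled, memory_order_acquire);
}

/* Mirrors the fixed logic: one snapshot, then NULL and signal checks. */
static bool entity_is_idle(void)
{
	struct fence *fence =
		atomic_load_explicit(&last_scheduled, memory_order_relaxed);

	return fence == NULL || fence_is_signaled(fence);
}

int main(void)
{
	struct fence f = { .signaled = false };

	printf("idle (no fence):  %d\n", entity_is_idle());  /* 1 */
	atomic_store(&last_scheduled, &f);
	printf("idle (pending):   %d\n", entity_is_idle());  /* 0 */
	atomic_store_explicit(&f.signaled, true, memory_order_release);
	printf("idle (signaled):  %d\n", entity_is_idle());  /* 1 */
	return 0;
}

The design point mirrors the commit: the old code read entity->last_scheduled
twice (once for the NULL test, once inside dma_fence_is_signaled()), so the
two reads could observe two different fences; taking one READ_ONCE() snapshot,
and only after establishing that the entity has no queued jobs, closes that
window.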