Diffstat (limited to 'drivers/gpu/drm/amd/scheduler')
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h | 24
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c   | 68
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h   | 19
3 files changed, 96 insertions, 15 deletions
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index 283a0dc25e84..07129e6c31a9 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -1,4 +1,26 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
 #if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _GPU_SCHED_TRACE_H_
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 38cea6fb25a8..92ec663fdada 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -133,6 +133,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
         entity->rq = rq;
         entity->sched = sched;
 
+        spin_lock_init(&entity->rq_lock);
         spin_lock_init(&entity->queue_lock);
         r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
         if (r)
@@ -187,7 +188,7 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
         if (kfifo_is_empty(&entity->job_queue))
                 return false;
 
-        if (ACCESS_ONCE(entity->dependency))
+        if (READ_ONCE(entity->dependency))
                 return false;
 
         return true;
@@ -204,18 +205,38 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
 void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                            struct amd_sched_entity *entity)
 {
-        struct amd_sched_rq *rq = entity->rq;
+        int r;
 
         if (!amd_sched_entity_is_initialized(sched, entity))
                 return;
-
         /**
          * The client will not queue more IBs during this fini, consume existing
-         * queued IBs
+         * queued IBs or discard them on SIGKILL
         */
-        wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
+        if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
+                r = -ERESTARTSYS;
+        else
+                r = wait_event_killable(sched->job_scheduled,
+                                        amd_sched_entity_is_idle(entity));
+        amd_sched_entity_set_rq(entity, NULL);
+        if (r) {
+                struct amd_sched_job *job;
+
+                /* Park the kernel for a moment to make sure it isn't processing
+                 * our enity.
+                 */
+                kthread_park(sched->thread);
+                kthread_unpark(sched->thread);
+                while (kfifo_out(&entity->job_queue, &job, sizeof(job))) {
+                        struct amd_sched_fence *s_fence = job->s_fence;
+                        amd_sched_fence_scheduled(s_fence);
+                        dma_fence_set_error(&s_fence->finished, -ESRCH);
+                        amd_sched_fence_finished(s_fence);
+                        dma_fence_put(&s_fence->finished);
+                        sched->ops->free_job(job);
+                }
-        amd_sched_rq_remove_entity(rq, entity);
+        }
 
         kfifo_free(&entity->job_queue);
 }
@@ -236,6 +257,24 @@ static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb
         dma_fence_put(f);
 }
 
+void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
+                             struct amd_sched_rq *rq)
+{
+        if (entity->rq == rq)
+                return;
+
+        spin_lock(&entity->rq_lock);
+
+        if (entity->rq)
+                amd_sched_rq_remove_entity(entity->rq, entity);
+
+        entity->rq = rq;
+        if (rq)
+                amd_sched_rq_add_entity(rq, entity);
+
+        spin_unlock(&entity->rq_lock);
+}
+
 bool amd_sched_dependency_optimized(struct dma_fence* fence,
                                     struct amd_sched_entity *entity)
 {
@@ -293,7 +332,7 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
 }
 
 static struct amd_sched_job *
-amd_sched_entity_pop_job(struct amd_sched_entity *entity)
+amd_sched_entity_peek_job(struct amd_sched_entity *entity)
 {
         struct amd_gpu_scheduler *sched = entity->sched;
         struct amd_sched_job *sched_job;
@@ -333,14 +372,15 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
         /* first job wakes up scheduler */
         if (first) {
                 /* Add the entity to the run queue */
+                spin_lock(&entity->rq_lock);
                 amd_sched_rq_add_entity(entity->rq, entity);
+                spin_unlock(&entity->rq_lock);
                 amd_sched_wakeup(sched);
         }
         return added;
 }
 
-/* job_finish is called after hw fence signaled, and
- * the job had already been deleted from ring_mirror_list
+/* job_finish is called after hw fence signaled
  */
 static void amd_sched_job_finish(struct work_struct *work)
 {
@@ -366,6 +406,7 @@ static void amd_sched_job_finish(struct work_struct *work)
                         schedule_delayed_work(&next->work_tdr, sched->timeout);
         }
         spin_unlock(&sched->job_list_lock);
+        dma_fence_put(&s_job->s_fence->finished);
         sched->ops->free_job(s_job);
 }
 
@@ -381,6 +422,9 @@ static void amd_sched_job_begin(struct amd_sched_job *s_job)
 {
         struct amd_gpu_scheduler *sched = s_job->sched;
 
+        dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
+                               amd_sched_job_finish_cb);
+
         spin_lock(&sched->job_list_lock);
         list_add_tail(&s_job->node, &sched->ring_mirror_list);
         if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
@@ -473,8 +517,6 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
         struct amd_sched_entity *entity = sched_job->s_entity;
 
         trace_amd_sched_job(sched_job);
-        dma_fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
-                               amd_sched_job_finish_cb);
         wait_event(entity->sched->job_scheduled,
                    amd_sched_entity_in(sched_job));
 }
@@ -545,6 +587,7 @@ static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
                 container_of(cb, struct amd_sched_fence, cb);
         struct amd_gpu_scheduler *sched = s_fence->sched;
 
+        dma_fence_get(&s_fence->finished);
         atomic_dec(&sched->hw_rq_count);
         amd_sched_fence_finished(s_fence);
 
@@ -585,7 +628,7 @@ static int amd_sched_main(void *param)
                 if (!entity)
                         continue;
 
-                sched_job = amd_sched_entity_pop_job(entity);
+                sched_job = amd_sched_entity_peek_job(entity);
                 if (!sched_job)
                         continue;
 
@@ -596,6 +639,7 @@ static int amd_sched_main(void *param)
 
                 fence = sched->ops->run_job(sched_job);
                 amd_sched_fence_scheduled(s_fence);
+
                 if (fence) {
                         s_fence->parent = dma_fence_get(fence);
                         r = dma_fence_add_callback(fence, &s_fence->cb,
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index f9d8f28efd16..52c8e5447624 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -39,6 +39,7 @@ struct amd_sched_rq;
 struct amd_sched_entity {
         struct list_head                list;
         struct amd_sched_rq             *rq;
+        spinlock_t                      rq_lock;
         struct amd_gpu_scheduler        *sched;
 
         spinlock_t                      queue_lock;
@@ -115,9 +116,14 @@ struct amd_sched_backend_ops {
 enum amd_sched_priority {
         AMD_SCHED_PRIORITY_MIN,
-        AMD_SCHED_PRIORITY_NORMAL = AMD_SCHED_PRIORITY_MIN,
+        AMD_SCHED_PRIORITY_LOW = AMD_SCHED_PRIORITY_MIN,
+        AMD_SCHED_PRIORITY_NORMAL,
+        AMD_SCHED_PRIORITY_HIGH_SW,
+        AMD_SCHED_PRIORITY_HIGH_HW,
         AMD_SCHED_PRIORITY_KERNEL,
-        AMD_SCHED_PRIORITY_MAX
+        AMD_SCHED_PRIORITY_MAX,
+        AMD_SCHED_PRIORITY_INVALID = -1,
+        AMD_SCHED_PRIORITY_UNSET = -2
 };
 
 /**
@@ -150,6 +156,8 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                            struct amd_sched_entity *entity);
 void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
+                             struct amd_sched_rq *rq);
 int amd_sched_fence_slab_init(void);
 void amd_sched_fence_slab_fini(void);
@@ -167,4 +175,11 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
 bool amd_sched_dependency_optimized(struct dma_fence* fence,
                                     struct amd_sched_entity *entity);
 void amd_sched_job_kickout(struct amd_sched_job *s_job);
+
+static inline enum amd_sched_priority
+amd_sched_get_job_priority(struct amd_sched_job *job)
+{
+        return (job->s_entity->rq - job->sched->sched_rq);
+}
+
 #endif
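
Note: as a rough illustration of how the new amd_sched_entity_set_rq() helper added above is meant to be used, the sketch below (not part of the patch; the caller name is hypothetical) re-queues an entity on the run queue for a different priority. It assumes sched->sched_rq is an array indexed by enum amd_sched_priority, as the new amd_sched_get_job_priority() inline implies.

/* Hypothetical caller sketch, not part of the patch above. */
static void example_entity_set_priority(struct amd_gpu_scheduler *sched,
                                        struct amd_sched_entity *entity,
                                        enum amd_sched_priority priority)
{
        /* Reject INVALID/UNSET and out-of-range priorities. */
        if (priority < AMD_SCHED_PRIORITY_MIN || priority >= AMD_SCHED_PRIORITY_MAX)
                return;

        /* amd_sched_entity_set_rq() takes entity->rq_lock, removes the entity
         * from its current run queue and adds it to the new one; passing NULL
         * only removes it (as amd_sched_entity_fini() now does).
         */
        amd_sched_entity_set_rq(entity, &sched->sched_rq[priority]);
}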