author    | Dave Airlie | 2019-08-15 05:29:18 +0200
committer | Dave Airlie | 2019-08-15 05:29:18 +0200
commit    | 2f62c5d6ed0abae8e70bd83f4d41b9d63acde45a
tree      | 6101c03f38f09707f4d6a991cb7f2cba1b152109 (limited to drivers/gpu)
parent    | drm/nouveau: Only recalculate PBN/VCPI on mode/connector changes
parent    | drm/scheduler: use job count instead of peek
Merge tag 'drm-fixes-5.3-2019-08-14' of git://people.freedesktop.org/~agd5f/linux into drm-fixes
drm-fixes-5.3-2019-08-14:
amdgpu:
- Use kvzalloc for dc_state to avoid allocation
  failures in some cases (see the sketch after this list)
- Fix gfx9 soft recovery
scheduler:
- Fix a race condition when destroying entities
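For context on the first fix: struct dc_state is large, and kzalloc() needs physically contiguous pages, so the allocation can fail on a fragmented system even with plenty of free memory; kvzalloc() transparently falls back to vmalloc() in that case. A minimal sketch of the pattern, with a hypothetical big_state structure standing in for dc_state:

```c
#include <linux/mm.h>   /* kvzalloc(), kvmalloc(), kvfree() */
#include <linux/slab.h>

/* Hypothetical stand-in for a large, multi-page object like dc_state. */
struct big_state {
	char payload[512 * 1024];
};

static struct big_state *big_state_create(void)
{
	/*
	 * kvzalloc() first tries kmalloc(); if a physically contiguous
	 * allocation of this size is unavailable, it falls back to
	 * vzalloc(), which only needs virtually contiguous pages.
	 */
	return kvzalloc(sizeof(struct big_state), GFP_KERNEL);
}

static void big_state_destroy(struct big_state *s)
{
	/* kvfree() frees both kmalloc()- and vmalloc()-backed memory. */
	kvfree(s);
}
```

This is also why dc_copy_state() in the diff below trades kmemdup() for kvmalloc() plus an explicit memcpy(): kmemdup() always allocates with kmalloc() and so has the same contiguity limitation.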
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190815024919.3434-1-alexander.deucher@amd.com
Diffstat (limited to 'drivers/gpu')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c    |  2
-rw-r--r-- | drivers/gpu/drm/amd/display/dc/core/dc.c | 11
-rw-r--r-- | drivers/gpu/drm/scheduler/sched_entity.c |  4
3 files changed, 9 insertions, 8 deletions
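One note before the diff: the gfx9 soft-recovery hunk swaps a raw WREG32(mmSQ_CMD, value) for WREG32_SOC15(GC, 0, mmSQ_CMD, value). On SOC15-generation parts the register header constants such as mmSQ_CMD are offsets relative to a per-IP-block base discovered at driver init, so writing the bare constant lands on the wrong address. The sketch below paraphrases the idea; the macro bodies are simplified from the kernel's soc15_common.h, not verbatim, and assume adev (struct amdgpu_device *) is in scope as it is at the real call sites:

```c
/* Sketch only: simplified, not the literal kernel macro definitions. */

/* Legacy-style write: treats 'reg' as an absolute dword offset. */
#define WREG32(reg, v)	amdgpu_mm_wreg(adev, (reg), (v), 0)

/*
 * SOC15-style write: 'reg' (e.g. mmSQ_CMD) is relative to a per-IP,
 * per-instance base cached in adev->reg_offset at init time, indexed
 * by hardware IP (GC, MMHUB, ...) and the register's base-index group.
 */
#define WREG32_SOC15(ip, inst, reg, v) \
	WREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + (reg), (v))
```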
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 1cf639a51178..04b8ac4432c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -4869,7 +4869,7 @@ static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
 	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
 	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
 	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
-	WREG32(mmSQ_CMD, value);
+	WREG32_SOC15(GC, 0, mmSQ_CMD, value);
 }
 
 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index fa20201eef3a..cbc480a33376 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -23,6 +23,7 @@
  */
 
 #include <linux/slab.h>
+#include <linux/mm.h>
 
 #include "dm_services.h"
 
@@ -1171,8 +1172,8 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
 
 struct dc_state *dc_create_state(struct dc *dc)
 {
-	struct dc_state *context = kzalloc(sizeof(struct dc_state),
-					   GFP_KERNEL);
+	struct dc_state *context = kvzalloc(sizeof(struct dc_state),
+					    GFP_KERNEL);
 
 	if (!context)
 		return NULL;
@@ -1192,11 +1193,11 @@ struct dc_state *dc_create_state(struct dc *dc)
 struct dc_state *dc_copy_state(struct dc_state *src_ctx)
 {
 	int i, j;
-	struct dc_state *new_ctx = kmemdup(src_ctx,
-			sizeof(struct dc_state), GFP_KERNEL);
+	struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
 
 	if (!new_ctx)
 		return NULL;
+	memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
 
 	for (i = 0; i < MAX_PIPES; i++) {
 		struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
@@ -1230,7 +1231,7 @@ static void dc_state_free(struct kref *kref)
 {
 	struct dc_state *context = container_of(kref, struct dc_state, refcount);
 	dc_resource_state_destruct(context);
-	kfree(context);
+	kvfree(context);
 }
 
 void dc_release_state(struct dc_state *context)
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 35ddbec1375a..671c90f34ede 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -95,7 +95,7 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
 	rmb(); /* for list_empty to work without lock */
 
 	if (list_empty(&entity->list) ||
-	    spsc_queue_peek(&entity->job_queue) == NULL)
+	    spsc_queue_count(&entity->job_queue) == 0)
 		return true;
 
 	return false;
@@ -281,7 +281,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
 	/* Consumption of existing IBs wasn't completed. Forcefully
 	 * remove them here.
 	 */
-	if (spsc_queue_peek(&entity->job_queue)) {
+	if (spsc_queue_count(&entity->job_queue)) {
 		if (sched) {
 			/* Park the kernel for a moment to make sure it isn't processing
 			 * our enity.
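On the scheduler side, both hunks replace spsc_queue_peek() with spsc_queue_count(). The reasoning in the merged patch ("drm/scheduler: use job count instead of peek") is that queue->head belongs to the consumer side of the single-producer single-consumer queue, so peeking at it from the entity-teardown path races with the scheduler thread popping jobs; the atomic job counter can be read safely from either side. A condensed sketch, modeled on include/drm/spsc_queue.h:

```c
#include <linux/atomic.h>

/* Condensed model of the kernel's spsc_queue; field names follow
 * include/drm/spsc_queue.h, helpers reduced to the two in question. */
struct spsc_node {
	struct spsc_node *next;
};

struct spsc_queue {
	struct spsc_node *head;   /* consumer-owned: rewritten by pop */
	atomic_long_t tail;       /* producer-owned */
	atomic_t job_count;       /* updated on both push and pop */
};

/* Racy from the producer/teardown side: 'head' may be modified
 * concurrently by the consumer in spsc_queue_pop(). */
static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue)
{
	return queue->head;
}

/* Safe from either side: a single atomic read, no pointer chase. */
static inline int spsc_queue_count(struct spsc_queue *queue)
{
	return atomic_read(&queue->job_count);
}
```

With drm_sched_entity_is_idle() and drm_sched_entity_fini() keyed to the counter instead of the head pointer, entity destruction no longer reads state the scheduler thread may be rewriting at the same time.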