author		Alex Deucher	2017-06-07 17:05:26 +0200
committer	Alex Deucher	2017-06-07 21:43:28 +0200
commit		41f6a99abdb423691b24c12f0a0578755b2c1126 (patch)
tree		d3c6b25e148ef331cb29abe10fb2bd9a6f617dd4	/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
parent		drm/amdgpu: fix mec queue policy on single MEC asics (diff)
drm/amdgpu: move gfx_v*_0_compute_queue_acquire to common code
Same function was duplicated in all gfx IP files.

Reviewed-by: Alex Xie <AlexBin.Xie@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
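For reference, here is a minimal sketch of what the consolidated helper amdgpu_gfx_compute_queue_acquire() presumably looks like in the shared gfx code (e.g. amdgpu_gfx.c). The body mirrors the gfx_v8_0 version removed in the hunk below; the non-static linkage and the exact file placement are assumptions, not taken from this diff:

/* Sketch only: assumed shared implementation, mirroring the per-IP body
 * removed from gfx_v8_0.c below.
 */
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, pipe, mec;

	/* policy for amdgpu compute queue ownership */
	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		/* decompose the flat queue index into (mec, pipe, queue) */
		queue = i % adev->gfx.mec.num_queue_per_pipe;
		pipe = (i / adev->gfx.mec.num_queue_per_pipe)
			% adev->gfx.mec.num_pipe_per_mec;
		mec = (i / adev->gfx.mec.num_queue_per_pipe)
			/ adev->gfx.mec.num_pipe_per_mec;

		/* we've run out of HW */
		if (mec >= adev->gfx.mec.num_mec)
			break;

		if (adev->gfx.mec.num_mec > 1) {
			/* policy: amdgpu owns the first two queues of the first MEC */
			if (mec == 0 && queue < 2)
				set_bit(i, adev->gfx.mec.queue_bitmap);
		} else {
			/* policy: amdgpu owns all queues in the first pipe */
			if (mec == 0 && pipe == 0)
				set_bit(i, adev->gfx.mec.queue_bitmap);
		}
	}

	/* update the number of active compute rings */
	adev->gfx.num_compute_rings =
		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* If you hit this case and edited the policy, you probably just
	 * need to increase AMDGPU_MAX_COMPUTE_RINGS */
	if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
		adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
}

Each gfx IP file (gfx_v7_0.c, gfx_v8_0.c, gfx_v9_0.c) then simply calls this helper from its mec_init path instead of carrying its own copy, as the second hunk below shows for gfx_v8_0.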
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c	39
1 file changed, 1 insertion(+), 38 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 1370b3980791..cfa37f1ba06d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1448,43 +1448,6 @@ static void gfx_v8_0_kiq_free_ring(struct amdgpu_ring *ring,
 	amdgpu_ring_fini(ring);
 }
 
-static void gfx_v8_0_compute_queue_acquire(struct amdgpu_device *adev)
-{
-	int i, queue, pipe, mec;
-
-	/* policy for amdgpu compute queue ownership */
-	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
-		queue = i % adev->gfx.mec.num_queue_per_pipe;
-		pipe = (i / adev->gfx.mec.num_queue_per_pipe)
-			% adev->gfx.mec.num_pipe_per_mec;
-		mec = (i / adev->gfx.mec.num_queue_per_pipe)
-			/ adev->gfx.mec.num_pipe_per_mec;
-
-		/* we've run out of HW */
-		if (mec >= adev->gfx.mec.num_mec)
-			break;
-
-		if (adev->gfx.mec.num_mec > 1) {
-			/* policy: amdgpu owns the first two queues of the first MEC */
-			if (mec == 0 && queue < 2)
-				set_bit(i, adev->gfx.mec.queue_bitmap);
-		} else {
-			/* policy: amdgpu owns all queues in the first pipe */
-			if (mec == 0 && pipe == 0)
-				set_bit(i, adev->gfx.mec.queue_bitmap);
-		}
-	}
-
-	/* update the number of active compute rings */
-	adev->gfx.num_compute_rings =
-		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
-
-	/* If you hit this case and edited the policy, you probably just
-	 * need to increase AMDGPU_MAX_COMPUTE_RINGS */
-	if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
-		adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
-}
-
 static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
 {
 	int r;
@@ -1513,7 +1476,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
 	adev->gfx.mec.num_queue_per_pipe = 8;
 
 	/* take ownership of the relevant compute queues */
-	gfx_v8_0_compute_queue_acquire(adev);
+	amdgpu_gfx_compute_queue_acquire(adev);
 
 	mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;