summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
diff options
context:
space:
mode:
authorChristian König2016-01-21 10:19:11 +0100
committerAlex Deucher2016-02-10 20:16:58 +0100
commita9a78b329a3e31a977f8d8ef64b2f3a574899992 (patch)
treef37eadb3fd535501d79f793cf568c99e4932f652 /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
parentdrm/amdgpu: merge vm_grab_id and vm_fence v2 (diff)
downloadkernel-qcow2-linux-a9a78b329a3e31a977f8d8ef64b2f3a574899992.tar.gz
kernel-qcow2-linux-a9a78b329a3e31a977f8d8ef64b2f3a574899992.tar.xz
kernel-qcow2-linux-a9a78b329a3e31a977f8d8ef64b2f3a574899992.zip
drm/amdgpu: use a global LRU list for VMIDs
With the scheduler enabled, managing per-ring LRUs doesn't make much sense any more. Signed-off-by: Christian König <christian.koenig@amd.com> Reviewed-by: Chunming Zhou <david1.zhou@amd.com> Acked-by: Alex Deucher <alexander.deucher@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c88
1 files changed, 40 insertions, 48 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index d4718e1cd050..2dd73ca57221 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -161,79 +161,52 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct fence *fence)
{
- struct fence *best[AMDGPU_MAX_RINGS] = {};
struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
struct amdgpu_device *adev = ring->adev;
-
- unsigned choices[2] = {};
- unsigned i;
+ struct amdgpu_vm_manager_id *id;
+ int r;
mutex_lock(&adev->vm_manager.lock);
/* check if the id is still valid */
if (vm_id->id) {
- unsigned id = vm_id->id;
long owner;
- owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
+ id = &adev->vm_manager.ids[vm_id->id];
+ owner = atomic_long_read(&id->owner);
if (owner == (long)vm) {
+ list_move_tail(&id->list, &adev->vm_manager.ids_lru);
trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);
- fence_put(adev->vm_manager.ids[id].active);
- adev->vm_manager.ids[id].active = fence_get(fence);
- mutex_unlock(&adev->vm_manager.lock);
- return 0;
- }
- }
- /* we definately need to flush */
- vm_id->pd_gpu_addr = ~0ll;
+ fence_put(id->active);
+ id->active = fence_get(fence);
- /* skip over VMID 0, since it is the system VM */
- for (i = 1; i < adev->vm_manager.nvm; ++i) {
- struct fence *fence = adev->vm_manager.ids[i].active;
- struct amdgpu_ring *fring;
-
- if (fence == NULL) {
- /* found a free one */
- vm_id->id = i;
- trace_amdgpu_vm_grab_id(vm, i, ring->idx);
mutex_unlock(&adev->vm_manager.lock);
return 0;
}
-
- fring = amdgpu_ring_from_fence(fence);
- if (best[fring->idx] == NULL ||
- fence_is_later(best[fring->idx], fence)) {
- best[fring->idx] = fence;
- choices[fring == ring ? 0 : 1] = i;
- }
}
- for (i = 0; i < 2; ++i) {
- struct fence *active;
- int r;
-
- if (!choices[i])
- continue;
+ /* we definately need to flush */
+ vm_id->pd_gpu_addr = ~0ll;
- vm_id->id = choices[i];
- active = adev->vm_manager.ids[vm_id->id].active;
- r = amdgpu_sync_fence(ring->adev, sync, active);
+ id = list_first_entry(&adev->vm_manager.ids_lru,
+ struct amdgpu_vm_manager_id,
+ list);
+ list_move_tail(&id->list, &adev->vm_manager.ids_lru);
+ atomic_long_set(&id->owner, (long)vm);
- trace_amdgpu_vm_grab_id(vm, choices[i], ring->idx);
- atomic_long_set(&adev->vm_manager.ids[vm_id->id].owner, (long)vm);
+ vm_id->id = id - adev->vm_manager.ids;
+ trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);
- fence_put(adev->vm_manager.ids[vm_id->id].active);
- adev->vm_manager.ids[vm_id->id].active = fence_get(fence);
+ r = amdgpu_sync_fence(ring->adev, sync, id->active);
- mutex_unlock(&adev->vm_manager.lock);
- return r;
+ if (!r) {
+ fence_put(id->active);
+ id->active = fence_get(fence);
}
- /* should never happen */
- BUG();
mutex_unlock(&adev->vm_manager.lock);
- return -EINVAL;
+ return r;
}
/**
@@ -1359,6 +1332,25 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
/**
+ * amdgpu_vm_manager_init - init the VM manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the VM manager structures
+ */
+void amdgpu_vm_manager_init(struct amdgpu_device *adev)
+{
+ unsigned i;
+
+ INIT_LIST_HEAD(&adev->vm_manager.ids_lru);
+
+ /* skip over VMID 0, since it is the system VM */
+ for (i = 1; i < adev->vm_manager.num_ids; ++i)
+ list_add_tail(&adev->vm_manager.ids[i].list,
+ &adev->vm_manager.ids_lru);
+}
+
+/**
* amdgpu_vm_manager_fini - cleanup VM manager
*
* @adev: amdgpu_device pointer