Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c')
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c | 169
1 file changed, 83 insertions, 86 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index e3cb186c440b..277b6ec04e24 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -21,19 +21,17 @@
  *
  * Authors: Ben Skeggs
  */
+#include <subdev/mmu.h>
+#include <subdev/fb.h>
 
 #include <core/gpuobj.h>
-#include <core/mm.h>
-
-#include <subdev/fb.h>
-#include <subdev/mmu.h>
 
 void
-nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
+nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
 {
-	struct nouveau_vm *vm = vma->vm;
-	struct nouveau_mmu *mmu = vm->mmu;
-	struct nouveau_mm_node *r;
+	struct nvkm_vm *vm = vma->vm;
+	struct nvkm_mmu *mmu = vm->mmu;
+	struct nvkm_mm_node *r;
 	int big = vma->node->type != mmu->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
@@ -48,7 +46,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
 		u32 num  = r->length >> bits;
 
 		while (num) {
-			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+			struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
 
 			end = (pte + num);
 			if (unlikely(end >= max))
@@ -73,11 +71,11 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
 }
 
 static void
-nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
-			struct nouveau_mem *mem)
+nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
+		     struct nvkm_mem *mem)
 {
-	struct nouveau_vm *vm = vma->vm;
-	struct nouveau_mmu *mmu = vm->mmu;
+	struct nvkm_vm *vm = vma->vm;
+	struct nvkm_mmu *mmu = vm->mmu;
 	int big = vma->node->type != mmu->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
@@ -91,7 +89,7 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
 	struct scatterlist *sg;
 
 	for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
-		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+		struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
 		sglen = sg_dma_len(sg) >> PAGE_SHIFT;
 
 		end = pte + sglen;
@@ -131,11 +129,11 @@ finish:
 }
 
 static void
-nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
-		  struct nouveau_mem *mem)
+nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
+	       struct nvkm_mem *mem)
 {
-	struct nouveau_vm *vm = vma->vm;
-	struct nouveau_mmu *mmu = vm->mmu;
+	struct nvkm_vm *vm = vma->vm;
+	struct nvkm_mmu *mmu = vm->mmu;
 	dma_addr_t *list = mem->pages;
 	int big = vma->node->type != mmu->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
@@ -147,7 +145,7 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
 	u32 end, len;
 
 	while (num) {
-		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+		struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
 
 		end = (pte + num);
 		if (unlikely(end >= max))
@@ -169,22 +167,22 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
 }
 
 void
-nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
+nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node)
 {
 	if (node->sg)
-		nouveau_vm_map_sg_table(vma, 0, node->size << 12, node);
+		nvkm_vm_map_sg_table(vma, 0, node->size << 12, node);
 	else
 	if (node->pages)
-		nouveau_vm_map_sg(vma, 0, node->size << 12, node);
+		nvkm_vm_map_sg(vma, 0, node->size << 12, node);
 	else
-		nouveau_vm_map_at(vma, 0, node);
+		nvkm_vm_map_at(vma, 0, node);
 }
 
 void
-nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
+nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
 {
-	struct nouveau_vm *vm = vma->vm;
-	struct nouveau_mmu *mmu = vm->mmu;
+	struct nvkm_vm *vm = vma->vm;
+	struct nvkm_mmu *mmu = vm->mmu;
 	int big = vma->node->type != mmu->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
@@ -195,7 +193,7 @@ nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
 	u32 end, len;
 
 	while (num) {
-		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+		struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
 
 		end = (pte + num);
 		if (unlikely(end >= max))
@@ -216,18 +214,18 @@ nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
 }
 
 void
-nouveau_vm_unmap(struct nouveau_vma *vma)
+nvkm_vm_unmap(struct nvkm_vma *vma)
 {
-	nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
+	nvkm_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
 }
 
 static void
-nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
+nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
 {
-	struct nouveau_mmu *mmu = vm->mmu;
-	struct nouveau_vm_pgd *vpgd;
-	struct nouveau_vm_pgt *vpgt;
-	struct nouveau_gpuobj *pgt;
+	struct nvkm_mmu *mmu = vm->mmu;
+	struct nvkm_vm_pgd *vpgd;
+	struct nvkm_vm_pgt *vpgt;
+	struct nvkm_gpuobj *pgt;
 	u32 pde;
 
 	for (pde = fpde; pde <= lpde; pde++) {
@@ -243,18 +241,18 @@ nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
 		}
 
 		mutex_unlock(&nv_subdev(mmu)->mutex);
-		nouveau_gpuobj_ref(NULL, &pgt);
+		nvkm_gpuobj_ref(NULL, &pgt);
 		mutex_lock(&nv_subdev(mmu)->mutex);
 	}
 }
 
 static int
-nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
+nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
 {
-	struct nouveau_mmu *mmu = vm->mmu;
-	struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
-	struct nouveau_vm_pgd *vpgd;
-	struct nouveau_gpuobj *pgt;
+	struct nvkm_mmu *mmu = vm->mmu;
+	struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
+	struct nvkm_vm_pgd *vpgd;
+	struct nvkm_gpuobj *pgt;
 	int big = (type != mmu->spg_shift);
 	u32 pgt_size;
 	int ret;
@@ -263,8 +261,8 @@
 	pgt_size *= 8;
 
 	mutex_unlock(&nv_subdev(mmu)->mutex);
-	ret = nouveau_gpuobj_new(nv_object(vm->mmu), NULL, pgt_size, 0x1000,
-				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
+	ret = nvkm_gpuobj_new(nv_object(vm->mmu), NULL, pgt_size, 0x1000,
+			      NVOBJ_FLAG_ZERO_ALLOC, &pgt);
 	mutex_lock(&nv_subdev(mmu)->mutex);
 	if (unlikely(ret))
 		return ret;
@@ -272,7 +270,7 @@
 	/* someone beat us to filling the PDE while we didn't have the lock */
 	if (unlikely(vpgt->refcount[big]++)) {
 		mutex_unlock(&nv_subdev(mmu)->mutex);
-		nouveau_gpuobj_ref(NULL, &pgt);
+		nvkm_gpuobj_ref(NULL, &pgt);
 		mutex_lock(&nv_subdev(mmu)->mutex);
 		return 0;
 	}
@@ -286,18 +284,18 @@
 }
 
 int
-nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
-	       u32 access, struct nouveau_vma *vma)
+nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
+	    struct nvkm_vma *vma)
 {
-	struct nouveau_mmu *mmu = vm->mmu;
+	struct nvkm_mmu *mmu = vm->mmu;
 	u32 align = (1 << page_shift) >> 12;
 	u32 msize = size >> 12;
 	u32 fpde, lpde, pde;
 	int ret;
 
 	mutex_lock(&nv_subdev(mmu)->mutex);
-	ret = nouveau_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
-			      &vma->node);
+	ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
+			   &vma->node);
 	if (unlikely(ret != 0)) {
 		mutex_unlock(&nv_subdev(mmu)->mutex);
 		return ret;
@@ -307,7 +305,7 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
 	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->pgt_bits;
 
 	for (pde = fpde; pde <= lpde; pde++) {
-		struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
+		struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
 		int big = (vma->node->type != mmu->spg_shift);
 
 		if (likely(vpgt->refcount[big])) {
@@ -315,11 +313,11 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
 			continue;
 		}
 
-		ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
+		ret = nvkm_vm_map_pgt(vm, pde, vma->node->type);
 		if (ret) {
 			if (pde != fpde)
-				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
-			nouveau_mm_free(&vm->mm, &vma->node);
+				nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
+			nvkm_mm_free(&vm->mm, &vma->node);
 			mutex_unlock(&nv_subdev(mmu)->mutex);
 			return ret;
 		}
@@ -327,17 +325,17 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
 	mutex_unlock(&nv_subdev(mmu)->mutex);
 
 	vma->vm = NULL;
-	nouveau_vm_ref(vm, &vma->vm, NULL);
+	nvkm_vm_ref(vm, &vma->vm, NULL);
 	vma->offset = (u64)vma->node->offset << 12;
 	vma->access = access;
 	return 0;
 }
 
 void
-nouveau_vm_put(struct nouveau_vma *vma)
+nvkm_vm_put(struct nvkm_vma *vma)
 {
-	struct nouveau_vm *vm = vma->vm;
-	struct nouveau_mmu *mmu = vm->mmu;
+	struct nvkm_vm *vm = vma->vm;
+	struct nvkm_mmu *mmu = vm->mmu;
 	u32 fpde, lpde;
 
 	if (unlikely(vma->node == NULL))
@@ -346,18 +344,18 @@ nouveau_vm_put(struct nouveau_vma *vma)
 	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->pgt_bits;
 
 	mutex_lock(&nv_subdev(mmu)->mutex);
-	nouveau_vm_unmap_pgt(vm, vma->node->type != mmu->spg_shift, fpde, lpde);
-	nouveau_mm_free(&vm->mm, &vma->node);
+	nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->spg_shift, fpde, lpde);
+	nvkm_mm_free(&vm->mm, &vma->node);
 	mutex_unlock(&nv_subdev(mmu)->mutex);
 
-	nouveau_vm_ref(NULL, &vma->vm, NULL);
+	nvkm_vm_ref(NULL, &vma->vm, NULL);
 }
 
 int
-nouveau_vm_create(struct nouveau_mmu *mmu, u64 offset, u64 length,
-		  u64 mm_offset, u32 block, struct nouveau_vm **pvm)
+nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
+	       u32 block, struct nvkm_vm **pvm)
 {
-	struct nouveau_vm *vm;
+	struct nvkm_vm *vm;
 	u64 mm_length = (offset + length) - mm_offset;
 	int ret;
 
@@ -377,8 +375,8 @@ nouveau_vm_create(struct nouveau_mmu *mmu, u64 offset, u64 length,
 		return -ENOMEM;
 	}
 
-	ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
-			      block >> 12);
+	ret = nvkm_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
+			   block >> 12);
 	if (ret) {
 		vfree(vm->pgt);
 		kfree(vm);
@@ -391,18 +389,18 @@
 }
 
 int
-nouveau_vm_new(struct nouveau_device *device, u64 offset, u64 length,
-	       u64 mm_offset, struct nouveau_vm **pvm)
+nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
+	    struct nvkm_vm **pvm)
 {
-	struct nouveau_mmu *mmu = nouveau_mmu(device);
+	struct nvkm_mmu *mmu = nvkm_mmu(device);
 	return mmu->create(mmu, offset, length, mm_offset, pvm);
 }
 
 static int
-nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
+nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd)
 {
-	struct nouveau_mmu *mmu = vm->mmu;
-	struct nouveau_vm_pgd *vpgd;
+	struct nvkm_mmu *mmu = vm->mmu;
+	struct nvkm_vm_pgd *vpgd;
 	int i;
 
 	if (!pgd)
@@ -412,7 +410,7 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
 	if (!vpgd)
 		return -ENOMEM;
 
-	nouveau_gpuobj_ref(pgd, &vpgd->obj);
+	nvkm_gpuobj_ref(pgd, &vpgd->obj);
 
 	mutex_lock(&nv_subdev(mmu)->mutex);
 	for (i = vm->fpde; i <= vm->lpde; i++)
@@ -423,11 +421,11 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
 }
 
 static void
-nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
+nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
 {
-	struct nouveau_mmu *mmu = vm->mmu;
-	struct nouveau_vm_pgd *vpgd, *tmp;
-	struct nouveau_gpuobj *pgd = NULL;
+	struct nvkm_mmu *mmu = vm->mmu;
+	struct nvkm_vm_pgd *vpgd, *tmp;
+	struct nvkm_gpuobj *pgd = NULL;
 
 	if (!mpgd)
 		return;
@@ -443,30 +441,29 @@
 	}
 	mutex_unlock(&nv_subdev(mmu)->mutex);
 
-	nouveau_gpuobj_ref(NULL, &pgd);
+	nvkm_gpuobj_ref(NULL, &pgd);
 }
 
 static void
-nouveau_vm_del(struct kref *kref)
+nvkm_vm_del(struct kref *kref)
 {
-	struct nouveau_vm *vm = container_of(kref, typeof(*vm), refcount);
-	struct nouveau_vm_pgd *vpgd, *tmp;
+	struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount);
+	struct nvkm_vm_pgd *vpgd, *tmp;
 
 	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
-		nouveau_vm_unlink(vm, vpgd->obj);
+		nvkm_vm_unlink(vm, vpgd->obj);
 	}
 
-	nouveau_mm_fini(&vm->mm);
+	nvkm_mm_fini(&vm->mm);
 	vfree(vm->pgt);
 	kfree(vm);
}
 
 int
-nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
-	       struct nouveau_gpuobj *pgd)
+nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_gpuobj *pgd)
 {
 	if (ref) {
-		int ret = nouveau_vm_link(ref, pgd);
+		int ret = nvkm_vm_link(ref, pgd);
 		if (ret)
 			return ret;
 
@@ -474,8 +471,8 @@ nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
 	}
 
 	if (*ptr) {
-		nouveau_vm_unlink(*ptr, pgd);
-		kref_put(&(*ptr)->refcount, nouveau_vm_del);
+		nvkm_vm_unlink(*ptr, pgd);
+		kref_put(&(*ptr)->refcount, nvkm_vm_del);
 	}
 
 	*ptr = ref;
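
Note: the hunks above are a mechanical nouveau_* to nvkm_* rename of the MMU's VM interface; per the diffstat it carries no functional change, the small insertion/deletion delta coming only from re-wrapped function signatures. As a reading aid, a minimal caller sketch against the renamed API follows. Only the nvkm_vm_* signatures and struct names are taken from this patch; the example_* helper names, NV_MEM_ACCESS_RW, and the error handling are illustrative assumptions, not part of the commit.

/* Hypothetical caller of the renamed interface (not part of the patch). */
#include <subdev/mmu.h>

static int
example_vma_map(struct nvkm_vm *vm, struct nvkm_mem *node, u32 page_shift,
		struct nvkm_vma *vma)
{
	int ret;

	/* Reserve a VA range; on success nvkm_vm_get() fills vma->node and
	 * vma->offset and takes a VM reference via nvkm_vm_ref().  The
	 * NV_MEM_ACCESS_RW access flag is assumed for illustration. */
	ret = nvkm_vm_get(vm, (u64)node->size << 12, page_shift,
			  NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	/* Write the PTEs; nvkm_vm_map() picks the sg-table, page-list or
	 * contiguous path exactly as in the patch body above. */
	nvkm_vm_map(vma, node);
	return 0;
}

static void
example_vma_unmap(struct nvkm_vma *vma)
{
	nvkm_vm_unmap(vma);	/* clear the PTEs for the whole range */
	nvkm_vm_put(vma);	/* free the VA range, drop the VM reference */
}

Because nvkm_vm_get() already calls nvkm_vm_ref(vm, &vma->vm, NULL) and nvkm_vm_put() drops that reference through nvkm_vm_ref(NULL, &vma->vm, NULL), the sketch does not manage VM refcounts itself.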