author	Chris Wilson	2019-06-17 16:04:26 +0200
committer	Chris Wilson	2019-06-17 21:53:03 +0200
commit	32a1963148277407f7af0160da2b7c0527afc8a4 (patch)
tree	4d077fc715d46755017e9d84634906f98e2c6cf5
parent	drm/i915/dmc: protect against loading wrong firmware (diff)
drm/i915/gtt: Serialise both updates to PDE and our shadow
Currently, we perform a locked update of the shadow entry when allocating
a page directory entry such that if two clients are concurrently
allocating neighbouring ranges we only insert one new entry for the pair
of them. However, we also need to serialise both clients wrt the actual
entry in the HW table, or else we may allow one client, or even a third,
to proceed ahead of the HW write. My handwave before was that under the
_pathological_ condition we would see the scratch entry instead of the
expected entry, causing a temporary glitch. That starvation condition
will eventually show up in practice, so fix it.

The reason for the previous cheat was to avoid having to free the extra
allocation while under the spinlock. Now, we keep the extra entry
allocated until the end instead.

v2: Fix error paths for gen6

Fixes: 1d1b5490b91c ("drm/i915/gtt: Replace struct_mutex serialisation for allocation")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190617140426.7203-1-chris@chris-wilson.co.uk
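For readers outside i915, the locking pattern adopted here can be
summarised with a small self-contained sketch. The following userspace C
analogue is a minimal sketch, not driver code: a pthread spinlock stands
in for pd->lock, and struct table, alloc_range() and the NULL-as-scratch
convention are hypothetical names. The essentials mirror the patch:
allocate outside the lock, re-validate the shadow entry after retaking
the lock so that the HW write and the shadow update are serialised
together, and stash a losing allocation in a spare pointer so it is
reused on a later iteration and freed only once the lock is dropped.

/* Minimal sketch of the pattern above; userspace analogue, not i915 code. */
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

#define NENTRIES 8

struct table {
	pthread_spinlock_t lock;
	void *entry[NENTRIES];	/* NULL plays the role of the scratch entry */
};

static int alloc_range(struct table *t, unsigned int start, unsigned int len)
{
	void *spare = NULL;	/* surplus allocation, freed only at the end */
	unsigned int idx;
	int ret = 0;

	pthread_spin_lock(&t->lock);
	for (idx = start; idx < start + len; idx++) {
		if (!t->entry[idx]) {
			void *obj;

			pthread_spin_unlock(&t->lock);

			/* Allocate outside the lock, reusing a spare kept
			 * from a race lost on an earlier iteration. */
			obj = spare;
			spare = NULL;
			if (!obj)
				obj = malloc(64);
			if (!obj) {
				ret = -ENOMEM;
				goto out;
			}

			pthread_spin_lock(&t->lock);
			if (!t->entry[idx]) {
				/* Winner: install the entry; in the patch the
				 * HW PDE write happens here, under the same
				 * lock as the shadow update, so no client can
				 * observe the shadow ahead of the HW table. */
				t->entry[idx] = obj;
			} else {
				/* Loser: keep the allocation as a spare;
				 * freeing is deferred until after unlock. */
				spare = obj;
			}
		}
	}
	pthread_spin_unlock(&t->lock);
out:
	free(spare);	/* never free while holding the spinlock */
	return ret;
}

int main(void)
{
	struct table t = { .entry = { NULL } };

	pthread_spin_init(&t.lock, PTHREAD_PROCESS_PRIVATE);
	return alloc_range(&t, 0, NENTRIES) ? 1 : 0;
}

In the patch itself, the winning branch performs the HW write
(gen8_ppgtt_set_pde(), gen8_ppgtt_set_pdpe() or gen8_ppgtt_set_pml4e())
immediately before updating the shadow entry, and fetch_and_zero(&alloc)
implements the spare reuse sketched above.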
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_gtt.c	133
1 file changed, 73 insertions(+), 60 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index ec00fccd0c6f..8ab820145ea6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1346,81 +1346,86 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
struct i915_page_directory *pd,
u64 start, u64 length)
{
- struct i915_page_table *pt;
+ struct i915_page_table *pt, *alloc = NULL;
u64 from = start;
unsigned int pde;
+ int ret = 0;
spin_lock(&pd->lock);
gen8_for_each_pde(pt, pd, start, length, pde) {
const int count = gen8_pte_count(start, length);
if (pt == vm->scratch_pt) {
- struct i915_page_table *old;
-
spin_unlock(&pd->lock);
- pt = alloc_pt(vm);
- if (IS_ERR(pt))
+ pt = fetch_and_zero(&alloc);
+ if (!pt)
+ pt = alloc_pt(vm);
+ if (IS_ERR(pt)) {
+ ret = PTR_ERR(pt);
goto unwind;
+ }
if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
gen8_initialize_pt(vm, pt);
- old = cmpxchg(&pd->entry[pde], vm->scratch_pt, pt);
- if (old == vm->scratch_pt) {
+ spin_lock(&pd->lock);
+ if (pd->entry[pde] == vm->scratch_pt) {
gen8_ppgtt_set_pde(vm, pd, pt, pde);
+ pd->entry[pde] = pt;
atomic_inc(&pd->used);
} else {
- free_pt(vm, pt);
- pt = old;
+ alloc = pt;
+ pt = pd->entry[pde];
}
-
- spin_lock(&pd->lock);
}
atomic_add(count, &pt->used);
}
spin_unlock(&pd->lock);
-
- return 0;
+ goto out;
unwind:
gen8_ppgtt_clear_pd(vm, pd, from, start - from);
- return -ENOMEM;
+out:
+ if (alloc)
+ free_pt(vm, alloc);
+ return ret;
}
static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
struct i915_page_directory *pdp,
u64 start, u64 length)
{
- struct i915_page_directory *pd;
+ struct i915_page_directory *pd, *alloc = NULL;
u64 from = start;
unsigned int pdpe;
- int ret;
+ int ret = 0;
spin_lock(&pdp->lock);
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
if (pd == vm->scratch_pd) {
- struct i915_page_directory *old;
-
spin_unlock(&pdp->lock);
- pd = alloc_pd(vm);
- if (IS_ERR(pd))
+ pd = fetch_and_zero(&alloc);
+ if (!pd)
+ pd = alloc_pd(vm);
+ if (IS_ERR(pd)) {
+ ret = PTR_ERR(pd);
goto unwind;
+ }
init_pd_with_page(vm, pd, vm->scratch_pt);
- old = cmpxchg(&pdp->entry[pdpe], vm->scratch_pd, pd);
- if (old == vm->scratch_pd) {
+ spin_lock(&pdp->lock);
+ if (pdp->entry[pdpe] == vm->scratch_pd) {
gen8_ppgtt_set_pdpe(pdp, pd, pdpe);
+ pdp->entry[pdpe] = pd;
atomic_inc(&pdp->used);
} else {
- free_pd(vm, pd);
- pd = old;
+ alloc = pd;
+ pd = pdp->entry[pdpe];
}
-
- spin_lock(&pdp->lock);
}
atomic_inc(&pd->used);
spin_unlock(&pdp->lock);
@@ -1433,8 +1438,7 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
atomic_dec(&pd->used);
}
spin_unlock(&pdp->lock);
-
- return 0;
+ goto out;
unwind_pd:
spin_lock(&pdp->lock);
@@ -1447,7 +1451,10 @@ unwind_pd:
spin_unlock(&pdp->lock);
unwind:
gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
- return -ENOMEM;
+out:
+ if (alloc)
+ free_pd(vm, alloc);
+ return ret;
}
static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
@@ -1462,33 +1469,34 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
{
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_directory * const pml4 = ppgtt->pd;
- struct i915_page_directory *pdp;
+ struct i915_page_directory *pdp, *alloc = NULL;
u64 from = start;
+ int ret = 0;
u32 pml4e;
- int ret;
spin_lock(&pml4->lock);
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
if (pdp == vm->scratch_pdp) {
- struct i915_page_directory *old;
-
spin_unlock(&pml4->lock);
- pdp = alloc_pd(vm);
- if (IS_ERR(pdp))
+ pdp = fetch_and_zero(&alloc);
+ if (!pdp)
+ pdp = alloc_pd(vm);
+ if (IS_ERR(pdp)) {
+ ret = PTR_ERR(pdp);
goto unwind;
+ }
init_pd(vm, pdp, vm->scratch_pd);
- old = cmpxchg(&pml4->entry[pml4e], vm->scratch_pdp, pdp);
- if (old == vm->scratch_pdp) {
+ spin_lock(&pml4->lock);
+ if (pml4->entry[pml4e] == vm->scratch_pdp) {
gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
+ pml4->entry[pml4e] = pdp;
} else {
- free_pd(vm, pdp);
- pdp = old;
+ alloc = pdp;
+ pdp = pml4->entry[pml4e];
}
-
- spin_lock(&pml4->lock);
}
atomic_inc(&pdp->used);
spin_unlock(&pml4->lock);
@@ -1501,8 +1509,7 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
atomic_dec(&pdp->used);
}
spin_unlock(&pml4->lock);
-
- return 0;
+ goto out;
unwind_pdp:
spin_lock(&pml4->lock);
@@ -1513,7 +1520,10 @@ unwind_pdp:
spin_unlock(&pml4->lock);
unwind:
gen8_ppgtt_clear_4lvl(vm, from, start - from);
- return -ENOMEM;
+out:
+ if (alloc)
+ free_pd(vm, alloc);
+ return ret;
}
static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
@@ -1792,11 +1802,12 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
struct i915_page_directory * const pd = ppgtt->base.pd;
- struct i915_page_table *pt;
+ struct i915_page_table *pt, *alloc = NULL;
intel_wakeref_t wakeref;
u64 from = start;
unsigned int pde;
bool flush = false;
+ int ret = 0;
wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
@@ -1805,29 +1816,30 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
const unsigned int count = gen6_pte_count(start, length);
if (pt == vm->scratch_pt) {
- struct i915_page_table *old;
-
spin_unlock(&pd->lock);
- pt = alloc_pt(vm);
- if (IS_ERR(pt))
+ pt = fetch_and_zero(&alloc);
+ if (!pt)
+ pt = alloc_pt(vm);
+ if (IS_ERR(pt)) {
+ ret = PTR_ERR(pt);
goto unwind_out;
+ }
gen6_initialize_pt(vm, pt);
- old = cmpxchg(&pd->entry[pde], vm->scratch_pt, pt);
- if (old == vm->scratch_pt) {
+ spin_lock(&pd->lock);
+ if (pd->entry[pde] == vm->scratch_pt) {
+ pd->entry[pde] = pt;
if (i915_vma_is_bound(ppgtt->vma,
I915_VMA_GLOBAL_BIND)) {
gen6_write_pde(ppgtt, pde, pt);
flush = true;
}
} else {
- free_pt(vm, pt);
- pt = old;
+ alloc = pt;
+ pt = pd->entry[pde];
}
-
- spin_lock(&pd->lock);
}
atomic_add(count, &pt->used);
@@ -1839,14 +1851,15 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
gen6_ggtt_invalidate(vm->i915);
}
- intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
-
- return 0;
+ goto out;
unwind_out:
- intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
gen6_ppgtt_clear_range(vm, from, start - from);
- return -ENOMEM;
+out:
+ if (alloc)
+ free_pt(vm, alloc);
+ intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
+ return ret;
}
static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)