author     Michel Thierry    2015-04-08 13:13:29 +0200
committer  Daniel Vetter     2015-04-10 08:56:10 +0200
commit     7cb6d7ac635ab0c80607e6ffaf8682d48752523f (patch)
tree       9ee8a0409e0ab6808ac83cb7d8bf8c304709bd9b  /drivers/gpu/drm/i915/i915_gem_gtt.c
parent     drm/i915/gen8: pagetable allocation rework (diff)
drm/i915/gen8: Update pdp switch and point unused PDPs to scratch page
One important part of this patch is that we now write a scratch page directory into any unused PDP descriptors. This matters for two reasons: first, we are not allowed to use 0 or an invalid pointer, and second, we must wipe out any previous contents from the last context. The latter point only matters with full PPGTT. The former point only affects platforms with less than 4GB of memory.

v2: Updated commit message to point out that we must set unused PDPs to the scratch page.
v3: Unmap scratch_pd in gen8_ppgtt_free.
v4: Initialize scratch_pd. (Mika)

Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Michel Thierry <michel.thierry@intel.com> (v2+)
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
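The heart of the change is the PDP-switch loop in gen8_mm_switch(), shown in the diff below: all GEN8_LEGACY_PDPES slots are now written on every switch, and a slot with no allocated page directory is pointed at the scratch page directory instead of being left with whatever the previous context loaded. A minimal sketch of that fallback, using the identifiers introduced by this patch (the surrounding kernel types and gen8_write_pdp() are assumed, not reproduced here):

    /* Load all four PDP slots; slots without a page directory get the
     * scratch PD's DMA address so stale mappings from the previous
     * context cannot leak through. */
    static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
                              struct intel_engine_cs *ring)
    {
            int i, ret;

            for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
                    struct i915_page_directory *pd = ppgtt->pdp.page_directory[i];
                    /* NULL means this slot was never allocated for this context. */
                    dma_addr_t pd_daddr = pd ? pd->daddr : ppgtt->scratch_pd->daddr;

                    ret = gen8_write_pdp(ring, i, pd_daddr);
                    if (ret)
                            return ret;
            }

            return 0;
    }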
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 32
1 file changed, 21 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index aad3d6ffbabc..4b91cc18eef3 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -457,8 +457,9 @@ static struct i915_page_directory *alloc_pd_single(void)
}
/* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
- uint64_t val)
+static int gen8_write_pdp(struct intel_engine_cs *ring,
+ unsigned entry,
+ dma_addr_t addr)
{
int ret;
@@ -470,10 +471,10 @@ static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
- intel_ring_emit(ring, (u32)(val >> 32));
+ intel_ring_emit(ring, upper_32_bits(addr));
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
- intel_ring_emit(ring, (u32)(val));
+ intel_ring_emit(ring, lower_32_bits(addr));
intel_ring_advance(ring);
return 0;
@@ -484,12 +485,12 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
{
int i, ret;
- /* bit of a hack to find the actual last used pd */
- int used_pd = ppgtt->num_pd_entries / I915_PDES;
-
- for (i = used_pd - 1; i >= 0; i--) {
- dma_addr_t addr = ppgtt->pdp.page_directory[i]->daddr;
- ret = gen8_write_pdp(ring, i, addr);
+ for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
+ struct i915_page_directory *pd = ppgtt->pdp.page_directory[i];
+ dma_addr_t pd_daddr = pd ? pd->daddr : ppgtt->scratch_pd->daddr;
+ /* The page directory might be NULL, but we need to clear out
+ * whatever the previous context might have used. */
+ ret = gen8_write_pdp(ring, i, pd_daddr);
if (ret)
return ret;
}
@@ -664,6 +665,7 @@ static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
unmap_and_free_pd(ppgtt->pdp.page_directory[i]);
}
+ unmap_and_free_pd(ppgtt->scratch_pd);
unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
}
@@ -880,12 +882,20 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
if (IS_ERR(ppgtt->scratch_pt))
return PTR_ERR(ppgtt->scratch_pt);
+ ppgtt->scratch_pd = alloc_pd_single();
+ if (IS_ERR(ppgtt->scratch_pd))
+ return PTR_ERR(ppgtt->scratch_pd);
+
gen8_initialize_pt(&ppgtt->base, ppgtt->scratch_pt);
+ gen8_initialize_pd(&ppgtt->base, ppgtt->scratch_pd);
/* 1. Do all our allocations for page directories and page tables. */
ret = gen8_ppgtt_alloc(ppgtt, ppgtt->base.start, ppgtt->base.total);
- if (ret)
+ if (ret) {
+ unmap_and_free_pd(ppgtt->scratch_pd);
+ unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
return ret;
+ }
/*
* 2. Create DMA mappings for the page directories and page tables.