author    Chris Wilson    2016-12-18 16:37:20 +0100
committer Chris Wilson    2016-12-18 17:18:50 +0100
commit    e8a9c58fcd9a5081f71f57f370af1347ed6a310b (patch)
tree      61b8fe4eb3219a120f8e941ac4b0aa4a8f10fa69 /drivers/gpu/drm/i915/i915_gem_request.c
parent    drm/i915: Move intel_lrc_context_pin() to avoid the forward declaration (diff)
drm/i915: Unify active context tracking between legacy/execlists/guc
The requests conversion introduced a nasty bug where we could generate a new request in the middle of constructing a request if we needed to idle the system in order to evict space for a context. The request to idle would be executed (and waited upon) before the current one, creating a minor havoc in the seqno accounting, as we will consider the current request to already be completed (prior to deferred seqno assignment) but ring->last_retired_head would have been updated and still could allow us to overwrite the current request before execution.

We also employed two different mechanisms to track the active context until it was switched out. The legacy method allowed for waiting upon an active context (it could forcibly evict any vma, including the context's), but the execlists method took a step backwards by pinning the vma for the entire active lifespan of the context (the only way to evict was to idle the entire GPU, not individual contexts). However, to circumvent the tricky issue of locking (i.e. we cannot take struct_mutex at the time of i915_gem_request_submit(), where we would want to move the previous context onto the active tracker and unpin it), we take the execlists approach and keep the contexts pinned until retirement. The benefit of the execlists approach, more important for execlists than legacy, was the reduction in work in pinning the context for each request - as the context was kept pinned until idle, it could short circuit the pinning for all active contexts.

We introduce new engine vfuncs to pin and unpin the context respectively. The context is pinned at the start of the request, and only unpinned when the following request is retired (this ensures that the context is idle and coherent in main memory before we unpin it). We move the engine->last_context tracking into the retirement itself (rather than during request submission) in order to allow the submission to be reordered or unwound without undue difficulty.

And finally an ulterior motive for unifying context handling was to prepare for mock requests.

v2: Rename to last_retired_context, split out legacy_context tracking for MI_SET_CONTEXT.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161218153724.8439-3-chris@chris-wilson.co.uk
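The deferred-unpin scheme described above can be illustrated outside the kernel. The following standalone C sketch (simplified stand-in types and names, not the actual i915 structures) models the new engine->context_pin()/context_unpin() vfuncs and the last_retired_context hand-over: a context is pinned when its request is created and only unpinned once a later request on the same engine retires.

/*
 * Standalone sketch (not the kernel code) of the deferred-unpin scheme:
 * a context is pinned when its request is created and only unpinned when
 * a *later* request on the same engine retires, via per-engine pin/unpin
 * hooks and a last_retired_context pointer. All types are simplified
 * stand-ins for the i915 structures.
 */
#include <assert.h>
#include <stdio.h>

struct ctx {
	const char *name;
	int pin_count;
};

struct engine {
	/* vfuncs mirroring engine->context_pin / engine->context_unpin */
	int  (*context_pin)(struct engine *e, struct ctx *c);
	void (*context_unpin)(struct engine *e, struct ctx *c);
	struct ctx *last_retired_context;
};

struct request {
	struct engine *engine;
	struct ctx *ctx;
};

static int pin(struct engine *e, struct ctx *c)
{
	c->pin_count++;
	printf("pin   %s (count %d)\n", c->name, c->pin_count);
	return 0;
}

static void unpin(struct engine *e, struct ctx *c)
{
	assert(c->pin_count > 0);
	c->pin_count--;
	printf("unpin %s (count %d)\n", c->name, c->pin_count);
}

/* Request construction: the context is pinned up front. */
static int request_alloc(struct request *rq, struct engine *e, struct ctx *c)
{
	int ret = e->context_pin(e, c);
	if (ret)
		return ret;
	rq->engine = e;
	rq->ctx = c;
	return 0;
}

/*
 * Retirement: the previously retired context is only unpinned now, once
 * the next request (and hence the switch away from it) has completed.
 */
static void request_retire(struct request *rq)
{
	struct engine *e = rq->engine;

	if (e->last_retired_context)
		e->context_unpin(e, e->last_retired_context);
	e->last_retired_context = rq->ctx;
}

int main(void)
{
	struct engine e = { .context_pin = pin, .context_unpin = unpin };
	struct ctx a = { .name = "ctx-A" }, b = { .name = "ctx-B" };
	struct request r1, r2;

	request_alloc(&r1, &e, &a);
	request_alloc(&r2, &e, &b);
	request_retire(&r1);	/* ctx-A stays pinned: nothing retired after it yet */
	request_retire(&r2);	/* retiring the next request finally unpins ctx-A */
	return 0;
}

Running the sketch shows ctx-A being unpinned only when the request for ctx-B retires, mirroring the guarantee that the hardware has switched away from a context (and the context is coherent in memory) before its backing storage is released.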
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_request.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.c | 38
1 file changed, 25 insertions, 13 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index fcf22b0e2967..475d557f2301 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -206,6 +206,7 @@ void i915_gem_retire_noop(struct i915_gem_active *active,
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
+ struct intel_engine_cs *engine = request->engine;
struct i915_gem_active *active, *next;
lockdep_assert_held(&request->i915->drm.struct_mutex);
@@ -216,9 +217,9 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
trace_i915_gem_request_retire(request);
- spin_lock_irq(&request->engine->timeline->lock);
+ spin_lock_irq(&engine->timeline->lock);
list_del_init(&request->link);
- spin_unlock_irq(&request->engine->timeline->lock);
+ spin_unlock_irq(&engine->timeline->lock);
/* We know the GPU must have read the request to have
* sent us the seqno + interrupt, so use the position
@@ -266,17 +267,20 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
i915_gem_request_remove_from_client(request);
- if (request->previous_context) {
- if (i915.enable_execlists)
- intel_lr_context_unpin(request->previous_context,
- request->engine);
- }
-
/* Retirement decays the ban score as it is a sign of ctx progress */
if (request->ctx->ban_score > 0)
request->ctx->ban_score--;
- i915_gem_context_put(request->ctx);
+ /* The backing object for the context is done after switching to the
+ * *next* context. Therefore we cannot retire the previous context until
+ * the next context has already started running. However, since we
+ * cannot take the required locks at i915_gem_request_submit() we
+ * defer the unpinning of the active context to now, retirement of
+ * the subsequent request.
+ */
+ if (engine->last_retired_context)
+ engine->context_unpin(engine, engine->last_retired_context);
+ engine->last_retired_context = request->ctx;
dma_fence_signal(&request->fence);
@@ -524,10 +528,18 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
if (ret)
return ERR_PTR(ret);
- ret = reserve_global_seqno(dev_priv);
+ /* Pinning the contexts may generate requests in order to acquire
+ * GGTT space, so do this first before we reserve a seqno for
+ * ourselves.
+ */
+ ret = engine->context_pin(engine, ctx);
if (ret)
return ERR_PTR(ret);
+ ret = reserve_global_seqno(dev_priv);
+ if (ret)
+ goto err_unpin;
+
/* Move the oldest request to the slab-cache (if not in use!) */
req = list_first_entry_or_null(&engine->timeline->requests,
typeof(*req), link);
@@ -593,11 +605,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
INIT_LIST_HEAD(&req->active_list);
req->i915 = dev_priv;
req->engine = engine;
- req->ctx = i915_gem_context_get(ctx);
+ req->ctx = ctx;
/* No zalloc, must clear what we need by hand */
req->global_seqno = 0;
- req->previous_context = NULL;
req->file_priv = NULL;
req->batch = NULL;
@@ -633,10 +644,11 @@ err_ctx:
GEM_BUG_ON(!list_empty(&req->priotree.signalers_list));
GEM_BUG_ON(!list_empty(&req->priotree.waiters_list));
- i915_gem_context_put(ctx);
kmem_cache_free(dev_priv->requests, req);
err_unreserve:
dev_priv->gt.active_requests--;
+err_unpin:
+ engine->context_unpin(engine, ctx);
return ERR_PTR(ret);
}
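The ordering in the reworked i915_gem_request_alloc() is the key detail of this hunk: pinning the context may itself generate (and wait upon) requests in order to acquire GGTT space, so it must happen before reserve_global_seqno(), and any later failure has to unwind in reverse order through the err_unreserve/err_unpin labels. Below is a minimal standalone sketch of that acquire-in-order, release-in-reverse goto pattern, using hypothetical helper names rather than the kernel functions.

/*
 * Standalone sketch (hypothetical names) of the acquire/unwind ordering
 * used above: pin the context first (it may generate requests of its own),
 * then reserve a seqno, and on any later failure release the resources in
 * reverse order via goto labels.
 */
#include <errno.h>
#include <stdio.h>

static int context_pin(void)      { puts("pin context");     return 0; }
static void context_unpin(void)   { puts("unpin context"); }
static int reserve_seqno(void)    { puts("reserve seqno");   return 0; }
static void unreserve_seqno(void) { puts("unreserve seqno"); }
static int alloc_request(void)    { puts("alloc request");   return -ENOMEM; /* simulated failure */ }

static int request_alloc(void)
{
	int ret;

	ret = context_pin();		/* may itself generate requests */
	if (ret)
		return ret;

	ret = reserve_seqno();		/* only after the pin succeeded */
	if (ret)
		goto err_unpin;

	ret = alloc_request();
	if (ret)
		goto err_unreserve;

	return 0;

err_unreserve:
	unreserve_seqno();
err_unpin:
	context_unpin();
	return ret;
}

int main(void)
{
	printf("request_alloc() = %d\n", request_alloc());
	return 0;
}

With the simulated allocation failure, the sketch prints the unwind in reverse order of acquisition, which is exactly what the new err_unreserve/err_unpin labels in the patch provide.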