author     Chris Wilson  2018-05-15 16:31:49 +0200
committer  Chris Wilson  2018-05-16 08:32:10 +0200
commit     f75f91574617a3c6fbc821c6b156f5777a59d0ed (patch)
tree       71757e711a9485b25e376c95a0249af06a9120d3 /drivers/gpu/drm/i915/i915_gem_context.c
parent     drm/i915: Update DRIVER_DATE to 20180514 (diff)
drm/i915: Shrink search list for active timelines
When switching to the kernel context, we force the switch to occur
after all currently active requests (so that we know the GPU won't
switch immediately away and the kernel context remains current as we
work). To do so we have to inspect all the timelines and add a fence
from the active work to queue our switch afterwards. We can use the
tracked set of active rings to shrink our search for active timelines.

v2: Use a local to shrink the list_for_each_entry()

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180515143149.4795-1-chris@chris-wilson.co.uk
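To illustrate the shape of the optimisation, here is a minimal userspace
sketch in C: instead of scanning every timeline ever created, only the
rings currently tracked as active are walked. All names and types below
(struct ring, engine_is_busy, the two-engine array) are simplified
stand-ins invented for this example, not the kernel's real API; the
actual change is in the diff that follows.

    #include <stdio.h>

    /* Simplified stand-ins: not the kernel's real types. */
    struct timeline {
            int busy_on_engine[2];    /* nonzero: work still queued on that engine */
    };

    struct ring {
            struct timeline *timeline;
            struct ring *next_active; /* models the gt.active_rings linkage */
    };

    /* Model of last_request_on_engine(): does @tl still have work on @engine? */
    static int last_request_on_engine(const struct timeline *tl, int engine)
    {
            return tl->busy_on_engine[engine];
    }

    /*
     * Model of the new loop in engine_has_idle_kernel_context(): walk only
     * the active rings, not every timeline. Caching the list head in a
     * local mirrors the v2 note ("use a local to shrink the
     * list_for_each_entry()").
     */
    static int engine_is_busy(struct ring *active_rings, int engine)
    {
            struct ring * const head = active_rings;

            for (struct ring *r = head; r; r = r->next_active) {
                    if (last_request_on_engine(r->timeline, engine))
                            return 1;
            }
            return 0;
    }

    int main(void)
    {
            struct timeline tl = { .busy_on_engine = { 1, 0 } };
            struct ring ring0 = { .timeline = &tl, .next_active = NULL };

            printf("engine 0 busy: %d\n", engine_is_busy(&ring0, 0)); /* prints 1 */
            printf("engine 1 busy: %d\n", engine_is_busy(&ring0, 1)); /* prints 0 */
            return 0;
    }

The win is that the active-rings list only contains contexts with
outstanding work, so the search cost scales with current activity rather
than with the total number of contexts created over the device's lifetime.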
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_context.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 25 ++++++++++++++-----------
1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 33f8a4b3c981..4bf18b5c6f1d 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -596,41 +596,44 @@ last_request_on_engine(struct i915_timeline *timeline,
 static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
 {
-	struct i915_timeline *timeline;
+	struct list_head * const active_rings = &engine->i915->gt.active_rings;
+	struct intel_ring *ring;
 
-	list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
-		if (last_request_on_engine(timeline, engine))
+	lockdep_assert_held(&engine->i915->drm.struct_mutex);
+
+	list_for_each_entry(ring, active_rings, active_link) {
+		if (last_request_on_engine(ring->timeline, engine))
 			return false;
 	}
 
 	return intel_engine_has_kernel_context(engine);
 }
 
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
-	struct i915_timeline *timeline;
 	enum intel_engine_id id;
 
-	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+	lockdep_assert_held(&i915->drm.struct_mutex);
 
-	i915_retire_requests(dev_priv);
+	i915_retire_requests(i915);
 
-	for_each_engine(engine, dev_priv, id) {
+	for_each_engine(engine, i915, id) {
+		struct intel_ring *ring;
 		struct i915_request *rq;
 
 		if (engine_has_idle_kernel_context(engine))
 			continue;
 
-		rq = i915_request_alloc(engine, dev_priv->kernel_context);
+		rq = i915_request_alloc(engine, i915->kernel_context);
 		if (IS_ERR(rq))
 			return PTR_ERR(rq);
 
 		/* Queue this switch after all other activity */
-		list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
+		list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
 			struct i915_request *prev;
 
-			prev = last_request_on_engine(timeline, engine);
+			prev = last_request_on_engine(ring->timeline, engine);
 			if (prev)
 				i915_sw_fence_await_sw_fence_gfp(&rq->submit,
 								 &prev->submit,